| repo_name (string, 5-100 chars) | path (string, 4-294 chars) | copies (990 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes) |
|---|---|---|---|---|---|
meghana1995/sympy | doc/ext/sympylive.py | 104 | 1289 | """
sympylive
~~~~~~~~~
Allow `SymPy Live <http://live.sympy.org/>`_ to be used for interactive
evaluation of SymPy's code examples.
:copyright: Copyright 2014 by the SymPy Development Team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def builder_inited(app):
if not app.config.sympylive_url:
raise ExtensionError('sympylive_url config value must be set'
' for the sympylive extension to work')
app.add_javascript(app.config.sympylive_url + '/static/utilities.js')
app.add_javascript(app.config.sympylive_url + '/static/external/classy.js')
app.add_stylesheet(app.config.sympylive_url + '/static/live-core.css')
app.add_stylesheet(app.config.sympylive_url +
'/static/live-autocomplete.css')
app.add_stylesheet(app.config.sympylive_url + '/static/live-sphinx.css')
app.add_javascript(app.config.sympylive_url + '/static/live-core.js')
app.add_javascript(app.config.sympylive_url +
'/static/live-autocomplete.js')
app.add_javascript(app.config.sympylive_url + '/static/live-sphinx.js')
def setup(app):
app.add_config_value('sympylive_url', 'http://live.sympy.org', False)
app.connect('builder-inited', builder_inited)
| bsd-3-clause |
jakirkham/bokeh | bokeh/application/tests/test_application.py | 3 | 6422 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import logging
# External imports
import mock
# Bokeh imports
from bokeh.application.handlers import CodeHandler, FunctionHandler
from bokeh.core.properties import Int, Instance
from bokeh.document import Document
from bokeh.model import Model
from bokeh.plotting import figure
from bokeh.util.logconfig import basicConfig
# Module under test
import bokeh.application.application as baa
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# needed for caplog tests to function
basicConfig()
class AnotherModelInTestApplication(Model):
baar = Int(1)
class SomeModelInTestApplication(Model):
foo = Int(2)
child = Instance(Model)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_Application(object):
# Public methods ----------------------------------------------------------
def test_empty(self):
a = baa.Application()
doc = a.create_document()
assert not doc.roots
def test_invalid_kwarg(self):
with pytest.raises(TypeError):
baa.Application(junk="foo")
def test_one_handler(self):
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
handler = FunctionHandler(add_roots)
a.add(handler)
doc = a.create_document()
assert len(doc.roots) == 2
def test_two_handlers(self):
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
a.add(handler)
handler2 = FunctionHandler(add_one_root)
a.add(handler2)
doc = a.create_document()
assert len(doc.roots) == 3
def test_failed_handler(self, caplog):
a = baa.Application()
handler = CodeHandler(filename="junk", source="bad(")
a.add(handler)
d = Document()
with caplog.at_level(logging.ERROR):
assert len(caplog.records) == 0
a.initialize_document(d)
assert len(caplog.records) == 1
def test_no_static_path(self):
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
a.add(handler)
handler2 = FunctionHandler(add_one_root)
a.add(handler2)
assert a.static_path is None
def test_static_path(self):
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
handler._static = "foo"
a.add(handler)
handler2 = FunctionHandler(add_one_root)
a.add(handler2)
assert a.static_path == "foo"
def test_excess_static_path(self):
a = baa.Application()
def add_roots(doc):
doc.add_root(AnotherModelInTestApplication())
doc.add_root(SomeModelInTestApplication())
def add_one_root(doc):
doc.add_root(AnotherModelInTestApplication())
handler = FunctionHandler(add_roots)
handler._static = "foo"
a.add(handler)
handler2 = FunctionHandler(add_one_root)
handler2._static = "bar"
with pytest.raises(RuntimeError) as e:
a.add(handler2)
assert "More than one static path" in str(e)
@mock.patch('bokeh.document.document.check_integrity')
def test_application_validates_document_by_default(self, check_integrity):
a = baa.Application()
d = Document()
d.add_root(figure())
a.initialize_document(d)
assert check_integrity.called
@mock.patch('bokeh.document.document.check_integrity')
def test_application_doesnt_validate_document_due_to_env_var(self, check_integrity, monkeypatch):
monkeypatch.setenv("BOKEH_VALIDATE_DOC", "false")
a = baa.Application()
d = Document()
d.add_root(figure())
a.initialize_document(d)
assert not check_integrity.called
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Test_ServerContext(object):
# Public methods ----------------------------------------------------------
def test_abstract(self):
with pytest.raises(TypeError):
baa.ServerContext()
class Test_SessionContext(object):
def test_abstract(self):
with pytest.raises(TypeError):
baa.SessionContext()
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
lukeiwanski/tensorflow | tensorflow/examples/get_started/regression/linear_regression_categorical.py | 52 | 4228 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear regression with categorical features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import imports85 # pylint: disable=g-bad-import-order
STEPS = 1000
PRICE_NORM_FACTOR = 1000
def main(argv):
"""Builds, trains, and evaluates the model."""
assert len(argv) == 1
(train, test) = imports85.dataset()
# Switch the labels to units of thousands for better convergence.
def normalize_price(features, labels):
return features, labels / PRICE_NORM_FACTOR
train = train.map(normalize_price)
test = test.map(normalize_price)
# Build the training input_fn.
def input_train():
return (
# Shuffling with a buffer larger than the data set ensures
# that the examples are well mixed.
train.shuffle(1000).batch(128)
# Repeat forever
.repeat().make_one_shot_iterator().get_next())
# Build the validation input_fn.
def input_test():
return (test.shuffle(1000).batch(128)
.make_one_shot_iterator().get_next())
# The following code demonstrates two of the ways that `feature_columns` can
# be used to build a model with categorical inputs.
# The first way assigns a unique weight to each category. To do this, you must
# specify the category's vocabulary (values outside this specification will
# receive a weight of zero).
# Alternatively, you can define the vocabulary in a file (by calling
# `categorical_column_with_vocabulary_file`) or as a range of positive
# integers (by calling `categorical_column_with_identity`).
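# For illustration only (the file name and second key below are hypothetical,
# not columns wired up by this example):
#
#   tf.feature_column.categorical_column_with_vocabulary_file(
#       key="body-style", vocabulary_file="body_styles.txt", vocabulary_size=5)
#   tf.feature_column.categorical_column_with_identity(
#       key="some-integer-category", num_buckets=10)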
body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
body_style_column = tf.feature_column.categorical_column_with_vocabulary_list(
key="body-style", vocabulary_list=body_style_vocab)
# The second way, appropriate for an unspecified vocabulary, is to create a
# hashed column. It will create a fixed length list of weights, and
# automatically assign each input category to a weight. Due to the
# pseudo-randomness of the process, some weights may be shared between
# categories, while others will remain unused.
make_column = tf.feature_column.categorical_column_with_hash_bucket(
key="make", hash_bucket_size=50)
feature_columns = [
# This model uses the same two numeric features as `linear_regressor.py`
tf.feature_column.numeric_column(key="curb-weight"),
tf.feature_column.numeric_column(key="highway-mpg"),
# This model adds two categorical columns that will adjust the price based
# on "make" and "body-style".
body_style_column,
make_column,
]
# Build the Estimator.
model = tf.estimator.LinearRegressor(feature_columns=feature_columns)
# Train the model.
# By default, the Estimators log output every 100 steps.
model.train(input_fn=input_train, steps=STEPS)
# Evaluate how the model performs on data it has not yet seen.
eval_result = model.evaluate(input_fn=input_test)
# The evaluation returns a Python dictionary. The "average_loss" key holds the
# Mean Squared Error (MSE).
average_loss = eval_result["average_loss"]
# Convert MSE to Root Mean Square Error (RMSE).
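# average_loss is the MSE in (thousands of dollars)^2, because the labels were
# divided by PRICE_NORM_FACTOR above; sqrt(MSE) * PRICE_NORM_FACTOR therefore
# yields the RMSE in dollars.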
print("\n" + 80 * "*")
print("\nRMS error for the test set: ${:.0f}"
.format(PRICE_NORM_FACTOR * average_loss**0.5))
print()
if __name__ == "__main__":
# The Estimator periodically generates "INFO" logs; make these logs visible.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main=main)
| apache-2.0 |
adrienbrault/home-assistant | homeassistant/components/cast/helpers.py | 5 | 6729 | """Helpers to deal with Cast devices."""
from __future__ import annotations
from typing import Optional
import attr
from pychromecast import dial
from pychromecast.const import CAST_MANUFACTURERS
@attr.s(slots=True, frozen=True)
class ChromecastInfo:
"""Class to hold all data about a chromecast for creating connections.
This also has the same attributes as the mDNS fields by zeroconf.
"""
services: set | None = attr.ib()
uuid: str | None = attr.ib(
converter=attr.converters.optional(str), default=None
) # always convert UUID to string if not None
_manufacturer = attr.ib(type=Optional[str], default=None)
model_name: str = attr.ib(default="")
friendly_name: str | None = attr.ib(default=None)
is_audio_group = attr.ib(type=Optional[bool], default=False)
is_dynamic_group = attr.ib(type=Optional[bool], default=None)
@property
def is_information_complete(self) -> bool:
"""Return if all information is filled out."""
want_dynamic_group = self.is_audio_group
have_dynamic_group = self.is_dynamic_group is not None
have_all_except_dynamic_group = all(
attr.astuple(
self,
filter=attr.filters.exclude(
attr.fields(ChromecastInfo).is_dynamic_group
),
)
)
return have_all_except_dynamic_group and (
not want_dynamic_group or have_dynamic_group
)
@property
def manufacturer(self) -> str:
"""Return the manufacturer."""
if self._manufacturer:
return self._manufacturer
if not self.model_name:
return None
return CAST_MANUFACTURERS.get(self.model_name.lower(), "Google Inc.")
def fill_out_missing_chromecast_info(self) -> ChromecastInfo:
"""Return a new ChromecastInfo object with missing attributes filled in.
Uses blocking HTTP / HTTPS.
"""
if self.is_information_complete:
# We have all information, no need to check HTTP API.
return self
# Fill out missing group information via HTTP API.
if self.is_audio_group:
is_dynamic_group = False
http_group_status = None
if self.uuid:
http_group_status = dial.get_multizone_status(
None,
services=self.services,
zconf=ChromeCastZeroconf.get_zeroconf(),
)
if http_group_status is not None:
is_dynamic_group = any(
str(g.uuid) == self.uuid
for g in http_group_status.dynamic_groups
)
return ChromecastInfo(
services=self.services,
uuid=self.uuid,
friendly_name=self.friendly_name,
model_name=self.model_name,
is_audio_group=True,
is_dynamic_group=is_dynamic_group,
)
# Fill out some missing information (friendly_name, uuid) via HTTP dial.
http_device_status = dial.get_device_status(
None, services=self.services, zconf=ChromeCastZeroconf.get_zeroconf()
)
if http_device_status is None:
# HTTP dial didn't give us any new information.
return self
return ChromecastInfo(
services=self.services,
uuid=(self.uuid or http_device_status.uuid),
friendly_name=(self.friendly_name or http_device_status.friendly_name),
manufacturer=(self.manufacturer or http_device_status.manufacturer),
model_name=(self.model_name or http_device_status.model_name),
)
class ChromeCastZeroconf:
"""Class to hold a zeroconf instance."""
__zconf = None
@classmethod
def set_zeroconf(cls, zconf):
"""Set zeroconf."""
cls.__zconf = zconf
@classmethod
def get_zeroconf(cls):
"""Get zeroconf."""
return cls.__zconf
class CastStatusListener:
"""Helper class to handle pychromecast status callbacks.
Necessary because a CastDevice entity can create a new socket client
and therefore callbacks from multiple chromecast connections can
potentially arrive. This class allows invalidating past chromecast objects.
"""
def __init__(self, cast_device, chromecast, mz_mgr, mz_only=False):
"""Initialize the status listener."""
self._cast_device = cast_device
self._uuid = chromecast.uuid
self._valid = True
self._mz_mgr = mz_mgr
if cast_device._cast_info.is_audio_group:
self._mz_mgr.add_multizone(chromecast)
if mz_only:
return
chromecast.register_status_listener(self)
chromecast.socket_client.media_controller.register_status_listener(self)
chromecast.register_connection_listener(self)
if not cast_device._cast_info.is_audio_group:
self._mz_mgr.register_listener(chromecast.uuid, self)
def new_cast_status(self, cast_status):
"""Handle reception of a new CastStatus."""
if self._valid:
self._cast_device.new_cast_status(cast_status)
def new_media_status(self, media_status):
"""Handle reception of a new MediaStatus."""
if self._valid:
self._cast_device.new_media_status(media_status)
def new_connection_status(self, connection_status):
"""Handle reception of a new ConnectionStatus."""
if self._valid:
self._cast_device.new_connection_status(connection_status)
@staticmethod
def added_to_multizone(group_uuid):
"""Handle the cast added to a group."""
def removed_from_multizone(self, group_uuid):
"""Handle the cast removed from a group."""
if self._valid:
self._cast_device.multizone_new_media_status(group_uuid, None)
def multizone_new_cast_status(self, group_uuid, cast_status):
"""Handle reception of a new CastStatus for a group."""
def multizone_new_media_status(self, group_uuid, media_status):
"""Handle reception of a new MediaStatus for a group."""
if self._valid:
self._cast_device.multizone_new_media_status(group_uuid, media_status)
def invalidate(self):
"""Invalidate this status listener.
All following callbacks won't be forwarded.
"""
# pylint: disable=protected-access
if self._cast_device._cast_info.is_audio_group:
self._mz_mgr.remove_multizone(self._uuid)
else:
self._mz_mgr.deregister_listener(self._uuid, self)
self._valid = False
| mit |
kaustubhhiware/coala-bears | tests/js/JSONFormatBearTest.py | 20 | 2931 | from queue import Queue
from bears.js.JSONFormatBear import JSONFormatBear
from coalib.testing.LocalBearTestHelper import (verify_local_bear,
LocalBearTestHelper)
from coalib.results.Result import Result
from coalib.settings.Section import Section
test_file1 = """{
"a": 5,
"b": 5
}"""
test_file2 = """{
"b": 5,
"a": 5
}"""
test_file3 = """{
"b": 5,
"a": 5
}"""
unicode_file = """{
"⌘": 5
}"""
test_file4 = """{
a: 5
}"""
class JSONTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('')
self.uut = JSONFormatBear(self.section, Queue())
def test_exception_result(self):
self.check_results(
self.uut,
test_file4.split('\n'),
[Result.from_values('JSONFormatBear',
'This file does not contain parsable JSON. '
'Expecting property name enclosed in '
'double quotes.',
file='default',
line=2,
column=5)],
filename='default')
def test_exception_empty_file(self):
self.check_results(
self.uut,
[],
[Result.from_values('JSONFormatBear',
'This file is empty.',
file='default')],
filename='default')
JSONFormatBearTest = verify_local_bear(JSONFormatBear,
valid_files=(test_file1, test_file2),
invalid_files=(test_file3,
unicode_file,
'',
'random stuff',
'{"a":5,"b":5}'))
JSONFormatBearSortTest = verify_local_bear(JSONFormatBear,
valid_files=(test_file1,),
invalid_files=(test_file2,),
settings={'json_sort': 'true'})
JSONFormatBearTabWidthTest = verify_local_bear(JSONFormatBear,
valid_files=(test_file3,),
invalid_files=(test_file2,),
settings={
'indent_size': '3'})
JSONFormatBearUnicodeTest = verify_local_bear(JSONFormatBear,
valid_files=(unicode_file,),
invalid_files=(),
settings={'escape_unicode':
'false'})
| agpl-3.0 |
mifl/android_kernel_pantech_ef52s | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
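# autodict() builds an arbitrarily nested mapping: looking up a missing key
# creates another autodict, so assignments such as
# flag_fields[event]['field']['values'][idx] = name need no intermediate setup.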
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
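# Illustrative example (an assumed input, not taken from this file):
# trace_flag_str(0x0c) matches the NEED_RESCHED and HARDIRQ bits above and
# returns "NEED_RESCHED | HARDIRQ" (ordering follows dict key iteration).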
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
apple/llvm-project | lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py | 4 | 4165 | """Test the RunCommandInterpreter API."""
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
class CommandRunInterpreterLegacyAPICase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
@skipIfReproducer # lldb::FileSP used in typemap cannot be instrumented.
def setUp(self):
TestBase.setUp(self)
self.stdin_path = self.getBuildArtifact("stdin.txt")
with open(self.stdin_path, 'w') as input_handle:
input_handle.write("nonexistingcommand\nquit")
# Python will close the file descriptor if all references
# to the filehandle object lapse, so we need to keep one
# around.
self.filehandle = open(self.stdin_path, 'r')
self.dbg.SetInputFileHandle(self.filehandle, False)
# No need to track the output
self.devnull = open(os.devnull, 'w')
self.dbg.SetOutputFileHandle(self.devnull, False)
self.dbg.SetErrorFileHandle (self.devnull, False)
def test_run_session_with_error_and_quit_legacy(self):
"""Run non-existing and quit command returns appropriate values"""
n_errors, quit_requested, has_crashed = self.dbg.RunCommandInterpreter(
True, False, lldb.SBCommandInterpreterRunOptions(), 0, False,
False)
self.assertGreater(n_errors, 0)
self.assertTrue(quit_requested)
self.assertFalse(has_crashed)
class CommandRunInterpreterAPICase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
@skipIfReproducer # lldb::FileSP used in typemap cannot be instrumented.
def setUp(self):
TestBase.setUp(self)
self.stdin_path = self.getBuildArtifact("stdin.txt")
with open(self.stdin_path, 'w') as input_handle:
input_handle.write("nonexistingcommand\nquit")
self.dbg.SetInputFile(open(self.stdin_path, 'r'))
# No need to track the output
devnull = open(os.devnull, 'w')
self.dbg.SetOutputFile(devnull)
self.dbg.SetErrorFile(devnull)
def test_run_session_with_error_and_quit(self):
"""Run non-existing and quit command returns appropriate values"""
n_errors, quit_requested, has_crashed = self.dbg.RunCommandInterpreter(
True, False, lldb.SBCommandInterpreterRunOptions(), 0, False,
False)
self.assertGreater(n_errors, 0)
self.assertTrue(quit_requested)
self.assertFalse(has_crashed)
class SBCommandInterpreterRunOptionsCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
def test_command_interpreter_run_options(self):
"""Test SBCommandInterpreterRunOptions default values, getters & setters """
opts = lldb.SBCommandInterpreterRunOptions()
# Check getters with default values
self.assertEqual(opts.GetStopOnContinue(), False)
self.assertEqual(opts.GetStopOnError(), False)
self.assertEqual(opts.GetStopOnCrash(), False)
self.assertEqual(opts.GetEchoCommands(), True)
self.assertEqual(opts.GetPrintResults(), True)
self.assertEqual(opts.GetPrintErrors(), True)
self.assertEqual(opts.GetAddToHistory(), True)
# Invert values
opts.SetStopOnContinue(not opts.GetStopOnContinue())
opts.SetStopOnError(not opts.GetStopOnError())
opts.SetStopOnCrash(not opts.GetStopOnCrash())
opts.SetEchoCommands(not opts.GetEchoCommands())
opts.SetPrintResults(not opts.GetPrintResults())
opts.SetPrintErrors(not opts.GetPrintErrors())
opts.SetAddToHistory(not opts.GetAddToHistory())
# Check the value changed
self.assertEqual(opts.GetStopOnContinue(), True)
self.assertEqual(opts.GetStopOnError(), True)
self.assertEqual(opts.GetStopOnCrash(), True)
self.assertEqual(opts.GetEchoCommands(), False)
self.assertEqual(opts.GetPrintResults(), False)
self.assertEqual(opts.GetPrintErrors(), False)
self.assertEqual(opts.GetAddToHistory(), False)
| apache-2.0 |
lokirius/python-for-android | python-modules/twisted/twisted/conch/test/test_checkers.py | 59 | 10446 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.checkers}.
"""
try:
import pwd
except ImportError:
pwd = None
import os, base64
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword, IUsernamePassword, \
SSHPrivateKey, ISSHPrivateKey
from twisted.cred.error import UnhandledCredentials, UnauthorizedLogin
from twisted.python.fakepwd import UserDatabase
from twisted.test.test_process import MockOS
try:
import Crypto.Cipher.DES3
import pyasn1
except ImportError:
SSHPublicKeyDatabase = None
else:
from twisted.conch.ssh import keys
from twisted.conch.checkers import SSHPublicKeyDatabase, SSHProtocolChecker
from twisted.conch.error import NotEnoughAuthentication, ValidPublicKey
from twisted.conch.test import keydata
class SSHPublicKeyDatabaseTestCase(TestCase):
"""
Tests for L{SSHPublicKeyDatabase}.
"""
if pwd is None:
skip = "Cannot run without pwd module"
elif SSHPublicKeyDatabase is None:
skip = "Cannot run without PyCrypto or PyASN1"
def setUp(self):
self.checker = SSHPublicKeyDatabase()
self.key1 = base64.encodestring("foobar")
self.key2 = base64.encodestring("eggspam")
self.content = "t1 %s foo\nt2 %s egg\n" % (self.key1, self.key2)
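# self.content mimics an authorized_keys file: one "keytype base64-blob
# comment" entry per line, here using placeholder key types t1 and t2.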
self.mockos = MockOS()
self.mockos.path = FilePath(self.mktemp())
self.mockos.path.makedirs()
self.sshDir = self.mockos.path.child('.ssh')
self.sshDir.makedirs()
userdb = UserDatabase()
userdb.addUser('user', 'password', 1, 2, 'first last',
self.mockos.path.path, '/bin/shell')
self.patch(pwd, "getpwnam", userdb.getpwnam)
self.patch(os, "seteuid", self.mockos.seteuid)
self.patch(os, "setegid", self.mockos.setegid)
def _testCheckKey(self, filename):
self.sshDir.child(filename).setContent(self.content)
user = UsernamePassword("user", "password")
user.blob = "foobar"
self.assertTrue(self.checker.checkKey(user))
user.blob = "eggspam"
self.assertTrue(self.checker.checkKey(user))
user.blob = "notallowed"
self.assertFalse(self.checker.checkKey(user))
def test_checkKey(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys file and check the keys against that file.
"""
self._testCheckKey("authorized_keys")
self.assertEquals(self.mockos.seteuidCalls, [])
self.assertEquals(self.mockos.setegidCalls, [])
def test_checkKey2(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys2 file and check the keys against that file.
"""
self._testCheckKey("authorized_keys2")
self.assertEquals(self.mockos.seteuidCalls, [])
self.assertEquals(self.mockos.setegidCalls, [])
def test_checkKeyAsRoot(self):
"""
If the key file is readable, L{SSHPublicKeyDatabase.checkKey} should
switch its uid/gid to the ones of the authenticated user.
"""
keyFile = self.sshDir.child("authorized_keys")
keyFile.setContent(self.content)
# Fake permission error by changing the mode
keyFile.chmod(0000)
self.addCleanup(keyFile.chmod, 0777)
# And restore the right mode when seteuid is called
savedSeteuid = os.seteuid
def seteuid(euid):
keyFile.chmod(0777)
return savedSeteuid(euid)
self.patch(os, "seteuid", seteuid)
user = UsernamePassword("user", "password")
user.blob = "foobar"
self.assertTrue(self.checker.checkKey(user))
self.assertEquals(self.mockos.seteuidCalls, [0, 1, 0, os.getuid()])
self.assertEquals(self.mockos.setegidCalls, [2, os.getgid()])
def test_requestAvatarId(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should return the avatar id
passed in if its C{_checkKey} method returns True.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', 'ssh-rsa', keydata.publicRSA_openssh,
'foo', keys.Key.fromString(keydata.privateRSA_openssh).sign('foo'))
d = self.checker.requestAvatarId(credentials)
def _verify(avatarId):
self.assertEquals(avatarId, 'test')
return d.addCallback(_verify)
def test_requestAvatarIdWithoutSignature(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should raise L{ValidPublicKey}
if the credentials represent a valid key without a signature. This
tells the user that the key is valid for login, but does not actually
allow that user to do so without a signature.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', 'ssh-rsa', keydata.publicRSA_openssh, None, None)
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, ValidPublicKey)
def test_requestAvatarIdInvalidKey(self):
"""
If L{SSHPublicKeyDatabase.checkKey} returns False,
C{_cbRequestAvatarId} should raise L{UnauthorizedLogin}.
"""
def _checkKey(ignored):
return False
self.patch(self.checker, 'checkKey', _checkKey)
d = self.checker.requestAvatarId(None);
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdInvalidSignature(self):
"""
Valid keys with invalid signatures should cause
L{SSHPublicKeyDatabase.requestAvatarId} to return an L{UnauthorizedLogin}
failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', 'ssh-rsa', keydata.publicRSA_openssh,
'foo', keys.Key.fromString(keydata.privateDSA_openssh).sign('foo'))
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdNormalizeException(self):
"""
Exceptions raised while verifying the key should be normalized into an
C{UnauthorizedLogin} failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', None, 'blob', 'sigData', 'sig')
d = self.checker.requestAvatarId(credentials)
def _verifyLoggedException(failure):
errors = self.flushLoggedErrors(keys.BadKeyError)
self.assertEqual(len(errors), 1)
return failure
d.addErrback(_verifyLoggedException)
return self.assertFailure(d, UnauthorizedLogin)
class SSHProtocolCheckerTestCase(TestCase):
"""
Tests for L{SSHProtocolChecker}.
"""
if SSHPublicKeyDatabase is None:
skip = "Cannot run without PyCrypto"
def test_registerChecker(self):
"""
L{SSHProtocolChecker.registerChecker} should add the given checker to
the list of registered checkers.
"""
checker = SSHProtocolChecker()
self.assertEquals(checker.credentialInterfaces, [])
checker.registerChecker(SSHPublicKeyDatabase(), )
self.assertEquals(checker.credentialInterfaces, [ISSHPrivateKey])
self.assertIsInstance(checker.checkers[ISSHPrivateKey],
SSHPublicKeyDatabase)
def test_registerCheckerWithInterface(self):
"""
If a specific interface is passed into
L{SSHProtocolChecker.registerChecker}, that interface should be
registered instead of what the checker specifies in
credentialInterfaces.
"""
checker = SSHProtocolChecker()
self.assertEquals(checker.credentialInterfaces, [])
checker.registerChecker(SSHPublicKeyDatabase(), IUsernamePassword)
self.assertEquals(checker.credentialInterfaces, [IUsernamePassword])
self.assertIsInstance(checker.checkers[IUsernamePassword],
SSHPublicKeyDatabase)
def test_requestAvatarId(self):
"""
L{SSHProtocolChecker.requestAvatarId} should defer to one of its
registered checkers to authenticate a user.
"""
checker = SSHProtocolChecker()
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser('test', 'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
def _callback(avatarId):
self.assertEquals(avatarId, 'test')
return d.addCallback(_callback)
def test_requestAvatarIdWithNotEnoughAuthentication(self):
"""
If the client indicates that it is never satisfied, by always returning
False from _areDone, then L{SSHProtocolChecker} should raise
L{NotEnoughAuthentication}.
"""
checker = SSHProtocolChecker()
def _areDone(avatarId):
return False
self.patch(checker, 'areDone', _areDone)
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser('test', 'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
return self.assertFailure(d, NotEnoughAuthentication)
def test_requestAvatarIdInvalidCredential(self):
"""
If the passed credentials aren't handled by any registered checker,
L{SSHProtocolChecker} should raise L{UnhandledCredentials}.
"""
checker = SSHProtocolChecker()
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
return self.assertFailure(d, UnhandledCredentials)
def test_areDone(self):
"""
The default L{SSHProtocolChecker.areDone} should simply return True.
"""
self.assertEquals(SSHProtocolChecker().areDone(None), True)
| apache-2.0 |
kenrobbins/tardyrush | tardyrush/helpers/__init__.py | 1 | 1140 | from flask import request, flash, url_for, render_template
from flask import url_for, get_flashed_messages
from flask import redirect as flask_redirect
from flask import jsonify as flask_jsonify
def abs_url_for(*args, **kwargs):
return "http://tardyrush.com%s" % url_for(*args, **kwargs)
def jsonify(*args, **kwargs):
kwargs.setdefault('flashes', get_flashed_messages(with_categories=True))
return flask_jsonify(*args, **kwargs)
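# rt() below is the template-rendering helper: API requests (?api=1) get a
# JSON error payload (form errors plus CSRF token) instead of HTML, while
# normal requests flash any form errors and fall through to render_template.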
def rt(*args, **kwargs):
if request.values.get('api') == '1':
csrf = None
errors = []
if 'form' in kwargs and kwargs['form']:
csrf = kwargs['form'].csrf_token.data
errors = kwargs['form'].errors
return jsonify(success=False, errors=errors, csrf=csrf)
form = kwargs.get('form')
if form and form.errors:
flash("Please fix the errors below and try again.", "error")
kwargs.setdefault('page', {'top':'main', 'sub':''})
return render_template(*args, **kwargs)
def redirect(*args, **kwargs):
if request.values.get('api') == '1':
return jsonify(success=False)
return flask_redirect(*args, **kwargs)
| mit |
christoph-buente/phantomjs | src/qt/qtwebkit/Source/WebKit2/Scripts/generate-message-receiver.py | 145 | 1809 | #!/usr/bin/env python
#
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
import sys
import webkit2.messages
def main(argv=None):
if not argv:
argv = sys.argv
input_path = argv[1]
with open(input_path) as input_file:
# Python 3, change to: print(webkit2.messages.generate_message_handler(input_file), end='')
sys.stdout.write(webkit2.messages.generate_message_handler(input_file))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
sandeepgupta2k4/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py | 52 | 4007 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AffineLinearOperator Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator
from tensorflow.python.platform import test
class AffineLinearOperatorTest(test.TestCase):
def testIdentity(self):
with self.test_session():
affine = AffineLinearOperator(
validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
y = x
ildj = 0.
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(ildj, affine.inverse_log_det_jacobian(y).eval())
self.assertAllClose(-affine.inverse_log_det_jacobian(y).eval(),
affine.forward_log_det_jacobian(x).eval())
def testDiag(self):
with self.test_session():
shift = np.array([-1, 0, 1], dtype=np.float32)
diag = np.array([[1, 2, 3],
[2, 5, 6]], dtype=np.float32)
scale = linalg.LinearOperatorDiag(diag, is_non_singular=True)
affine = AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
y = diag * x + shift
ildj = -np.sum(np.log(np.abs(diag)), axis=-1)
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(ildj, affine.inverse_log_det_jacobian(y).eval())
self.assertAllClose(-affine.inverse_log_det_jacobian(y).eval(),
affine.forward_log_det_jacobian(x).eval())
def testTriL(self):
with self.test_session():
shift = np.array([-1, 0, 1], dtype=np.float32)
tril = np.array([[[1, 0, 0],
[2, -1, 0],
[3, 2, 1]],
[[2, 0, 0],
[3, -2, 0],
[4, 3, 2]]],
dtype=np.float32)
scale = linalg.LinearOperatorTriL(tril, is_non_singular=True)
affine = AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)
x = np.array([[[1, 0, -1],
[2, 3, 4]],
[[4, 1, -7],
[6, 9, 8]]],
dtype=np.float32)
# If we made the bijector do x*A+b then this would be simplified to:
# y = np.matmul(x, tril) + shift.
y = np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
ildj = -np.sum(np.log(np.abs(np.diagonal(
tril, axis1=-2, axis2=-1))),
axis=-1)
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(ildj, affine.inverse_log_det_jacobian(y).eval())
self.assertAllClose(-affine.inverse_log_det_jacobian(y).eval(),
affine.forward_log_det_jacobian(x).eval())
if __name__ == "__main__":
test.main()
| apache-2.0 |
blueboxgroup/keystone | keystone/tests/test_backend_sql.py | 2 | 34278 | # -*- coding: utf-8 -*-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import uuid
import mock
from oslo.db import exception as db_exception
from oslo.db import options
import sqlalchemy
from sqlalchemy import exc
from testtools import matchers
from keystone.common import driver_hints
from keystone.common import sql
from keystone import config
from keystone import exception
from keystone.identity.backends import sql as identity_sql
from keystone.openstack.common import log as logging
from keystone import tests
from keystone.tests import default_fixtures
from keystone.tests.ksfixtures import database
from keystone.tests import test_backend
from keystone.token.persistence.backends import sql as token_sql
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class SqlTests(tests.SQLDriverOverrides, tests.TestCase):
def setUp(self):
super(SqlTests, self).setUp()
self.useFixture(database.Database())
self.load_backends()
# populate the engine with tables & fixtures
self.load_fixtures(default_fixtures)
# defaulted by the data load
self.user_foo['enabled'] = True
def config_files(self):
config_files = super(SqlTests, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
class SqlModels(SqlTests):
def select_table(self, name):
table = sqlalchemy.Table(name,
sql.ModelBase.metadata,
autoload=True)
s = sqlalchemy.select([table])
return s
def assertExpectedSchema(self, table, cols):
table = self.select_table(table)
for col, type_, length in cols:
self.assertIsInstance(table.c[col].type, type_)
if length:
self.assertEqual(length, table.c[col].type.length)
def test_user_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 255),
('password', sql.String, 128),
('domain_id', sql.String, 64),
('enabled', sql.Boolean, None),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('user', cols)
def test_group_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 64),
('description', sql.Text, None),
('domain_id', sql.String, 64),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('group', cols)
def test_domain_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 64),
('enabled', sql.Boolean, None))
self.assertExpectedSchema('domain', cols)
def test_project_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 64),
('description', sql.Text, None),
('domain_id', sql.String, 64),
('enabled', sql.Boolean, None),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('project', cols)
def test_role_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 255))
self.assertExpectedSchema('role', cols)
def test_role_assignment_model(self):
cols = (('type', sql.Enum, None),
('actor_id', sql.String, 64),
('target_id', sql.String, 64),
('role_id', sql.String, 64),
('inherited', sql.Boolean, False))
self.assertExpectedSchema('assignment', cols)
def test_user_group_membership(self):
cols = (('group_id', sql.String, 64),
('user_id', sql.String, 64))
self.assertExpectedSchema('user_group_membership', cols)
class SqlIdentity(SqlTests, test_backend.IdentityTests):
def test_password_hashed(self):
session = sql.get_session()
user_ref = self.identity_api._get_user(session, self.user_foo['id'])
self.assertNotEqual(user_ref['password'], self.user_foo['password'])
def test_delete_user_with_project_association(self):
user = {'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
self.assignment_api.list_projects_for_user,
user['id'])
def test_create_null_user_name(self):
user = {'name': None,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
self.assertRaises(exception.ValidationError,
self.identity_api.create_user,
user)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user_by_name,
user['name'],
DEFAULT_DOMAIN_ID)
def test_create_null_project_name(self):
tenant = {'id': uuid.uuid4().hex,
'name': None,
'domain_id': DEFAULT_DOMAIN_ID}
self.assertRaises(exception.ValidationError,
self.assignment_api.create_project,
tenant['id'],
tenant)
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
tenant['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project_by_name,
tenant['name'],
DEFAULT_DOMAIN_ID)
def test_create_null_role_name(self):
role = {'id': uuid.uuid4().hex,
'name': None}
self.assertRaises(exception.UnexpectedError,
self.role_api.create_role,
role['id'],
role)
self.assertRaises(exception.RoleNotFound,
self.role_api.get_role,
role['id'])
def test_delete_project_with_user_association(self):
user = {'name': 'fakeuser',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
self.assignment_api.delete_project(self.tenant_bar['id'])
tenants = self.assignment_api.list_projects_for_user(user['id'])
self.assertEqual([], tenants)
def test_metadata_removed_on_delete_user(self):
# A test to check that the internal representation
# or roles is correctly updated when a user is deleted
user = {'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
user = self.identity_api.create_user(user)
role = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.role_api.create_role(role['id'], role)
self.assignment_api.add_role_to_user_and_project(
user['id'],
self.tenant_bar['id'],
role['id'])
self.identity_api.delete_user(user['id'])
# Now check whether the internal representation of roles
# has been deleted
self.assertRaises(exception.MetadataNotFound,
self.assignment_api._get_metadata,
user['id'],
self.tenant_bar['id'])
def test_metadata_removed_on_delete_project(self):
# A test to check that the internal representation
# or roles is correctly updated when a project is deleted
user = {'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
user = self.identity_api.create_user(user)
role = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.role_api.create_role(role['id'], role)
self.assignment_api.add_role_to_user_and_project(
user['id'],
self.tenant_bar['id'],
role['id'])
self.assignment_api.delete_project(self.tenant_bar['id'])
# Now check whether the internal representation of roles
# has been deleted
self.assertRaises(exception.MetadataNotFound,
self.assignment_api._get_metadata,
user['id'],
self.tenant_bar['id'])
def test_update_project_returns_extra(self):
"""This tests for backwards-compatibility with an essex/folsom bug.
Non-indexed attributes were returned in an 'extra' attribute, instead
of on the entity itself; for consistency and backwards compatibility,
those attributes should be included twice.
This behavior is specific to the SQL driver.
"""
tenant_id = uuid.uuid4().hex
arbitrary_key = uuid.uuid4().hex
arbitrary_value = uuid.uuid4().hex
tenant = {
'id': tenant_id,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
arbitrary_key: arbitrary_value}
ref = self.assignment_api.create_project(tenant_id, tenant)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('extra'))
tenant['name'] = uuid.uuid4().hex
ref = self.assignment_api.update_project(tenant_id, tenant)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
def test_update_user_returns_extra(self):
"""This tests for backwards-compatibility with an essex/folsom bug.
Non-indexed attributes were returned in an 'extra' attribute, instead
of on the entity itself; for consistency and backwards compatibility,
those attributes should be included twice.
This behavior is specific to the SQL driver.
"""
arbitrary_key = uuid.uuid4().hex
arbitrary_value = uuid.uuid4().hex
user = {
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex,
arbitrary_key: arbitrary_value}
ref = self.identity_api.create_user(user)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('password'))
self.assertIsNone(ref.get('extra'))
user['name'] = uuid.uuid4().hex
user['password'] = uuid.uuid4().hex
ref = self.identity_api.update_user(ref['id'], user)
self.assertIsNone(ref.get('password'))
self.assertIsNone(ref['extra'].get('password'))
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
def test_sql_user_to_dict_null_default_project_id(self):
user = {
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
user = self.identity_api.create_user(user)
session = sql.get_session()
query = session.query(identity_sql.User)
query = query.filter_by(id=user['id'])
raw_user_ref = query.one()
self.assertIsNone(raw_user_ref.default_project_id)
user_ref = raw_user_ref.to_dict()
self.assertNotIn('default_project_id', user_ref)
session.close()
def test_list_domains_for_user(self):
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain['id'], domain)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain['id'], 'enabled': True}
test_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(test_domain1['id'], test_domain1)
test_domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(test_domain2['id'], test_domain2)
user = self.identity_api.create_user(user)
user_domains = self.assignment_api.list_domains_for_user(user['id'])
self.assertEqual(0, len(user_domains))
self.assignment_api.create_grant(user_id=user['id'],
domain_id=test_domain1['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(user_id=user['id'],
domain_id=test_domain2['id'],
role_id=self.role_member['id'])
user_domains = self.assignment_api.list_domains_for_user(user['id'])
self.assertThat(user_domains, matchers.HasLength(2))
def test_list_domains_for_user_with_grants(self):
# Create two groups each with a role on a different domain, and
# make user1 a member of both groups. Both these new domains
# should now be included, along with any direct user grants.
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain['id'], domain)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain['id'], 'enabled': True}
user = self.identity_api.create_user(user)
group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group1 = self.identity_api.create_group(group1)
group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group2 = self.identity_api.create_group(group2)
test_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(test_domain1['id'], test_domain1)
test_domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(test_domain2['id'], test_domain2)
test_domain3 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(test_domain3['id'], test_domain3)
self.identity_api.add_user_to_group(user['id'], group1['id'])
self.identity_api.add_user_to_group(user['id'], group2['id'])
# Create 3 grants, one user grant, the other two as group grants
self.assignment_api.create_grant(user_id=user['id'],
domain_id=test_domain1['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(group_id=group1['id'],
domain_id=test_domain2['id'],
role_id=self.role_admin['id'])
self.assignment_api.create_grant(group_id=group2['id'],
domain_id=test_domain3['id'],
role_id=self.role_admin['id'])
user_domains = self.assignment_api.list_domains_for_user(user['id'])
self.assertThat(user_domains, matchers.HasLength(3))
def test_list_domains_for_user_with_inherited_grants(self):
"""Test that inherited roles on the domain are excluded.
Test Plan:
- Create two domains, one user, group and role
- Domain1 is given an inherited user role, Domain2 an inherited
group role (for a group of which the user is a member)
- When listing domains for user, neither domain should be returned
"""
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
domain1 = self.assignment_api.create_domain(domain1['id'], domain1)
domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
domain2 = self.assignment_api.create_domain(domain2['id'], domain2)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain1['id'], 'enabled': True}
user = self.identity_api.create_user(user)
group = {'name': uuid.uuid4().hex, 'domain_id': domain1['id']}
group = self.identity_api.create_group(group)
self.identity_api.add_user_to_group(user['id'], group['id'])
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.role_api.create_role(role['id'], role)
# Create a grant on each domain, one user grant, one group grant,
# both inherited.
self.assignment_api.create_grant(user_id=user['id'],
domain_id=domain1['id'],
role_id=role['id'],
inherited_to_projects=True)
self.assignment_api.create_grant(group_id=group['id'],
domain_id=domain2['id'],
role_id=role['id'],
inherited_to_projects=True)
user_domains = self.assignment_api.list_domains_for_user(user['id'])
# No domains should be returned since both domains have only inherited
# roles assignments.
self.assertThat(user_domains, matchers.HasLength(0))
class SqlTrust(SqlTests, test_backend.TrustTests):
pass
class SqlToken(SqlTests, test_backend.TokenTests):
def test_token_revocation_list_uses_right_columns(self):
# This query used to be heavy with too many columns. We want
# to make sure it is only running with the minimum columns
# necessary.
expected_query_args = (token_sql.TokenModel.id,
token_sql.TokenModel.expires)
with mock.patch.object(token_sql, 'sql') as mock_sql:
tok = token_sql.Token()
tok.list_revoked_tokens()
mock_query = mock_sql.get_session().query
mock_query.assert_called_with(*expected_query_args)
def test_flush_expired_tokens_batch(self):
# TODO(dstanek): This test should be rewritten to be less
# brittle. The code will likely need to be changed first. I
# just copied the spirit of the existing test when I rewrote
# mox -> mock. These tests are brittle because they have the
# call structure for SQLAlchemy encoded in them.
# test sqlite dialect
with mock.patch.object(token_sql, 'sql') as mock_sql:
mock_sql.get_session().bind.dialect.name = 'sqlite'
tok = token_sql.Token()
tok.flush_expired_tokens()
filter_mock = mock_sql.get_session().query().filter()
self.assertFalse(filter_mock.limit.called)
self.assertTrue(filter_mock.delete.called_once)
def test_flush_expired_tokens_batch_mysql(self):
# test mysql dialect, we don't need to test IBM DB SA separately, since
# other tests below test the differences between how they use the batch
# strategy
with mock.patch.object(token_sql, 'sql') as mock_sql:
mock_sql.get_session().query().filter().delete.return_value = 0
mock_sql.get_session().bind.dialect.name = 'mysql'
tok = token_sql.Token()
expiry_mock = mock.Mock()
ITERS = [1, 2, 3]
expiry_mock.return_value = iter(ITERS)
token_sql._expiry_range_batched = expiry_mock
tok.flush_expired_tokens()
# The expiry strategy is only invoked once, the other calls are via
# the yield return.
self.assertEqual(1, expiry_mock.call_count)
mock_delete = mock_sql.get_session().query().filter().delete
self.assertThat(mock_delete.call_args_list,
matchers.HasLength(len(ITERS)))
def test_expiry_range_batched(self):
upper_bound_mock = mock.Mock(side_effect=[1, "final value"])
sess_mock = mock.Mock()
query_mock = sess_mock.query().filter().order_by().offset().limit()
query_mock.one.side_effect = [['test'], sql.NotFound()]
for i, x in enumerate(token_sql._expiry_range_batched(sess_mock,
upper_bound_mock,
batch_size=50)):
if i == 0:
# The first time the batch iterator returns, it should return
# the first result that comes back from the database.
self.assertEqual(x, 'test')
elif i == 1:
# The second time, the database range function should return
# nothing, so the batch iterator returns the result of the
# upper_bound function
self.assertEqual(x, "final value")
else:
self.fail("range batch function returned more than twice")
def test_expiry_range_strategy_sqlite(self):
tok = token_sql.Token()
sqlite_strategy = tok._expiry_range_strategy('sqlite')
self.assertEqual(token_sql._expiry_range_all, sqlite_strategy)
def test_expiry_range_strategy_ibm_db_sa(self):
tok = token_sql.Token()
db2_strategy = tok._expiry_range_strategy('ibm_db_sa')
self.assertIsInstance(db2_strategy, functools.partial)
self.assertEqual(db2_strategy.func, token_sql._expiry_range_batched)
self.assertEqual(db2_strategy.keywords, {'batch_size': 100})
def test_expiry_range_strategy_mysql(self):
tok = token_sql.Token()
mysql_strategy = tok._expiry_range_strategy('mysql')
self.assertIsInstance(mysql_strategy, functools.partial)
self.assertEqual(mysql_strategy.func, token_sql._expiry_range_batched)
self.assertEqual(mysql_strategy.keywords, {'batch_size': 1000})
class SqlCatalog(SqlTests, test_backend.CatalogTests):
def test_catalog_ignored_malformed_urls(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
malformed_url = "http://192.168.1.104:8774/v2/$(tenant)s"
endpoint = {
'id': uuid.uuid4().hex,
'region_id': None,
'service_id': service['id'],
'interface': 'public',
'url': malformed_url,
}
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
# NOTE(dstanek): there are no valid URLs, so nothing is in the catalog
catalog = self.catalog_api.get_catalog('fake-user', 'fake-tenant')
self.assertEqual({}, catalog)
def test_get_catalog_with_empty_public_url(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
endpoint = {
'id': uuid.uuid4().hex,
'region_id': None,
'interface': 'public',
'url': '',
'service_id': service['id'],
}
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
catalog = self.catalog_api.get_catalog('user', 'tenant')
catalog_endpoint = catalog[endpoint['region_id']][service['type']]
self.assertEqual(service['name'], catalog_endpoint['name'])
self.assertEqual(endpoint['id'], catalog_endpoint['id'])
self.assertEqual('', catalog_endpoint['publicURL'])
self.assertIsNone(catalog_endpoint.get('adminURL'))
self.assertIsNone(catalog_endpoint.get('internalURL'))
def test_create_endpoint_region_404(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
endpoint = {
'id': uuid.uuid4().hex,
'region_id': uuid.uuid4().hex,
'service_id': service['id'],
'interface': 'public',
'url': uuid.uuid4().hex,
}
self.assertRaises(exception.ValidationError,
self.catalog_api.create_endpoint,
endpoint['id'],
endpoint.copy())
def test_create_region_invalid_id(self):
region = {
'id': '0' * 256,
'description': '',
'extra': {},
}
self.assertRaises(exception.StringLengthExceeded,
self.catalog_api.create_region,
region.copy())
def test_create_region_invalid_parent_id(self):
region = {
'id': uuid.uuid4().hex,
'parent_region_id': '0' * 256,
}
self.assertRaises(exception.RegionNotFound,
self.catalog_api.create_region,
region)
def test_delete_region_with_endpoint(self):
# create a region
region = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_region(region)
# create a child region
child_region = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'parent_id': region['id']
}
self.catalog_api.create_region(child_region)
# create a service
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service)
# create an endpoint attached to the service and child region
child_endpoint = {
'id': uuid.uuid4().hex,
'region_id': child_region['id'],
'interface': uuid.uuid4().hex[:8],
'url': uuid.uuid4().hex,
'service_id': service['id'],
}
self.catalog_api.create_endpoint(child_endpoint['id'], child_endpoint)
self.assertRaises(exception.RegionDeletionError,
self.catalog_api.delete_region,
child_region['id'])
# create an endpoint attached to the service and parent region
endpoint = {
'id': uuid.uuid4().hex,
'region_id': region['id'],
'interface': uuid.uuid4().hex[:8],
'url': uuid.uuid4().hex,
'service_id': service['id'],
}
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
self.assertRaises(exception.RegionDeletionError,
self.catalog_api.delete_region,
region['id'])
class SqlPolicy(SqlTests, test_backend.PolicyTests):
pass
class SqlInheritance(SqlTests, test_backend.InheritanceTests):
pass
class SqlTokenCacheInvalidation(SqlTests, test_backend.TokenCacheInvalidation):
def setUp(self):
super(SqlTokenCacheInvalidation, self).setUp()
self._create_test_data()
class SqlFilterTests(SqlTests, test_backend.FilterTests):
def test_filter_sql_injection_attack(self):
"""Test against sql injection attack on filters
Test Plan:
- Attempt to get all entities back by passing a two-term attribute
- Attempt to piggyback filter to damage DB (e.g. drop table)
"""
# Check we have some users
users = self.identity_api.list_users()
self.assertTrue(len(users) > 0)
hints = driver_hints.Hints()
hints.add_filter('name', "anything' or 'x'='x")
users = self.identity_api.list_users(hints=hints)
self.assertEqual(0, len(users))
# See if we can add a SQL command...use the group table instead of the
# user table since 'user' is reserved word for SQLAlchemy.
group = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID}
group = self.identity_api.create_group(group)
hints = driver_hints.Hints()
hints.add_filter('name', "x'; drop table group")
groups = self.identity_api.list_groups(hints=hints)
self.assertEqual(0, len(groups))
groups = self.identity_api.list_groups()
self.assertTrue(len(groups) > 0)
class SqlLimitTests(SqlTests, test_backend.LimitTests):
def setUp(self):
super(SqlLimitTests, self).setUp()
test_backend.LimitTests.setUp(self)
class FakeTable(sql.ModelBase):
__tablename__ = 'test_table'
col = sql.Column(sql.String(32), primary_key=True)
@sql.handle_conflicts('keystone')
def insert(self):
raise db_exception.DBDuplicateEntry
@sql.handle_conflicts('keystone')
def update(self):
raise db_exception.DBError(
inner_exception=exc.IntegrityError('a', 'a', 'a'))
@sql.handle_conflicts('keystone')
def lookup(self):
raise KeyError
class SqlDecorators(tests.TestCase):
def test_initialization_fail(self):
self.assertRaises(exception.StringLengthExceeded,
FakeTable, col='a' * 64)
def test_initialization(self):
tt = FakeTable(col='a')
self.assertEqual('a', tt.col)
def test_non_ascii_init(self):
# NOTE(I159): Non ASCII characters must cause UnicodeDecodeError
# if encoding is not provided explicitly.
self.assertRaises(UnicodeDecodeError, FakeTable, col='Я')
    def test_conflict_happened(self):
self.assertRaises(exception.Conflict, FakeTable().insert)
self.assertRaises(exception.UnexpectedError, FakeTable().update)
def test_not_conflict_error(self):
self.assertRaises(KeyError, FakeTable().lookup)
class SqlModuleInitialization(tests.TestCase):
@mock.patch.object(sql.core, 'CONF')
@mock.patch.object(options, 'set_defaults')
def test_initialize_module(self, set_defaults, CONF):
sql.initialize()
set_defaults.assert_called_with(CONF,
connection='sqlite:///keystone.db')
class SqlCredential(SqlTests):
def _create_credential_with_user_id(self, user_id=uuid.uuid4().hex):
credential_id = uuid.uuid4().hex
new_credential = {
'id': credential_id,
'user_id': user_id,
'project_id': uuid.uuid4().hex,
'blob': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'extra': uuid.uuid4().hex
}
self.credential_api.create_credential(credential_id, new_credential)
return new_credential
def _validateCredentialList(self, retrieved_credentials,
expected_credentials):
self.assertEqual(len(retrieved_credentials), len(expected_credentials))
        retrieved_ids = [c['id'] for c in retrieved_credentials]
        for cred in expected_credentials:
            self.assertIn(cred['id'], retrieved_ids)
def setUp(self):
super(SqlCredential, self).setUp()
self.credentials = []
for _ in range(3):
self.credentials.append(
self._create_credential_with_user_id())
self.user_credentials = []
for _ in range(3):
cred = self._create_credential_with_user_id(self.user_foo['id'])
self.user_credentials.append(cred)
self.credentials.append(cred)
def test_list_credentials(self):
credentials = self.credential_api.list_credentials()
self._validateCredentialList(credentials, self.credentials)
# test filtering using hints
hints = driver_hints.Hints()
hints.add_filter('user_id', self.user_foo['id'])
credentials = self.credential_api.list_credentials(hints)
self._validateCredentialList(credentials, self.user_credentials)
def test_list_credentials_for_user(self):
credentials = self.credential_api.list_credentials_for_user(
self.user_foo['id'])
self._validateCredentialList(credentials, self.user_credentials)
class DeprecatedDecorators(SqlTests):
def test_assignment_to_role_api(self):
"""Test that calling one of the methods does call LOG.deprecated.
This method is really generic to the type of backend, but we need
one to execute the test, so the SQL backend is as good as any.
"""
# Rather than try and check that a log message is issued, we
# enable fatal_deprecations so that we can check for the
# raising of the exception.
# First try to create a role without enabling fatal deprecations,
# which should work due to the cross manager deprecated calls.
role_ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_ref['id'], role_ref)
self.role_api.get_role(role_ref['id'])
# Now enable fatal exceptions - creating a role by calling the
# old manager should now fail.
self.config_fixture.config(fatal_deprecations=True)
role_ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assertRaises(logging.DeprecatedConfig,
self.assignment_api.create_role,
role_ref['id'], role_ref)
| apache-2.0 |
polimediaupv/edx-platform | openedx/core/lib/api/plugins.py | 154 | 1517 | """
Adds support for first class features that can be added to the edX platform.
"""
from stevedore.extension import ExtensionManager
class PluginError(Exception):
"""
Base Exception for when an error was found regarding features.
"""
pass
class PluginManager(object):
"""
Base class that manages plugins to the edX platform.
"""
@classmethod
def get_available_plugins(cls):
"""
Returns a dict of all the plugins that have been made available through the platform.
"""
# Note: we're creating the extension manager lazily to ensure that the Python path
# has been correctly set up. Trying to create this statically will fail, unfortunately.
if not hasattr(cls, "_plugins"):
plugins = {}
extension_manager = ExtensionManager(namespace=cls.NAMESPACE) # pylint: disable=no-member
for plugin_name in extension_manager.names():
plugins[plugin_name] = extension_manager[plugin_name].plugin
cls._plugins = plugins
return cls._plugins
@classmethod
def get_plugin(cls, name):
"""
Returns the plugin with the given name.
"""
plugins = cls.get_available_plugins()
if name not in plugins:
raise PluginError("No such plugin {name} for entry point {namespace}".format(
name=name,
namespace=cls.NAMESPACE # pylint: disable=no-member
))
return plugins[name]
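
# A minimal usage sketch (illustrative only; the namespace and plugin name
# below are made-up examples, not real edx-platform entry points). Concrete
# managers subclass PluginManager and point NAMESPACE at a setuptools
# entry-point group; plugins are then looked up by the name they were
# registered under.
class ExamplePluginManager(PluginManager):
    """Hypothetical manager for plugins registered under 'example.namespace'."""
    NAMESPACE = 'example.namespace'

# Typical call sites (commented out, since they require installed entry points):
#   available = ExamplePluginManager.get_available_plugins()
#   plugin_cls = ExamplePluginManager.get_plugin('some_plugin_name')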
| agpl-3.0 |
onepas/xbmc-addons | script.video.F4mProxy/lib/flvlib/astypes.py | 98 | 8332 | import os
import calendar
import datetime
import logging
from primitives import *
from constants import *
from helpers import OrderedAttrDict, utc
"""
The AS types and their FLV representations.
"""
log = logging.getLogger('flvlib.astypes')
class MalformedFLV(Exception):
pass
# Number
def get_number(f, max_offset=None):
return get_double(f)
def make_number(num):
return make_double(num)
# Boolean
def get_boolean(f, max_offset=None):
value = get_ui8(f)
return bool(value)
def make_boolean(value):
return make_ui8((value and 1) or 0)
# String
def get_string(f, max_offset=None):
# First 16 bits are the string's length
length = get_ui16(f)
# Then comes the string itself
ret = f.read(length)
return ret
def make_string(string):
if isinstance(string, unicode):
# We need a blob, not unicode. Arbitrarily choose UTF-8
string = string.encode('UTF-8')
length = make_ui16(len(string))
return length + string
# Longstring
def get_longstring(f, max_offset=None):
# First 32 bits are the string's length
length = get_ui32(f)
# Then comes the string itself
ret = f.read(length)
return ret
def make_longstring(string):
if isinstance(string, unicode):
# We need a blob, not unicode. Arbitrarily choose UTF-8
string = string.encode('UTF-8')
length = make_ui32(len(string))
return length + string
# ECMA Array
class ECMAArray(OrderedAttrDict):
pass
def get_ecma_array(f, max_offset=None):
length = get_ui32(f)
log.debug("The ECMA array has approximately %d elements", length)
array = ECMAArray()
while True:
if max_offset and (f.tell() == max_offset):
log.debug("Prematurely terminating reading an ECMA array")
break
marker = get_ui24(f)
if marker == 9:
log.debug("Marker!")
break
else:
f.seek(-3, os.SEEK_CUR)
name, value = get_script_data_variable(f, max_offset=max_offset)
array[name] = value
return array
def make_ecma_array(d):
length = make_ui32(len(d))
rest = ''.join([make_script_data_variable(name, value)
for name, value in d.iteritems()])
marker = make_ui24(9)
return length + rest + marker
# Strict Array
def get_strict_array(f, max_offset=None):
length = get_ui32(f)
log.debug("The length is %d", length)
elements = [get_script_data_value(f, max_offset=max_offset)
for _ in xrange(length)]
return elements
def make_strict_array(l):
ret = make_ui32(len(l))
rest = ''.join([make_script_data_value(value) for value in l])
return ret + rest
# Date
def get_date(f, max_offset=None):
timestamp = get_number(f) / 1000.0
# From the following document:
# http://opensource.adobe.com/wiki/download/
# attachments/1114283/amf0_spec_121207.pdf
#
# Section 2.13 Date Type
#
# (...) While the design of this type reserves room for time zone offset
# information, it should not be filled in, nor used (...)
_ignored = get_si16(f)
return datetime.datetime.fromtimestamp(timestamp, utc)
def make_date(date):
if date.tzinfo:
utc_date = date.astimezone(utc)
else:
# assume it's UTC
utc_date = date.replace(tzinfo=utc)
ret = make_number(calendar.timegm(utc_date.timetuple()) * 1000)
offset = 0
return ret + make_si16(offset)
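
def _example_date_roundtrip():
    # Minimal illustrative sketch (not part of the original library API):
    # serialize an aware UTC datetime with make_date() and read it back with
    # get_date(). The AMF0 date is an 8-byte millisecond timestamp followed by
    # a 16-bit timezone offset that is written as zero and ignored on read.
    from StringIO import StringIO  # this module targets Python 2
    dt = datetime.datetime(2010, 1, 1, 12, 0, 0, tzinfo=utc)
    blob = make_date(dt)
    assert get_date(StringIO(blob)) == dt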
# Null
def get_null(f, max_offset=None):
return None
def make_null(none):
return ''
# Object
class FLVObject(OrderedAttrDict):
pass
def get_object(f, max_offset=None):
ret = FLVObject()
while True:
if max_offset and (f.tell() == max_offset):
log.debug("Prematurely terminating reading an object")
break
marker = get_ui24(f)
if marker == 9:
log.debug("Marker!")
break
else:
f.seek(-3, os.SEEK_CUR)
name, value = get_script_data_variable(f)
setattr(ret, name, value)
return ret
def make_object(obj):
# If the object is iterable, serialize keys/values. If not, fall
# back on iterating over __dict__.
# This makes sure that make_object(get_object(StringIO(blob))) == blob
try:
iterator = obj.iteritems()
except AttributeError:
iterator = obj.__dict__.iteritems()
ret = ''.join([make_script_data_variable(name, value)
for name, value in iterator])
marker = make_ui24(9)
return ret + marker
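
def _example_object_roundtrip():
    # Illustrative sketch only: because get_object() builds an FLVObject (an
    # ordered attribute dict) and make_object() iterates its key/value pairs,
    # a parsed object re-serializes to the identical blob, as the comment
    # above promises.
    from StringIO import StringIO
    obj = FLVObject()
    obj.width = 640.0
    obj.height = 360.0
    blob = make_object(obj)
    parsed = get_object(StringIO(blob))
    assert make_object(parsed) == blob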
# MovieClip
class MovieClip(object):
def __init__(self, path):
self.path = path
def __eq__(self, other):
return isinstance(other, MovieClip) and self.path == other.path
def __repr__(self):
return "<MovieClip at %s>" % self.path
def get_movieclip(f, max_offset=None):
ret = get_string(f)
return MovieClip(ret)
def make_movieclip(clip):
return make_string(clip.path)
# Undefined
class Undefined(object):
def __eq__(self, other):
return isinstance(other, Undefined)
def __repr__(self):
return '<Undefined>'
def get_undefined(f, max_offset=None):
return Undefined()
def make_undefined(undefined):
return ''
# Reference
class Reference(object):
def __init__(self, ref):
self.ref = ref
def __eq__(self, other):
return isinstance(other, Reference) and self.ref == other.ref
def __repr__(self):
return "<Reference to %d>" % self.ref
def get_reference(f, max_offset=None):
ret = get_ui16(f)
return Reference(ret)
def make_reference(reference):
return make_ui16(reference.ref)
as_type_to_getter_and_maker = {
VALUE_TYPE_NUMBER: (get_number, make_number),
VALUE_TYPE_BOOLEAN: (get_boolean, make_boolean),
VALUE_TYPE_STRING: (get_string, make_string),
VALUE_TYPE_OBJECT: (get_object, make_object),
VALUE_TYPE_MOVIECLIP: (get_movieclip, make_movieclip),
VALUE_TYPE_NULL: (get_null, make_null),
VALUE_TYPE_UNDEFINED: (get_undefined, make_undefined),
VALUE_TYPE_REFERENCE: (get_reference, make_reference),
VALUE_TYPE_ECMA_ARRAY: (get_ecma_array, make_ecma_array),
VALUE_TYPE_STRICT_ARRAY: (get_strict_array, make_strict_array),
VALUE_TYPE_DATE: (get_date, make_date),
VALUE_TYPE_LONGSTRING: (get_longstring, make_longstring)
}
type_to_as_type = {
bool: VALUE_TYPE_BOOLEAN,
int: VALUE_TYPE_NUMBER,
long: VALUE_TYPE_NUMBER,
float: VALUE_TYPE_NUMBER,
# WARNING: not supporting Longstrings here.
    # With a max length of 65535 chars, no one will notice.
str: VALUE_TYPE_STRING,
unicode: VALUE_TYPE_STRING,
list: VALUE_TYPE_STRICT_ARRAY,
dict: VALUE_TYPE_ECMA_ARRAY,
ECMAArray: VALUE_TYPE_ECMA_ARRAY,
datetime.datetime: VALUE_TYPE_DATE,
Undefined: VALUE_TYPE_UNDEFINED,
MovieClip: VALUE_TYPE_MOVIECLIP,
Reference: VALUE_TYPE_REFERENCE,
type(None): VALUE_TYPE_NULL
}
# SCRIPTDATAVARIABLE
def get_script_data_variable(f, max_offset=None):
name = get_string(f)
log.debug("The name is %s", name)
value = get_script_data_value(f, max_offset=max_offset)
log.debug("The value is %r", value)
return (name, value)
def make_script_data_variable(name, value):
log.debug("The name is %s", name)
log.debug("The value is %r", value)
ret = make_string(name) + make_script_data_value(value)
return ret
# SCRIPTDATAVALUE
def get_script_data_value(f, max_offset=None):
value_type = get_ui8(f)
log.debug("The value type is %r", value_type)
try:
get_value = as_type_to_getter_and_maker[value_type][0]
except KeyError:
raise MalformedFLV("Invalid script data value type: %d", value_type)
log.debug("The getter function is %r", get_value)
value = get_value(f, max_offset=max_offset)
return value
def make_script_data_value(value):
value_type = type_to_as_type.get(value.__class__, VALUE_TYPE_OBJECT)
log.debug("The value type is %r", value_type)
# KeyError can't happen here, because we always fall back on
# VALUE_TYPE_OBJECT when determining value_type
make_value = as_type_to_getter_and_maker[value_type][1]
log.debug("The maker function is %r", make_value)
type_tag = make_ui8(value_type)
ret = make_value(value)
return type_tag + ret
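
def _example_script_data_value_roundtrip():
    # Illustrative sketch only: the dispatch tables above pick the AS type tag
    # from the Python type, so plain Python values round-trip through
    # make_script_data_value() / get_script_data_value().
    from StringIO import StringIO
    blob = make_script_data_value({'duration': 12.5, 'hasVideo': True})
    parsed = get_script_data_value(StringIO(blob))
    assert parsed['duration'] == 12.5
    assert parsed['hasVideo'] is True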
| gpl-2.0 |
JingJunYin/tensorflow | tensorflow/python/eager/graph_only_ops_test.py | 53 | 1712 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_only_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import graph_only_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class GraphOnlyOpsTest(test_util.TensorFlowTestCase):
def testGraphZerosLike(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
z_tf = graph_only_ops.graph_zeros_like(x)
with self.test_session():
self.assertAllClose(np.zeros((2, 3)), z_tf.eval())
def testGraphPlaceholder(self):
x_tf = graph_only_ops.graph_placeholder(dtypes.int32, shape=(1,))
y_tf = math_ops.square(x_tf)
with self.test_session() as sess:
x = np.array([42])
y = sess.run(y_tf, feed_dict={x_tf: np.array([42])})
self.assertAllClose(np.square(x), y)
if __name__ == '__main__':
test.main()
| apache-2.0 |
dgellis90/nipype | nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py | 1 | 1615 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..model import Label2Label
def test_Label2Label_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
hemisphere=dict(argstr='--hemi %s',
mandatory=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
label=dict(mandatory=True,
),
registration_method=dict(argstr='--regmethod %s',
genfile=True,
mandatory=False,
),
source_label=dict(argstr='--srclabel %s',
genfile=True,
mandatory=False,
),
source_subject=dict(argstr='--srcsubject %s',
genfile=True,
mandatory=False,
),
sphere_reg=dict(mandatory=True,
),
subject_id=dict(argstr='--trgsubject %s',
mandatory=True,
),
subjects_dir=dict(),
target_label=dict(argstr='--trglabel %s',
genfile=True,
mandatory=False,
),
terminal_output=dict(nohash=True,
),
threshold=dict(mandatory=False,
),
white=dict(mandatory=True,
),
)
inputs = Label2Label.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Label2Label_outputs():
output_map = dict(out_file=dict(),
)
outputs = Label2Label.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
yonatang/networks-pa1 | pox/nom_l2_switch_controller/__init__.py | 7 | 2390 | # Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This package contains a nom-based L2 learning switch.
"""
from pox.core import core
log = core.getLogger("nom_l2_switch_controller.init")
def launch (distributed=False):
# TODO: need a more transparent mechanism for specifying the debug flag...
"""
Starts a NOM-based L2 learning switch, along with the discovery and topology modules
"""
if type(distributed) == bool and distributed:
distributed = 1
elif type(distributed) == str:
distributed = int(distributed)
import pox.openflow.connection_arbiter
pox.openflow.connection_arbiter.launch()
import pox.openflow.topology
pox.openflow.topology.launch()
import pox.topology
pox.topology.launch()
import pox.openflow.discovery
pox.openflow.discovery.launch()
from pox.core import core
if distributed:
import pox.controllers.nom_server as nom_server
# creates listening tcp socket via messenger
# sends serialized master topology to connecting client controllers
nom_server.launch()
import distributed_nom_l2_switch_controller
for id in range(0, distributed):
      # TODO: not sure if I should be registering these with core
      # (name conflict, and not suitable for emulation with a true distributed controller)
# for now this is just to keep the controllers from being garbage collected
name = "controller#%d" % id
      # each controller sends a get request to the nom_server on initialization
core.register(name, distributed_nom_l2_switch_controller.nom_l2_switch_controller(name))
else:
# not distributed
import nom_l2_switch_controller
core.registerNew(nom_l2_switch_controller.nom_l2_switch_controller)
log.info("l2_switch_controller launch completed")
| gpl-3.0 |
i/thrift | test/crossrunner/prepare.py | 50 | 1686 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import subprocess
from crossrunner.collect import collect_testlibs
def prepare(config_dict, testdir, server_match, client_match):
libs, libs2 = collect_testlibs(config_dict, server_match, client_match)
libs.extend(libs2)
def prepares():
for lib in libs:
pre = lib.get('prepare')
if pre:
yield pre, lib['workdir']
def files():
for lib in libs:
workdir = os.path.join(testdir, lib['workdir'])
for c in lib['command']:
if not c.startswith('-'):
p = os.path.join(workdir, c)
if not os.path.exists(p):
yield os.path.split(p)
def make(p):
d, f = p
with open(os.devnull, 'w') as devnull:
return subprocess.Popen(['make', f], cwd=d, stderr=devnull)
for pre, d in prepares():
subprocess.Popen(pre, cwd=d).wait()
for p in list(map(make, set(files()))):
p.wait()
return True
| apache-2.0 |
stitchfix/pybossa | test/factories/blogpost_factory.py | 2 | 1283 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from pybossa.model.blogpost import Blogpost
from . import BaseFactory, factory
class BlogpostFactory(BaseFactory):
class Meta:
model = Blogpost
id = factory.Sequence(lambda n: n)
title = u'Blogpost title'
body = u'Blogpost body text'
app = factory.SubFactory('factories.AppFactory')
app_id = factory.LazyAttribute(lambda blogpost: blogpost.app.id)
owner = factory.SelfAttribute('app.owner')
user_id = factory.LazyAttribute(
lambda blogpost: blogpost.owner.id if blogpost.owner else None)
| agpl-3.0 |
linux2400/django-recurrence | tests/test_managers_recurrence.py | 5 | 3947 | from datetime import datetime
from django.utils.timezone import make_aware
from recurrence import choices
from recurrence.models import Date, Recurrence, Rule
import pytest
import pytz
import recurrence
def aware(dt):
return make_aware(dt, pytz.utc)
@pytest.mark.django_db
def test_recurrence_to_recurrence_object():
limits = Recurrence.objects.create()
Rule.objects.create(
recurrence=limits,
mode=choices.INCLUSION,
freq=recurrence.WEEKLY
)
object = limits.to_recurrence_object()
assert [r.to_text() for r in object.rrules] == ['weekly']
assert object.exrules == []
assert object.rdates == []
assert object.exdates == []
@pytest.mark.django_db
def test_recurrence_to_recurrence_object_complex():
limits = Recurrence.objects.create(
dtstart=datetime(2014, 1, 1, 0, 0, 0),
dtend=datetime(2014, 12, 31, 0, 0, 0),
)
Rule.objects.create(
recurrence=limits,
mode=choices.INCLUSION,
freq=recurrence.WEEKLY,
until=aware(datetime(2014, 12, 31, 0, 0, 0))
)
Rule.objects.create(
recurrence=limits,
mode=choices.EXCLUSION,
freq=recurrence.MONTHLY,
until=aware(datetime(2013, 12, 31, 0, 0, 0))
)
Date.objects.create(
recurrence=limits,
mode=choices.INCLUSION,
dt=aware(datetime(2012, 12, 31, 0, 0, 0))
)
Date.objects.create(
recurrence=limits,
mode=choices.EXCLUSION,
dt=aware(datetime(2011, 12, 31, 0, 0, 0))
)
object = limits.to_recurrence_object()
assert object.dtstart == aware(datetime(2014, 1, 1, 0, 0, 0))
assert object.dtend == aware(datetime(2014, 12, 31, 0, 0, 0))
assert len(object.rrules) == 1
output_rule = object.rrules[0]
assert output_rule.freq == recurrence.WEEKLY
assert output_rule.until == aware(datetime(2014, 12, 31, 0, 0, 0))
assert len(object.exrules) == 1
output_rule = object.exrules[0]
assert output_rule.freq == recurrence.MONTHLY
assert output_rule.until == aware(datetime(2013, 12, 31, 0, 0, 0))
@pytest.mark.django_db
def test_recurrence_to_recurrence_object_non_naive_sd_ed():
limits = Recurrence.objects.create(
dtstart=aware(datetime(2014, 1, 1, 0, 0, 0)),
dtend=aware(datetime(2014, 12, 31, 0, 0, 0)),
)
object = limits.to_recurrence_object()
assert object.dtstart == aware(datetime(2014, 1, 1, 0, 0, 0))
assert object.dtend == aware(datetime(2014, 12, 31, 0, 0, 0))
@pytest.mark.django_db
def test_create_from_recurrence_object():
inrule = recurrence.Rule(
recurrence.WEEKLY
)
exrule = recurrence.Rule(
recurrence.MONTHLY
)
limits = recurrence.Recurrence(
dtstart=datetime(2014, 1, 1, 0, 0, 0),
dtend=datetime(2014, 2, 3, 0, 0, 0),
rrules=[inrule],
exrules=[exrule],
rdates=[datetime(2014, 2, 15, 0, 0, 0)],
exdates=[aware(datetime(2014, 11, 29, 0, 0, 0))]
)
object = Recurrence.objects.create_from_recurrence_object(limits)
assert object.dtstart == aware(datetime(2014, 1, 1, 0, 0, 0))
assert object.dtend == aware(datetime(2014, 2, 3, 0, 0, 0))
rules = object.rules.all()
assert len(rules) == 2
in_rules = [r for r in rules if r.mode == choices.INCLUSION]
out_rules = [r for r in rules if r.mode == choices.EXCLUSION]
assert len(in_rules) == 1
assert len(out_rules) == 1
assert in_rules[0].freq == recurrence.WEEKLY
assert out_rules[0].freq == recurrence.MONTHLY
dates = object.dates.all()
assert len(dates) == 2
in_dates = [d for d in dates if d.mode == choices.INCLUSION]
out_dates = [d for d in dates if d.mode == choices.EXCLUSION]
assert len(in_dates) == 1
assert len(out_dates) == 1
assert in_dates[0].dt == aware(datetime(2014, 2, 15, 0, 0, 0))
assert out_dates[0].dt == aware(datetime(2014, 11, 29, 0, 0, 0))
| bsd-3-clause |
argriffing/numpy | numpy/core/numerictypes.py | 56 | 28786 | """
numerictypes: Define the numeric type objects
This module is designed so "from numerictypes import \\*" is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
datetime64 timedelta64
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number (kind=i)
| integer
| signedinteger (intxx)
| byte
| short
| intc
| intp int0
| int_
| longlong
+-> unsignedinteger (uintxx) (kind=u)
| ubyte
| ushort
| uintc
| uintp uint0
| uint_
| ulonglong
+-> inexact
| +-> floating (floatxx) (kind=f)
| | half
| | single
| | float_ (double)
| | longfloat
| \\-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| character
| void (kind=V)
|
| str_ (string_, bytes_) (kind=S) [Python 2]
| unicode_ (kind=U) [Python 2]
|
| bytes_ (string_) (kind=S) [Python 3]
| str_ (unicode_) (kind=U) [Python 3]
|
\\-> object_ (not used much) (kind=O)
"""
from __future__ import division, absolute_import, print_function
import types as _types
import sys
import numbers
from numpy.compat import bytes, long
from numpy.core.multiarray import (
typeinfo, ndarray, array, empty, dtype, datetime_data,
datetime_as_string, busday_offset, busday_count, is_busday,
busdaycalendar
)
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
'issubdtype', 'datetime_data', 'datetime_as_string',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
# String-handling utilities to avoid locale-dependence.
# "import string" is costly to import!
# Construct the translation tables directly
# "A" = chr(65), "a" = chr(97)
_all_chars = [chr(_m) for _m in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
def english_lower(s):
""" Apply English case rules to convert ASCII strings to all lower case.
This is an internal utility function to replace calls to str.lower() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
lowered : str
Examples
--------
>>> from numpy.core.numerictypes import english_lower
>>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
>>> english_lower('')
''
"""
lowered = s.translate(LOWER_TABLE)
return lowered
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy.core.numerictypes import english_upper
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
"""
uppered = s.translate(UPPER_TABLE)
return uppered
def english_capitalize(s):
""" Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy.core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
''
"""
if s:
return english_upper(s[0]) + s[1:]
else:
return s
sctypeDict = {} # Contains all leaf-node scalar types with aliases
sctypeNA = {}        # Contains all leaf-node types -> numarray type equivalences
allTypes = {} # Collect the types we will add to the module here
def _evalname(name):
k = 0
for ch in name:
if ch in '0123456789':
break
k += 1
try:
bits = int(name[k:])
except ValueError:
bits = 0
base = name[:k]
return base, bits
def bitname(obj):
"""Return a bit-width name for a given type object"""
name = obj.__name__
base = ''
char = ''
try:
if name[-1] == '_':
newname = name[:-1]
else:
newname = name
info = typeinfo[english_upper(newname)]
assert(info[-1] == obj) # sanity check
bits = info[2]
except KeyError: # bit-width name
base, bits = _evalname(name)
char = base[0]
if name == 'bool_':
char = 'b'
base = 'bool'
elif name == 'void':
char = 'V'
base = 'void'
elif name == 'object_':
char = 'O'
base = 'object'
bits = 0
elif name == 'datetime64':
char = 'M'
elif name == 'timedelta64':
char = 'm'
if sys.version_info[0] >= 3:
if name == 'bytes_':
char = 'S'
base = 'bytes'
elif name == 'str_':
char = 'U'
base = 'str'
else:
if name == 'string_':
char = 'S'
base = 'string'
elif name == 'unicode_':
char = 'U'
base = 'unicode'
bytes = bits // 8
if char != '' and bytes != 0:
char = "%s%d" % (char, bytes)
return base, bits, char
def _add_types():
for a in typeinfo.keys():
name = english_lower(a)
if isinstance(typeinfo[a], tuple):
typeobj = typeinfo[a][-1]
# define C-name and insert typenum and typechar references also
allTypes[name] = typeobj
sctypeDict[name] = typeobj
sctypeDict[typeinfo[a][0]] = typeobj
sctypeDict[typeinfo[a][1]] = typeobj
else: # generic class
allTypes[name] = typeinfo[a]
_add_types()
def _add_aliases():
for a in typeinfo.keys():
name = english_lower(a)
if not isinstance(typeinfo[a], tuple):
continue
typeobj = typeinfo[a][-1]
# insert bit-width version for this class (if relevant)
base, bit, char = bitname(typeobj)
if base[-3:] == 'int' or char[0] in 'ui':
continue
if base != '':
myname = "%s%d" % (base, bit)
if ((name != 'longdouble' and name != 'clongdouble') or
myname not in allTypes.keys()):
allTypes[myname] = typeobj
sctypeDict[myname] = typeobj
if base == 'complex':
na_name = '%s%d' % (english_capitalize(base), bit//2)
elif base == 'bool':
na_name = english_capitalize(base)
sctypeDict[na_name] = typeobj
else:
na_name = "%s%d" % (english_capitalize(base), bit)
sctypeDict[na_name] = typeobj
sctypeNA[na_name] = typeobj
sctypeDict[na_name] = typeobj
sctypeNA[typeobj] = na_name
sctypeNA[typeinfo[a][0]] = na_name
if char != '':
sctypeDict[char] = typeobj
sctypeNA[char] = na_name
_add_aliases()
# Integers are handled so that the int32 and int64 types should agree
# exactly with NPY_INT32, NPY_INT64. We need to enforce the same checking
# as is done in arrayobject.h where the order of getting a bit-width match
# is long, longlong, int, short, char.
def _add_integer_aliases():
_ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
for ctype in _ctypes:
val = typeinfo[ctype]
bits = val[2]
charname = 'i%d' % (bits//8,)
ucharname = 'u%d' % (bits//8,)
intname = 'int%d' % bits
UIntname = 'UInt%d' % bits
Intname = 'Int%d' % bits
uval = typeinfo['U'+ctype]
typeobj = val[-1]
utypeobj = uval[-1]
if intname not in allTypes.keys():
uintname = 'uint%d' % bits
allTypes[intname] = typeobj
allTypes[uintname] = utypeobj
sctypeDict[intname] = typeobj
sctypeDict[uintname] = utypeobj
sctypeDict[Intname] = typeobj
sctypeDict[UIntname] = utypeobj
sctypeDict[charname] = typeobj
sctypeDict[ucharname] = utypeobj
sctypeNA[Intname] = typeobj
sctypeNA[UIntname] = utypeobj
sctypeNA[charname] = typeobj
sctypeNA[ucharname] = utypeobj
sctypeNA[typeobj] = Intname
sctypeNA[utypeobj] = UIntname
sctypeNA[val[0]] = Intname
sctypeNA[uval[0]] = UIntname
_add_integer_aliases()
# We use these later
void = allTypes['void']
generic = allTypes['generic']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
type_pairs = [('complex_', 'cdouble'),
('int0', 'intp'),
('uint0', 'uintp'),
('single', 'float'),
('csingle', 'cfloat'),
('singlecomplex', 'cfloat'),
('float_', 'double'),
('intc', 'int'),
('uintc', 'uint'),
('int_', 'long'),
('uint', 'ulong'),
('cfloat', 'cdouble'),
('longfloat', 'longdouble'),
('clongfloat', 'clongdouble'),
('longcomplex', 'clongdouble'),
('bool_', 'bool'),
('unicode_', 'unicode'),
('object_', 'object')]
if sys.version_info[0] >= 3:
type_pairs.extend([('bytes_', 'string'),
('str_', 'unicode'),
('string_', 'string')])
else:
type_pairs.extend([('str_', 'string'),
('string_', 'string'),
('bytes_', 'string')])
for alias, t in type_pairs:
allTypes[alias] = allTypes[t]
sctypeDict[alias] = sctypeDict[t]
# Remove aliases overriding python types and modules
to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float',
'complex', 'bool', 'string', 'datetime', 'timedelta']
if sys.version_info[0] >= 3:
# Py3K
to_remove.append('bytes')
to_remove.append('str')
to_remove.remove('unicode')
to_remove.remove('long')
for t in to_remove:
try:
del allTypes[t]
del sctypeDict[t]
except KeyError:
pass
_set_up_aliases()
# Now, construct dictionary to lookup character codes from types
_sctype2char_dict = {}
def _construct_char_code_lookup():
for name in typeinfo.keys():
tup = typeinfo[name]
if isinstance(tup, tuple):
if tup[0] not in ['p', 'P']:
_sctype2char_dict[tup[-1]] = tup[0]
_construct_char_code_lookup()
sctypes = {'int': [],
'uint':[],
'float':[],
'complex':[],
'others':[bool, object, bytes, unicode, void]}
def _add_array_type(typename, bits):
try:
t = allTypes['%s%d' % (typename, bits)]
except KeyError:
pass
else:
sctypes[typename].append(t)
def _set_array_types():
ibytes = [1, 2, 4, 8, 16, 32, 64]
fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
for bytes in ibytes:
bits = 8*bytes
_add_array_type('int', bits)
_add_array_type('uint', bits)
for bytes in fbytes:
bits = 8*bytes
_add_array_type('float', bits)
_add_array_type('complex', 2*bits)
_gi = dtype('p')
if _gi.type not in sctypes['int']:
indx = 0
sz = _gi.itemsize
_lst = sctypes['int']
while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
indx += 1
sctypes['int'].insert(indx, _gi.type)
sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'int128',
'uint128', 'float16',
'float32', 'float64', 'float80', 'float96', 'float128',
'float256',
'complex32', 'complex64', 'complex128', 'complex160',
'complex192', 'complex256', 'complex512', 'object']
def maximum_sctype(t):
"""
Return the scalar type of highest precision of the same kind as the input.
Parameters
----------
t : dtype or dtype specifier
The input data type. This can be a `dtype` object or an object that
is convertible to a `dtype`.
Returns
-------
out : dtype
The highest precision data type of the same kind (`dtype.kind`) as `t`.
See Also
--------
obj2sctype, mintypecode, sctype2char
dtype
Examples
--------
>>> np.maximum_sctype(np.int)
<type 'numpy.int64'>
>>> np.maximum_sctype(np.uint8)
<type 'numpy.uint64'>
>>> np.maximum_sctype(np.complex)
<type 'numpy.complex192'>
>>> np.maximum_sctype(str)
<type 'numpy.string_'>
>>> np.maximum_sctype('i2')
<type 'numpy.int64'>
>>> np.maximum_sctype('f4')
<type 'numpy.float96'>
"""
g = obj2sctype(t)
if g is None:
return t
t = g
name = t.__name__
base, bits = _evalname(name)
if bits == 0:
return t
else:
return sctypes[base][-1]
try:
buffer_type = _types.BufferType
except AttributeError:
# Py3K
buffer_type = memoryview
_python_types = {int: 'int_',
float: 'float_',
complex: 'complex_',
bool: 'bool_',
bytes: 'bytes_',
unicode: 'unicode_',
buffer_type: 'void',
}
if sys.version_info[0] >= 3:
def _python_type(t):
"""returns the type corresponding to a certain Python type"""
if not isinstance(t, type):
t = type(t)
return allTypes[_python_types.get(t, 'object_')]
else:
def _python_type(t):
"""returns the type corresponding to a certain Python type"""
if not isinstance(t, _types.TypeType):
t = type(t)
return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
"""
Determines whether the given object represents a scalar data-type.
Parameters
----------
rep : any
If `rep` is an instance of a scalar dtype, True is returned. If not,
False is returned.
Returns
-------
out : bool
Boolean result of check whether `rep` is a scalar dtype.
See Also
--------
issubsctype, issubdtype, obj2sctype, sctype2char
Examples
--------
>>> np.issctype(np.int32)
True
>>> np.issctype(list)
False
>>> np.issctype(1.1)
False
Strings are also a scalar type:
>>> np.issctype(np.dtype('str'))
True
"""
if not isinstance(rep, (type, dtype)):
return False
try:
res = obj2sctype(rep)
if res and res != object_:
return True
return False
except:
return False
def obj2sctype(rep, default=None):
"""
Return the scalar dtype or NumPy equivalent of Python type of an object.
Parameters
----------
rep : any
The object of which the type is returned.
default : any, optional
If given, this is returned for objects whose types can not be
determined. If not given, None is returned for those objects.
Returns
-------
dtype : dtype or Python type
The data type of `rep`.
See Also
--------
sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
Examples
--------
>>> np.obj2sctype(np.int32)
<type 'numpy.int32'>
>>> np.obj2sctype(np.array([1., 2.]))
<type 'numpy.float64'>
>>> np.obj2sctype(np.array([1.j]))
<type 'numpy.complex128'>
>>> np.obj2sctype(dict)
<type 'numpy.object_'>
>>> np.obj2sctype('string')
<type 'numpy.string_'>
>>> np.obj2sctype(1, default=list)
<type 'list'>
"""
try:
if issubclass(rep, generic):
return rep
except TypeError:
pass
if isinstance(rep, dtype):
return rep.type
if isinstance(rep, type):
return _python_type(rep)
if isinstance(rep, ndarray):
return rep.dtype.type
try:
res = dtype(rep)
except:
return default
return res.type
def issubclass_(arg1, arg2):
"""
Determine if a class is a subclass of a second class.
`issubclass_` is equivalent to the Python built-in ``issubclass``,
except that it returns False instead of raising a TypeError if one
of the arguments is not a class.
Parameters
----------
arg1 : class
Input class. True is returned if `arg1` is a subclass of `arg2`.
arg2 : class or tuple of classes.
Input class. If a tuple of classes, True is returned if `arg1` is a
subclass of any of the tuple elements.
Returns
-------
out : bool
Whether `arg1` is a subclass of `arg2` or not.
See Also
--------
issubsctype, issubdtype, issctype
Examples
--------
>>> np.issubclass_(np.int32, np.int)
True
>>> np.issubclass_(np.int32, np.float)
False
"""
try:
return issubclass(arg1, arg2)
except TypeError:
return False
def issubsctype(arg1, arg2):
"""
Determine if the first argument is a subclass of the second argument.
Parameters
----------
arg1, arg2 : dtype or dtype specifier
Data-types.
Returns
-------
out : bool
The result.
See Also
--------
issctype, issubdtype,obj2sctype
Examples
--------
>>> np.issubsctype('S8', str)
True
>>> np.issubsctype(np.array([1]), np.int)
True
>>> np.issubsctype(np.array([1]), np.float)
False
"""
return issubclass(obj2sctype(arg1), obj2sctype(arg2))
def issubdtype(arg1, arg2):
"""
Returns True if first argument is a typecode lower/equal in type hierarchy.
Parameters
----------
arg1, arg2 : dtype_like
dtype or string representing a typecode.
Returns
-------
out : bool
See Also
--------
issubsctype, issubclass_
numpy.core.numerictypes : Overview of numpy type hierarchy.
Examples
--------
>>> np.issubdtype('S1', str)
True
>>> np.issubdtype(np.float64, np.float32)
False
"""
if issubclass_(arg2, generic):
return issubclass(dtype(arg1).type, arg2)
mro = dtype(arg2).type.mro()
if len(mro) > 1:
val = mro[1]
else:
val = mro[0]
return issubclass(dtype(arg1).type, val)
# This dictionary allows look up based on any alias for an array data-type
class _typedict(dict):
"""
Base object for a dictionary for look-up with any alias for an array dtype.
Instances of `_typedict` can not be used as dictionaries directly,
first they have to be populated.
"""
def __getitem__(self, obj):
return dict.__getitem__(self, obj2sctype(obj))
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
for name, val in typeinfo.items():
if not isinstance(val, tuple):
continue
obj = val[-1]
nbytes[obj] = val[2] // 8
_alignment[obj] = val[3]
if (len(val) > 5):
_maxvals[obj] = val[4]
_minvals[obj] = val[5]
else:
_maxvals[obj] = None
_minvals[obj] = None
_construct_lookups()
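
# Illustrative sketch of the alias-aware lookup above: once the tables are
# populated, any registered spelling of a type resolves to the same leaf
# scalar type, e.g.
#
#     nbytes[dtype('int32')]   # -> 4
#     nbytes['float64']        # -> 8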
def sctype2char(sctype):
"""
Return the string representation of a scalar dtype.
Parameters
----------
sctype : scalar dtype or object
If a scalar dtype, the corresponding string character is
returned. If an object, `sctype2char` tries to infer its scalar type
and then return the corresponding string character.
Returns
-------
typechar : str
The string character corresponding to the scalar type.
Raises
------
ValueError
If `sctype` is an object for which the type can not be inferred.
See Also
--------
obj2sctype, issctype, issubsctype, mintypecode
Examples
--------
>>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]:
... print(np.sctype2char(sctype))
l
d
D
S
O
>>> x = np.array([1., 2-1.j])
>>> np.sctype2char(x)
'D'
>>> np.sctype2char(list)
'O'
"""
sctype = obj2sctype(sctype)
if sctype is None:
raise ValueError("unrecognized type")
return _sctype2char_dict[sctype]
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
cast = _typedict()
try:
ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
_types.LongType, _types.BooleanType,
_types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
# Py3K
ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
# Create the typestring lookup dictionary
_typestr = _typedict()
for key in _sctype2char_dict.keys():
if issubclass(key, allTypes['flexible']):
_typestr[key] = _sctype2char_dict[key]
else:
_typestr[key] = empty((1,), key).dtype.str[1:]
# Make sure all typestrings are in sctypeDict
for key, val in _typestr.items():
if val not in sctypeDict:
sctypeDict[val] = key
# Add additional strings to the sctypeDict
if sys.version_info[0] >= 3:
_toadd = ['int', 'float', 'complex', 'bool', 'object',
'str', 'bytes', 'object', ('a', allTypes['bytes_'])]
else:
_toadd = ['int', 'float', 'complex', 'bool', 'object', 'string',
('str', allTypes['string_']),
'unicode', 'object', ('a', allTypes['string_'])]
for name in _toadd:
if isinstance(name, tuple):
sctypeDict[name[0]] = name[1]
else:
sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
# Now add the types we've determined to this module
for key in allTypes:
globals()[key] = allTypes[key]
__all__.append(key)
del key
typecodes = {'Character':'c',
'Integer':'bhilqp',
'UnsignedInteger':'BHILQP',
'Float':'efdg',
'Complex':'FDG',
'AllInteger':'bBhHiIlLqQpP',
'AllFloat':'efdgFDG',
'Datetime': 'Mm',
'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
# backwards compatibility --- deprecated name
typeDict = sctypeDict
typeNA = sctypeNA
# b -> boolean
# u -> unsigned integer
# i -> signed integer
# f -> floating point
# c -> complex
# M -> datetime
# m -> timedelta
# S -> string
# U -> Unicode string
# V -> record
# O -> Python object
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
# Keep incrementing until a common type both can be coerced to
# is found. Otherwise, return None
def _find_common_coerce(a, b):
if a > b:
return a
try:
thisind = __test_types.index(a.char)
except ValueError:
return None
return _can_coerce_all([a, b], start=thisind)
# Find a data-type that all data-types in a list can be coerced to
def _can_coerce_all(dtypelist, start=0):
N = len(dtypelist)
if N == 0:
return None
if N == 1:
return dtypelist[0]
thisind = start
while thisind < __len_test_types:
newdtype = dtype(__test_types[thisind])
numcoerce = len([x for x in dtypelist if newdtype >= x])
if numcoerce == N:
return newdtype
thisind += 1
return None
def _register_types():
numbers.Integral.register(integer)
numbers.Complex.register(inexact)
numbers.Real.register(floating)
_register_types()
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules.
Parameters
----------
array_types : sequence
A list of dtypes or dtype convertible objects representing arrays.
scalar_types : sequence
A list of dtypes or dtype convertible objects representing scalars.
Returns
-------
datatype : dtype
The common data type, which is the maximum of `array_types` ignoring
`scalar_types`, unless the maximum of `scalar_types` is of a
different kind (`dtype.kind`). If the kind is not understood, then
None is returned.
See Also
--------
dtype, common_type, can_cast, mintypecode
Examples
--------
>>> np.find_common_type([], [np.int64, np.float32, np.complex])
dtype('complex128')
>>> np.find_common_type([np.int64, np.float32], [])
dtype('float64')
The standard casting rules ensure that a scalar cannot up-cast an
array unless the scalar is of a fundamentally different kind of data
    (i.e. under a different hierarchy in the data type hierarchy) than
the array:
>>> np.find_common_type([np.float32], [np.int64, np.float64])
dtype('float32')
Complex is of a different type, so it up-casts the float in the
`array_types` argument:
>>> np.find_common_type([np.float32], [np.complex])
dtype('complex128')
Type specifier strings are convertible to dtypes and can therefore
be used instead of dtypes:
>>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
dtype('complex128')
"""
array_types = [dtype(x) for x in array_types]
scalar_types = [dtype(x) for x in scalar_types]
maxa = _can_coerce_all(array_types)
maxsc = _can_coerce_all(scalar_types)
if maxa is None:
return maxsc
if maxsc is None:
return maxa
try:
index_a = _kind_list.index(maxa.kind)
index_sc = _kind_list.index(maxsc.kind)
except ValueError:
return None
if index_sc > index_a:
return _find_common_coerce(maxsc, maxa)
else:
return maxa
| bsd-3-clause |
zhm/s3_cmd_bin | resources/S3/BidirMap.py | 15 | 1069 | ## Amazon S3 manager
## Author: Michal Ludvig <michal@logix.cz>
## http://www.logix.cz/michal
## License: GPL Version 2
class BidirMap(object):
def __init__(self, **map):
self.k2v = {}
self.v2k = {}
for key in map:
self.__setitem__(key, map[key])
def __setitem__(self, key, value):
if self.v2k.has_key(value):
if self.v2k[value] != key:
raise KeyError("Value '"+str(value)+"' already in use with key '"+str(self.v2k[value])+"'")
try:
del(self.v2k[self.k2v[key]])
except KeyError:
pass
self.k2v[key] = value
self.v2k[value] = key
def __getitem__(self, key):
return self.k2v[key]
def __str__(self):
return self.v2k.__str__()
def getkey(self, value):
return self.v2k[value]
def getvalue(self, key):
return self.k2v[key]
def keys(self):
return [key for key in self.k2v]
def values(self):
return [value for value in self.v2k]
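
if __name__ == "__main__":
    # Minimal illustrative sketch (not part of the original module): the same
    # mapping can be read in either direction.
    acl = BidirMap(READ="AllUsers", WRITE="Owner")
    assert acl["READ"] == "AllUsers"
    assert acl.getkey("Owner") == "WRITE"
    assert acl.getvalue("WRITE") == "Owner"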
# vim:et:ts=4:sts=4:ai
| mit |
Workday/OpenFrame | tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/ec2/autoscale/__init__.py | 114 | 1098 | # Copyright (c) 2011 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
| bsd-3-clause |
yongshengwang/builthue | desktop/core/ext-py/pysqlite/pysqlite2/dump.py | 247 | 2350 | # Mimic the sqlite3 console shell's .dump command
# Author: Paul Kippes <kippesp@gmail.com>
def _iterdump(connection):
"""
Returns an iterator to the dump of the database in an SQL text format.
Used to produce an SQL dump of the database. Useful to save an in-memory
database for later restoration. This function should not be called
directly but instead called from the Connection method, iterdump().
"""
cu = connection.cursor()
yield('BEGIN TRANSACTION;')
# sqlite_master table contains the SQL CREATE statements for the database.
q = """
SELECT name, type, sql
FROM sqlite_master
WHERE sql NOT NULL AND
type == 'table'
"""
schema_res = cu.execute(q)
for table_name, type, sql in schema_res.fetchall():
if table_name == 'sqlite_sequence':
yield('DELETE FROM sqlite_sequence;')
elif table_name == 'sqlite_stat1':
yield('ANALYZE sqlite_master;')
elif table_name.startswith('sqlite_'):
continue
# NOTE: Virtual table support not implemented
#elif sql.startswith('CREATE VIRTUAL TABLE'):
# qtable = table_name.replace("'", "''")
# yield("INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"\
# "VALUES('table','%s','%s',0,'%s');" %
# qtable,
# qtable,
# sql.replace("''"))
else:
yield('%s;' % sql)
# Build the insert statement for each row of the current table
res = cu.execute("PRAGMA table_info('%s')" % table_name)
column_names = [str(table_info[1]) for table_info in res.fetchall()]
q = "SELECT 'INSERT INTO \"%(tbl_name)s\" VALUES("
q += ",".join(["'||quote(" + col + ")||'" for col in column_names])
q += ")' FROM '%(tbl_name)s'"
query_res = cu.execute(q % {'tbl_name': table_name})
for row in query_res:
yield("%s;" % row[0])
# Now when the type is 'index', 'trigger', or 'view'
q = """
SELECT name, type, sql
FROM sqlite_master
WHERE sql NOT NULL AND
type IN ('index', 'trigger', 'view')
"""
schema_res = cu.execute(q)
for name, type, sql in schema_res.fetchall():
yield('%s;' % sql)
yield('COMMIT;')
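# --- Hedged usage sketch (an addition, not from the upstream pysqlite file):
# per the docstring above, this generator backs Connection.iterdump(), so a
# typical round trip looks roughly like the following. Assumes the pysqlite2
# package is importable; the table name and row are made-up examples.
if __name__ == "__main__":
    from pysqlite2 import dbapi2 as sqlite3
    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)")
    con.execute("INSERT INTO demo (name) VALUES ('alpha')")
    con.commit()
    for line in con.iterdump():
        # prints BEGIN TRANSACTION;, the CREATE/INSERT statements, then COMMIT;
        print(line)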
| apache-2.0 |
Manojkumar91/odoo_inresto | addons/hr/hr.py | 4 | 19828 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.modules.module import get_module_resource
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class hr_employee_category(osv.Model):
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_name = "hr.employee.category"
_description = "Employee Category"
_columns = {
'name': fields.char("Employee Tag", required=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('hr.employee.category', 'Parent Employee Tag', select=True),
'child_ids': fields.one2many('hr.employee.category', 'parent_id', 'Child Categories'),
'employee_ids': fields.many2many('hr.employee', 'employee_category_rel', 'category_id', 'emp_id', 'Employees'),
}
_constraints = [
(osv.osv._check_recursion, _('Error! You cannot create recursive category.'), ['parent_id'])
]
class hr_job(osv.Model):
def _get_nbr_employees(self, cr, uid, ids, name, args, context=None):
res = {}
for job in self.browse(cr, uid, ids, context=context):
nb_employees = len(job.employee_ids or [])
res[job.id] = {
'no_of_employee': nb_employees,
'expected_employees': nb_employees + job.no_of_recruitment,
}
return res
def _get_job_position(self, cr, uid, ids, context=None):
res = []
for employee in self.pool.get('hr.employee').browse(cr, uid, ids, context=context):
if employee.job_id:
res.append(employee.job_id.id)
return res
_name = "hr.job"
_description = "Job Position"
_inherit = ['mail.thread']
_columns = {
'name': fields.char('Job Name', required=True, select=True, translate=True),
'expected_employees': fields.function(_get_nbr_employees, string='Total Forecasted Employees',
help='Expected number of employees for this job position after new recruitment.',
store = {
'hr.job': (lambda self,cr,uid,ids,c=None: ids, ['no_of_recruitment'], 10),
'hr.employee': (_get_job_position, ['job_id'], 10),
}, type='integer',
multi='_get_nbr_employees'),
'no_of_employee': fields.function(_get_nbr_employees, string="Current Number of Employees",
help='Number of employees currently occupying this job position.',
store = {
'hr.employee': (_get_job_position, ['job_id'], 10),
}, type='integer',
multi='_get_nbr_employees'),
'no_of_recruitment': fields.integer('Expected New Employees', copy=False,
help='Number of new employees you expect to recruit.'),
'no_of_hired_employee': fields.integer('Hired Employees', copy=False,
help='Number of hired employees for this job position during recruitment phase.'),
'employee_ids': fields.one2many('hr.employee', 'job_id', 'Employees', groups='base.group_user'),
'description': fields.text('Job Description'),
'requirements': fields.text('Requirements'),
'department_id': fields.many2one('hr.department', 'Department'),
'company_id': fields.many2one('res.company', 'Company'),
'state': fields.selection([('recruit', 'Recruitment in Progress'), ('open', 'Recruitment Closed')],
string='Status', readonly=True, required=True,
track_visibility='always', copy=False,
help="Set whether the recruitment process is open or closed for this job position."),
'write_date': fields.datetime('Update Date', readonly=True),
}
_defaults = {
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'hr.job', context=ctx),
'state': 'recruit',
'no_of_recruitment' : 1,
}
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id, department_id)', 'The name of the job position must be unique per department in company!'),
]
def set_recruit(self, cr, uid, ids, context=None):
for job in self.browse(cr, uid, ids, context=context):
no_of_recruitment = job.no_of_recruitment == 0 and 1 or job.no_of_recruitment
self.write(cr, uid, [job.id], {'state': 'recruit', 'no_of_recruitment': no_of_recruitment}, context=context)
return True
def set_open(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'open',
'no_of_recruitment': 0,
'no_of_hired_employee': 0
}, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if 'name' not in default:
job = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % (job.name)
return super(hr_job, self).copy(cr, uid, id, default=default, context=context)
# ----------------------------------------
# Compatibility methods
# ----------------------------------------
_no_of_employee = _get_nbr_employees # v7 compatibility
job_open = set_open # v7 compatibility
job_recruitment = set_recruit # v7 compatibility
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_order = 'name_related'
_inherits = {'resource.resource': "resource_id"}
_inherit = ['mail.thread']
_mail_post_access = 'read'
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
# we need a related field in order to be able to sort the employee by name
'name_related': fields.related('resource_id', 'name', type='char', string='Name', readonly=True, store=True),
'country_id': fields.many2one('res.country', 'Nationality (Country)'),
'birthday': fields.date("Date of Birth"),
'ssnid': fields.char('SSN No', help='Social Security Number'),
'sinid': fields.char('SIN No', help="Social Insurance Number"),
'identification_id': fields.char('Identification No'),
'otherid': fields.char('Other Id'),
'gender': fields.selection([('male', 'Male'), ('female', 'Female'), ('other', 'Other')], 'Gender'),
'marital': fields.selection([('single', 'Single'), ('married', 'Married'), ('widower', 'Widower'), ('divorced', 'Divorced')], 'Marital Status'),
'department_id': fields.many2one('hr.department', 'Department'),
'address_id': fields.many2one('res.partner', 'Working Address'),
'address_home_id': fields.many2one('res.partner', 'Home Address'),
'bank_account_id': fields.many2one('res.partner.bank', 'Bank Account Number', domain="[('partner_id','=',address_home_id)]", help="Employee bank salary account"),
'work_phone': fields.char('Work Phone', readonly=False),
'mobile_phone': fields.char('Work Mobile', readonly=False),
'work_email': fields.char('Work Email', size=240),
'work_location': fields.char('Work Location'),
'notes': fields.text('Notes'),
'parent_id': fields.many2one('hr.employee', 'Manager'),
'category_ids': fields.many2many('hr.employee.category', 'employee_category_rel', 'emp_id', 'category_id', 'Tags'),
'child_ids': fields.one2many('hr.employee', 'parent_id', 'Subordinates'),
'resource_id': fields.many2one('resource.resource', 'Resource', ondelete='cascade', required=True, auto_join=True),
'coach_id': fields.many2one('hr.employee', 'Coach'),
'job_id': fields.many2one('hr.job', 'Job Title'),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the employee, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store = {
'hr.employee': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the employee. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store = {
'hr.employee': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the employee. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'passport_id': fields.char('Passport No'),
'color': fields.integer('Color Index'),
'city': fields.related('address_id', 'city', type='char', string='City'),
'login': fields.related('user_id', 'login', type='char', string='Login', readonly=1),
'last_login': fields.related('user_id', 'date', type='datetime', string='Latest Connection', readonly=1),
}
def _get_default_image(self, cr, uid, context=None):
image_path = get_module_resource('hr', 'static/src/img', 'default_image.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
_defaults = {
'active': 1,
'image': _get_default_image,
'color': 0,
}
def unlink(self, cr, uid, ids, context=None):
resource_ids = []
for employee in self.browse(cr, uid, ids, context=context):
resource_ids.append(employee.resource_id.id)
super(hr_employee, self).unlink(cr, uid, ids, context=context)
return self.pool.get('resource.resource').unlink(cr, uid, resource_ids, context=context)
def onchange_address_id(self, cr, uid, ids, address, context=None):
if address:
address = self.pool.get('res.partner').browse(cr, uid, address, context=context)
return {'value': {'work_phone': address.phone, 'mobile_phone': address.mobile}}
return {'value': {}}
def onchange_company(self, cr, uid, ids, company, context=None):
address_id = False
if company:
company_id = self.pool.get('res.company').browse(cr, uid, company, context=context)
address = self.pool.get('res.partner').address_get(cr, uid, [company_id.partner_id.id], ['default'])
address_id = address and address['default'] or False
return {'value': {'address_id': address_id}}
def onchange_department_id(self, cr, uid, ids, department_id, context=None):
value = {'parent_id': False}
if department_id:
department = self.pool.get('hr.department').browse(cr, uid, department_id)
value['parent_id'] = department.manager_id.id
return {'value': value}
def onchange_user(self, cr, uid, ids, user_id, context=None):
work_email = False
if user_id:
work_email = self.pool.get('res.users').browse(cr, uid, user_id, context=context).email
return {'value': {'work_email': work_email}}
def action_follow(self, cr, uid, ids, context=None):
""" Wrapper because message_subscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_subscribe_users(cr, uid, ids, context=context)
def action_unfollow(self, cr, uid, ids, context=None):
""" Wrapper because message_unsubscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_unsubscribe_users(cr, uid, ids, context=context)
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Show the suggestion of employees if display_employees_suggestions if the
user perference allows it. """
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if not user.display_employees_suggestions:
return []
else:
return super(hr_employee, self).get_suggested_thread(cr, uid, removed_suggested_threads, context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=None, context=None):
""" Overwrite of the original method to always follow user_id field,
even when not track_visibility so that a user will follow it's employee
"""
if auto_follow_fields is None:
auto_follow_fields = ['user_id']
user_field_lst = []
for name, field in self._fields.items():
if name in auto_follow_fields and name in updated_fields and field.comodel_name == 'res.users':
user_field_lst.append(name)
return user_field_lst
_constraints = [(osv.osv._check_recursion, _('Error! You cannot create recursive hierarchy of Employee(s).'), ['parent_id']),]
class hr_department(osv.osv):
_name = "hr.department"
_description = "HR Department"
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _dept_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_columns = {
'name': fields.char('Department Name', required=True),
'complete_name': fields.function(_dept_name_get_fnc, type="char", string='Name'),
'company_id': fields.many2one('res.company', 'Company', select=True, required=False),
'parent_id': fields.many2one('hr.department', 'Parent Department', select=True),
'child_ids': fields.one2many('hr.department', 'parent_id', 'Child Departments'),
'manager_id': fields.many2one('hr.employee', 'Manager', track_visibility='onchange'),
'member_ids': fields.one2many('hr.employee', 'department_id', 'Members', readonly=True),
'jobs_ids': fields.one2many('hr.job', 'department_id', 'Jobs'),
'note': fields.text('Note'),
'color': fields.integer('Color Index'),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr.department', context=c),
}
_constraints = [
(osv.osv._check_recursion, _('Error! You cannot create recursive departments.'), ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
if context is None:
context = {}
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def create(self, cr, uid, vals, context=None):
# TDE note: auto-subscription of manager done by hand, because currently
# the tracking allows to track+subscribe fields linked to a res.user record
# An update of the limited behavior should come, but not currently done.
manager_id = vals.get("manager_id")
new_id = super(hr_department, self).create(cr, uid, vals, context=context)
if manager_id:
employee = self.pool.get('hr.employee').browse(cr, uid, manager_id, context=context)
if employee.user_id:
self.message_subscribe_users(cr, uid, [new_id], user_ids=[employee.user_id.id], context=context)
return new_id
def write(self, cr, uid, ids, vals, context=None):
# TDE note: auto-subscription of manager done by hand, because currently
# the tracking allows to track+subscribe fields linked to a res.user record
# An update of the limited behavior should come, but not currently done.
if isinstance(ids, (int, long)):
ids = [ids]
employee_ids = []
if 'manager_id' in vals:
manager_id = vals.get("manager_id")
if manager_id:
employee = self.pool['hr.employee'].browse(cr, uid, manager_id, context=context)
if employee.user_id:
self.message_subscribe_users(cr, uid, ids, user_ids=[employee.user_id.id], context=context)
for department in self.browse(cr, uid, ids, context=context):
employee_ids += self.pool['hr.employee'].search(
cr, uid, [
('id', '!=', manager_id),
('department_id', '=', department.id),
('parent_id', '=', department.manager_id.id)
], context=context)
self.pool['hr.employee'].write(cr, uid, employee_ids, {'parent_id': manager_id}, context=context)
return super(hr_department, self).write(cr, uid, ids, vals, context=context)
class res_users(osv.osv):
_name = 'res.users'
_inherit = 'res.users'
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = super(res_users, self).write(cr, uid, ids, vals, context=context)
employee_obj = self.pool.get('hr.employee')
if vals.get('name'):
for user_id in ids:
if user_id == SUPERUSER_ID:
employee_ids = employee_obj.search(cr, uid, [('user_id', '=', user_id)])
employee_obj.write(cr, uid, employee_ids, {'name': vals['name']}, context=context)
return result
| agpl-3.0 |
redhat-openstack/python-openstackclient | openstackclient/network/client.py | 1 | 1944 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from openstack import connection
from openstack import profile
from openstackclient.common import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
DEFAULT_API_VERSION = '2.0'
API_VERSION_OPTION = 'os_network_api_version'
API_NAME = "network"
API_VERSIONS = {
"2.0": "openstack.connection.Connection",
"2": "openstack.connection.Connection",
}
def make_client(instance):
"""Returns a network proxy"""
prof = profile.Profile()
prof.set_region(API_NAME, instance._region_name)
prof.set_version(API_NAME, instance._api_version[API_NAME])
conn = connection.Connection(authenticator=instance.session.auth,
verify=instance.session.verify,
cert=instance.session.cert,
profile=prof)
LOG.debug('Connection: %s', conn)
LOG.debug('Network client initialized using OpenStack SDK: %s',
conn.network)
return conn.network
def build_option_parser(parser):
"""Hook to add global options"""
parser.add_argument(
'--os-network-api-version',
metavar='<network-api-version>',
default=utils.env('OS_NETWORK_API_VERSION'),
help=_("Network API version, default=%s "
"(Env: OS_NETWORK_API_VERSION)") % DEFAULT_API_VERSION
)
return parser
| apache-2.0 |
adamfisk/littleshoot-client | server/appengine/littleshoot/mediaTypeTranslator.py | 1 | 4161 |
import logging
import os
class MediaTypeTranslator():
def __init__(self):
self.types = {}
self.addTypes(self.DOCUMENTS, 'document')
self.addTypes(self.OSX_APPLICATIONS, 'application/mac')
self.addTypes(self.LINUX_APPLICATIONS, 'application/linux')
self.addTypes(self.WINDOWS_APPLICATIONS, 'application/win')
self.addTypes(self.AUDIO, 'audio')
self.addTypes(self.VIDEO, 'video')
self.addTypes(self.IMAGE, 'image')
self.addTypes(self.GENERAL_APPLICATION, 'application')
self.addTypes(self.ARCHIVE, 'archive')
def addTypes(self, extensions, category):
#logging.info('Adding types....')
for ext in extensions:
if self.types.has_key(ext):
logging.error('Duplicate type: %s', ext)
else:
self.types[ext] = category
def getType(self, fileName):
#logging.info('Getting ext for file name: %s', fileName)
(shortname, ext) = os.path.splitext(fileName)
# The splitext function leaves the '.'
ext = ext.strip('.').lower()
#logging.info('Found extension: %s', ext)
if ext is None or len(ext) > 7:
logging.debug('No extension: %s', ext)
return 'unknown'
elif not self.types.has_key(ext):
logging.warn('Unknown extension: %s', ext)
return 'unknown'
else:
return self.types.get(ext)
DOCUMENTS = [
'html', 'htm', 'xhtml', 'mht', 'mhtml', 'xml',
'txt', 'ans', 'asc', 'diz', 'eml',
'pdf', 'ps', 'epsf', 'dvi',
'rtf', 'wri', 'doc', 'mcw', 'wps',
'xls', 'wk1', 'dif', 'csv', 'ppt', 'tsv',
'hlp', 'chm', 'lit',
'tex', 'texi', 'latex', 'info', 'man',
'wp', 'wpd', 'wp5', 'wk3', 'wk4', 'shw',
'sdd', 'sdw', 'sdp', 'sdc',
'sxd', 'sxw', 'sxp', 'sxc',
'abw', 'kwd', 'js', 'java', 'cpp', 'c', 'py', 'php', 'ruby',
'pps', # PowerPoint show
'dll',
'jhtml', # Java in html
'mmap', # mind mapping document
'dat', # data file
'bash',
]
OSX_APPLICATIONS = [
'dmg', 'pkg'
]
LINUX_APPLICATIONS = [
'mdb', 'sh', 'csh', 'awk', 'pl',
'rpm', 'deb', 'z', 'zoo', 'tar',
'taz', 'shar', 'hqx', '7z',
]
WINDOWS_APPLICATIONS = [
'exe', 'cab', 'msi', 'msp',
'arj', 'ace',
'nsi', # Nullsoft installer.
]
AUDIO = [
'mp3', 'mpa', 'mp1', 'mpga', 'mp2',
'ra', 'rm', 'ram', 'rmj',
'wma', 'wav', 'm4a', 'm4p',
'lqt', 'ogg', 'med',
'aif', 'aiff', 'aifc',
'au', 'snd', 's3m', 'aud',
'mid', 'midi', 'rmi', 'mod', 'kar',
'ac3', 'shn', 'fla', 'flac', 'cda',
'mka',
]
VIDEO = [
'mpg', 'mpeg', 'mpe', 'mng', 'mpv', 'm1v',
'vob', 'mpv2', 'mp2v', 'm2p', 'm2v', 'm4v', 'mpgv',
'vcd', 'mp4', 'dv', 'dvd', 'div', 'divx', 'dvx',
'smi', 'smil', 'rv', 'rmm', 'rmvb',
'avi', 'asf', 'asx', 'wmv', 'qt', 'mov',
'fli', 'flc', 'flx', 'flv',
'wml', 'vrml', 'swf', 'dcr', 'jve', 'nsv',
'mkv', 'ogm',
'cdg', 'srt', 'sub', 'idx', 'msmedia',
'wvx', # This is a redirect to a wmv
]
IMAGE = [
'gif', 'png',
'jpg', 'jpeg', 'jpe', 'jif', 'jiff', 'jfif',
'tif', 'tiff', 'iff', 'lbm', 'ilbm', 'eps',
'mac', 'drw', 'pct', 'img',
'bmp', 'dib', 'rle', 'ico', 'ani', 'icl', 'cur',
'emf', 'wmf', 'pcx',
'pcd', 'tga', 'pic', 'fig',
'psd', 'wpg', 'dcx', 'cpt', 'mic',
'pbm', 'pnm', 'ppm', 'xbm', 'xpm', 'xwd',
'sgi', 'fax', 'rgb', 'ras'
]
GENERAL_APPLICATION = [
'jar', 'jnlp', 'iso', 'bin',
'nrg', # Nero CD image file.
'cue', # Another CD image file type.
]
ARCHIVE = [
'zip', 'sitx', 'sit', 'tgz', 'gz', 'gzip', 'bz2','rar', 'lzh','lha'
]
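# --- Illustrative usage sketch (an addition, not from the upstream LittleShoot
# module): the file names are hypothetical and only show how extensions map to
# the category strings defined above.
if __name__ == "__main__":
    translator = MediaTypeTranslator()
    assert translator.getType("song.mp3") == "audio"
    assert translator.getType("clip.avi") == "video"
    assert translator.getType("backup.tar.gz") == "archive"   # splitext keeps only ".gz"
    assert translator.getType("README") == "unknown"          # no extension at all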
| gpl-2.0 |
ArcherSys/ArcherSys | entertainment/gheddobox/Lib/sre_constants.py | 185 | 7197 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
try:
from _sre import MAXREPEAT
except ImportError:
import _sre
MAXREPEAT = _sre.MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode locale
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = d.items()
items.sort(key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print "done"
| mit |
ElGatoSaez/pyCoBot | modules/calc/calc.py | 2 | 3450 | # -*- coding: utf-8 -*-
import math
import re
import textwrap
import multiprocessing
import sys
try:
from mpmath import mp
except:
pass
class calc:
def __init__(self, core, client):
core.addCommandHandler("calc", self, chelp="calc.help")
self.res = None
self.q = multiprocessing.Queue()
self.vrs = vars(math)
try:
mp
self.vrs = vars(mp)
mp.dps = 128
core.addCommandHandler("calcdps", self, cpriv=2, chelp=
"Ajusta la cantidad de decimales que mostrara calc. Sintaxis:"
" calcdps <numero>")
except:
pass
self.vrs['cosd'] = cosd
self.vrs['tand'] = tand
self.vrs['sind'] = sind
def calcdps(self, bot, cli, ev):
from mpmath import mp
mp.dps = int(ev.splitd[0])
cli.msg(ev.source, bot._(ev, self, "calcdps").format(ev.splitd[0]))
def calc(self, bot, cli, event):
#res = self.calculate(" ".join(event.splitd))
res = self.try_slow_thing(self.calculate, bot, event,
" ".join(event.splitd), self.q, bot, event)
if res is None:
cli.msg(event.target, bot._(event, self, "calcerr"))
else:
restr = res
restr = self.adjust_decimals(restr)
restr = self.adjust_decimals(restr)
cli.msg(event.target,
textwrap.wrap(restr, 800)[0])
def adjust_decimals(self, s):
if "." not in s:
return s
i = 0
while i != len(s):
ik = i + 1
if s[len(s) - ik:len(s) - i] == "0":
s = s[0:len(s) - ik]
elif s[len(s) - ik:len(s) - i] == ".":
s = s[0:len(s) - ik]
return s
else:
return s
i += 1
return s
integers_regex = re.compile(r'\b[\d\.]+\b')
def calculate(self, expr, q, bot, ev):
def safe_eval(expr, symbols={}):
if expr.find("_") != -1 or expr.find("=") != -1 or expr.find("'") != -1 or expr.find("\"") != -1:
return None
if expr.find("sys.") != -1 or expr.find("lambda") != -1 or expr.find("os.") != -1 or expr.find("import") != -1:
return "u h4x0r"
try:
return eval(expr, dict(__builtins__=None), symbols) # :(
except:
e = sys.exc_info()[0]
return bot._(ev, self, "syntaxerror").format(str(e))
expr = expr.replace('^', '**')
resp = safe_eval(expr, self.vrs)
try:
resp = mp.nstr(resp, mp.dps, min_fixed=-mp.inf)
except:
pass
q.put(str(resp))
def try_slow_thing(self, function, bot, ev, *args):
p = multiprocessing.Process(target=function, args=args)
p.start()
p.join(5)
if p.is_alive():
p.terminate()
return bot._(ev, self, "toolong")
else:
return self.q.get(True)
def cosd(x):
try:
return mp.cos(x * mp.pi / 180)
except:
return math.cos(x * math.pi / 180)
def tand(x):
try:
return mp.tan(x * mp.pi / 180)
except:
return math.tan(x * math.pi / 180)
def sind(x):
try:
return mp.sin(x * mp.pi / 180)
except:
return math.sin(x * math.pi / 180)
| mit |
LordSputnik/peevee | peevee/__init__.py | 1 | 1187 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ben Ockmore
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def validate(input_path):
pass
def transform(input_path):
pass
| mit |
gudcjfdldu/volatility | volatility/plugins/gui/userhandles.py | 58 | 3646 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2010,2011,2012 Michael Hale Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.plugins.gui.sessions as sessions
import volatility.debug as debug
class UserHandles(sessions.Sessions):
"""Dump the USER handle tables"""
def __init__(self, config, *args, **kwargs):
sessions.Sessions.__init__(self, config, *args, **kwargs)
config.add_option('PID', short_option = 'p',
help = 'Pid filter', action = 'store',
type = 'int')
config.add_option('TYPE', short_option = 't',
help = 'Handle type', action = 'store',
type = 'string')
config.add_option('FREE', short_option = 'F',
help = 'Include free handles', action = 'store_true',
default = False)
def render_text(self, outfd, data):
for session in data:
shared_info = session.find_shared_info()
if not shared_info:
debug.debug("Cannot find win32k!gSharedInfo")
continue
outfd.write("*" * 50 + "\n")
outfd.write("SharedInfo: {0:#x}, SessionId: {1} Shared delta: {2}\n".format(
shared_info.obj_offset, session.SessionId,
shared_info.ulSharedDelta,
))
outfd.write("aheList: {0:#x}, Table size: {1:#x}, Entry size: {2:#x}\n".format(
shared_info.aheList.v(),
shared_info.psi.cbHandleTable,
shared_info.HeEntrySize if hasattr(shared_info, 'HeEntrySize') else shared_info.obj_vm.profile.get_obj_size("_HANDLEENTRY"),
))
outfd.write("\n")
filters = []
# Should we display freed handles
if not self._config.FREE:
filters.append(lambda x : not x.Free)
# Should we filter by process ID
if self._config.PID:
filters.append(lambda x : x.Process.UniqueProcessId == self._config.PID)
# Should we filter by object type
if self._config.TYPE:
filters.append(lambda x : str(x.bType) == self._config.TYPE)
self.table_header(outfd,
[("Object(V)", "[addrpad]"),
("Handle", "[addr]"),
("bType", "20"),
("Flags", "^8"),
("Thread", "^8"),
("Process", ""),
])
for handle in shared_info.handles(filters):
self.table_row(outfd,
handle.phead.v(),
handle.phead.h if handle.phead else 0,
handle.bType,
handle.bFlags,
handle.Thread.Cid.UniqueThread,
handle.Process.UniqueProcessId)
| gpl-2.0 |
killkill/fabric-bolt | src/fabric_bolt/projects/urls.py | 17 | 2236 | from django.conf.urls import patterns, url
import views
urlpatterns = patterns('',
url(r'^$', views.ProjectList.as_view(), name='projects_project_list'),
url(r'^create/$', views.ProjectCreate.as_view(), name='projects_project_create'),
url(r'^view/(?P<pk>\w+)/$', views.ProjectDetail.as_view(), name='projects_project_view'),
url(r'^update/(?P<pk>\w+)/$', views.ProjectUpdate.as_view(), name='projects_project_update'),
url(r'^delete/(?P<pk>\w+)/$', views.ProjectDelete.as_view(), name='projects_project_delete'),
url(r'^(?P<project_id>\w+)/configuration/create/$', views.ProjectConfigurationCreate.as_view(), name='projects_configuration_create'),
url(r'^(?P<project_id>\w+)/configuration/stage/(?P<stage_id>\d+)/create/$', views.ProjectConfigurationCreate.as_view(), name='projects_configuration_stage_create'),
url(r'^configuration/update/(?P<pk>\w+)/$', views.ProjectConfigurationUpdate.as_view(), name='projects_configuration_update'),
url(r'^configuration/delete/(?P<pk>\w+)/$', views.ProjectConfigurationDelete.as_view(), name='projects_configuration_delete'),
url(r'^stage/(?P<pk>\d+)/deployment/(?P<task_name>\w+)/$', views.DeploymentCreate.as_view(), name='projects_deployment_create'),
url(r'^deployment/view/(?P<pk>\d+)', views.DeploymentDetail.as_view(), name='projects_deployment_detail'),
url(r'^deployment/output/(?P<pk>\d+)', views.DeploymentOutputStream.as_view(), name='projects_deployment_output'),
url(r'^(?P<project_id>\w+)/stage/create/$', views.ProjectStageCreate.as_view(), name='projects_stage_create'),
url(r'^(?P<project_id>\w+)/stage/update/(?P<pk>\w+)/$', views.ProjectStageUpdate.as_view(), name='projects_stage_update'),
url(r'^(?P<project_id>\w+)/stage/view/(?P<pk>\w+)/$', views.ProjectStageView.as_view(), name='projects_stage_view'),
url(r'^(?P<project_id>\w+)/stage/delete/(?P<pk>\w+)/$', views.ProjectStageDelete.as_view(), name='projects_stage_delete'),
url(r'^(?P<project_id>\w+)/stage/(?P<pk>\w+)/host/(?P<host_id>\w+)/$', views.ProjectStageMapHost.as_view(), name='projects_stage_maphost'),
url(r'^stage/(?P<pk>\w+)/host/(?P<host_id>\w+)/$', views.ProjectStageUnmapHost.as_view(), name='projects_stage_unmaphost'),
) | mit |
acshan/odoo | addons/sale_crm/__openerp__.py | 260 | 2036 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Opportunity to Quotation',
'version': '1.0',
'category': 'Hidden',
'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If several cases are open (a list), it generates one sales order per case.
The case is then closed and linked to the generated sales order.
We suggest you to install this module, if you installed both the sale and the crm
modules.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale', 'crm', 'web_kanban_gauge'],
'data': [
'wizard/crm_make_sale_view.xml',
'sale_crm_view.xml',
'security/sale_crm_security.xml',
'security/ir.model.access.csv',
],
'demo': [],
'test': ['test/sale_crm.yml'],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
py-geek/City-Air | venv/lib/python2.7/site-packages/django/templatetags/cache.py | 118 | 2303 | from __future__ import unicode_literals
from django.core.cache.utils import make_template_fragment_key
from django.template import Library, Node, TemplateSyntaxError, VariableDoesNotExist
from django.core.cache import cache
register = Library()
class CacheNode(Node):
def __init__(self, nodelist, expire_time_var, fragment_name, vary_on):
self.nodelist = nodelist
self.expire_time_var = expire_time_var
self.fragment_name = fragment_name
self.vary_on = vary_on
def render(self, context):
try:
expire_time = self.expire_time_var.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
vary_on = [var.resolve(context) for var in self.vary_on]
cache_key = make_template_fragment_key(self.fragment_name, vary_on)
value = cache.get(cache_key)
if value is None:
value = self.nodelist.render(context)
cache.set(cache_key, value, expire_time)
return value
@register.tag('cache')
def do_cache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time.
Usage::
{% load cache %}
{% cache [expire_time] [fragment_name] %}
.. some expensive processing ..
{% endcache %}
This tag also supports varying by a list of arguments::
{% load cache %}
{% cache [expire_time] [fragment_name] [var1] [var2] .. %}
.. some expensive processing ..
{% endcache %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
return CacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(token) for token in tokens[3:]])
| mit |
rgs1/zktraffic | zktraffic/fle/message.py | 4 | 4791 | # ==================================================================================================
# Copyright 2015 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from datetime import datetime
from zktraffic.base.network import BadPacket
from zktraffic.base.util import read_long, read_number, read_string
class PeerState(object):
LOOKING = 0
FOLLOWING = 1
LEADING = 2
OBSERVING = 3
STATES = [LOOKING, FOLLOWING, LEADING, OBSERVING]
NAMES = [
"looking",
"following",
"leading",
"observing",
]
@classmethod
def invalid(cls, state):
return state not in cls.STATES
@classmethod
def to_str(cls, state):
return "" if cls.invalid(state) else cls.NAMES[state]
class Message(object):
PROTO_VER = -65536
OLD_LEN = 28
WITH_VERSION_LEN = 36
__slots__ = ()
@classmethod
def from_payload(cls, data, src, dst, timestamp):
if len(data) < 16:
raise BadPacket("Too small")
proto, offset = read_long(data, 0)
if proto == cls.PROTO_VER:
server_id, offset = read_long(data, offset)
election_addr, _ = read_string(data, offset)
return Initial(timestamp, src, dst, server_id, election_addr)
if len(data) >= cls.OLD_LEN:
state, offset = read_number(data, 0)
if PeerState.invalid(state):
raise BadPacket("Invalid state: %d" % state)
leader, offset = read_long(data, offset)
zxid, offset = read_long(data, offset)
election_epoch, offset = read_long(data, offset)
peer_epoch, offset = read_long(data, offset) if len(data) > cls.OLD_LEN else (-1, offset)
version = 0
config = ""
if len(data) > cls.WITH_VERSION_LEN:
version, offset = read_number(data, offset)
if version == 2:
config, _ = read_string(data, offset)
return Notification(
timestamp,
src,
dst,
state,
leader,
zxid,
election_epoch,
peer_epoch,
version,
config
)
raise BadPacket("Unknown unknown")
@property
def timestr(self):
return datetime.fromtimestamp(self.timestamp).strftime("%H:%M:%S:%f")
class Initial(Message):
__slots__ = ("timestamp", "src", "dst", "server_id", "election_addr")
def __init__(self, timestamp, src, dst, server_id, election_addr):
self.timestamp = timestamp
self.src = src
self.dst = dst
self.server_id = server_id
self.election_addr = election_addr
def __str__(self):
return "%s(\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s\n)\n" % (
"Initial",
" " * 5 + "timestamp", self.timestr,
" " * 5 + "src", self.src,
" " * 5 + "dst", self.dst,
" " * 5 + "server_id", self.server_id,
" " * 5 + "election_addr", self.election_addr
)
class Notification(Message):
__slots__ = (
"timestamp",
"src",
"dst",
"state",
"leader",
"zxid",
"election_epoch",
"peer_epoch",
"version",
"config"
)
def __init__(self, timestamp, src, dst, state, leader, zxid, election_epoch, peer_epoch, version, config):
self.timestamp = timestamp
self.src = src
self.dst = dst
self.state = state
self.leader = leader
self.zxid = zxid
self.election_epoch = election_epoch
self.peer_epoch = peer_epoch
self.version = version
self.config = config
@property
def state_literal(self):
return PeerState.to_str(self.state)
def __str__(self):
config = [" " * 10 + cline for cline in self.config.split("\n")]
return "%s(\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=%s,\n%s=\n%s\n)\n" % (
"Notification",
" " * 5 + "timestamp", self.timestr,
" " * 5 + "src", self.src,
" " * 5 + "dst", self.dst,
" " * 5 + "state", self.state_literal,
" " * 5 + "leader", self.leader,
" " * 5 + "zxid", self.zxid,
" " * 5 + "election_epoch", self.election_epoch,
" " * 5 + "peer_epoch", self.peer_epoch,
" " * 5 + "version", self.version,
" " * 5 + "config", "\n".join(config),
)
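# --- Hedged usage sketch (an addition, not from the upstream zktraffic file):
# builds a Notification by hand to show the field layout and the state-name
# mapping. Assumes the zktraffic package is importable; the addresses and ids
# are made-up examples.
if __name__ == "__main__":
    n = Notification(
        timestamp=0.0, src="10.0.0.1:45678", dst="10.0.0.2:3888",
        state=PeerState.LOOKING, leader=3, zxid=0x100000001,
        election_epoch=1, peer_epoch=1, version=0, config="")
    assert n.state_literal == "looking"
    print(n)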
| apache-2.0 |
gmargari/apache-cassandra-1.1.0-src | pylib/cqlshlib/wcwidth.py | 113 | 16049 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# adapted from http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# -thepaul
# This is an implementation of wcwidth() and wcswidth() (defined in
# IEEE Std 1002.1-2001) for Unicode.
#
# http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
# http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
#
# In fixed-width output devices, Latin characters all occupy a single
# "cell" position of equal width, whereas ideographic CJK characters
# occupy two such cells. Interoperability between terminal-line
# applications and (teletype-style) character terminals using the
# UTF-8 encoding requires agreement on which character should advance
# the cursor by how many cell positions. No established formal
# standards exist at present on which Unicode character shall occupy
# how many cell positions on character terminals. These routines are
# a first attempt of defining such behavior based on simple rules
# applied to data provided by the Unicode Consortium.
#
# For some graphical characters, the Unicode standard explicitly
# defines a character-cell width via the definition of the East Asian
# FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes.
# In all these cases, there is no ambiguity about which width a
# terminal shall use. For characters in the East Asian Ambiguous (A)
# class, the width choice depends purely on a preference of backward
# compatibility with either historic CJK or Western practice.
# Choosing single-width for these characters is easy to justify as
# the appropriate long-term solution, as the CJK practice of
# displaying these characters as double-width comes from historic
# implementation simplicity (8-bit encoded characters were displayed
# single-width and 16-bit ones double-width, even for Greek,
# Cyrillic, etc.) and not any typographic considerations.
#
# Much less clear is the choice of width for the Not East Asian
# (Neutral) class. Existing practice does not dictate a width for any
# of these characters. It would nevertheless make sense
# typographically to allocate two character cells to characters such
# as for instance EM SPACE or VOLUME INTEGRAL, which cannot be
# represented adequately with a single-width glyph. The following
# routines at present merely assign a single-cell width to all
# neutral characters, in the interest of simplicity. This is not
# entirely satisfactory and should be reconsidered before
# establishing a formal standard in this area. At the moment, the
# decision which Not East Asian (Neutral) characters should be
# represented by double-width glyphs cannot yet be answered by
# applying a simple rule from the Unicode database content. Setting
# up a proper standard for the behavior of UTF-8 character terminals
# will require a careful analysis not only of each Unicode character,
# but also of each presentation form, something the author of these
# routines has avoided to do so far.
#
# http://www.unicode.org/unicode/reports/tr11/
#
# Markus Kuhn -- 2007-05-26 (Unicode 5.0)
#
# Permission to use, copy, modify, and distribute this software
# for any purpose and without fee is hereby granted. The author
# disclaims all warranties with regard to this software.
#
# Latest C version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# auxiliary function for binary search in interval table
def bisearch(ucs, table):
min = 0
max = len(table) - 1
if ucs < table[0][0] or ucs > table[max][1]:
return 0
while max >= min:
mid = (min + max) / 2
if ucs > table[mid][1]:
min = mid + 1
elif ucs < table[mid][0]:
max = mid - 1
else:
return 1
return 0
# The following two functions define the column width of an ISO 10646
# character as follows:
#
# - The null character (U+0000) has a column width of 0.
#
# - Other C0/C1 control characters and DEL will lead to a return
# value of -1.
#
# - Non-spacing and enclosing combining characters (general
# category code Mn or Me in the Unicode database) have a
# column width of 0.
#
# - SOFT HYPHEN (U+00AD) has a column width of 1.
#
# - Other format characters (general category code Cf in the Unicode
# database) and ZERO WIDTH SPACE (U+200B) have a column width of 0.
#
# - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF)
# have a column width of 0.
#
# - Spacing characters in the East Asian Wide (W) or East Asian
# Full-width (F) category as defined in Unicode Technical
# Report #11 have a column width of 2.
#
# - All remaining characters (including all printable
# ISO 8859-1 and WGL4 characters, Unicode control characters,
# etc.) have a column width of 1.
#
# This implementation assumes that wchar_t characters are encoded
# in ISO 10646.
# sorted list of non-overlapping intervals of non-spacing characters
# generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c"
combining = (
( 0x0300, 0x036F ), ( 0x0483, 0x0486 ), ( 0x0488, 0x0489 ),
( 0x0591, 0x05BD ), ( 0x05BF, 0x05BF ), ( 0x05C1, 0x05C2 ),
( 0x05C4, 0x05C5 ), ( 0x05C7, 0x05C7 ), ( 0x0600, 0x0603 ),
( 0x0610, 0x0615 ), ( 0x064B, 0x065E ), ( 0x0670, 0x0670 ),
( 0x06D6, 0x06E4 ), ( 0x06E7, 0x06E8 ), ( 0x06EA, 0x06ED ),
( 0x070F, 0x070F ), ( 0x0711, 0x0711 ), ( 0x0730, 0x074A ),
( 0x07A6, 0x07B0 ), ( 0x07EB, 0x07F3 ), ( 0x0901, 0x0902 ),
( 0x093C, 0x093C ), ( 0x0941, 0x0948 ), ( 0x094D, 0x094D ),
( 0x0951, 0x0954 ), ( 0x0962, 0x0963 ), ( 0x0981, 0x0981 ),
( 0x09BC, 0x09BC ), ( 0x09C1, 0x09C4 ), ( 0x09CD, 0x09CD ),
( 0x09E2, 0x09E3 ), ( 0x0A01, 0x0A02 ), ( 0x0A3C, 0x0A3C ),
( 0x0A41, 0x0A42 ), ( 0x0A47, 0x0A48 ), ( 0x0A4B, 0x0A4D ),
( 0x0A70, 0x0A71 ), ( 0x0A81, 0x0A82 ), ( 0x0ABC, 0x0ABC ),
( 0x0AC1, 0x0AC5 ), ( 0x0AC7, 0x0AC8 ), ( 0x0ACD, 0x0ACD ),
( 0x0AE2, 0x0AE3 ), ( 0x0B01, 0x0B01 ), ( 0x0B3C, 0x0B3C ),
( 0x0B3F, 0x0B3F ), ( 0x0B41, 0x0B43 ), ( 0x0B4D, 0x0B4D ),
( 0x0B56, 0x0B56 ), ( 0x0B82, 0x0B82 ), ( 0x0BC0, 0x0BC0 ),
( 0x0BCD, 0x0BCD ), ( 0x0C3E, 0x0C40 ), ( 0x0C46, 0x0C48 ),
( 0x0C4A, 0x0C4D ), ( 0x0C55, 0x0C56 ), ( 0x0CBC, 0x0CBC ),
( 0x0CBF, 0x0CBF ), ( 0x0CC6, 0x0CC6 ), ( 0x0CCC, 0x0CCD ),
( 0x0CE2, 0x0CE3 ), ( 0x0D41, 0x0D43 ), ( 0x0D4D, 0x0D4D ),
( 0x0DCA, 0x0DCA ), ( 0x0DD2, 0x0DD4 ), ( 0x0DD6, 0x0DD6 ),
( 0x0E31, 0x0E31 ), ( 0x0E34, 0x0E3A ), ( 0x0E47, 0x0E4E ),
( 0x0EB1, 0x0EB1 ), ( 0x0EB4, 0x0EB9 ), ( 0x0EBB, 0x0EBC ),
( 0x0EC8, 0x0ECD ), ( 0x0F18, 0x0F19 ), ( 0x0F35, 0x0F35 ),
( 0x0F37, 0x0F37 ), ( 0x0F39, 0x0F39 ), ( 0x0F71, 0x0F7E ),
( 0x0F80, 0x0F84 ), ( 0x0F86, 0x0F87 ), ( 0x0F90, 0x0F97 ),
( 0x0F99, 0x0FBC ), ( 0x0FC6, 0x0FC6 ), ( 0x102D, 0x1030 ),
( 0x1032, 0x1032 ), ( 0x1036, 0x1037 ), ( 0x1039, 0x1039 ),
( 0x1058, 0x1059 ), ( 0x1160, 0x11FF ), ( 0x135F, 0x135F ),
( 0x1712, 0x1714 ), ( 0x1732, 0x1734 ), ( 0x1752, 0x1753 ),
( 0x1772, 0x1773 ), ( 0x17B4, 0x17B5 ), ( 0x17B7, 0x17BD ),
( 0x17C6, 0x17C6 ), ( 0x17C9, 0x17D3 ), ( 0x17DD, 0x17DD ),
( 0x180B, 0x180D ), ( 0x18A9, 0x18A9 ), ( 0x1920, 0x1922 ),
( 0x1927, 0x1928 ), ( 0x1932, 0x1932 ), ( 0x1939, 0x193B ),
( 0x1A17, 0x1A18 ), ( 0x1B00, 0x1B03 ), ( 0x1B34, 0x1B34 ),
( 0x1B36, 0x1B3A ), ( 0x1B3C, 0x1B3C ), ( 0x1B42, 0x1B42 ),
( 0x1B6B, 0x1B73 ), ( 0x1DC0, 0x1DCA ), ( 0x1DFE, 0x1DFF ),
( 0x200B, 0x200F ), ( 0x202A, 0x202E ), ( 0x2060, 0x2063 ),
( 0x206A, 0x206F ), ( 0x20D0, 0x20EF ), ( 0x302A, 0x302F ),
( 0x3099, 0x309A ), ( 0xA806, 0xA806 ), ( 0xA80B, 0xA80B ),
( 0xA825, 0xA826 ), ( 0xFB1E, 0xFB1E ), ( 0xFE00, 0xFE0F ),
( 0xFE20, 0xFE23 ), ( 0xFEFF, 0xFEFF ), ( 0xFFF9, 0xFFFB ),
( 0x10A01, 0x10A03 ), ( 0x10A05, 0x10A06 ), ( 0x10A0C, 0x10A0F ),
( 0x10A38, 0x10A3A ), ( 0x10A3F, 0x10A3F ), ( 0x1D167, 0x1D169 ),
( 0x1D173, 0x1D182 ), ( 0x1D185, 0x1D18B ), ( 0x1D1AA, 0x1D1AD ),
( 0x1D242, 0x1D244 ), ( 0xE0001, 0xE0001 ), ( 0xE0020, 0xE007F ),
( 0xE0100, 0xE01EF )
)
# sorted list of non-overlapping intervals of East Asian Ambiguous
# characters, generated by "uniset +WIDTH-A -cat=Me -cat=Mn -cat=Cf c"
ambiguous = (
( 0x00A1, 0x00A1 ), ( 0x00A4, 0x00A4 ), ( 0x00A7, 0x00A8 ),
( 0x00AA, 0x00AA ), ( 0x00AE, 0x00AE ), ( 0x00B0, 0x00B4 ),
( 0x00B6, 0x00BA ), ( 0x00BC, 0x00BF ), ( 0x00C6, 0x00C6 ),
( 0x00D0, 0x00D0 ), ( 0x00D7, 0x00D8 ), ( 0x00DE, 0x00E1 ),
( 0x00E6, 0x00E6 ), ( 0x00E8, 0x00EA ), ( 0x00EC, 0x00ED ),
( 0x00F0, 0x00F0 ), ( 0x00F2, 0x00F3 ), ( 0x00F7, 0x00FA ),
( 0x00FC, 0x00FC ), ( 0x00FE, 0x00FE ), ( 0x0101, 0x0101 ),
( 0x0111, 0x0111 ), ( 0x0113, 0x0113 ), ( 0x011B, 0x011B ),
( 0x0126, 0x0127 ), ( 0x012B, 0x012B ), ( 0x0131, 0x0133 ),
( 0x0138, 0x0138 ), ( 0x013F, 0x0142 ), ( 0x0144, 0x0144 ),
( 0x0148, 0x014B ), ( 0x014D, 0x014D ), ( 0x0152, 0x0153 ),
( 0x0166, 0x0167 ), ( 0x016B, 0x016B ), ( 0x01CE, 0x01CE ),
( 0x01D0, 0x01D0 ), ( 0x01D2, 0x01D2 ), ( 0x01D4, 0x01D4 ),
( 0x01D6, 0x01D6 ), ( 0x01D8, 0x01D8 ), ( 0x01DA, 0x01DA ),
( 0x01DC, 0x01DC ), ( 0x0251, 0x0251 ), ( 0x0261, 0x0261 ),
( 0x02C4, 0x02C4 ), ( 0x02C7, 0x02C7 ), ( 0x02C9, 0x02CB ),
( 0x02CD, 0x02CD ), ( 0x02D0, 0x02D0 ), ( 0x02D8, 0x02DB ),
( 0x02DD, 0x02DD ), ( 0x02DF, 0x02DF ), ( 0x0391, 0x03A1 ),
( 0x03A3, 0x03A9 ), ( 0x03B1, 0x03C1 ), ( 0x03C3, 0x03C9 ),
( 0x0401, 0x0401 ), ( 0x0410, 0x044F ), ( 0x0451, 0x0451 ),
( 0x2010, 0x2010 ), ( 0x2013, 0x2016 ), ( 0x2018, 0x2019 ),
( 0x201C, 0x201D ), ( 0x2020, 0x2022 ), ( 0x2024, 0x2027 ),
( 0x2030, 0x2030 ), ( 0x2032, 0x2033 ), ( 0x2035, 0x2035 ),
( 0x203B, 0x203B ), ( 0x203E, 0x203E ), ( 0x2074, 0x2074 ),
( 0x207F, 0x207F ), ( 0x2081, 0x2084 ), ( 0x20AC, 0x20AC ),
( 0x2103, 0x2103 ), ( 0x2105, 0x2105 ), ( 0x2109, 0x2109 ),
( 0x2113, 0x2113 ), ( 0x2116, 0x2116 ), ( 0x2121, 0x2122 ),
( 0x2126, 0x2126 ), ( 0x212B, 0x212B ), ( 0x2153, 0x2154 ),
( 0x215B, 0x215E ), ( 0x2160, 0x216B ), ( 0x2170, 0x2179 ),
( 0x2190, 0x2199 ), ( 0x21B8, 0x21B9 ), ( 0x21D2, 0x21D2 ),
( 0x21D4, 0x21D4 ), ( 0x21E7, 0x21E7 ), ( 0x2200, 0x2200 ),
( 0x2202, 0x2203 ), ( 0x2207, 0x2208 ), ( 0x220B, 0x220B ),
( 0x220F, 0x220F ), ( 0x2211, 0x2211 ), ( 0x2215, 0x2215 ),
( 0x221A, 0x221A ), ( 0x221D, 0x2220 ), ( 0x2223, 0x2223 ),
( 0x2225, 0x2225 ), ( 0x2227, 0x222C ), ( 0x222E, 0x222E ),
( 0x2234, 0x2237 ), ( 0x223C, 0x223D ), ( 0x2248, 0x2248 ),
( 0x224C, 0x224C ), ( 0x2252, 0x2252 ), ( 0x2260, 0x2261 ),
( 0x2264, 0x2267 ), ( 0x226A, 0x226B ), ( 0x226E, 0x226F ),
( 0x2282, 0x2283 ), ( 0x2286, 0x2287 ), ( 0x2295, 0x2295 ),
( 0x2299, 0x2299 ), ( 0x22A5, 0x22A5 ), ( 0x22BF, 0x22BF ),
( 0x2312, 0x2312 ), ( 0x2460, 0x24E9 ), ( 0x24EB, 0x254B ),
( 0x2550, 0x2573 ), ( 0x2580, 0x258F ), ( 0x2592, 0x2595 ),
( 0x25A0, 0x25A1 ), ( 0x25A3, 0x25A9 ), ( 0x25B2, 0x25B3 ),
( 0x25B6, 0x25B7 ), ( 0x25BC, 0x25BD ), ( 0x25C0, 0x25C1 ),
( 0x25C6, 0x25C8 ), ( 0x25CB, 0x25CB ), ( 0x25CE, 0x25D1 ),
( 0x25E2, 0x25E5 ), ( 0x25EF, 0x25EF ), ( 0x2605, 0x2606 ),
( 0x2609, 0x2609 ), ( 0x260E, 0x260F ), ( 0x2614, 0x2615 ),
( 0x261C, 0x261C ), ( 0x261E, 0x261E ), ( 0x2640, 0x2640 ),
( 0x2642, 0x2642 ), ( 0x2660, 0x2661 ), ( 0x2663, 0x2665 ),
( 0x2667, 0x266A ), ( 0x266C, 0x266D ), ( 0x266F, 0x266F ),
( 0x273D, 0x273D ), ( 0x2776, 0x277F ), ( 0xE000, 0xF8FF ),
( 0xFFFD, 0xFFFD ), ( 0xF0000, 0xFFFFD ), ( 0x100000, 0x10FFFD )
)
def mk_wcwidth(ucs):
# test for 8-bit control characters
if ucs == 0:
return 0
if ucs < 32 or (ucs >= 0x7f and ucs < 0xa0):
return -1
# binary search in table of non-spacing characters
if bisearch(ucs, combining):
return 0
# if we arrive here, ucs is not a combining or C0/C1 control character
return 1 + \
int(ucs >= 0x1100 and
(ucs <= 0x115f or # Hangul Jamo init. consonants
ucs == 0x2329 or ucs == 0x232a or
(ucs >= 0x2e80 and ucs <= 0xa4cf and
ucs != 0x303f) or # CJK ... Yi
(ucs >= 0xac00 and ucs <= 0xd7a3) or # Hangul Syllables
(ucs >= 0xf900 and ucs <= 0xfaff) or # CJK Compatibility Ideographs
(ucs >= 0xfe10 and ucs <= 0xfe19) or # Vertical forms
(ucs >= 0xfe30 and ucs <= 0xfe6f) or # CJK Compatibility Forms
(ucs >= 0xff00 and ucs <= 0xff60) or # Fullwidth Forms
(ucs >= 0xffe0 and ucs <= 0xffe6) or
(ucs >= 0x20000 and ucs <= 0x2fffd) or
(ucs >= 0x30000 and ucs <= 0x3fffd)))
def mk_wcswidth(pwcs):
width = 0
for c in pwcs:
w = mk_wcwidth(c)
if w < 0:
return -1
else:
width += w
return width
# The following functions are the same as mk_wcwidth() and
# mk_wcswidth(), except that spacing characters in the East Asian
# Ambiguous (A) category as defined in Unicode Technical Report #11
# have a column width of 2. This variant might be useful for users of
# CJK legacy encodings who want to migrate to UCS without changing
# the traditional terminal character-width behaviour. It is not
# otherwise recommended for general use.
def mk_wcwidth_cjk(ucs):
# binary search in table of non-spacing characters
if bisearch(ucs, ambiguous):
return 2
return mk_wcwidth(ucs)
def mk_wcswidth_cjk(pwcs):
width = 0
for c in pwcs:
w = mk_wcwidth_cjk(c)
if w < 0:
return -1
width += w
return width
# python-y versions, dealing with unicode objects
def wcwidth(c):
return mk_wcwidth(ord(c))
def wcswidth(s):
return mk_wcswidth(map(ord, s))
def wcwidth_cjk(c):
return mk_wcwidth_cjk(ord(c))
def wcswidth_cjk(s):
return mk_wcswidth_cjk(map(ord, s))
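# A few illustrative values implied by the tables above (kept as comments so
# nothing runs at import time):
#   wcswidth(u'abc')       -> 3  one column per plain ASCII character
#   wcwidth(u'\u00e6')     -> 1  but wcwidth_cjk(u'\u00e6') -> 2, since
#                                U+00E6 is in the East Asian Ambiguous table
#   wcwidth(u'\u0300')     -> 0  COMBINING GRAVE ACCENT is a non-spacing mark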
if __name__ == "__main__":
samples = (
('MUSIC SHARP SIGN', 1),
('FULLWIDTH POUND SIGN', 2),
('FULLWIDTH LATIN CAPITAL LETTER P', 2),
('CJK RADICAL BOLT OF CLOTH', 2),
('LATIN SMALL LETTER A', 1),
('LATIN SMALL LETTER AE', 1),
('SPACE', 1),
('NO-BREAK SPACE', 1),
('CJK COMPATIBILITY IDEOGRAPH-F920', 2),
('MALAYALAM VOWEL SIGN UU', 0),
('ZERO WIDTH SPACE', 0),
('ZERO WIDTH NO-BREAK SPACE', 0),
('COMBINING PALATALIZED HOOK BELOW', 0),
('COMBINING GRAVE ACCENT', 0),
)
nonprinting = u'\r\n\t\a\b\f\v\x7f'
import unicodedata
for name, printwidth in samples:
uchr = unicodedata.lookup(name)
calculatedwidth = wcwidth(uchr)
assert calculatedwidth == printwidth, \
'width for %r should be %d, but is %d?' % (uchr, printwidth, calculatedwidth)
for c in nonprinting:
calculatedwidth = wcwidth(c)
assert calculatedwidth < 0, \
'%r is a control character, but wcwidth gives %d' % (c, calculatedwidth)
assert wcwidth('\0') == 0 # special case
# depending on how python is compiled, code points above U+FFFF may not be
# treated as single characters, so ord() won't work. test a few of these
# manually.
assert mk_wcwidth(0xe01ef) == 0
assert mk_wcwidth(0x10ffff) == 1
assert mk_wcwidth(0x3fffd) == 2
teststr = u'B\0ig br\u00f8wn moose\ub143\u200b'
calculatedwidth = wcswidth(teststr)
assert calculatedwidth == 17, 'expected 17, got %d' % calculatedwidth
calculatedwidth = wcswidth_cjk(teststr)
assert calculatedwidth == 18, 'expected 18, got %d' % calculatedwidth
assert wcswidth(u'foobar\u200b\a') < 0
print 'tests pass.'
| apache-2.0 |
Learningtribes/edx-platform | common/djangoapps/student/management/commands/manage_group.py | 52 | 4853 | """
Management command `manage_group` is used to idempotently create Django groups
and set their permissions by name.
"""
from django.apps import apps
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.utils.translation import gettext as _
class Command(BaseCommand):
# pylint: disable=missing-docstring
help = 'Creates the specified group, if it does not exist, and sets its permissions.'
def add_arguments(self, parser):
parser.add_argument('group_name')
parser.add_argument('--remove', dest='is_remove', action='store_true')
parser.add_argument('-p', '--permissions', nargs='*', default=[])
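# Illustrative invocations (the group and permission names below are example
# values, not taken from this repository):
#
#   ./manage.py manage_group content_editors \
#       -p auth:user:change_user auth:user:delete_user
#   ./manage.py manage_group content_editors --remove
#
# Permissions use the app_label:model_name:permission_codename format
# expected by _resolve_permissions() below.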
def _handle_remove(self, group_name):
try:
Group.objects.get(name=group_name).delete() # pylint: disable=no-member
self.stderr.write(_('Removed group: "{}"').format(group_name))
except Group.DoesNotExist:
self.stderr.write(_('Did not find a group with name "{}" - skipping.').format(group_name))
@transaction.atomic
def handle(self, group_name, is_remove, permissions=None, *args, **options):
if is_remove:
self._handle_remove(group_name)
return
old_permissions = set()
group, created = Group.objects.get_or_create(name=group_name) # pylint: disable=no-member
if created:
try:
# Needed for sqlite backend (i.e. in tests) because
# name.max_length won't be enforced by the db.
# See also http://www.sqlite.org/faq.html#q9
group.full_clean()
except ValidationError as exc:
# give a more helpful error
raise CommandError(
_(
'Invalid group name: "{group_name}". {messages}'
).format(
group_name=group_name,
messages=exc.messages[0]
)
)
self.stderr.write(_('Created new group: "{}"').format(group_name))
else:
self.stderr.write(_('Found existing group: "{}"').format(group_name))
old_permissions = set(group.permissions.all())
new_permissions = self._resolve_permissions(permissions or set())
add_permissions = new_permissions - old_permissions
remove_permissions = old_permissions - new_permissions
self.stderr.write(
_(
'Adding {codenames} permissions to group "{group}"'
).format(
codenames=[ap.name for ap in add_permissions],
group=group.name
)
)
self.stderr.write(
_(
'Removing {codenames} permissions from group "{group}"'
).format(
codenames=[rp.codename for rp in remove_permissions],
group=group.name
)
)
group.permissions = new_permissions
group.save()
def _resolve_permissions(self, permissions):
new_permissions = set()
for permission in permissions:
try:
app_label, model_name, codename = permission.split(':')
except ValueError:
# give a more helpful error
raise CommandError(_(
'Invalid permission option: "{}". Please specify permissions '
'using the format: app_label:model_name:permission_codename.'
).format(permission))
# this will raise a LookupError if it fails.
try:
model_class = apps.get_model(app_label, model_name)
except LookupError as exc:
raise CommandError(str(exc))
content_type = ContentType.objects.get_for_model(model_class)
try:
new_permission = Permission.objects.get( # pylint: disable=no-member
content_type=content_type,
codename=codename,
)
except Permission.DoesNotExist:
# give a more helpful error
raise CommandError(
_(
'Invalid permission codename: "{codename}". No such permission exists '
'for the model {module}.{model_name}.'
).format(
codename=codename,
module=model_class.__module__,
model_name=model_class.__name__,
)
)
new_permissions.add(new_permission)
return new_permissions
| agpl-3.0 |
Workday/OpenFrame | tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/cognito/sync/__init__.py | 473 | 1123 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| bsd-3-clause |
ngouzy/smartchangelog | smartchangelog/gitcmd.py | 2 | 1093 | import subprocess
import os
from typing import cast, List
class GitCmdError(Exception):
"""
Git command error
"""
def git_command(*git_args: str) -> str:
args = ['git'] + cast(List[str], list(git_args))
cp = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cp.returncode == 0 and len(cp.stderr) == 0:
return cp.stdout.decode('utf-8').strip('\n')
else:
raise GitCmdError(cp.stderr.decode('utf-8').strip('\n'))
def is_inside_work_tree() -> bool:
try:
result = git_command('rev-parse', '--is-inside-work-tree')
return result == 'true'
except GitCmdError:
return False
def get_gitdir() -> str:
if is_inside_work_tree():
path = os.path.join(git_command('rev-parse', '--show-toplevel'), '.git')
return os.path.abspath(path)
else:
raise GitCmdError("You have to be inside a git work tree")
def log(revision_range: str) -> str:
return git_command("log", revision_range, "--date", "iso")
def tag() -> List[str]:
return git_command("tag").split("\n")
| mit |
j00bar/ansible | lib/ansible/inventory/host.py | 32 | 4553 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars, get_unique_id
__all__ = ['Host']
class Host:
''' a single ansible host '''
#__slots__ = [ 'name', 'vars', 'groups' ]
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def __eq__(self, other):
if not isinstance(other, Host):
return False
return self._uuid == other._uuid
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
def serialize(self):
groups = []
for group in self.groups:
groups.append(group.serialize())
return dict(
name=self.name,
vars=self.vars.copy(),
address=self.address,
uuid=self._uuid,
gathered_facts=self._gathered_facts,
groups=groups,
implicit=self.implicit,
)
def deserialize(self, data):
self.__init__(gen_uuid=False)
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.address = data.get('address', '')
self._uuid = data.get('uuid', None)
self.implicit = data.get('implicit', False)
groups = data.get('groups', [])
for group_data in groups:
g = Group()
g.deserialize(group_data)
self.groups.append(g)
def __init__(self, name=None, port=None, gen_uuid=True):
self.name = name
self.vars = {}
self.groups = []
self.address = name
if port:
self.set_variable('ansible_port', int(port))
self._gathered_facts = False
self._uuid = None
if gen_uuid:
self._uuid = get_unique_id()
self.implicit = False
def __repr__(self):
return self.get_name()
def get_name(self):
return self.name
@property
def gathered_facts(self):
return self._gathered_facts
def set_gathered_facts(self, gathered):
self._gathered_facts = gathered
def populate_ancestors(self):
# populate ancestors
for group in self.groups:
self.add_group(group)
def add_group(self, group):
# populate ancestors
for oldg in group.get_ancestors():
if oldg not in self.groups:
self.add_group(oldg)
if group not in self.groups:
self.groups.append(group)
def remove_group(self, group):
if group in self.groups:
self.groups.remove(group)
# remove exclusive ancestors, except 'all'!
for oldg in group.get_ancestors():
if oldg.name != 'all':
for childg in self.groups:
if oldg in childg.get_ancestors():
break
else:
self.remove_group(oldg)
def set_variable(self, key, value):
self.vars[key] = value
def get_groups(self):
return self.groups
def get_vars(self):
results = {}
results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in self.get_groups() if g.name != 'all'])
return results
def get_group_vars(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: (g.depth, g.priority, g.name)):
results = combine_vars(results, group.get_vars())
return results
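# Sketch of typical use (host/group names are made up for illustration, and
# Group(name=...) is assumed to accept a name as in ansible.inventory.group):
#
#   h = Host(name='web1.example.com', port=2222)
#   h.set_variable('ansible_host', '10.0.0.5')
#   g = Group(name='webservers')
#   h.add_group(g)
#   h.get_vars()['inventory_hostname']   -> 'web1.example.com'
#   h.get_vars()['group_names']          -> ['webservers']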
| gpl-3.0 |
siosio/intellij-community | plugins/hg4idea/testData/bin/hgext/convert/cvsps.py | 91 | 31477 | # Mercurial built-in replacement for cvsps.
#
# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import re
import cPickle as pickle
from mercurial import util
from mercurial.i18n import _
from mercurial import hook
from mercurial import util
class logentry(object):
'''Class logentry has the following attributes:
.author - author name as CVS knows it
.branch - name of branch this revision is on
.branches - revision tuple of branches starting at this revision
.comment - commit message
.commitid - CVS commitid or None
.date - the commit date as a (time, tz) tuple
.dead - true if file revision is dead
.file - Name of file
.lines - a tuple (+lines, -lines) or None
.parent - Previous revision of this entry
.rcs - name of file as returned from CVS
.revision - revision number as tuple
.tags - list of tags on the file
.synthetic - is this a synthetic "file ... added on ..." revision?
.mergepoint - the branch that has been merged from (if present in
rlog output) or None
.branchpoints - the branches that start at the current entry or empty
'''
def __init__(self, **entries):
self.synthetic = False
self.__dict__.update(entries)
def __repr__(self):
items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
return "%s(%s)"%(type(self).__name__, ", ".join(items))
class logerror(Exception):
pass
def getrepopath(cvspath):
"""Return the repository path from a CVS path.
>>> getrepopath('/foo/bar')
'/foo/bar'
>>> getrepopath('c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:10/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:10c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
'/foo/bar'
>>> getrepopath('user@server/path/to/repository')
'/path/to/repository'
"""
# According to CVS manual, CVS paths are expressed like:
# [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
#
# The CVS path is split into parts, and the position of the first
# occurrence of the '/' char after the '@' is located. The result is the
# rest of the string from that '/' onward, including the '/' itself.
parts = cvspath.split(':')
atposition = parts[-1].find('@')
start = 0
if atposition != -1:
start = atposition
repopath = parts[-1][parts[-1].find('/', start):]
return repopath
def createlog(ui, directory=None, root="", rlog=True, cache=None):
'''Collect the CVS rlog'''
# Because we store many duplicate commit log messages, reusing strings
# saves a lot of memory and pickle storage space.
_scache = {}
def scache(s):
"return a shared version of a string"
return _scache.setdefault(s, s)
ui.status(_('collecting CVS rlog\n'))
log = [] # list of logentry objects containing the CVS state
# patterns to match in CVS (r)log output, by state of use
re_00 = re.compile('RCS file: (.+)$')
re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
re_02 = re.compile('cvs (r?log|server): (.+)\n$')
re_03 = re.compile("(Cannot access.+CVSROOT)|"
"(can't create temporary directory.+)$")
re_10 = re.compile('Working file: (.+)$')
re_20 = re.compile('symbolic names:')
re_30 = re.compile('\t(.+): ([\\d.]+)$')
re_31 = re.compile('----------------------------$')
re_32 = re.compile('======================================='
'======================================$')
re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
r'(\s+commitid:\s+([^;]+);)?'
r'(.*mergepoint:\s+([^;]+);)?')
re_70 = re.compile('branches: (.+);$')
file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
prefix = '' # leading path to strip of what we get from CVS
if directory is None:
# Current working directory
# Get the real directory in the repository
try:
prefix = open(os.path.join('CVS','Repository')).read().strip()
directory = prefix
if prefix == ".":
prefix = ""
except IOError:
raise logerror(_('not a CVS sandbox'))
if prefix and not prefix.endswith(os.sep):
prefix += os.sep
# Use the Root file in the sandbox, if it exists
try:
root = open(os.path.join('CVS','Root')).read().strip()
except IOError:
pass
if not root:
root = os.environ.get('CVSROOT', '')
# read log cache if one exists
oldlog = []
date = None
if cache:
cachedir = os.path.expanduser('~/.hg.cvsps')
if not os.path.exists(cachedir):
os.mkdir(cachedir)
# The cvsps cache pickle needs a uniquified name, based on the
# repository location. The address may have all sort of nasties
# in it, slashes, colons and such. So here we take just the
# alphanumeric characters, concatenated in a way that does not
# mix up the various components, so that
# :pserver:user@server:/path
# and
# /pserver/user/server/path
# are mapped to different cache file names.
cachefile = root.split(":") + [directory, "cache"]
cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
cachefile = os.path.join(cachedir,
'.'.join([s for s in cachefile if s]))
if cache == 'update':
try:
ui.note(_('reading cvs log cache %s\n') % cachefile)
oldlog = pickle.load(open(cachefile))
for e in oldlog:
if not (util.safehasattr(e, 'branchpoints') and
util.safehasattr(e, 'commitid') and
util.safehasattr(e, 'mergepoint')):
ui.status(_('ignoring old cache\n'))
oldlog = []
break
ui.note(_('cache has %d log entries\n') % len(oldlog))
except Exception, e:
ui.note(_('error reading cache: %r\n') % e)
if oldlog:
date = oldlog[-1].date # last commit date as a (time,tz) tuple
date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
# build the CVS commandline
cmd = ['cvs', '-q']
if root:
cmd.append('-d%s' % root)
p = util.normpath(getrepopath(root))
if not p.endswith('/'):
p += '/'
if prefix:
# looks like normpath replaces "" by "."
prefix = p + util.normpath(prefix)
else:
prefix = p
cmd.append(['log', 'rlog'][rlog])
if date:
# no space between option and date string
cmd.append('-d>%s' % date)
cmd.append(directory)
# state machine begins here
tags = {} # dictionary of revisions on current file with their tags
branchmap = {} # mapping between branch names and revision numbers
state = 0
store = False # set when a new record can be appended
cmd = [util.shellquote(arg) for arg in cmd]
ui.note(_("running %s\n") % (' '.join(cmd)))
ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
pfp = util.popen(' '.join(cmd))
peek = pfp.readline()
while True:
line = peek
if line == '':
break
peek = pfp.readline()
if line.endswith('\n'):
line = line[:-1]
#ui.debug('state=%d line=%r\n' % (state, line))
if state == 0:
# initial state, consume input until we see 'RCS file'
match = re_00.match(line)
if match:
rcs = match.group(1)
tags = {}
if rlog:
filename = util.normpath(rcs[:-2])
if filename.startswith(prefix):
filename = filename[len(prefix):]
if filename.startswith('/'):
filename = filename[1:]
if filename.startswith('Attic/'):
filename = filename[6:]
else:
filename = filename.replace('/Attic/', '/')
state = 2
continue
state = 1
continue
match = re_01.match(line)
if match:
raise logerror(match.group(1))
match = re_02.match(line)
if match:
raise logerror(match.group(2))
if re_03.match(line):
raise logerror(line)
elif state == 1:
# expect 'Working file' (only when using log instead of rlog)
match = re_10.match(line)
assert match, _('RCS file must be followed by working file')
filename = util.normpath(match.group(1))
state = 2
elif state == 2:
# expect 'symbolic names'
if re_20.match(line):
branchmap = {}
state = 3
elif state == 3:
# read the symbolic names and store as tags
match = re_30.match(line)
if match:
rev = [int(x) for x in match.group(2).split('.')]
# Convert magic branch number to an odd-numbered one
revn = len(rev)
if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
rev = rev[:-2] + rev[-1:]
rev = tuple(rev)
if rev not in tags:
tags[rev] = []
tags[rev].append(match.group(1))
branchmap[match.group(1)] = match.group(2)
elif re_31.match(line):
state = 5
elif re_32.match(line):
state = 0
elif state == 4:
# expecting '------' separator before first revision
if re_31.match(line):
state = 5
else:
assert not re_32.match(line), _('must have at least '
'some revisions')
elif state == 5:
# expecting revision number and possibly (ignored) lock indication
# we create the logentry here from values stored in states 0 to 4,
# as this state is re-entered for subsequent revisions of a file.
match = re_50.match(line)
assert match, _('expected revision number')
e = logentry(rcs=scache(rcs),
file=scache(filename),
revision=tuple([int(x) for x in
match.group(1).split('.')]),
branches=[],
parent=None,
commitid=None,
mergepoint=None,
branchpoints=set())
state = 6
elif state == 6:
# expecting date, author, state, lines changed
match = re_60.match(line)
assert match, _('revision must be followed by date line')
d = match.group(1)
if d[2] == '/':
# Y2K
d = '19' + d
if len(d.split()) != 3:
# cvs log dates always in GMT
d = d + ' UTC'
e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S'])
e.author = scache(match.group(2))
e.dead = match.group(3).lower() == 'dead'
if match.group(5):
if match.group(6):
e.lines = (int(match.group(5)), int(match.group(6)))
else:
e.lines = (int(match.group(5)), 0)
elif match.group(6):
e.lines = (0, int(match.group(6)))
else:
e.lines = None
if match.group(7): # cvs 1.12 commitid
e.commitid = match.group(8)
if match.group(9): # cvsnt mergepoint
myrev = match.group(10).split('.')
if len(myrev) == 2: # head
e.mergepoint = 'HEAD'
else:
myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
branches = [b for b in branchmap if branchmap[b] == myrev]
assert len(branches) == 1, ('unknown branch: %s'
% e.mergepoint)
e.mergepoint = branches[0]
e.comment = []
state = 7
elif state == 7:
# read the revision numbers of branches that start at this revision
# or store the commit log message otherwise
m = re_70.match(line)
if m:
e.branches = [tuple([int(y) for y in x.strip().split('.')])
for x in m.group(1).split(';')]
state = 8
elif re_31.match(line) and re_50.match(peek):
state = 5
store = True
elif re_32.match(line):
state = 0
store = True
else:
e.comment.append(line)
elif state == 8:
# store commit log message
if re_31.match(line):
cpeek = peek
if cpeek.endswith('\n'):
cpeek = cpeek[:-1]
if re_50.match(cpeek):
state = 5
store = True
else:
e.comment.append(line)
elif re_32.match(line):
state = 0
store = True
else:
e.comment.append(line)
# When a file is added on a branch B1, CVS creates a synthetic
# dead trunk revision 1.1 so that the branch has a root.
# Likewise, if you merge such a file to a later branch B2 (one
# that already existed when the file was added on B1), CVS
# creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
# these revisions now, but mark them synthetic so
# createchangeset() can take care of them.
if (store and
e.dead and
e.revision[-1] == 1 and # 1.1 or 1.1.x.1
len(e.comment) == 1 and
file_added_re.match(e.comment[0])):
ui.debug('found synthetic revision in %s: %r\n'
% (e.rcs, e.comment[0]))
e.synthetic = True
if store:
# clean up the results and save in the log.
store = False
e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
e.comment = scache('\n'.join(e.comment))
revn = len(e.revision)
if revn > 3 and (revn % 2) == 0:
e.branch = tags.get(e.revision[:-1], [None])[0]
else:
e.branch = None
# find the branches starting from this revision
branchpoints = set()
for branch, revision in branchmap.iteritems():
revparts = tuple([int(i) for i in revision.split('.')])
if len(revparts) < 2: # bad tags
continue
if revparts[-2] == 0 and revparts[-1] % 2 == 0:
# normal branch
if revparts[:-2] == e.revision:
branchpoints.add(branch)
elif revparts == (1, 1, 1): # vendor branch
if revparts in e.branches:
branchpoints.add(branch)
e.branchpoints = branchpoints
log.append(e)
if len(log) % 100 == 0:
ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
log.sort(key=lambda x: (x.rcs, x.revision))
# find parent revisions of individual files
versions = {}
for e in log:
branch = e.revision[:-1]
p = versions.get((e.rcs, branch), None)
if p is None:
p = e.revision[:-2]
e.parent = p
versions[(e.rcs, branch)] = e.revision
# update the log cache
if cache:
if log:
# join up the old and new logs
log.sort(key=lambda x: x.date)
if oldlog and oldlog[-1].date >= log[0].date:
raise logerror(_('log cache overlaps with new log entries,'
' re-run without cache.'))
log = oldlog + log
# write the new cachefile
ui.note(_('writing cvs log cache %s\n') % cachefile)
pickle.dump(log, open(cachefile, 'w'))
else:
log = oldlog
ui.status(_('%d log entries\n') % len(log))
hook.hook(ui, None, "cvslog", True, log=log)
return log
class changeset(object):
'''Class changeset has the following attributes:
.id - integer identifying this changeset (list index)
.author - author name as CVS knows it
.branch - name of branch this changeset is on, or None
.comment - commit message
.commitid - CVS commitid or None
.date - the commit date as a (time,tz) tuple
.entries - list of logentry objects in this changeset
.parents - list of one or two parent changesets
.tags - list of tags on this changeset
.synthetic - from synthetic revision "file ... added on branch ..."
.mergepoint- the branch that has been merged from or None
.branchpoints- the branches that start at the current entry or empty
'''
def __init__(self, **entries):
self.synthetic = False
self.__dict__.update(entries)
def __repr__(self):
items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
return "%s(%s)"%(type(self).__name__, ", ".join(items))
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
'''Convert log into changesets.'''
ui.status(_('creating changesets\n'))
# try to order commitids by date
mindate = {}
for e in log:
if e.commitid:
mindate[e.commitid] = min(e.date, mindate.get(e.commitid))
# Merge changesets
log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
x.author, x.branch, x.date, x.branchpoints))
changesets = []
files = set()
c = None
for i, e in enumerate(log):
# Check if log entry belongs to the current changeset or not.
# Since CVS is file-centric, two different file revisions with
# different branchpoints should be treated as belonging to two
# different changesets (and the ordering is important and not
# honoured by cvsps at this point).
#
# Consider the following case:
# foo 1.1 branchpoints: [MYBRANCH]
# bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
#
# Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
# later version of foo may be in MYBRANCH2, so foo should be the
# first changeset and bar the next and MYBRANCH and MYBRANCH2
# should both start off of the bar changeset. No provisions are
# made to ensure that this is, in fact, what happens.
if not (c and e.branchpoints == c.branchpoints and
(# cvs commitids
(e.commitid is not None and e.commitid == c.commitid) or
(# no commitids, use fuzzy commit detection
(e.commitid is None or c.commitid is None) and
e.comment == c.comment and
e.author == c.author and
e.branch == c.branch and
((c.date[0] + c.date[1]) <=
(e.date[0] + e.date[1]) <=
(c.date[0] + c.date[1]) + fuzz) and
e.file not in files))):
c = changeset(comment=e.comment, author=e.author,
branch=e.branch, date=e.date,
entries=[], mergepoint=e.mergepoint,
branchpoints=e.branchpoints, commitid=e.commitid)
changesets.append(c)
files = set()
if len(changesets) % 100 == 0:
t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
ui.status(util.ellipsis(t, 80) + '\n')
c.entries.append(e)
files.add(e.file)
c.date = e.date # changeset date is date of latest commit in it
# Mark synthetic changesets
for c in changesets:
# Synthetic revisions always get their own changeset, because
# the log message includes the filename. E.g. if you add file3
# and file4 on a branch, you get four log entries and three
# changesets:
# "File file3 was added on branch ..." (synthetic, 1 entry)
# "File file4 was added on branch ..." (synthetic, 1 entry)
# "Add file3 and file4 to fix ..." (real, 2 entries)
# Hence the check for 1 entry here.
c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
# Sort files in each changeset
def entitycompare(l, r):
'Mimic cvsps sorting order'
l = l.file.split('/')
r = r.file.split('/')
nl = len(l)
nr = len(r)
n = min(nl, nr)
for i in range(n):
if i + 1 == nl and nl < nr:
return -1
elif i + 1 == nr and nl > nr:
return +1
elif l[i] < r[i]:
return -1
elif l[i] > r[i]:
return +1
return 0
for c in changesets:
c.entries.sort(entitycompare)
# Sort changesets by date
def cscmp(l, r):
d = sum(l.date) - sum(r.date)
if d:
return d
# detect vendor branches and initial commits on a branch
le = {}
for e in l.entries:
le[e.rcs] = e.revision
re = {}
for e in r.entries:
re[e.rcs] = e.revision
d = 0
for e in l.entries:
if re.get(e.rcs, None) == e.parent:
assert not d
d = 1
break
for e in r.entries:
if le.get(e.rcs, None) == e.parent:
assert not d
d = -1
break
return d
changesets.sort(cscmp)
# Collect tags
globaltags = {}
for c in changesets:
for e in c.entries:
for tag in e.tags:
# remember which is the latest changeset to have this tag
globaltags[tag] = c
for c in changesets:
tags = set()
for e in c.entries:
tags.update(e.tags)
# remember tags only if this is the latest changeset to have it
c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
# Find parent changesets, handle {{mergetobranch BRANCHNAME}}
# by inserting dummy changesets with two parents, and handle
# {{mergefrombranch BRANCHNAME}} by setting two parents.
if mergeto is None:
mergeto = r'{{mergetobranch ([-\w]+)}}'
if mergeto:
mergeto = re.compile(mergeto)
if mergefrom is None:
mergefrom = r'{{mergefrombranch ([-\w]+)}}'
if mergefrom:
mergefrom = re.compile(mergefrom)
versions = {} # changeset index where we saw any particular file version
branches = {} # changeset index where we saw a branch
n = len(changesets)
i = 0
while i < n:
c = changesets[i]
for f in c.entries:
versions[(f.rcs, f.revision)] = i
p = None
if c.branch in branches:
p = branches[c.branch]
else:
# first changeset on a new branch
# the parent is a changeset with the branch in its
# branchpoints such that it is the latest possible
# commit without any intervening, unrelated commits.
for candidate in xrange(i):
if c.branch not in changesets[candidate].branchpoints:
if p is not None:
break
continue
p = candidate
c.parents = []
if p is not None:
p = changesets[p]
# Ensure no changeset has a synthetic changeset as a parent.
while p.synthetic:
assert len(p.parents) <= 1, \
_('synthetic changeset cannot have multiple parents')
if p.parents:
p = p.parents[0]
else:
p = None
break
if p is not None:
c.parents.append(p)
if c.mergepoint:
if c.mergepoint == 'HEAD':
c.mergepoint = None
c.parents.append(changesets[branches[c.mergepoint]])
if mergefrom:
m = mergefrom.search(c.comment)
if m:
m = m.group(1)
if m == 'HEAD':
m = None
try:
candidate = changesets[branches[m]]
except KeyError:
ui.warn(_("warning: CVS commit message references "
"non-existent branch %r:\n%s\n")
% (m, c.comment))
if m in branches and c.branch != m and not candidate.synthetic:
c.parents.append(candidate)
if mergeto:
m = mergeto.search(c.comment)
if m:
if m.groups():
m = m.group(1)
if m == 'HEAD':
m = None
else:
m = None # if no group found then merge to HEAD
if m in branches and c.branch != m:
# insert empty changeset for merge
cc = changeset(
author=c.author, branch=m, date=c.date,
comment='convert-repo: CVS merge from branch %s'
% c.branch,
entries=[], tags=[],
parents=[changesets[branches[m]], c])
changesets.insert(i + 1, cc)
branches[m] = i + 1
# adjust our loop counters now we have inserted a new entry
n += 1
i += 2
continue
branches[c.branch] = i
i += 1
# Drop synthetic changesets (safe now that we have ensured no other
# changesets can have them as parents).
i = 0
while i < len(changesets):
if changesets[i].synthetic:
del changesets[i]
else:
i += 1
# Number changesets
for i, c in enumerate(changesets):
c.id = i + 1
ui.status(_('%d changeset entries\n') % len(changesets))
hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
return changesets
def debugcvsps(ui, *args, **opts):
'''Read CVS rlog for current directory or named path in
repository, and convert the log to changesets based on matching
commit log entries and dates.
'''
if opts["new_cache"]:
cache = "write"
elif opts["update_cache"]:
cache = "update"
else:
cache = None
revisions = opts["revisions"]
try:
if args:
log = []
for d in args:
log += createlog(ui, d, root=opts["root"], cache=cache)
else:
log = createlog(ui, root=opts["root"], cache=cache)
except logerror, e:
ui.write("%r\n"%e)
return
changesets = createchangeset(ui, log, opts["fuzz"])
del log
# Print changesets (optionally filtered)
off = len(revisions)
branches = {} # latest version number in each branch
ancestors = {} # parent branch
for cs in changesets:
if opts["ancestors"]:
if cs.branch not in branches and cs.parents and cs.parents[0].id:
ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
cs.parents[0].id)
branches[cs.branch] = cs.id
# limit by branches
if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
continue
if not off:
# Note: trailing spaces on several lines here are needed to have
# bug-for-bug compatibility with cvsps.
ui.write('---------------------\n')
ui.write(('PatchSet %d \n' % cs.id))
ui.write(('Date: %s\n' % util.datestr(cs.date,
'%Y/%m/%d %H:%M:%S %1%2')))
ui.write(('Author: %s\n' % cs.author))
ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
','.join(cs.tags) or '(none)')))
if cs.branchpoints:
ui.write(('Branchpoints: %s \n') %
', '.join(sorted(cs.branchpoints)))
if opts["parents"] and cs.parents:
if len(cs.parents) > 1:
ui.write(('Parents: %s\n' %
(','.join([str(p.id) for p in cs.parents]))))
else:
ui.write(('Parent: %d\n' % cs.parents[0].id))
if opts["ancestors"]:
b = cs.branch
r = []
while b:
b, c = ancestors[b]
r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
if r:
ui.write(('Ancestors: %s\n' % (','.join(r))))
ui.write(('Log:\n'))
ui.write('%s\n\n' % cs.comment)
ui.write(('Members: \n'))
for f in cs.entries:
fn = f.file
if fn.startswith(opts["prefix"]):
fn = fn[len(opts["prefix"]):]
ui.write('\t%s:%s->%s%s \n' % (
fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
'.'.join([str(x) for x in f.revision]),
['', '(DEAD)'][f.dead]))
ui.write('\n')
# have we seen the start tag?
if revisions and off:
if revisions[0] == str(cs.id) or \
revisions[0] in cs.tags:
off = False
# see if we reached the end tag
if len(revisions) > 1 and not off:
if revisions[1] == str(cs.id) or \
revisions[1] in cs.tags:
break
| apache-2.0 |
jimi-c/ansible | lib/ansible/plugins/action/__init__.py | 1 | 49785 | # Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import random
import re
import stat
import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail
from ansible.executor.module_common import modify_module
from ansible.module_utils.json_utils import _filter_non_json_lines
from ansible.module_utils.six import binary_type, string_types, text_type, iteritems, with_metaclass
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.utils.jsonify import jsonify
from ansible.release import __version__
from ansible.utils.unsafe_proxy import wrap_var
from ansible.vars.clean import remove_internal_keys
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase(with_metaclass(ABCMeta, object)):
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
self._cleanup_remote_tmp = False
self._supports_check_mode = True
self._supports_async = False
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
self._used_interpreter = None
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls
another one and wants to use the same remote tmp for both should set
self._connection._shell.tmpdir rather than this parameter.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementors of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
result = {}
if tmp is not None:
result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir']
del tmp
if self._task.async_val and not self._supports_async:
raise AnsibleActionFail('async is not supported for this task.')
elif self._play_context.check_mode and not self._supports_check_mode:
raise AnsibleActionSkip('check mode is not supported for this task.')
elif self._task.async_val and self._play_context.check_mode:
raise AnsibleActionFail('check mode and async cannot be used on same task.')
if self._connection._shell.tmpdir is None and self._early_needs_tmp_path():
self._make_tmp_path()
return result
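# A minimal sketch of a concrete action plugin built on run() (shown as a
# comment so it does not alter this base class; the module name and empty
# args are assumptions for illustration):
#
#   class ActionModule(ActionBase):
#       def run(self, tmp=None, task_vars=None):
#           result = super(ActionModule, self).run(tmp, task_vars)
#           # delegate the real work to a module executed on the target
#           result.update(self._execute_module(module_name='ping',
#                                              module_args={},
#                                              task_vars=task_vars))
#           self._remove_tmp_path(self._connection._shell.tmpdir)
#           return result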
def _remote_file_exists(self, path):
cmd = self._connection._shell.exists(path)
result = self._low_level_execute_command(cmd=cmd, sudoable=True)
if result['rc'] == 0:
return True
return False
def _configure_module(self, module_name, module_args, task_vars=None):
'''
Handles the loading and templating of the module code through the
modify_module() function.
'''
if task_vars is None:
task_vars = dict()
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# win_stat, win_file, and win_copy are not just like their
# python counterparts but they are compatible enough for our
# internal usage
if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
module_name = 'win_%s' % module_name
# Remove extra quotes surrounding path parameters before sending to module.
if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
if module_path:
break
else: # This is a for-else: http://bit.ly/1ElPkyg
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes. We check specifically
# for win_ping here, otherwise the code would look for ping.ps1
if '.ps1' in self._connection.module_implementation_preferences:
ping_module = 'win_ping'
else:
ping_module = 'ping'
module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences)
if module_path2 is not None:
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
else:
raise AnsibleError("The module %s was not found in configured module paths. "
"Additionally, core modules are missing. If this is a checkout, "
"run 'git pull --rebase' to correct this problem." % (module_name))
# insert shared code and arguments into the module
final_environment = dict()
self._compute_environment_string(final_environment)
(module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, self._templar,
task_vars=task_vars,
module_compression=self._play_context.module_compression,
async_timeout=self._task.async_val,
become=self._play_context.become,
become_method=self._play_context.become_method,
become_user=self._play_context.become_user,
become_password=self._play_context.become_pass,
become_flags=self._play_context.become_flags,
environment=final_environment)
return (module_style, module_shebang, module_data, module_path)
def _compute_environment_string(self, raw_environment_out=None):
'''
Builds the environment string to be used when executing the remote task.
'''
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [environments]
# The order of environments matters to make sure we merge
# in the parent's values first so those in the block then
# task 'win' in precedence
for environment in environments:
if environment is None or len(environment) == 0:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
if len(final_environment) > 0:
final_environment = self._templar.template(final_environment)
if isinstance(raw_environment_out, dict):
raw_environment_out.clear()
raw_environment_out.update(final_environment)
return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
'''
Determines if a tmp path should be created before the action is executed.
'''
return getattr(self, 'TRANSFERS_FILES', False)
def _is_pipelining_enabled(self, module_style, wrap_async=False):
'''
Determines if we are required and can do pipelining
'''
# any of these require a true
for condition in [
self._connection.has_pipelining,
self._play_context.pipelining or self._connection.always_pipeline_modules, # pipelining enabled for play or connection requires it (eg winrm)
module_style == "new", # old style modules do not support pipelining
not C.DEFAULT_KEEP_REMOTE_FILES, # user wants remote files
not wrap_async or self._connection.always_pipeline_modules, # async does not normally support pipelining unless it does (eg winrm)
self._play_context.become_method != 'su', # su does not work with pipelining,
# FIXME: we might need to make become_method exclusion a configurable list
]:
if not condition:
return False
return True
def _get_admin_users(self):
'''
Returns a list of admin users that are configured for the current shell
plugin
'''
try:
admin_users = self._connection._shell.get_option('admin_users')
except AnsibleError:
# fallback for old custom plugins w/o get_option
admin_users = ['root']
return admin_users
def _is_become_unprivileged(self):
'''
The user is not the same as the connection user and is not part of the
shell configured admin users
'''
# if we don't use become then we know we aren't switching to a
# different unprivileged user
if not self._play_context.become:
return False
# if we use become and the user is not an admin (or same user) then
# we need to return become_unprivileged as True
admin_users = self._get_admin_users()
try:
remote_user = self._connection.get_option('remote_user')
except AnsibleError:
remote_user = self._play_context.remote_user
return bool(self._play_context.become_user not in admin_users + [remote_user])
def _make_tmp_path(self, remote_user=None):
'''
Create and return a temporary path on a remote box.
'''
become_unprivileged = self._is_become_unprivileged()
try:
remote_tmp = self._connection._shell.get_option('remote_tmp')
except AnsibleError:
remote_tmp = '~/.ansible/tmp'
# deal with tmpdir creation
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
if getattr(self._connection, '_remote_is_local', False):
tmpdir = C.DEFAULT_LOCAL_TMP
else:
tmpdir = self._remote_expand_user(remote_tmp, sudoable=False)
cmd = self._connection._shell.mkdtemp(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
result = self._low_level_execute_command(cmd, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
if self._play_context.verbosity > 3:
output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
else:
output = (u'SSH encountered an unknown error during the connection. '
'We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
elif u'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = ('Authentication or permission failure. '
'In some cases, you may have been able to authenticate and did not have permissions on the target directory. '
'Consider changing the remote tmp path in ansible.cfg to a path rooted in "/tmp". '
'Failed command was: %s, exited with result %d' % (cmd, result['rc']))
if 'stdout' in result and result['stdout'] != u'':
output = output + u", stdout output: %s" % result['stdout']
if self._play_context.verbosity > 3 and 'stderr' in result and result['stderr'] != u'':
output += u", stderr output: %s" % result['stderr']
raise AnsibleConnectionFailure(output)
else:
self._cleanup_remote_tmp = True
try:
stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
except IndexError:
# stdout was empty or just space, set to / to trigger error in next if
rc = '/'
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
self._connection._shell.tmpdir = rc
return rc
def _should_remove_tmp_path(self, tmp_path):
'''Determine if temporary path should be deleted or kept by user request/config'''
return tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path
def _remove_tmp_path(self, tmp_path):
'''Remove a temporary path we created. '''
if tmp_path is None and self._connection._shell.tmpdir:
tmp_path = self._connection._shell.tmpdir
if self._should_remove_tmp_path(tmp_path):
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False)
if tmp_rm_res.get('rc', 0) != 0:
display.warning('Error deleting remote temporary files (rc: %s, stderr: %s})'
% (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
else:
self._connection._shell.tmpdir = None
def _transfer_file(self, local_path, remote_path):
self._connection.put_file(local_path, remote_path)
return remote_path
def _transfer_data(self, remote_path, data):
'''
Copies the module data out to the temporary module path.
'''
if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
afo = os.fdopen(afd, 'wb')
try:
data = to_bytes(data, errors='surrogate_or_strict')
afo.write(data)
except Exception as e:
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e))
afo.flush()
afo.close()
try:
self._transfer_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
"""
We need the files we upload to be readable (and sometimes executable)
by the user being sudo'd to but we want to limit other people's access
(because the files could contain passwords or other private
information). We achieve this in one of these ways:
* If no sudo is performed or the remote_user is sudo'ing to
themselves, we don't have to change permissions.
* If the remote_user sudo's to a privileged user (for instance, root),
we don't have to change permissions
* If the remote_user sudo's to an unprivileged user then we attempt to
grant the unprivileged user access via file system acls.
* If granting file system acls fails we try to change the owner of the
file with chown which only works in case the remote_user is
privileged or the remote systems allows chown calls by unprivileged
users (e.g. HP-UX)
* If the chown fails we can set the file to be world readable so that
the second unprivileged user can read the file.
Since this could allow other users to get access to private
information we only do this if ansible is configured with
"allow_world_readable_tmpfiles" in the ansible.cfg
"""
if remote_user is None:
remote_user = self._play_context.remote_user
if self._connection._shell.SHELL_FAMILY == 'powershell':
# This won't work on Powershell as-is, so we'll just completely skip until
# we have a need for it, at which point we'll have to do something different.
return remote_paths
if self._is_become_unprivileged():
# Unprivileged user that's different than the ssh user. Let's get
# to work!
# Try to use file system acls to make the files readable for sudo'd
# user
if execute:
chmod_mode = 'rx'
setfacl_mode = 'r-x'
else:
chmod_mode = 'rX'
# NOTE: this form fails silently on freebsd. We currently
# never call _fixup_perms2() with execute=False but if we
# start to we'll have to fix this.
setfacl_mode = 'r-X'
res = self._remote_set_user_facl(remote_paths, self._play_context.become_user, setfacl_mode)
if res['rc'] != 0:
# File system acls failed; let's try to use chown next
# Set executable bit first as on some systems an
# unprivileged user can use chown
if execute:
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
res = self._remote_chown(remote_paths, self._play_context.become_user)
if res['rc'] != 0 and remote_user in self._get_admin_users():
# chown failed even if remote_user is administrator/root
raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as a privileged user. '
'Unprivileged become user would be unable to read the file.')
elif res['rc'] != 0:
if C.ALLOW_WORLD_READABLE_TMPFILES:
# chown and fs acls failed -- do things this insecure
# way only if the user opted in in the config file
display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user. '
'This may be insecure. For information on securing this, see '
'https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
else:
raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user '
'(rc: %s, err: %s}). For information on working around this, see '
'https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'
% (res['rc'], to_native(res['stderr'])))
elif execute:
# Can't depend on the file being transferred with execute permissions.
# Only need user perms because no become was used here
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError('Failed to set execute bit on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
return remote_paths
def _remote_chmod(self, paths, mode, sudoable=False):
'''
Issue a remote chmod command
'''
cmd = self._connection._shell.chmod(paths, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chown(self, paths, user, sudoable=False):
'''
Issue a remote chown command
'''
cmd = self._connection._shell.chown(paths, user)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
'''
Issue a remote call to setfacl
'''
cmd = self._connection._shell.set_user_facl(paths, user, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _execute_remote_stat(self, path, all_vars, follow, tmp=None, checksum=True):
'''
Get information from remote file.
'''
if tmp is not None:
display.warning('_execute_remote_stat no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir')
del tmp # No longer used
module_args = dict(
path=path,
follow=follow,
get_checksum=checksum,
checksum_algo='sha1',
)
mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars,
wrap_async=False)
if mystat.get('failed'):
msg = mystat.get('module_stderr')
if not msg:
msg = mystat.get('module_stdout')
if not msg:
msg = mystat.get('msg')
raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, msg))
if not mystat['stat']['exists']:
            # an empty string might accidentally match; "1" can never equal a real checksum, and keeps backwards compatibility
mystat['stat']['checksum'] = '1'
# happens sometimes when it is a dir and not on bsd
if 'checksum' not in mystat['stat']:
mystat['stat']['checksum'] = ''
elif not isinstance(mystat['stat']['checksum'], string_types):
raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))
return mystat['stat']
def _remote_checksum(self, path, all_vars, follow=False):
'''
        Produces a remote checksum given a path.
        Returns a number 0-5 for specific errors instead of a checksum; these values can never collide with a real checksum:
0 = unknown error
1 = file does not exist, this might not be an error
2 = permissions issue
        3 = it's a directory, not a file
4 = stat module failed, likely due to not finding python
5 = appropriate json module not found
'''
x = "0" # unknown error has occurred
try:
remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
if remote_stat['exists'] and remote_stat['isdir']:
x = "3" # its a directory not a file
else:
x = remote_stat['checksum'] # if 1, file is missing
except AnsibleError as e:
errormsg = to_text(e)
if errormsg.endswith(u'Permission denied'):
x = "2" # cannot read file
elif errormsg.endswith(u'MODULE FAILURE'):
x = "4" # python not found or module uncaught exception
elif 'json' in errormsg:
x = "5" # json module needed
finally:
return x # pylint: disable=lost-exception
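    # Illustrative sketch (not part of the original API): a caller could map the
    # sentinel strings returned by _remote_checksum() back to messages, e.g.:
    #
    #     CHECKSUM_ERRORS = {
    #         '0': 'unknown error',
    #         '1': 'file does not exist',
    #         '2': 'permission denied',
    #         '3': 'path is a directory',
    #         '4': 'stat module failed (python missing?)',
    #         '5': 'no usable json module on the target',
    #     }
    #     reason = CHECKSUM_ERRORS.get(result)  # None means it is a real checksum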
def _remote_expand_user(self, path, sudoable=True, pathsep=None):
''' takes a remote path and performs tilde/$HOME expansion on the remote host '''
# We only expand ~/path and ~username/path
if not path.startswith('~'):
return path
# Per Jborean, we don't have to worry about Windows as we don't have a notion of user's home
# dir there.
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
if getattr(self._connection, '_remote_is_local', False):
pass
elif sudoable and self._play_context.become and self._play_context.become_user:
expand_path = '~%s' % self._play_context.become_user
else:
                # use the remote user instead; if none is set, default to the current user
expand_path = '~%s' % (self._play_context.remote_user or self._connection.default_user or '')
# use shell to construct appropriate command and execute
cmd = self._connection._shell.expand_user(expand_path)
data = self._low_level_execute_command(cmd, sudoable=False)
try:
initial_fragment = data['stdout'].strip().splitlines()[-1]
except IndexError:
initial_fragment = None
if not initial_fragment:
            # Something went wrong trying to expand the path remotely. Fall back to pwd;
            # if that also fails, return the original string
cmd = self._connection._shell.pwd()
pwd = self._low_level_execute_command(cmd, sudoable=False).get('stdout', '').strip()
if pwd:
expanded = pwd
else:
expanded = path
elif len(split_path) > 1:
expanded = self._connection._shell.join_path(initial_fragment, *split_path[1:])
else:
expanded = initial_fragment
return expanded
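    # Editor's illustration: with become_user 'postgres', a path of '~/backups' is
    # rewritten to '~postgres/backups' and the remote shell performs the expansion,
    # typically yielding something like '/var/lib/postgresql/backups'.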
def _strip_success_message(self, data):
'''
Removes the BECOME-SUCCESS message from the data.
'''
if data.strip().startswith('BECOME-SUCCESS-'):
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
return data
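    # For example (editor's illustration), given sudo output such as
    #     'BECOME-SUCCESS-abcdef\n{"changed": false}'
    # _strip_success_message() returns just '{"changed": false}'.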
def _update_module_args(self, module_name, module_args, task_vars):
# set check mode in the module arguments, if required
if self._play_context.check_mode:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
else:
module_args['_ansible_check_mode'] = False
# set no log in the module arguments, if required
module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
# set debug in the module arguments, if required
module_args['_ansible_debug'] = C.DEFAULT_DEBUG
# let module know we are in diff mode
module_args['_ansible_diff'] = self._play_context.diff
# let module know our verbosity
module_args['_ansible_verbosity'] = display.verbosity
# give the module information about the ansible version
module_args['_ansible_version'] = __version__
# give the module information about its name
module_args['_ansible_module_name'] = module_name
# set the syslog facility to be used in the module
module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
# let module know about filesystems that selinux treats specially
module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
# give the module the socket for persistent connections
module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
if not module_args['_ansible_socket']:
module_args['_ansible_socket'] = task_vars.get('ansible_socket')
# make sure all commands use the designated shell executable
module_args['_ansible_shell_executable'] = self._play_context.executable
# make sure modules are aware if they need to keep the remote files
module_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
# make sure all commands use the designated temporary directory if created
if self._is_become_unprivileged(): # force fallback on remote_tmp as user cannot normally write to dir
module_args['_ansible_tmpdir'] = None
else:
module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
# make sure the remote_tmp value is sent through in case modules needs to create their own
try:
module_args['_ansible_remote_tmp'] = self._connection._shell.get_option('remote_tmp')
except KeyError:
# here for 3rd party shell plugin compatibility in case they do not define the remote_tmp option
module_args['_ansible_remote_tmp'] = '~/.ansible/tmp'
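    # After _update_module_args() runs, module_args carries a set of internal
    # '_ansible_*' keys alongside the user-supplied arguments. A representative
    # subset (illustrative; actual values depend on the play) looks like:
    #
    #     {'_ansible_check_mode': False, '_ansible_diff': False,
    #      '_ansible_verbosity': 0, '_ansible_module_name': 'ping',
    #      '_ansible_remote_tmp': '~/.ansible/tmp'}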
def _update_connection_options(self, options, variables=None):
''' ensures connections have the appropriate information '''
update = {}
if getattr(self.connection, 'glob_option_vars', False):
# if the connection allows for it, pass any variables matching it.
if variables is not None:
for varname in variables:
                    if varname.startswith('ansible_%s_' % self.connection._load_name):
update[varname] = variables[varname]
# always override existing with options
update.update(options)
self.connection.set_options(update)
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False):
'''
Transfer and run a module along with its arguments.
'''
if tmp is not None:
display.warning('_execute_module no longer honors the tmp parameter. Action plugins'
' should set self._connection._shell.tmpdir to share the tmpdir')
del tmp # No longer used
if delete_remote_tmp is not None:
display.warning('_execute_module no longer honors the delete_remote_tmp parameter.'
' Action plugins should check self._connection._shell.tmpdir to'
' see if a tmpdir existed before they were called to determine'
' if they are responsible for removing it.')
del delete_remote_tmp # No longer used
tmpdir = self._connection._shell.tmpdir
# We set the module_style to new here so the remote_tmp is created
# before the module args are built if remote_tmp is needed (async).
# If the module_style turns out to not be new and we didn't create the
# remote tmp here, it will still be created. This must be done before
# calling self._update_module_args() so the module wrapper has the
# correct remote_tmp value set
if not self._is_pipelining_enabled("new", wrap_async) and tmpdir is None:
self._make_tmp_path()
tmpdir = self._connection._shell.tmpdir
if task_vars is None:
task_vars = dict()
# if a module name was not specified for this execution, use the action from the task
if module_name is None:
module_name = self._task.action
if module_args is None:
module_args = self._task.args
self._update_module_args(module_name, module_args, task_vars)
# FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
(module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
display.vvv("Using module file %s" % module_path)
if not shebang and module_style != 'binary':
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
self._used_interpreter = shebang
remote_module_path = None
if not self._is_pipelining_enabled(module_style, wrap_async):
# we might need remote tmp dir
if tmpdir is None:
self._make_tmp_path()
tmpdir = self._connection._shell.tmpdir
remote_module_filename = self._connection._shell.get_remote_filename(module_path)
remote_module_path = self._connection._shell.join_path(tmpdir, 'AnsiballZ_%s' % remote_module_filename)
args_file_path = None
if module_style in ('old', 'non_native_want_json', 'binary'):
# we'll also need a tmp file to hold our module arguments
args_file_path = self._connection._shell.join_path(tmpdir, 'args')
if remote_module_path or module_style != 'new':
display.debug("transferring module to remote %s" % remote_module_path)
if module_style == 'binary':
self._transfer_file(module_path, remote_module_path)
else:
self._transfer_data(remote_module_path, module_data)
if module_style == 'old':
# we need to dump the module args to a k=v string in a file on
# the remote system, which can be read and parsed by the module
args_data = ""
for k, v in iteritems(module_args):
args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
elif module_style in ('non_native_want_json', 'binary'):
self._transfer_data(args_file_path, json.dumps(module_args))
display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
remote_files = []
if tmpdir and remote_module_path:
remote_files = [tmpdir, remote_module_path]
if args_file_path:
remote_files.append(args_file_path)
sudoable = True
in_data = None
cmd = ""
if wrap_async and not self._connection.always_pipeline_modules:
# configure, upload, and chmod the async_wrapper module
(async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(module_name='async_wrapper', module_args=dict(),
task_vars=task_vars)
async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
self._transfer_data(remote_async_module_path, async_module_data)
remote_files.append(remote_async_module_path)
async_limit = self._task.async_val
async_jid = str(random.randint(0, 999999999999))
# call the interpreter for async_wrapper directly
# this permits use of a script for an interpreter on non-Linux platforms
# TODO: re-implement async_wrapper as a regular module to avoid this special case
interpreter = shebang.replace('#!', '').strip()
async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]
if environment_string:
async_cmd.insert(0, environment_string)
if args_file_path:
async_cmd.append(args_file_path)
else:
# maintain a fixed number of positional parameters for async_wrapper
async_cmd.append('_')
if not self._should_remove_tmp_path(tmpdir):
async_cmd.append("-preserve_tmp")
cmd = " ".join(to_text(x) for x in async_cmd)
else:
if self._is_pipelining_enabled(module_style):
in_data = module_data
else:
cmd = remote_module_path
cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path).strip()
# Fix permissions of the tmpdir path and tmpdir files. This should be called after all
# files have been transferred.
if remote_files:
# remove none/empty
remote_files = [x for x in remote_files if x]
self._fixup_perms2(remote_files, self._play_context.remote_user)
# actually execute
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
# parse the main result
data = self._parse_returned_data(res)
# NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
# get internal info before cleaning
if data.pop("_ansible_suppress_tmpdir_delete", False):
self._cleanup_remote_tmp = False
# remove internal keys
remove_internal_keys(data)
if wrap_async:
# async_wrapper will clean up its tmpdir on its own so we want the controller side to
# forget about it now
self._connection._shell.tmpdir = None
# FIXME: for backwards compat, figure out if still makes sense
data['changed'] = True
# pre-split stdout/stderr into lines if needed
if 'stdout' in data and 'stdout_lines' not in data:
# if the value is 'False', a default won't catch it.
txt = data.get('stdout', None) or u''
data['stdout_lines'] = txt.splitlines()
if 'stderr' in data and 'stderr_lines' not in data:
# if the value is 'False', a default won't catch it.
txt = data.get('stderr', None) or u''
data['stderr_lines'] = txt.splitlines()
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
def _parse_returned_data(self, res):
try:
filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''))
for w in warnings:
display.warning(w)
data = json.loads(filtered_output)
if 'ansible_facts' in data and isinstance(data['ansible_facts'], dict):
data['ansible_facts'] = wrap_var(data['ansible_facts'])
data['_ansible_parsed'] = True
except ValueError:
            # not valid json, let's try to capture error
data = dict(failed=True, _ansible_parsed=False)
data['module_stdout'] = res.get('stdout', u'')
if 'stderr' in res:
data['module_stderr'] = res['stderr']
if res['stderr'].startswith(u'Traceback'):
data['exception'] = res['stderr']
# try to figure out if we are missing interpreter
if self._used_interpreter is not None and '%s: No such file or directory' % self._used_interpreter.lstrip('!#') in data['module_stderr']:
data['msg'] = "The module failed to execute correctly, you probably need to set the interpreter."
else:
data['msg'] = "MODULE FAILURE"
data['msg'] += '\nSee stdout/stderr for the exact error'
if 'rc' in res:
data['rc'] = res['rc']
return data
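    # Editor's illustration: when a module prints something that is not JSON (for
    # example a Python traceback), _parse_returned_data() falls back to a failure
    # dict roughly of the form:
    #
    #     {'failed': True, '_ansible_parsed': False, 'module_stdout': '',
    #      'module_stderr': 'Traceback (most recent call last): ...',
    #      'msg': 'MODULE FAILURE\nSee stdout/stderr for the exact error'}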
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='surrogate_then_replace', chdir=None):
'''
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
run the module code or python directly when pipelining.
:kwarg encoding_errors: If the value returned by the command isn't
utf-8 then we have to figure out how to transform it to unicode.
If the value is just going to be displayed to the user (or
discarded) then the default of 'replace' is fine. If the data is
used as a key or is going to be written back out to a file
verbatim, then this won't work. May have to use some sort of
replacement strategy (python3 could use surrogateescape)
:kwarg chdir: cd into this directory before executing the command.
'''
display.debug("_low_level_execute_command(): starting")
# if not cmd:
# # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
# display.debug("_low_level_execute_command(): no command, exiting")
# return dict(stdout='', stderr='', rc=254)
if chdir:
display.debug("_low_level_execute_command(): changing cwd to %s for this command" % chdir)
cmd = self._connection._shell.append_command('cd %s' % chdir, cmd)
allow_same_user = C.BECOME_ALLOW_SAME_USER
same_user = self._play_context.become_user == self._play_context.remote_user
if sudoable and self._play_context.become and (allow_same_user or not same_user):
display.debug("_low_level_execute_command(): using become for this command")
if self._connection.transport != 'network_cli' and self._play_context.become_method != 'enable':
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
if self._connection.allow_executable:
if executable is None:
executable = self._play_context.executable
# mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
# only applied for the default executable to avoid interfering with the raw action
cmd = self._connection._shell.append_command(cmd, 'sleep 0')
if executable:
cmd = executable + ' -c ' + shlex_quote(cmd)
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
# Change directory to basedir of task for command execution when connection is local
if self._connection.transport == 'local':
cwd = os.getcwd()
os.chdir(self._loader.get_basedir())
try:
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
finally:
if self._connection.transport == 'local':
os.chdir(cwd)
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type
if isinstance(stdout, binary_type):
out = to_text(stdout, errors=encoding_errors)
elif not isinstance(stdout, text_type):
out = to_text(b''.join(stdout.readlines()), errors=encoding_errors)
else:
out = stdout
if isinstance(stderr, binary_type):
err = to_text(stderr, errors=encoding_errors)
elif not isinstance(stderr, text_type):
err = to_text(b''.join(stderr.readlines()), errors=encoding_errors)
else:
err = stderr
if rc is None:
rc = 0
# be sure to remove the BECOME-SUCCESS message now
out = self._strip_success_message(out)
display.debug(u"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, out, err))
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines())
def _get_diff_data(self, destination, source, task_vars, source_file=True):
diff = {}
display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, _diff_peek=True), task_vars=task_vars, persist_files=True)
if not peek_result.get('failed', False) or peek_result.get('rc', 0) == 0:
if peek_result.get('state') == 'absent':
diff['before'] = ''
elif peek_result.get('appears_binary'):
diff['dst_binary'] = 1
elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Slurping the file %s" % source)
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
diff['before_header'] = destination
diff['before'] = dest_contents
if source_file:
st = os.stat(source)
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Reading local copy of the file %s" % source)
try:
with open(source, 'rb') as src:
src_contents = src.read()
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
if b"\x00" in src_contents:
diff['src_binary'] = 1
else:
diff['after_header'] = source
diff['after'] = src_contents
else:
display.debug("source of file passed in")
diff['after_header'] = 'dynamically generated'
diff['after'] = source
if self._play_context.no_log:
if 'before' in diff:
diff["before"] = ""
if 'after' in diff:
diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\n"
return diff
def _find_needle(self, dirname, needle):
'''
        find a needle in a haystack of paths, optionally using 'dirname' as a subdir.
This will build the ordered list of paths to search and pass them to dwim
to get back the first existing file found.
'''
# dwim already deals with playbook basedirs
path_stack = self._task.get_search_path()
        # if the needle is missing, this will raise a file-not-found exception
return self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
| gpl-3.0 |
packagecontrol/package_reviewer | st_package_reviewer/check/file/check_resource_file_validity.py | 2 | 2062 | import plistlib
import xml.etree.ElementTree as ET
from xml.parsers.expat import ExpatError
from . import FileChecker
from ...lib import jsonc
class CheckJsoncFiles(FileChecker):
def check(self):
# All these files allow comments and trailing commas,
# which is why we'll call them "jsonc" (JSON with Comments)
jsonc_file_globs = {
"**/*.sublime-build",
"**/*.sublime-color-scheme",
"**/*.hidden-color-scheme",
"**/*.sublime-commands",
"**/*.sublime-completions",
"**/*.sublime-keymap",
"**/*.sublime-macro",
"**/*.sublime-menu",
"**/*.sublime-mousemap",
"**/*.sublime-settings",
"**/*.sublime-theme",
}
for file_path in self.globs(*jsonc_file_globs):
with self.file_context(file_path):
with file_path.open(encoding='utf-8') as f:
try:
jsonc.loads(f.read())
except ValueError as e:
self.fail("Invalid JSON (with comments)", exception=e)
class CheckPlistFiles(FileChecker):
def check(self):
plist_file_globs = {
"**/*.tmLanguage",
"**/*.tmPreferences",
"**/*.tmSnippet",
"**/*.tmTheme",
"**/*.hidden-tmTheme",
}
for file_path in self.globs(*plist_file_globs):
with self.file_context(file_path):
with file_path.open('rb') as f:
try:
plistlib.load(f)
except (ValueError, ExpatError) as e:
self.fail("Invalid Plist", exception=e)
class CheckXmlFiles(FileChecker):
def check(self):
for file_path in self.glob("**/*.sublime-snippet"):
with self.file_context(file_path):
try:
ET.parse(str(file_path))
except ET.ParseError as e:
self.fail("Invalid XML", exception=e)
| mit |
jayceyxc/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Hash/MD2.py | 124 | 2734 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""MD2 cryptographic hash algorithm.
MD2 is specified in RFC1319_ and it produces the 128 bit digest of a message.
>>> from Crypto.Hash import MD2
>>>
>>> h = MD2.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
MD2 stands for Message Digest version 2; it was invented by Rivest in 1989.
This algorithm is both slow and insecure. Do not use it for new designs.
.. _RFC1319: http://tools.ietf.org/html/rfc1319
"""
_revision__ = "$Id$"
__all__ = ['new', 'digest_size', 'MD2Hash' ]
from Crypto.Util.py3compat import *
from Crypto.Hash.hashalgo import HashAlgo
import Crypto.Hash._MD2 as _MD2
hashFactory = _MD2
class MD2Hash(HashAlgo):
"""Class that implements an MD2 hash
:undocumented: block_size
"""
#: ASN.1 Object identifier (OID)::
#:
#: id-md2 OBJECT IDENTIFIER ::= {
#: iso(1) member-body(2) us(840) rsadsi(113549)
#: digestAlgorithm(2) 2
#: }
#:
#: This value uniquely identifies the MD2 algorithm.
oid = b('\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x02')
digest_size = 16
block_size = 16
def __init__(self, data=None):
HashAlgo.__init__(self, hashFactory, data)
def new(self, data=None):
return MD2Hash(data)
def new(data=None):
"""Return a fresh instance of the hash object.
:Parameters:
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `MD2Hash.update()`.
Optional.
:Return: An `MD2Hash` object
"""
return MD2Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = MD2Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = MD2Hash.block_size
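# Editor's sketch (not part of the original module): one-shot hashing with the
# helper above; the digest shown is the RFC 1319 test vector for the empty string.
#
#     >>> from Crypto.Hash import MD2
#     >>> MD2.new(b"").hexdigest()
#     '8350e5a3e24c153df2275c9f80692773'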
| apache-2.0 |
erkanay/django | django/db/models/sql/expressions.py | 22 | 4490 | import copy
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
class SQLEvaluator(object):
def __init__(self, expression, query, allow_joins=True, reuse=None):
self.expression = expression
self.opts = query.get_meta()
self.reuse = reuse
self.cols = []
self.expression.prepare(self, query, allow_joins)
def relabeled_clone(self, change_map):
clone = copy.copy(self)
clone.cols = []
for node, col in self.cols:
if hasattr(col, 'relabeled_clone'):
clone.cols.append((node, col.relabeled_clone(change_map)))
else:
clone.cols.append((node,
(change_map.get(col[0], col[0]), col[1])))
return clone
def get_group_by_cols(self):
cols = []
for node, col in self.cols:
if hasattr(node, 'get_group_by_cols'):
cols.extend(node.get_group_by_cols())
elif isinstance(col, tuple):
cols.append(col)
return cols
def prepare(self):
return self
def as_sql(self, qn, connection):
return self.expression.evaluate(self, qn, connection)
#####################################################
# Visitor methods for initial expression preparation #
#####################################################
def prepare_node(self, node, query, allow_joins):
for child in node.children:
if hasattr(child, 'prepare'):
child.prepare(self, query, allow_joins)
def prepare_leaf(self, node, query, allow_joins):
if not allow_joins and LOOKUP_SEP in node.name:
raise FieldError("Joined field references are not permitted in this query")
field_list = node.name.split(LOOKUP_SEP)
if node.name in query.aggregates:
self.cols.append((node, query.aggregate_select[node.name]))
else:
try:
_, sources, _, join_list, path = query.setup_joins(
field_list, query.get_meta(), query.get_initial_alias(),
can_reuse=self.reuse)
self._used_joins = join_list
targets, _, join_list = query.trim_joins(sources, join_list, path)
if self.reuse is not None:
self.reuse.update(join_list)
for t in targets:
self.cols.append((node, (join_list[-1], t.column)))
except FieldDoesNotExist:
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (self.name,
[f.name for f in self.opts.fields]))
##################################################
# Visitor methods for final expression evaluation #
##################################################
def evaluate_node(self, node, qn, connection):
expressions = []
expression_params = []
for child in node.children:
if hasattr(child, 'evaluate'):
sql, params = child.evaluate(self, qn, connection)
else:
sql, params = '%s', (child,)
if len(getattr(child, 'children', [])) > 1:
format = '(%s)'
else:
format = '%s'
if sql:
expressions.append(format % sql)
expression_params.extend(params)
return connection.ops.combine_expression(node.connector, expressions), expression_params
def evaluate_leaf(self, node, qn, connection):
col = None
for n, c in self.cols:
if n is node:
col = c
break
if col is None:
raise ValueError("Given node not found")
if hasattr(col, 'as_sql'):
return col.as_sql(qn, connection)
else:
return '%s.%s' % (qn(col[0]), qn(col[1])), []
def evaluate_date_modifier_node(self, node, qn, connection):
timedelta = node.children.pop()
sql, params = self.evaluate_node(node, qn, connection)
node.children.append(timedelta)
if (timedelta.days == timedelta.seconds == timedelta.microseconds == 0):
return sql, params
return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
| bsd-3-clause |
emakis/erpnext | erpnext/selling/report/inactive_customers/inactive_customers.py | 62 | 2411 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe import _
def execute(filters=None):
if not filters: filters ={}
days_since_last_order = filters.get("days_since_last_order")
doctype = filters.get("doctype")
	if cint(days_since_last_order) <= 0:
		frappe.throw(_("'Days Since Last Order' must be greater than zero"))
columns = get_columns()
customers = get_sales_details(doctype)
data = []
for cust in customers:
if cint(cust[8]) >= cint(days_since_last_order):
cust.insert(7,get_last_sales_amt(cust[0], doctype))
data.append(cust)
return columns, data
def get_sales_details(doctype):
cond = """sum(so.base_net_total) as 'total_order_considered',
max(so.posting_date) as 'last_order_date',
DATEDIFF(CURDATE(), max(so.posting_date)) as 'days_since_last_order' """
if doctype == "Sales Order":
cond = """sum(if(so.status = "Stopped",
so.base_net_total * so.per_delivered/100,
so.base_net_total)) as 'total_order_considered',
max(so.transaction_date) as 'last_order_date',
DATEDIFF(CURDATE(), max(so.transaction_date)) as 'days_since_last_order'"""
return frappe.db.sql("""select
cust.name,
cust.customer_name,
cust.territory,
cust.customer_group,
count(distinct(so.name)) as 'num_of_order',
sum(base_net_total) as 'total_order_value', {0}
from `tabCustomer` cust, `tab{1}` so
where cust.name = so.customer and so.docstatus = 1
group by cust.name
order by 'days_since_last_order' desc """.format(cond, doctype), as_list=1)
def get_last_sales_amt(customer, doctype):
cond = "posting_date"
if doctype =="Sales Order":
cond = "transaction_date"
res = frappe.db.sql("""select base_net_total from `tab{0}`
where customer = %s and docstatus = 1 order by {1} desc
limit 1""".format(doctype, cond), customer)
return res and res[0][0] or 0
def get_columns():
return [
_("Customer") + ":Link/Customer:120",
_("Customer Name") + ":Data:120",
_("Territory") + "::120",
_("Customer Group") + "::120",
_("Number of Order") + "::120",
_("Total Order Value") + ":Currency:120",
_("Total Order Considered") + ":Currency:160",
_("Last Order Amount") + ":Currency:160",
_("Last Order Date") + ":Date:160",
_("Days Since Last Order") + "::160"
]
| gpl-3.0 |
mlmurray/TensorFlow-Experimentation | examples/2 - Basic Classifiers/nearest_neighbor.py | 2 | 1685 | '''
A nearest neighbor learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import numpy as np
import tensorflow as tf
# Import MINST data
import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# In this example, we limit mnist data
Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates)
Xte, Yte = mnist.test.next_batch(200) #200 for testing
# Reshape images to 1D
Xtr = np.reshape(Xtr, newshape=(-1, 28*28))
Xte = np.reshape(Xte, newshape=(-1, 28*28))
# tf Graph Input
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])
# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)
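# Note (editor's comment): xte holds a single flattened test image, so tf.neg(xte)
# broadcasts against every row of xtr; summing the absolute differences over axis 1
# gives one L1 distance per training image.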
# Predict: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)
accuracy = 0.
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# loop over test data
for i in range(len(Xte)):
# Get nearest neighbor
nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i,:]})
# Get nearest neighbor class label and compare it to its true label
print "Test", i, "Prediction:", np.argmax(Ytr[nn_index]), "True Class:", np.argmax(Yte[i])
# Calculate accuracy
if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
accuracy += 1./len(Xte)
print "Done!"
print "Accuracy:", accuracy
| mit |
todaychi/hue | desktop/core/ext-py/Pygments-1.3.1/external/rst-directive.py | 47 | 2597 | # -*- coding: utf-8 -*-
"""
The Pygments reStructuredText directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.5 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
| apache-2.0 |
Shrews/PyGerrit | webapp/django/db/__init__.py | 15 | 2673 | import os
from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import curry
__all__ = ('backend', 'connection', 'DatabaseError', 'IntegrityError')
if not settings.DATABASE_ENGINE:
settings.DATABASE_ENGINE = 'dummy'
try:
# Most of the time, the database backend will be one of the official
# backends that ships with Django, so look there first.
_import_path = 'django.db.backends.'
backend = __import__('%s%s.base' % (_import_path, settings.DATABASE_ENGINE), {}, {}, [''])
except ImportError, e:
# If the import failed, we might be looking for a database backend
# distributed external to Django. So we'll try that next.
try:
_import_path = ''
backend = __import__('%s.base' % settings.DATABASE_ENGINE, {}, {}, [''])
except ImportError, e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(__path__[0], 'backends')
try:
available_backends = [f for f in os.listdir(backend_dir) if not f.startswith('_') and not f.startswith('.') and not f.endswith('.py') and not f.endswith('.pyc')]
except EnvironmentError:
available_backends = []
available_backends.sort()
if settings.DATABASE_ENGINE not in available_backends:
raise ImproperlyConfigured, "%r isn't an available database backend. Available options are: %s\nError was: %s" % \
(settings.DATABASE_ENGINE, ", ".join(map(repr, available_backends)), e_user)
else:
raise # If there's some other error, this must be an error in Django itself.
# Convenient aliases for backend bits.
connection = backend.DatabaseWrapper(**settings.DATABASE_OPTIONS)
DatabaseError = backend.DatabaseError
IntegrityError = backend.IntegrityError
# Register an event that closes the database connection
# when a Django request is finished.
def close_connection(**kwargs):
connection.close()
signals.request_finished.connect(close_connection)
# Register an event that resets connection.queries
# when a Django request is started.
def reset_queries(**kwargs):
connection.queries = []
signals.request_started.connect(reset_queries)
# Register an event that rolls back the connection
# when a Django request has an exception.
def _rollback_on_exception(**kwargs):
from django.db import transaction
try:
transaction.rollback_unless_managed()
except DatabaseError:
pass
signals.got_request_exception.connect(_rollback_on_exception)
| apache-2.0 |
hwuiwon/namebench | nb_third_party/dns/rdtypes/IN/SRV.py | 248 | 3396 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.name
class SRV(dns.rdata.Rdata):
"""SRV record
@ivar priority: the priority
@type priority: int
@ivar weight: the weight
@type weight: int
@ivar port: the port of the service
@type port: int
@ivar target: the target host
@type target: dns.name.Name object
@see: RFC 2782"""
__slots__ = ['priority', 'weight', 'port', 'target']
def __init__(self, rdclass, rdtype, priority, weight, port, target):
super(SRV, self).__init__(rdclass, rdtype)
self.priority = priority
self.weight = weight
self.port = port
self.target = target
def to_text(self, origin=None, relativize=True, **kw):
target = self.target.choose_relativity(origin, relativize)
return '%d %d %d %s' % (self.priority, self.weight, self.port,
target)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
priority = tok.get_uint16()
weight = tok.get_uint16()
port = tok.get_uint16()
target = tok.get_name(None)
target = target.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, priority, weight, port, target)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
three_ints = struct.pack("!HHH", self.priority, self.weight, self.port)
file.write(three_ints)
self.target.to_wire(file, compress, origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(priority, weight, port) = struct.unpack('!HHH',
wire[current : current + 6])
current += 6
rdlen -= 6
(target, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
if not origin is None:
target = target.relativize(origin)
return cls(rdclass, rdtype, priority, weight, port, target)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.target = self.target.choose_relativity(origin, relativize)
def _cmp(self, other):
sp = struct.pack("!HHH", self.priority, self.weight, self.port)
op = struct.pack("!HHH", other.priority, other.weight, other.port)
v = cmp(sp, op)
if v == 0:
v = cmp(self.target, other.target)
return v
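# Editor's note (illustrative): the presentation format produced by to_text() is
# "<priority> <weight> <port> <target>", e.g. "0 5 5060 sip.example.com.",
# matching RFC 2782.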
| apache-2.0 |
zhangqi007/ZQ | Tools/autotest/pysim/fg_display.py | 229 | 1919 | #!/usr/bin/env python
import socket, struct, time, math, errno, sys
from pymavlink import fgFDM
class udp_socket(object):
'''a UDP socket'''
def __init__(self, device, blocking=True, input=True):
a = device.split(':')
if len(a) != 2:
print("UDP ports must be specified as host:port")
sys.exit(1)
self.port = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if input:
self.port.bind((a[0], int(a[1])))
self.destination_addr = None
else:
self.destination_addr = (a[0], int(a[1]))
if not blocking:
self.port.setblocking(0)
self.last_address = None
def recv(self,n=1000):
try:
data, self.last_address = self.port.recvfrom(n)
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
return ""
raise
return data
def write(self, buf):
try:
if self.destination_addr:
self.port.sendto(buf, self.destination_addr)
else:
                self.port.sendto(buf, self.last_address)
except socket.error:
pass
def ft2m(x):
return x * 0.3048
def m2ft(x):
return x / 0.3048
def kt2mps(x):
return x * 0.514444444
def mps2kt(x):
return x / 0.514444444
udp = udp_socket("127.0.0.1:5123")
fgout = udp_socket("127.0.0.1:5124", input=False)
tlast = time.time()
count = 0
fg = fgFDM.fgFDM()
while True:
buf = udp.recv(1000)
fg.parse(buf)
fgout.write(fg.pack())
count += 1
if time.time() - tlast > 1.0:
print("%u FPS len=%u" % (count, len(buf)))
count = 0
tlast = time.time()
print(fg.get('latitude', units='degrees'),
fg.get('longitude', units='degrees'),
fg.get('altitude', units='meters'),
fg.get('vcas', units='mps'))
| gpl-3.0 |
hifly/OpenUpgrade | addons/email_template/__openerp__.py | 260 | 3068 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name' : 'Email Templates',
'version' : '1.1',
'author' : 'OpenERP SA',
'website' : 'https://www.odoo.com/page/mailing',
'category' : 'Marketing',
'depends' : ['mail'],
'description': """
Email Templating (simplified version of the original Power Email by Openlabs).
==============================================================================
Lets you design complete email templates related to any OpenERP document (Sale
Orders, Invoices and so on), including sender, recipient, subject, body (HTML and
Text). You may also automatically attach files to your templates, or print and
attach a report.
For advanced use, the templates may include dynamic attributes of the document
they are related to. For example, you may use the name of a Partner's country
when writing to them, also providing a safe default in case the attribute is
not defined. Each template contains a built-in assistant to help with the
inclusion of these dynamic values.
If you enable the option, a composition assistant will also appear in the sidebar
of the OpenERP documents to which the template applies (e.g. Invoices).
This serves as a quick way to send a new email based on the template, after
reviewing and adapting the contents, if needed.
This composition assistant will also turn into a mass mailing system when called
for multiple documents at once.
These email templates are also at the heart of the marketing campaign system
(see the ``marketing_campaign`` application), if you need to automate larger
campaigns on any OpenERP document.
**Technical note:** only the templating system of the original Power Email by Openlabs was kept.
""",
'data': [
'wizard/email_template_preview_view.xml',
'email_template_view.xml',
'res_partner_view.xml',
'ir_actions_view.xml',
'wizard/mail_compose_message_view.xml',
'security/ir.model.access.csv'
],
'demo': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jimi-c/ansible | test/units/modules/net_tools/nios/test_nios_mx_record.py | 11 | 5102 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_mx_record
from ansible.module_utils.net_tools.nios import api
from ansible.compat.tests.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosMXRecordModule(TestNiosModule):
module = nios_mx_record
def setUp(self):
super(TestNiosMXRecordModule, self).setUp()
self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_mx_record.WapiModule')
self.module.check_mode = False
self.module.params = {'provider': None}
self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_mx_record.WapiModule')
self.exec_command = self.mock_wapi.start()
self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_mx_record.WapiModule.run')
self.mock_wapi_run.start()
self.load_config = self.mock_wapi_run.start()
def tearDown(self):
super(TestNiosMXRecordModule, self).tearDown()
self.mock_wapi.stop()
self.mock_wapi_run.stop()
def _get_wapi(self, test_object):
wapi = api.WapiModule(self.module)
wapi.get_object = Mock(name='get_object', return_value=test_object)
wapi.create_object = Mock(name='create_object')
wapi.update_object = Mock(name='update_object')
wapi.delete_object = Mock(name='delete_object')
return wapi
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
def test_nios_mx_record_create(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com',
'mx': 'mailhost.ansible.com', 'preference': 0, 'comment': None, 'extattrs': None}
test_object = None
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
print("WAPI: ", wapi)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
'mx': 'mailhost.ansible.com', 'preference': 0})
def test_nios_mx_record_update_comment(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com',
'preference': 0, 'comment': 'updated comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "ansible.com",
"mx": "mailhost.ansible.com",
"preference": 0,
"extattrs": {}
}
]
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
def test_nios_mx_record_remove(self):
self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com',
'preference': 0, 'comment': None, 'extattrs': None}
ref = "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
test_object = [{
"comment": "test comment",
"_ref": ref,
"name": "ansible.com",
"mx": "mailhost.ansible.com",
"extattrs": {'Site': {'value': 'test'}}
}]
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.delete_object.assert_called_once_with(ref)
| gpl-3.0 |
gojira/tensorflow | tensorflow/python/kernel_tests/random/multinomial_op_test.py | 20 | 10106 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Multinomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import timeit
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def composed_sampler(logits, num_samples):
# [batch size, num classes, num samples]
unif = random_ops.random_uniform(logits.get_shape().concatenate(
tensor_shape.TensorShape([num_samples])))
noise = -math_ops.log(-math_ops.log(unif))
# [batch size, num classes, 1]
logits = array_ops.expand_dims(logits, -1)
# [batch size, num samples]
return math_ops.argmax(logits + noise, axis=1)
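# Editor's sketch (not part of the original test): composed_sampler is the
# Gumbel-max trick -- adding -log(-log(U)) noise to the logits and taking the
# argmax over classes draws from the softmax distribution. A minimal NumPy-only
# equivalent, assuming logits_np has shape [batch_size, num_classes]:
def _gumbel_max_sketch_np(logits_np, num_samples):
  # uniform noise per (batch, class, sample)
  unif = np.random.random_sample(logits_np.shape + (num_samples,))
  noise = -np.log(-np.log(unif))
  # broadcast [batch, classes, 1] against the noise and argmax over classes
  return np.argmax(logits_np[..., np.newaxis] + noise, axis=1)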
native_sampler = random_ops.multinomial
class MultinomialTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSmallEntropy(self):
random_seed.set_random_seed(1618)
for output_dtype in [np.int32, np.int64]:
with test_util.device(use_gpu=True):
# A logit value of -10 corresponds to a probability of ~5e-5.
logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
num_samples = 1000
samples = self.evaluate(random_ops.multinomial(
logits, num_samples, output_dtype=output_dtype))
self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
def testOneOpMultipleStepsIndependent(self):
with self.test_session(use_gpu=True) as sess:
sample_op1, _ = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
sample1a = sess.run(sample_op1)
sample1b = sess.run(sample_op1)
self.assertFalse(np.equal(sample1a, sample1b).all())
def testEagerOneOpMultipleStepsIndependent(self):
with context.eager_mode(), test_util.device(use_gpu=True):
sample1, sample2 = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
self.assertFalse(np.equal(sample1.numpy(), sample2.numpy()).all())
def testTwoOpsIndependent(self):
with self.test_session(use_gpu=True) as sess:
sample_op1, sample_op2 = self._make_ops(32)
sample1, sample2 = sess.run([sample_op1, sample_op2])
# We expect sample1 and sample2 to be independent.
# 1 in 2^32 chance of this assertion failing.
self.assertFalse(np.equal(sample1, sample2).all())
def testTwoOpsSameSeedDrawSameSequences(self):
with self.test_session(use_gpu=True) as sess:
sample_op1, sample_op2 = self._make_ops(1000, seed=1)
sample1, sample2 = sess.run([sample_op1, sample_op2])
self.assertAllEqual(sample1, sample2)
def testLargeLogits(self):
for neg in [True, False]:
with self.test_session(use_gpu=True):
logits = np.array([[1000.] * 5])
if neg:
logits *= -1
samples = random_ops.multinomial(logits, 10).eval()
# Sampled classes should be in-range.
self.assertTrue((samples >= 0).all())
self.assertTrue((samples < 5).all())
def testSamplingCorrectness(self):
np.random.seed(1618) # Make it reproducible.
num_samples = 21000
rand_probs = self._normalize(np.random.random_sample((10,)))
rand_probs2 = self._normalize(np.random.random_sample((3, 5))) # batched
for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
probs = np.asarray(probs)
if len(probs.shape) == 1:
probs = probs.reshape(1, probs.size) # singleton batch
logits = np.log(probs).astype(np.float32)
composed_freqs = self._do_sampling(logits, num_samples, composed_sampler)
native_freqs = self._do_sampling(logits, num_samples, native_sampler)
# the test here is similar to core/lib/random/distribution_sampler_test.cc
composed_chi2 = self._chi2(probs, composed_freqs)
native_chi2 = self._chi2(probs, native_freqs)
composed_native_chi2 = self._chi2(composed_freqs, native_freqs)
def check(chi2s):
for chi2 in chi2s:
self.assertLess(chi2, 1e-3)
check(composed_chi2)
check(native_chi2)
check(composed_native_chi2)
def _make_ops(self, num_samples, seed=None):
prob_dist = constant_op.constant([[0.15, 0.5, 0.3, 0.05]])
logits = math_ops.log(prob_dist)
# Two independent sets of samples from the same distribution
sample_op1 = random_ops.multinomial(logits, num_samples, seed)
sample_op2 = random_ops.multinomial(logits, num_samples, seed)
return (sample_op1, sample_op2)
def _normalize(self, vec):
batched = (len(vec.shape) == 2)
return vec / vec.sum(axis=1, keepdims=True) if batched else vec / vec.sum()
def _do_sampling(self, logits, num_samples, sampler):
"""Samples using the supplied sampler and inputs.
Args:
logits: Numpy ndarray of shape [batch_size, num_classes].
num_samples: Int; number of samples to draw.
sampler: A sampler function that takes (1) a [batch_size, num_classes]
Tensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with self.test_session(use_gpu=True) as sess:
random_seed.set_random_seed(1618)
op = sampler(constant_op.constant(logits), num_samples)
d = sess.run(op)
batch_size, num_classes = logits.shape
freqs_mat = []
for i in range(batch_size):
cnts = dict(collections.Counter(d[i, :]))
# Requires drawn class labels be in range.
self.assertLess(max(cnts.keys()), num_classes)
self.assertGreaterEqual(min(cnts.keys()), 0)
freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
for k in range(num_classes)]
freqs_mat.append(freqs)
return freqs_mat
def _chi2(self, expected, actual):
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected, axis=0)
return chi2
def testEmpty(self):
classes = 5
with self.test_session(use_gpu=True):
for batch in 0, 3:
for samples in 0, 7:
x = random_ops.multinomial(
array_ops.zeros([batch, classes]), samples).eval()
self.assertEqual(x.shape, (batch, samples))
def testEmptyClasses(self):
with self.test_session(use_gpu=True):
x = random_ops.multinomial(array_ops.zeros([5, 0]), 7)
with self.assertRaisesOpError("num_classes should be positive"):
x.eval()
def testNegativeMinLogits(self):
random_seed.set_random_seed(78844)
with self.test_session(use_gpu=True):
logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
num_samples = 1000
samples = random_ops.multinomial(logits, num_samples).eval()
self.assertAllEqual([[1023] * num_samples], samples)
# Benchmarking code
def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
np.random.seed(1618) # Make it reproducible.
shape = [batch_size, num_classes]
logits_np = np.random.randn(*shape).astype(np.float32)
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
logits = constant_op.constant(logits_np, shape=shape)
native_op = control_flow_ops.group(native_sampler(logits, num_samples))
composed_op = control_flow_ops.group(composed_sampler(logits, num_samples))
native_dt = timeit.timeit(lambda: sess.run(native_op), number=num_iters)
composed_dt = timeit.timeit(lambda: sess.run(composed_op), number=num_iters)
return native_dt, composed_dt
class MultinomialBenchmark(test.Benchmark):
def benchmarkNativeOpVsComposedOps(self):
num_iters = 50
print("Composition of existing ops vs. Native Multinomial op [%d iters]" %
num_iters)
print("BatchSize\tNumClasses\tNumSamples\tsec(composed)\tsec(native)\t"
"speedup")
for batch_size in [32, 128]:
for num_classes in [10000, 100000]:
for num_samples in [1, 4, 32]:
n_dt, c_dt = native_op_vs_composed_ops(batch_size, num_classes,
num_samples, num_iters)
print("%d\t%d\t%d\t%.3f\t%.3f\t%.2f" % (batch_size, num_classes,
num_samples, c_dt, n_dt,
c_dt / n_dt))
self.report_benchmark(
name="native_batch%d_classes%d_s%d" %
(batch_size, num_classes, num_samples),
iters=num_iters,
wall_time=n_dt)
self.report_benchmark(
name="composed_batch%d_classes%d_s%d" %
(batch_size, num_classes, num_samples),
iters=num_iters,
wall_time=c_dt)
if __name__ == "__main__":
test.main()
| apache-2.0 |
felixma/nova | nova/scheduler/chance.py | 36 | 2777 | # Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Chance (Random) Scheduler implementation
"""
import random
from oslo_config import cfg
from nova import exception
from nova.i18n import _
from nova.scheduler import driver
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
class ChanceScheduler(driver.Scheduler):
"""Implements Scheduler as a random node selector."""
def _filter_hosts(self, request_spec, hosts, filter_properties):
"""Filter a list of hosts based on request_spec."""
ignore_hosts = filter_properties.get('ignore_hosts', [])
hosts = [host for host in hosts if host not in ignore_hosts]
return hosts
def _schedule(self, context, topic, request_spec, filter_properties):
"""Picks a host that is up at random."""
elevated = context.elevated()
hosts = self.hosts_up(elevated, topic)
if not hosts:
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)
hosts = self._filter_hosts(request_spec, hosts, filter_properties)
if not hosts:
msg = _("Could not find another compute")
raise exception.NoValidHost(reason=msg)
return random.choice(hosts)
def select_destinations(self, context, request_spec, filter_properties):
"""Selects random destinations."""
num_instances = request_spec['num_instances']
# NOTE(timello): Returns a list of dicts with 'host', 'nodename' and
# 'limits' as keys for compatibility with filter_scheduler.
dests = []
for i in range(num_instances):
host = self._schedule(context, CONF.compute_topic,
request_spec, filter_properties)
host_state = dict(host=host, nodename=None, limits=None)
dests.append(host_state)
if len(dests) < num_instances:
reason = _('There are not enough hosts available.')
raise exception.NoValidHost(reason=reason)
return dests
| apache-2.0 |
tidwall/summitdb | vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py | 1232 | 3478 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to check in the files).
import msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
"someday",
"",
"bytestring",
1328176922000002000,
-2206187877999998000,
0,
-6795364578871345152
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
packer = msgpack.Packer()
serialized = packer.pack(l[i])
f = open(os.path.join(destdir, str(i) + '.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: msgpack_test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| mit |
sameetb-cuelogic/edx-platform-test | lms/djangoapps/instructor/tests/test_certificates.py | 13 | 9569 | """Tests for the certificates panel of the instructor dash. """
import contextlib
import ddt
import mock
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from config_models.models import cache
from courseware.tests.factories import GlobalStaffFactory, InstructorFactory
from certificates.models import CertificateGenerationConfiguration
from certificates import api as certs_api
@ddt.ddt
class CertificatesInstructorDashTest(ModuleStoreTestCase):
"""Tests for the certificate panel of the instructor dash. """
ERROR_REASON = "An error occurred!"
DOWNLOAD_URL = "http://www.example.com/abcd123/cert.pdf"
def setUp(self):
super(CertificatesInstructorDashTest, self).setUp()
self.course = CourseFactory.create()
self.url = reverse(
'instructor_dashboard',
kwargs={'course_id': unicode(self.course.id)}
)
self.global_staff = GlobalStaffFactory()
self.instructor = InstructorFactory(course_key=self.course.id)
# Need to clear the cache for model-based configuration
cache.clear()
# Enable the certificate generation feature
CertificateGenerationConfiguration.objects.create(enabled=True)
def test_visible_only_to_global_staff(self):
# Instructors don't see the certificates section
self.client.login(username=self.instructor.username, password="test")
self._assert_certificates_visible(False)
# Global staff can see the certificates section
self.client.login(username=self.global_staff.username, password="test")
self._assert_certificates_visible(True)
def test_visible_only_when_feature_flag_enabled(self):
# Disable the feature flag
CertificateGenerationConfiguration.objects.create(enabled=False)
cache.clear()
# Now even global staff can't see the certificates section
self.client.login(username=self.global_staff.username, password="test")
self._assert_certificates_visible(False)
@ddt.data("started", "error", "success")
def test_show_certificate_status(self, status):
self.client.login(username=self.global_staff.username, password="test")
with self._certificate_status("honor", status):
self._assert_certificate_status("honor", status)
def test_show_enabled_button(self):
self.client.login(username=self.global_staff.username, password="test")
# Initially, no example certs are generated, so
# the enable button should be disabled
self._assert_enable_certs_button_is_disabled()
with self._certificate_status("honor", "success"):
# Certs are disabled for the course, so the enable button should be shown
self._assert_enable_certs_button(True)
# Enable certificates for the course
certs_api.set_cert_generation_enabled(self.course.id, True)
# Now the "disable" button should be shown
self._assert_enable_certs_button(False)
def test_can_disable_even_after_failure(self):
self.client.login(username=self.global_staff.username, password="test")
with self._certificate_status("honor", "error"):
# When certs are disabled for a course, then don't allow them
# to be enabled if certificate generation doesn't complete successfully
certs_api.set_cert_generation_enabled(self.course.id, False)
self._assert_enable_certs_button_is_disabled()
# However, if certificates are already enabled, allow them
# to be disabled even if an error has occurred
certs_api.set_cert_generation_enabled(self.course.id, True)
self._assert_enable_certs_button(False)
def _assert_certificates_visible(self, is_visible):
"""Check that the certificates section is visible on the instructor dash. """
response = self.client.get(self.url)
if is_visible:
self.assertContains(response, "Certificates")
else:
self.assertNotContains(response, "Certificates")
@contextlib.contextmanager
def _certificate_status(self, description, status):
"""Configure the certificate status by mocking the certificates API. """
patched = 'instructor.views.instructor_dashboard.certs_api.example_certificates_status'
with mock.patch(patched) as certs_api_status:
cert_status = [{
'description': description,
'status': status
}]
if status == 'error':
cert_status[0]['error_reason'] = self.ERROR_REASON
if status == 'success':
cert_status[0]['download_url'] = self.DOWNLOAD_URL
certs_api_status.return_value = cert_status
yield
def _assert_certificate_status(self, cert_name, expected_status):
"""Check the certificate status display on the instructor dash. """
response = self.client.get(self.url)
if expected_status == 'started':
expected = 'Generating example {name} certificate'.format(name=cert_name)
self.assertContains(response, expected)
elif expected_status == 'error':
expected = self.ERROR_REASON
self.assertContains(response, expected)
elif expected_status == 'success':
expected = self.DOWNLOAD_URL
self.assertContains(response, expected)
else:
self.fail("Invalid certificate status: {status}".format(status=expected_status))
def _assert_enable_certs_button_is_disabled(self):
"""Check that the "enable student-generated certificates" button is disabled. """
response = self.client.get(self.url)
expected_html = '<button class="is-disabled" disabled>Enable Student-Generated Certificates</button>'
self.assertContains(response, expected_html)
def _assert_enable_certs_button(self, is_enabled):
"""Check whether the button says "enable" or "disable" cert generation. """
response = self.client.get(self.url)
expected_html = (
'Enable Student-Generated Certificates' if is_enabled
else 'Disable Student-Generated Certificates'
)
self.assertContains(response, expected_html)
@override_settings(CERT_QUEUE='certificates')
@ddt.ddt
class CertificatesInstructorApiTest(ModuleStoreTestCase):
"""Tests for the certificates end-points in the instructor dash API. """
def setUp(self):
super(CertificatesInstructorApiTest, self).setUp()
self.course = CourseFactory.create()
self.global_staff = GlobalStaffFactory()
self.instructor = InstructorFactory(course_key=self.course.id)
# Enable certificate generation
cache.clear()
CertificateGenerationConfiguration.objects.create(enabled=True)
@ddt.data('generate_example_certificates', 'enable_certificate_generation')
def test_allow_only_global_staff(self, url_name):
url = reverse(url_name, kwargs={'course_id': self.course.id})
# Instructors do not have access
self.client.login(username=self.instructor.username, password='test')
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
# Global staff have access
self.client.login(username=self.global_staff.username, password='test')
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
def test_generate_example_certificates(self):
self.client.login(username=self.global_staff.username, password='test')
url = reverse(
'generate_example_certificates',
kwargs={'course_id': unicode(self.course.id)}
)
response = self.client.post(url)
# Expect a redirect back to the instructor dashboard
self._assert_redirects_to_instructor_dash(response)
# Expect that certificate generation started
# Cert generation will fail here because XQueue isn't configured,
# but the status should at least not be None.
status = certs_api.example_certificates_status(self.course.id)
self.assertIsNot(status, None)
@ddt.data(True, False)
def test_enable_certificate_generation(self, is_enabled):
self.client.login(username=self.global_staff.username, password='test')
url = reverse(
'enable_certificate_generation',
kwargs={'course_id': unicode(self.course.id)}
)
params = {'certificates-enabled': 'true' if is_enabled else 'false'}
response = self.client.post(url, data=params)
# Expect a redirect back to the instructor dashboard
self._assert_redirects_to_instructor_dash(response)
# Expect that certificate generation is now enabled for the course
actual_enabled = certs_api.cert_generation_enabled(self.course.id)
self.assertEqual(is_enabled, actual_enabled)
def _assert_redirects_to_instructor_dash(self, response):
"""Check that the response redirects to the certificates section. """
expected_redirect = reverse(
'instructor_dashboard',
kwargs={'course_id': unicode(self.course.id)}
)
expected_redirect += '#view-certificates'
self.assertRedirects(response, expected_redirect)
| agpl-3.0 |
rogerwang/chromium | third_party/tlslite/tlslite/integration/XMLRPCTransport.py | 87 | 5799 | """TLS Lite + xmlrpclib."""
import xmlrpclib
import httplib
from tlslite.integration.HTTPTLSConnection import HTTPTLSConnection
from tlslite.integration.ClientHelper import ClientHelper
class XMLRPCTransport(xmlrpclib.Transport, ClientHelper):
"""Handles an HTTPS transaction to an XML-RPC server."""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new XMLRPCTransport.
An instance of this class can be passed to L{xmlrpclib.ServerProxy}
to use TLS with XML-RPC calls::
from tlslite.api import XMLRPCTransport
from xmlrpclib import ServerProxy
transport = XMLRPCTransport(username="alice", password="abra123")
server = ServerProxy("https://localhost", transport)
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Thus you should be prepared to handle TLS-specific
exceptions when calling methods of L{xmlrpclib.ServerProxy}. See the
client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
def make_connection(self, host):
# create a HTTPS connection object from a host descriptor
host, extra_headers, x509 = self.get_host_info(host)
http = HTTPTLSConnection(host, None,
self.username, self.password,
self.sharedKey,
self.certChain, self.privateKey,
self.checker.cryptoID,
self.checker.protocol,
self.checker.x509Fingerprint,
self.checker.x509TrustList,
self.checker.x509CommonName,
self.settings)
http2 = httplib.HTTP()
http2._setup(http)
return http2 | bsd-3-clause |
yinsu/grpc | src/python/grpcio/grpc/framework/base/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
waheedahmed/edx-platform | lms/djangoapps/certificates/management/commands/cert_whitelist.py | 73 | 4092 | """
Management command which sets or gets the certificate whitelist for a given
user/course
"""
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from certificates.models import CertificateWhitelist
from django.contrib.auth.models import User
def get_user_from_identifier(identifier):
"""
This function takes the string identifier and fetches the relevant user object from the database.
"""
identifier = identifier.strip()
if '@' in identifier:
user = User.objects.get(email=identifier)
else:
user = User.objects.get(username=identifier)
return user
class Command(BaseCommand):
"""
Management command to set or get the certificate whitelist
for a given user(s)/course
"""
help = """
Sets or gets the certificate whitelist for a given
user(s)/course
Add a user or list of users to the whitelist for a course
$ ... cert_whitelist --add joe -c "MITx/6.002x/2012_Fall"
OR
$ ... cert_whitelist --add joe,jenny,tom,jerry -c "MITx/6.002x/2012_Fall"
Remove a user or list of users from the whitelist for a course
$ ... cert_whitelist --del joe -c "MITx/6.002x/2012_Fall"
OR
$ ... cert_whitelist --del joe,jenny,tom,jerry -c "MITx/6.002x/2012_Fall"
Print out who is whitelisted for a course
$ ... cert_whitelist -c "MITx/6.002x/2012_Fall"
"""
option_list = BaseCommand.option_list + (
make_option('-a', '--add',
metavar='USER',
dest='add',
default=False,
help='user or list of users to add to the certificate whitelist'),
make_option('-d', '--del',
metavar='USER',
dest='del',
default=False,
help='user or list of users to remove from the certificate whitelist'),
make_option('-c', '--course-id',
metavar='COURSE_ID',
dest='course_id',
default=False,
help="course id to query"),
)
def handle(self, *args, **options):
course_id = options['course_id']
if not course_id:
raise CommandError("You must specify a course-id")
def update_user_whitelist(username, add=True):
"""
Update the status of whitelist user(s)
"""
user = get_user_from_identifier(username)
cert_whitelist, _created = CertificateWhitelist.objects.get_or_create(
user=user, course_id=course
)
cert_whitelist.whitelist = add
cert_whitelist.save()
# try to parse the serialized course key into a CourseKey
try:
course = CourseKey.from_string(course_id)
except InvalidKeyError:
print(("Course id {} could not be parsed as a CourseKey; "
"falling back to SSCK.from_dep_str").format(course_id))
course = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if options['add'] and options['del']:
raise CommandError("Either remove or add a user, not both")
if options['add'] or options['del']:
user_str = options['add'] or options['del']
add_to_whitelist = True if options['add'] else False
users_list = user_str.split(",")
for username in users_list:
if username.strip():
update_user_whitelist(username, add=add_to_whitelist)
whitelist = CertificateWhitelist.objects.filter(course_id=course)
wl_users = '\n'.join(
"{u.user.username} {u.user.email} {u.whitelist}".format(u=u)
for u in whitelist
)
print("User whitelist for course {0}:\n{1}".format(course_id, wl_users))
| agpl-3.0 |
lenstr/rethinkdb | external/v8_3.30.33.16/build/gyp/test/mac/gyptest-debuginfo.py | 349 | 1152 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests things related to debug information generation.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='debuginfo')
test.build('test.gyp', test.ALL, chdir='debuginfo')
test.built_file_must_exist('libnonbundle_shared_library.dylib.dSYM',
chdir='debuginfo')
test.built_file_must_exist('nonbundle_loadable_module.so.dSYM',
chdir='debuginfo')
test.built_file_must_exist('nonbundle_executable.dSYM',
chdir='debuginfo')
test.built_file_must_exist('bundle_shared_library.framework.dSYM',
chdir='debuginfo')
test.built_file_must_exist('bundle_loadable_module.bundle.dSYM',
chdir='debuginfo')
test.built_file_must_exist('My App.app.dSYM',
chdir='debuginfo')
test.pass_test()
| agpl-3.0 |
ict-felix/stack | modules/resource/manager/stitching-entity/src/delegate/geni/v3/base.py | 2 | 22134 | from core.config import ConfParser
from handler.geni.v3.extensions.geni.util import cred_util
from handler.geni.v3 import exceptions
from handler.geni.v3 import extensions
from lxml import etree
from lxml.builder import ElementMaker
from core import log
logger=log.getLogger('geniv3delegatebase')
import ast
import os
import urllib2
class GENIv3DelegateBase(object):
"""
Please find more information about the concept of Handlers and Delegates via the wiki (e.g. https://github.com/motine/AMsoil/wiki/GENI).
The GENIv3 handler (see above) assumes that this class uses RSpec version 3 when interacting with the client.
For creating new a new RSpec type/extension, please see the wiki via https://github.com/motine/AMsoil/wiki/RSpec.
General parameters for all following methods:
{client_cert} The client's certificate. See [flaskrpcs]XMLRPCDispatcher.requestCertificate(). Also see http://groups.geni.net/geni/wiki/GeniApiCertificates
{credentials} A list of credentials in the format specified at http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#credentials
Dates are converted to UTC and then made timezone-unaware (see http://docs.python.org/2/library/datetime.html#datetime.datetime.astimezone).
"""
ALLOCATION_STATE_UNALLOCATED = 'geni_unallocated'
"""The sliver does not exist. (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#SliverAllocationStates)"""
ALLOCATION_STATE_ALLOCATED = 'geni_allocated'
"""The sliver is offered/promissed, but it does not consume actual resources. This state shall time out at some point in time."""
ALLOCATION_STATE_PROVISIONED = 'geni_provisioned'
"""The sliver is/has been instanciated. Operational states apply here."""
OPERATIONAL_STATE_PENDING_ALLOCATION = 'geni_pending_allocation'
"""Required for aggregates to support. A transient state."""
OPERATIONAL_STATE_NOTREADY = 'geni_notready'
"""Optional. A stable state."""
OPERATIONAL_STATE_CONFIGURING = 'geni_configuring'
"""Optional. A transient state."""
OPERATIONAL_STATE_STOPPING = 'geni_stopping'
"""Optional. A transient state."""
OPERATIONAL_STATE_READY = 'geni_ready'
"""Optional. A stable state."""
OPERATIONAL_STATE_READY_BUSY = 'geni_ready_busy'
"""Optional. A transient state."""
OPERATIONAL_STATE_FAILED = 'geni_failed'
"""Optional. A stable state."""
OPERATIONAL_ACTION_START = 'geni_start'
"""Sliver shall become geni_ready. The AM developer may define more states (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#SliverOperationalActions)"""
OPERATIONAL_ACTION_RESTART = 'geni_restart'
"""Sliver shall become geni_ready again."""
OPERATIONAL_ACTION_STOP = 'geni_stop'
"""Sliver shall become geni_notready."""
def __init__(self):
super(GENIv3DelegateBase, self).__init__()
self.config = ConfParser("geniv3.conf")
self.general_section = self.config.get("general")
self.certificates_section = self.config.get("certificates")
def get_request_extensions_list(self):
"""Not to overwrite by AM developer. Should retrun a list of request extensions (XSD schemas) to be sent back by GetVersion."""
return [uri for prefix, uri in self.get_request_extensions_mapping().items()]
def get_request_extensions_mapping(self):
"""Overwrite by AM developer. Should return a dict of namespace names and request extensions (XSD schema's URLs as string).
Format: {xml_namespace_prefix : namespace_uri, ...}
"""
return {}
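# Hypothetical sketch (added; not part of the original delegate): a concrete
# AM could override the mapping like so. The prefix and schema URL below are
# invented placeholders, not real FELIX/GENI schema locations.
#
#    def get_request_extensions_mapping(self):
#        return {"myext": "http://example.org/schemas/myext/request.xsd"}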
def get_manifest_extensions_mapping(self):
"""Overwrite by AM developer. Should return a dict of namespace names and manifest extensions (XSD schema's URLs as string).
Format: {xml_namespace_prefix : namespace_uri, ...}
"""
return {}
def get_ad_extensions_list(self):
"""Not to overwrite by AM developer. Should retrun a list of request extensions (XSD schemas) to be sent back by GetVersion."""
return [uri for prefix, uri in self.get_ad_extensions_mapping().items()]
def get_ad_extensions_mapping(self):
"""Overwrite by AM developer. Should return a dict of namespace names and advertisement extensions (XSD schema URLs as string) to be sent back by GetVersion.
Format: {xml_namespace_prefix : namespace_uri, ...}
"""
return {}
def is_single_allocation(self):
"""Overwrite by AM developer. Shall return a True or False. When True (not default), and performing one of (Describe, Allocate, Renew, Provision, Delete), such an AM requires you to include either the slice urn or the urn of all the slivers in the same state.
see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#OperationsonIndividualSlivers"""
return False
def get_allocation_mode(self):
"""Overwrite by AM developer. Shall return a either 'geni_single', 'geni_disjoint', 'geni_many'.
It defines whether this AM allows adding slivers to slices at an AM (i.e. calling Allocate multiple times, without first deleting the allocated slivers).
For description of the options see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#OperationsonIndividualSlivers"""
return 'geni_single'
def list_resources(self, client_cert, credentials, geni_available):
"""Overwrite by AM developer. Shall return an RSpec version 3 (advertisement) or raise an GENIv3...Error.
If {geni_available} is set, only return availabe resources.
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#ListResources"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def describe(self, urns, client_cert, credentials):
"""Overwrite by AM developer. Shall return an RSpec version 3 (manifest) or raise an GENIv3...Error.
{urns} contains a list of slice identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Describe"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def allocate(self, slice_urn, client_cert, credentials, rspec, end_time=None):
"""Overwrite by AM developer.
Shall return the two following values or raise a GENIv3...Error.
- a RSpec version 3 (manifest) of newly allocated slivers
- a list of slivers of the format:
[{'geni_sliver_urn' : String,
'geni_expires' : Python-Date,
'geni_allocation_status' : one of the ALLOCATION_STATE_xxx},
...]
Please return like so: "return rspecs, slivers"
{slice_urn} contains a slice identifier (e.g. 'urn:publicid:IDN+ofelia:eict:gcf+slice+myslice').
{end_time} Optional. A python datetime object which determines the desired expiry date of this allocation (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#geni_end_time).
>>> This is the first part of what CreateSliver used to do in previous versions of the AM API. The second part is now done by Provision, and the final part is done by PerformOperationalAction.
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Allocate"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def renew(self, urns, client_cert, credentials, expiration_time, best_effort):
"""Overwrite by AM developer.
Shall return a list of slivers of the following format or raise a GENIv3...Error:
[{'geni_sliver_urn' : String,
'geni_allocation_status' : one of the ALLOCATION_STATE_xxx,
'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
'geni_expires' : Python-Date,
'geni_error' : optional String},
...]
{urns} contains a list of slice identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
{expiration_time} is a python datetime object
{best_effort} determines if the method shall fail in case that not all of the urns can be renewed (best_effort=False).
If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Renew"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def provision(self, urns, client_cert, credentials, best_effort, end_time, geni_users):
"""Overwrite by AM developer.
Shall return the two following values or raise a GENIv3...Error.
- a RSpec version 3 (manifest) of slivers
- a list of slivers of the format:
[{'geni_sliver_urn' : String,
'geni_allocation_status' : one of the ALLOCATION_STATE_xxx,
'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
'geni_expires' : Python-Date,
'geni_error' : optional String},
...]
Please return like so: "return rspecs, slivers"
{urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
{best_effort} determines if the method shall fail in case that not all of the urns can be provisioned (best_effort=False)
{end_time} Optional. A python datetime object which determines the desired expiry date of this provision (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#geni_end_time).
{geni_users} is a list of the format: [ { 'urn' : ..., 'keys' : [sshkey, ...]}, ...]
If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Provision"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def status(self, urns, client_cert, credentials):
"""Overwrite by AM developer.
Shall return the two following values or raise a GENIv3...Error.
- a slice urn
- a list of slivers of the format:
[{'geni_sliver_urn' : String,
'geni_allocation_status' : one of the ALLOCATION_STATE_xxx,
'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
'geni_expires' : Python-Date,
'geni_error' : optional String},
...]
Please return like so: "return slice_urn, slivers"
{urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Status"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def perform_operational_action(self, urns, client_cert, credentials, action, best_effort):
"""Overwrite by AM developer.
Shall return a list of slivers of the following format or raise a GENIv3...Error:
[{'geni_sliver_urn' : String,
'geni_allocation_status' : one of the ALLOCATION_STATE_xxx,
'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
'geni_expires' : Python-Date,
'geni_error' : optional String},
...]
{urns} contains a list of slice or sliver identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
{action} an arbitraty string, but the following should be possible: "geni_start", "geni_stop", "geni_restart"
{best_effort} determines if the method shall fail in case that not all of the urns can be changed (best_effort=False)
If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#PerformOperationalAction"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def delete(self, urns, client_cert, credentials, best_effort):
"""Overwrite by AM developer.
Shall return a list of slivers of the following format or raise a GENIv3...Error:
[{'geni_sliver_urn' : String,
'geni_allocation_status' : one of the ALLOCATION_STATE_xxx,
'geni_expires' : Python-Date,
'geni_error' : optional String},
...]
{urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
{best_effort} determines if the method shall fail in case that not all of the urns can be deleted (best_effort=False)
If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Delete"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def shutdown(self, slice_urn, client_cert, credentials):
"""Overwrite by AM developer.
Shall return True or False or raise a GENIv3...Error.
For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Shutdown"""
raise exceptions.GENIv3GeneralError("Method not implemented yet")
def auth(self, client_cert, credentials, slice_urn=None, privileges=()):
"""
This method authenticates and authorizes.
It returns the client's urn, uuid, email (extracted from the {client_cert}). Example call: "urn, uuid, email = self.auth(...)"
Be aware, the email is not required in the certificate, hence it might be empty.
If the validation fails, a GENIv3ForbiddenError is thrown.
The credentials are checked so the user has all the required privileges (success if any credential fits all privileges).
The client certificate is not checked: this is usually done via the webserver configuration.
This method only treats certificates of type 'geni_sfa'.
Here is a list of possible privileges (format: right_in_credential: [privilege1, privilege2, ...]):
"authority" : ["register", "remove", "update", "resolve", "list", "getcredential", "*"],
"refresh" : ["remove", "update"],
"resolve" : ["resolve", "list", "getcredential"],
"sa" : ["getticket", "redeemslice", "redeemticket", "createslice", "createsliver", "deleteslice", "deletesliver", "updateslice",
"getsliceresources", "getticket", "loanresources", "stopslice", "startslice", "renewsliver",
"deleteslice", "deletesliver", "resetslice", "listslices", "listnodes", "getpolicy", "sliverstatus"],
"embed" : ["getticket", "redeemslice", "redeemticket", "createslice", "createsliver", "renewsliver", "deleteslice",
"deletesliver", "updateslice", "sliverstatus", "getsliceresources", "shutdown"],
"bind" : ["getticket", "loanresources", "redeemticket"],
"control" : ["updateslice", "createslice", "createsliver", "renewsliver", "sliverstatus", "stopslice", "startslice",
"deleteslice", "deletesliver", "resetslice", "getsliceresources", "getgids"],
"info" : ["listslices", "listnodes", "getpolicy"],
"ma" : ["setbootstate", "getbootstate", "reboot", "getgids", "gettrustedcerts"],
"operator" : ["gettrustedcerts", "getgids"],
"*" : ["createsliver", "deletesliver", "sliverstatus", "renewsliver", "shutdown"]
When using the gcf clearinghouse implementation the credentials will have the rights:
- user: "refresh", "resolve", "info" (which resolves to the privileges: "remove", "update", "resolve", "list", "getcredential", "listslices", "listnodes", "getpolicy").
- slice: "refresh", "embed", "bind", "control", "info" (well, do the resolving yourself...)
"""
# check variables
if not isinstance(privileges, tuple):
raise TypeError("Privileges need to be a tuple.")
# collect credentials (only GENI certs, version ignored)
geni_credentials = []
for c in credentials:
if c['geni_type'] == 'geni_sfa':
geni_credentials.append(c['geni_value'])
# Get the cert_root from the configuration settings
root_path = os.path.normpath(os.path.join(os.path.dirname(__file__), "../../../../"))
cert_root = os.path.join(root_path, self.certificates_section.get("cert_root"))
logger.debug("client_certificate trusted, present at: %s" % str(cert_root))
logger.debug("client_certificate:\n%s" % str(client_cert))
if client_cert == None:
raise exceptions.GENIv3ForbiddenError("Could not determine the client SSL certificate")
# test the credential
try:
cred_verifier = extensions.geni.util.cred_util.CredentialVerifier(cert_root)
cred_verifier.verify_from_strings(client_cert, geni_credentials, slice_urn, privileges)
except Exception as e:
raise exceptions.GENIv3ForbiddenError(str(e))
user_gid = extensions.sfa.trust.gid.GID(string=client_cert)
user_urn = user_gid.get_urn()
user_uuid = user_gid.get_uuid()
user_email = user_gid.get_email()
return user_urn, user_uuid, user_email # TODO document return
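# Illustrative sketch (added; not part of the original base class): a concrete
# delegate would typically call auth() at the start of each AM API method.
# The privilege tuple and the use of urns[0] below are assumptions made for
# this example, not something mandated by the module.
#
#    def status(self, urns, client_cert, credentials):
#        user_urn, user_uuid, user_email = self.auth(
#            client_cert, credentials, slice_urn=urns[0],
#            privileges=('sliverstatus',))
#        ...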
def urn_type(self, urn):
"""Returns the type of the urn (e.g. slice, sliver).
For the possible types see: http://groups.geni.net/geni/wiki/GeniApiIdentifiers#ExamplesandUsage"""
return urn.split('+')[2].strip()
def lxml_ad_root(self):
"""Returns a xml root node with the namespace extensions specified by self.get_ad_extensions_mapping."""
return etree.Element('rspec', self.get_ad_extensions_mapping(), type='advertisement')
def lxml_manifest_root(self):
"""Returns a xml root node with the namespace extensions specified by self.get_manifest_extensions_mapping."""
return etree.Element('rspec', self.get_manifest_extensions_mapping(), type='manifest')
def lxml_to_string(self, rspec):
"""Converts a lxml root node to string (for returning to the client)."""
return etree.tostring(rspec, pretty_print=True)
def lxml_ad_element_maker(self, prefix):
"""Returns a lxml.builder.ElementMaker configured for avertisements and the namespace given by {prefix}."""
ext = self.get_ad_extensions_mapping()
return ElementMaker(namespace=ext[prefix], nsmap=ext)
def lxml_manifest_element_maker(self, prefix):
"""Returns a lxml.builder.ElementMaker configured for manifests and the namespace given by {prefix}."""
ext = self.get_manifest_extensions_mapping()
return ElementMaker(namespace=ext[prefix], nsmap=ext)
def lxml_parse_rspec(self, rspec_string):
"""Returns a the root element of the given {rspec_string} as lxml.Element.
If the config key is set, the rspec is validated with the schemas found at the URLs specified in schemaLocation of the the given RSpec."""
# parse
rspec_root = etree.fromstring(rspec_string)
# validate RSpec against specified schemaLocations
should_validate = ast.literal_eval(self.general_section.get("rspec_validation"))
if should_validate:
schema_locations = rspec_root.get("{http://www.w3.org/2001/XMLSchema-instance}schemaLocation")
if schema_locations:
schema_location_list = schema_locations.split(" ")
schema_location_list = map(lambda x: x.strip(), schema_location_list) # strip whitespaces
for sl in schema_location_list:
try:
xmlschema_contents = urllib2.urlopen(sl) # try to download the schema
xmlschema_doc = etree.parse(xmlschema_contents)
xmlschema = etree.XMLSchema(xmlschema_doc)
xmlschema.validate(rspec_root)
except Exception as e:
logger.warning("RSpec validation failed failed (%s: %s)" % (sl, str(e),))
else:
logger.warning("RSpec does not specify any schema locations")
return rspec_root
def lxml_elm_has_request_prefix(self, lxml_elm, ns_name):
return str(lxml_elm.tag).startswith("{%s}" % (self.get_request_extensions_mapping()[ns_name],))
def lxml_elm_equals_request_tag(self, lxml_elm, ns_name, tagname):
"""Determines if the given tag by {ns_name} and {tagname} equals lxml_tag. The namespace URI is looked up via get_request_extensions_mapping()['ns_name']"""
return ("{%s}%s" % (self.get_request_extensions_mapping()[ns_name], tagname)) == str(lxml_elm.tag)
| apache-2.0 |
astagi/taiga-back | tests/integration/test_throwttling.py | 20 | 4739 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest import mock
from django.core.urlresolvers import reverse
from django.core.cache import cache
from taiga.base.utils import json
from .. import factories as f
pytestmark = pytest.mark.django_db
anon_rate_path = "taiga.base.throttling.AnonRateThrottle.get_rate"
user_rate_path = "taiga.base.throttling.UserRateThrottle.get_rate"
import_rate_path = "taiga.export_import.throttling.ImportModeRateThrottle.get_rate"
def test_anonimous_throttling_policy(client, settings):
f.create_project()
url = reverse("projects-list")
with mock.patch(anon_rate_path) as anon_rate, \
mock.patch(user_rate_path) as user_rate, \
mock.patch(import_rate_path) as import_rate:
anon_rate.return_value = "2/day"
user_rate.return_value = "4/day"
import_rate.return_value = "7/day"
cache.clear()
response = client.json.get(url)
assert response.status_code == 200
response = client.json.get(url)
assert response.status_code == 200
response = client.json.get(url)
assert response.status_code == 429
def test_user_throttling_policy(client, settings):
project = f.create_project()
f.MembershipFactory.create(project=project, user=project.owner, is_owner=True)
url = reverse("projects-detail", kwargs={"pk": project.pk})
client.login(project.owner)
with mock.patch(anon_rate_path) as anon_rate, \
mock.patch(user_rate_path) as user_rate, \
mock.patch(import_rate_path) as import_rate:
anon_rate.return_value = "2/day"
user_rate.return_value = "4/day"
import_rate.return_value = "7/day"
cache.clear()
response = client.json.get(url)
assert response.status_code == 200
response = client.json.get(url)
assert response.status_code == 200
response = client.json.get(url)
assert response.status_code == 200
response = client.json.get(url)
assert response.status_code == 200
response = client.json.get(url)
assert response.status_code == 429
client.logout()
def test_import_mode_throttling_policy(client, settings):
project = f.create_project()
f.MembershipFactory.create(project=project, user=project.owner, is_owner=True)
project.default_issue_type = f.IssueTypeFactory.create(project=project)
project.default_issue_status = f.IssueStatusFactory.create(project=project)
project.default_severity = f.SeverityFactory.create(project=project)
project.default_priority = f.PriorityFactory.create(project=project)
project.save()
url = reverse("importer-issue", args=[project.pk])
data = {
"subject": "Test"
}
client.login(project.owner)
with mock.patch(anon_rate_path) as anon_rate, \
mock.patch(user_rate_path) as user_rate, \
mock.patch(import_rate_path) as import_rate:
anon_rate.return_value = "2/day"
user_rate.return_value = "4/day"
import_rate.return_value = "7/day"
cache.clear()
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201
response = client.json.post(url, json.dumps(data))
assert response.status_code == 201
response = client.json.post(url, json.dumps(data))
assert response.status_code == 429
client.logout()
| agpl-3.0 |
JGarcia-Panach/odoo | addons/base_iban/__openerp__.py | 260 | 1724 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'IBAN Bank Accounts',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
This module installs the base for IBAN (International Bank Account Number) bank accounts and checks for its validity.
======================================================================================================================
It provides the ability to extract the correctly represented local accounts from IBAN accounts
with a single statement.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base'],
'data': ['base_iban_data.xml' , 'base_iban_view.xml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
veusz/veusz | veusz/datasets/plugin.py | 1 | 6627 | # Copyright (C) 2016 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from .oned import Dataset1DBase, Dataset
from .twod import Dataset2DBase, Dataset2D
from .nd import DatasetNDBase, DatasetND
from .text import DatasetText
from .date import DatasetDateTimeBase, DatasetDateTime
class _DatasetPlugin:
"""Shared methods for dataset plugins."""
def __init__(self, manager, ds):
self.pluginmanager = manager
self.pluginds = ds
def getPluginData(self, attr):
self.pluginmanager.update()
return getattr(self.pluginds, attr)
def linkedInformation(self):
"""Return information about how this dataset was created."""
fields = []
for name, val in self.pluginmanager.fields.items():
fields.append('%s: %s' % (str(name), str(val)))
try:
shape = [str(x) for x in self.data.shape]
except AttributeError:
shape = [str(len(self.data))]
shape = '\u00d7'.join(shape)
return '%s plugin dataset (fields %s), size %s' % (
self.pluginmanager.plugin.name,
', '.join(fields),
shape)
def canUnlink(self):
"""Can relationship be unlinked?"""
return True
def deleteRows(self, row, numrows):
pass
def insertRows(self, row, numrows, rowdata):
pass
def saveDataRelationToText(self, fileobj, name):
"""Save plugin to file, if this is the first one."""
# only try to save if this is the 1st dataset of this plugin
# manager in the document, so that we don't save more than once
docdatasets = set( self.document.data.values() )
for ds in self.pluginmanager.veuszdatasets:
if ds in docdatasets:
if ds is self:
# is 1st dataset
self.pluginmanager.saveToFile(fileobj)
return
def saveDataDumpToText(self, fileobj, name):
"""Save data to text: not used."""
def saveDataDumpToHDF5(self, group, name):
"""Save data to HDF5: not used."""
@property
def dstype(self):
"""Return type of plugin."""
return self.pluginmanager.plugin.name
class Dataset1DPlugin(_DatasetPlugin, Dataset1DBase):
"""Return 1D dataset from a plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
Dataset1DBase.__init__(self)
def userSize(self):
"""Size of dataset."""
return str( self.data.shape[0] )
def __getitem__(self, key):
"""Return a dataset based on this dataset
We override this from DatasetConcreteBase as it would return a
DatsetExpression otherwise, not chopped sets of data.
"""
return Dataset(**self._getItemHelper(key))
# the parent class assigns to these attributes, so each property gets a no-op setter
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
serr = property(
lambda self: self.getPluginData('serr'),
lambda self, val: None )
nerr = property(
lambda self: self.getPluginData('nerr'),
lambda self, val: None )
perr = property(
lambda self: self.getPluginData('perr'),
lambda self, val: None )
class Dataset2DPlugin(_DatasetPlugin, Dataset2DBase):
"""Return 2D dataset from a plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
Dataset2DBase.__init__(self)
def __getitem__(self, key):
return Dataset2D(
self.data[key], xrange=self.xrange, yrange=self.yrange,
xedge=self.xedge, yedge=self.yedge,
xcent=self.xcent, ycent=self.ycent)
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
xrange = property(
lambda self: self.getPluginData('rangex'),
lambda self, val: None )
yrange = property(
lambda self: self.getPluginData('rangey'),
lambda self, val: None )
xedge = property(
lambda self: self.getPluginData('xedge'),
lambda self, val: None )
yedge = property(
lambda self: self.getPluginData('yedge'),
lambda self, val: None )
xcent = property(
lambda self: self.getPluginData('xcent'),
lambda self, val: None )
ycent = property(
lambda self: self.getPluginData('ycent'),
lambda self, val: None )
class DatasetNDPlugin(_DatasetPlugin, DatasetNDBase):
"""Return N-dimensional dataset from plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
DatasetNDBase.__init__(self)
def __getitem__(self, key):
return DatasetND(self.data[key])
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
class DatasetTextPlugin(_DatasetPlugin, DatasetText):
"""Return text dataset from a plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
DatasetText.__init__(self, [])
def __getitem__(self, key):
return DatasetText(self.data[key])
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
class DatasetDateTimePlugin(_DatasetPlugin, DatasetDateTimeBase):
"""Return date dataset from plugin."""
def __init__(self, manager, ds):
_DatasetPlugin.__init__(self, manager, ds)
DatasetDateTimeBase.__init__(self)
self.serr = self.perr = self.nerr = None
def __getitem__(self, key):
return DatasetDateTime(self.data[key])
data = property(
lambda self: self.getPluginData('data'),
lambda self, val: None )
| gpl-2.0 |
fusionbox/mezzanine | mezzanine/core/auth_backends.py | 9 | 1838 | from __future__ import unicode_literals
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.tokens import default_token_generator
from django.db.models import Q
from django.utils.http import base36_to_int
from mezzanine.utils.models import get_user_model
User = get_user_model()
class MezzanineBackend(ModelBackend):
"""
Extends Django's ``ModelBackend`` to allow login via username,
email, or verification token.
Args are either ``username`` and ``password``, or ``uidb36``
and ``token``. In either case, ``is_active`` can also be given.
For login, is_active is not given, so that the login form can
raise a specific error for inactive users.
For password reset, True is given for is_active.
For signup verification, False is given for is_active.
"""
def authenticate(self, **kwargs):
if kwargs:
username = kwargs.pop("username", None)
if username:
username_or_email = Q(username=username) | Q(email=username)
password = kwargs.pop("password", None)
try:
user = User.objects.get(username_or_email, **kwargs)
except User.DoesNotExist:
pass
else:
if user.check_password(password):
return user
else:
if 'uidb36' not in kwargs:
return
kwargs["id"] = base36_to_int(kwargs.pop("uidb36"))
token = kwargs.pop("token")
try:
user = User.objects.get(**kwargs)
except User.DoesNotExist:
pass
else:
if default_token_generator.check_token(user, token):
return user
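# --- Illustrative usage sketch (added for documentation; not part of the
# original module). It shows how this backend is typically exercised; the
# credentials, uid and token values below are hypothetical placeholders.
def _example_backend_usage():  # pragma: no cover - documentation only
    backend = MezzanineBackend()
    # Login form: ``username`` may hold a username or an email address, and
    # ``is_active`` is omitted so the form can report inactive users itself.
    user = backend.authenticate(username="alice@example.com",
                                password="secret")
    # Password reset / signup verification: base36 uid plus token.
    verified = backend.authenticate(uidb36="1", token="3x9-hypothetical",
                                    is_active=False)
    return user, verified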
| bsd-2-clause |
gratefulfrog/lib | python/pymol/internal.py | 1 | 21631 |
import cmd
import types
from pymol import _cmd
import threading
import traceback
import thread
import os
import re
import time
import pymol
from chempy import io
from cmd import DEFAULT_ERROR, DEFAULT_SUCCESS, loadable, _load2str, Shortcut, \
is_string, is_ok
# cache management:
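# NOTE (added comment, inferred from the code below rather than from any
# external documentation): each cache entry appears to be a list of the form
#   [size_in_bytes, hash_string, extra_key, cached_data,
#    access_count, last_access_timestamp]
# entry[0] is summed to track total memory, entry[1]/entry[2] are matched in
# _cache_get, entry[3] is the payload, and entry[4]/entry[5] drive the
# least-recently/least-frequently-used eviction in _cache_purge.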
def _cache_validate(_self=cmd):
r = DEFAULT_SUCCESS
try:
_self.lock_data(_self)
_pymol = _self._pymol
if not hasattr(_pymol,"_cache"):
_pymol._cache = []
if not hasattr(_pymol,"_cache_memory"):
_pymol._cache_memory = 0
finally:
_self.unlock_data(_self)
def _cache_clear(_self=cmd):
r = DEFAULT_SUCCESS
try:
_self.lock_data(_self)
_pymol = _self._pymol
_pymol._cache = []
_pymol._cache_memory = 0
finally:
_self.unlock_data(_self)
return r
def _cache_mark(_self=cmd):
r = DEFAULT_SUCCESS
try:
_self.lock_data(_self)
_pymol = _self._pymol
_cache_validate(_self)
for entry in _self._pymol._cache:
entry[5] = 0.0
finally:
_self.unlock_data(_self)
return r
def _cache_purge(max_size, _self=cmd):
result = 0 # value returned when the cache is already empty
try:
_self.lock_data(_self)
_pymol = _self._pymol
_cache_validate(_self)
if len(_pymol._cache):
cur_size = sum(x[0] for x in _pymol._cache)
if max_size>=0: # purge to reduce size
now = time.time()
# sort by last access time
new_cache = map(lambda x:[(now-x[5])/x[4],x], _pymol._cache)
new_cache.sort()
new_cache = map(lambda x:x[1],new_cache)
# remove oldest entries one by one until size requirement is met
while (cur_size>max_size) and (len(new_cache)>1):
entry = new_cache.pop()
cur_size = cur_size - entry[0]
_pymol._cache = new_cache
_pymol._cache_memory = cur_size
else: # purge to eliminate unused entries
new_cache = []
for entry in _pymol._cache:
if entry[5] == 0.0:
cur_size = cur_size - entry[0]
else:
new_cache.append(entry)
_pymol._cache = new_cache
_pymol._cache_memory = cur_size
result = _pymol._cache_memory
finally:
_self.unlock_data(_self)
return result
def _cache_get(target, hash_size = None, _self=cmd):
result = None
try:
_self.lock_data(_self)
try:
if hash_size == None:
hash_size = len(target[1])
key = target[1][0:hash_size]
# should optimize this with a dictionary lookup, key -> index in _cache
for entry in _self._pymol._cache:
if entry[1][0:hash_size] == key:
if entry[2] == target[2]:
while len(entry)<6:
entry.append(0)
entry[4] = entry[4] + 1 # access count
entry[5] = time.time() # timestamp
result = entry[3]
break
except:
traceback.print_exc()
finally:
_self.unlock_data(_self)
return result
def _cache_set(new_entry, max_size, _self=cmd):
r = DEFAULT_SUCCESS
try:
_self.lock_data(_self)
_pymol = _self._pymol
_cache_validate(_self)
try:
hash_size = len(new_entry[1])
key = new_entry[1][0:hash_size]
count = 0
found = 0
new_entry[4] = new_entry[4] + 1 # incr access count
new_entry[5] = time.time() # timestamp
for entry in _pymol._cache:
if entry[1][0:hash_size] == key:
if entry[2] == new_entry[2]: # dupe (shouldn't happen)
entry[3] = new_entry[3]
found = 1
break
count = count + 1
if not found:
_pymol._cache.append(new_entry)
_pymol._cache_memory = _pymol._cache_memory + new_entry[0]
if max_size > 0:
if _pymol._cache_memory > max_size:
_cache_purge(max_size, _self)
except:
traceback.print_exc()
finally:
_self.unlock_data(_self)
return r
# ray tracing threads
def _ray_anti_spawn(thread_info,_self=cmd):
# WARNING: internal routine, subject to change
# internal routine to support multithreaded raytracing
thread_list = []
for a in thread_info[1:]:
t = threading.Thread(target=_cmd.ray_anti_thread,
args=(_self._COb,a))
t.setDaemon(1)
thread_list.append(t)
for t in thread_list:
t.start()
_cmd.ray_anti_thread(_self._COb,thread_info[0])
for t in thread_list:
t.join()
def _ray_hash_spawn(thread_info,_self=cmd):
# WARNING: internal routine, subject to change
# internal routine to support multithreaded raytracing
thread_list = []
for a in thread_info[1:]:
if a != None:
t = threading.Thread(target=_cmd.ray_hash_thread,
args=(_self._COb,a))
t.setDaemon(1)
thread_list.append(t)
for t in thread_list:
t.start()
if thread_info[0] != None:
_cmd.ray_hash_thread(_self._COb,thread_info[0])
for t in thread_list:
t.join()
def _ray_spawn(thread_info,_self=cmd):
# WARNING: internal routine, subject to change
# internal routine to support multithreaded raytracing
thread_list = []
for a in thread_info[1:]:
t = threading.Thread(target=_cmd.ray_trace_thread,
args=(_self._COb,a))
t.setDaemon(1)
thread_list.append(t)
for t in thread_list:
t.start()
_cmd.ray_trace_thread(_self._COb,thread_info[0])
for t in thread_list:
t.join()
def _coordset_update_thread(list_lock,thread_info,_self=cmd):
# WARNING: internal routine, subject to change
while 1:
list_lock.acquire()
if not len(thread_info):
list_lock.release()
break
else:
info = thread_info.pop(0)
list_lock.release()
_cmd.coordset_update_thread(_self._COb,info)
def _coordset_update_spawn(thread_info,n_thread,_self=cmd):
# WARNING: internal routine, subject to change
if len(thread_info):
list_lock = threading.Lock() # mutex for list
thread_list = []
for a in range(1,n_thread):
t = threading.Thread(target=_coordset_update_thread,
args=(list_lock,thread_info))
t.setDaemon(1)
thread_list.append(t)
for t in thread_list:
t.start()
_coordset_update_thread(list_lock,thread_info)
for t in thread_list:
t.join()
def _object_update_thread(list_lock,thread_info,_self=cmd):
# WARNING: internal routine, subject to change
while 1:
list_lock.acquire()
if not len(thread_info):
list_lock.release()
break
else:
info = thread_info.pop(0)
list_lock.release()
_cmd.object_update_thread(_self._COb,info)
def _object_update_spawn(thread_info,n_thread,_self=cmd):
# WARNING: internal routine, subject to change
if len(thread_info):
list_lock = threading.Lock() # mutex for list
thread_list = []
for a in range(1,n_thread):
t = threading.Thread(target=_object_update_thread,
args=(list_lock,thread_info))
t.setDaemon(1)
thread_list.append(t)
for t in thread_list:
t.start()
_object_update_thread(list_lock,thread_info)
for t in thread_list:
t.join()
# status reporting
# do command (while API already locked)
def _do(cmmd,log=0,echo=1,_self=cmd):
return _cmd.do(_self._COb,cmmd,log,echo)
# movie rendering
def _mpng(prefix, first=-1, last=-1, preserve=0, modal=0,
format=-1, mode=-1, quiet=1, _self=cmd): # INTERNAL
import sys
format = int(format)
# WARNING: internal routine, subject to change
try:
_self.lock(_self)
fname = prefix
if re.search("[0-9]*\.png$",fname): # remove numbering, etc.
fname = re.sub("[0-9]*\.png$","",fname)
if re.search("[0-9]*\.ppm$",fname):
if format<0:
format = 1 # PPM
fname = re.sub("[0-9]*\.ppm$","",fname)
if format<0:
format = 0 # default = PNG
fname = cmd.exp_path(fname)
r = _cmd.mpng_(_self._COb,str(fname),int(first),
int(last),int(preserve),int(modal),
format,int(mode),int(quiet))
finally:
_self.unlock(-1,_self)
return r
# copy image
def _copy_image(_self=cmd,quiet=1):
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.copy_image(_self._COb,int(quiet))
finally:
_self.unlock(r,_self)
return r
# loading
def file_read(finfo):
'''
Read a file, possibly gzipped or bzipped, and return the
uncompressed file contents as a string.
finfo may be a filename, URL or open file handle.
'''
try:
if not isinstance(finfo, basestring):
handle = finfo
elif '://' in finfo:
import urllib2 as urllib
handle = urllib.urlopen(finfo)
else:
handle = open(finfo, 'rb')
contents = handle.read()
handle.close()
except IOError:
raise pymol.CmdException('failed to open file "%s"' % finfo)
if contents[:2] == '\x1f\x8b': # gzip magic number
import cStringIO, gzip
fakestream = cStringIO.StringIO(contents)
return gzip.GzipFile(fileobj=fakestream).read()
if contents[:2] == 'BZ' and contents[4:10] == '1AY&SY': # bzip magic
import bz2
return bz2.decompress(contents)
return contents
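# Illustrative sketch (added for documentation): file_read() accepts a local
# path, a URL or an open handle and transparently decompresses gzip/bzip2
# payloads by inspecting their magic numbers. The file names below are
# hypothetical examples, not files shipped with PyMOL.
def _example_file_read_usage():  # documentation only, never called
    local_text = file_read("structures/1abc.pdb.gz")     # gunzipped on the fly
    remote_text = file_read("http://example.com/1abc.pdb")  # fetched via urllib2
    return local_text, remote_text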
def _load(oname,finfo,state,ftype,finish,discrete,
quiet=1,multiplex=0,zoom=-1,mimic=1,
_self=cmd):
# WARNING: internal routine, subject to change
# caller must already hold API lock
# NOTE: state index assumes 1-based state
r = DEFAULT_ERROR
size = 0
if ftype not in (loadable.model,loadable.brick):
if ftype == loadable.r3d:
import cgo
obj = cgo.from_r3d(finfo)
if is_ok(obj):
r = _cmd.load_object(_self._COb,str(oname),obj,int(state)-1,loadable.cgo,
int(finish),int(discrete),int(quiet),
int(zoom))
else:
print "Load-Error: Unable to open file '%s'."%finfo
elif ftype == loadable.cc1: # ChemDraw 3D
obj = io.cc1.fromFile(finfo)
if obj:
r = _cmd.load_object(_self._COb,str(oname),obj,int(state)-1,loadable.model,
int(finish),int(discrete),
int(quiet),int(zoom))
elif ftype == loadable.moe:
try:
# BEGIN PROPRIETARY CODE SEGMENT
from epymol import moe
moe_str = file_read(finfo)
r = moe.read_moestr(moe_str,str(oname),int(state),
int(finish),int(discrete),int(quiet),int(zoom),_self=_self)
# END PROPRIETARY CODE SEGMENT
except ImportError:
print "Error: .MOE format not supported by this PyMOL build."
if _self._raising(-1,_self): raise pymol.CmdException
elif ftype == loadable.mae:
try:
# BEGIN PROPRIETARY CODE SEGMENT
from epymol import mae
mae_str = file_read(finfo)
r = mae.read_maestr(mae_str,str(oname),
int(state),
int(finish),int(discrete),
int(quiet),int(zoom),int(multiplex),
int(mimic),
_self=_self)
# END PROPRIETARY CODE SEGMENT
except ImportError:
print "Error: .MAE format not supported by this PyMOL build."
if _self._raising(-1,_self): raise pymol.CmdException
else:
if ftype in _load2str and ('://' in finfo or cmd.gz_ext_re.search(finfo)):
# NOTE: we could safely always do this, not only for URLs and
# compressed files. But I don't want to change the old behavior
# that regular files are read from the C function.
finfo = file_read(finfo)
ftype = _load2str[ftype]
r = _cmd.load(_self._COb,str(oname),finfo,int(state)-1,int(ftype),
int(finish),int(discrete),int(quiet),
int(multiplex),int(zoom))
else:
try:
x = io.pkl.fromFile(finfo)
if isinstance(x,types.ListType) or isinstance(x,types.TupleType):
for a in x:
r = _cmd.load_object(_self._COb,str(oname),a,int(state)-1,
int(ftype),0,int(discrete),int(quiet),
int(zoom))
if(state>0):
state = state + 1
_cmd.finish_object(_self._COb,str(oname))
else:
r = _cmd.load_object(_self._COb,str(oname),x,
int(state)-1,int(ftype),
int(finish),int(discrete),
int(quiet),int(zoom))
except:
# traceback.print_exc()
print "Load-Error: Unable to load file '%s'." % finfo
return r
# function keys and other specials
def _special(k,x,y,m=0,_self=cmd): # INTERNAL (invoked when special key is pressed)
pymol=_self._pymol
# WARNING: internal routine, subject to change
k=int(k)
m=int(m)
my_special = _self.special
if(m>0) and (m<5):
my_special = (_self.special,
_self.shft_special,
_self.ctrl_special,
_self.ctsh_special,
_self.alt_special)[m]
if my_special.has_key(k):
if my_special[k][1]:
apply(my_special[k][1],my_special[k][2],my_special[k][3])
else:
key = my_special[k][0]
if(m>0) and (m<5):
key = ('','SHFT-','CTRL-','CTSH-','ALT-')[m] + key
if pymol._scene_dict.has_key(key):
_self.scene(key)
elif is_string(pymol._scene_dict_sc.interpret(key+"-")):
_self.scene(pymol._scene_dict_sc[key+"-"])
elif pymol._view_dict.has_key(key):
_self.view(key)
elif is_string(pymol._view_dict_sc.interpret(key+"-")):
_self.view(pymol._view_dict_sc[key+"-"])
return None
# control keys
def _ctrl(k,_self=cmd):
# WARNING: internal routine, subject to change
if _self.ctrl.has_key(k):
ck = _self.ctrl[k]
if ck[0]!=None:
apply(ck[0],ck[1],ck[2])
return None
# alt keys
def _alt(k,_self=cmd):
# WARNING: internal routine, subject to change
if _self.alt.has_key(k):
ak = _self.alt[k]
if ak[0]!=None:
apply(ak[0],ak[1],ak[2])
return None
# command (apple) keys
def _cmmd(k,_self=cmd):
# WARNING: internal routine, subject to change
# command-key on macs
if _self.cmmd.has_key(k):
ak = _self.cmmd[k]
if ak[0]!=None:
apply(ak[0],ak[1],ak[2])
return None
def _ctsh(k,_self=cmd):
# WARNING: internal routine, subject to change
# ctrl-shift key combination
if _self.ctsh.has_key(k):
ak = _self.ctsh[k]
if ak[0]!=None:
apply(ak[0],ak[1],ak[2])
return None
# writing PNG files (thread-unsafe)
def _png(a,width=0,height=0,dpi=-1.0,ray=0,quiet=1,prior=0,format=-1,_self=cmd):
# INTERNAL - can only be safely called by GLUT thread (unless prior == 1)
# WARNING: internal routine, subject to change
try:
_self.lock(_self)
fname = a
if re.search("\.ppm$",fname):
if format<0:
format = 1 # PPM
elif not re.search("\.png$",fname):
if a[0:1] != chr(1): # not an encoded file descriptor (integer)
fname = fname +".png"
if format<0:
format = 0 # PNG
fname = cmd.exp_path(fname)
r = _cmd.png(_self._COb,str(fname),int(width),int(height),
float(dpi),int(ray),int(quiet),int(prior),int(format))
finally:
_self.unlock(-1,_self)
return r
# quitting (thread-specific)
def _quit(code=0, _self=cmd):
pymol=_self._pymol
# WARNING: internal routine, subject to change
try:
_self.lock(_self)
try: # flush and close log if possible to avoid threading exception
if pymol._log_file!=None:
try:
pymol._log_file.flush()
except:
pass
pymol._log_file.close()
del pymol._log_file
except:
pass
if _self.reaper!=None:
try:
_self.reaper.join()
except:
pass
r = _cmd.quit(_self._COb, int(code))
finally:
_self.unlock(-1,_self)
return r
# screen redraws (thread-specific)
def _refresh(swap_buffers=1,_self=cmd): # Only call with GLUT thread!
# WARNING: internal routine, subject to change
r = None
try:
_self.lock(_self)
if hasattr(_self._pymol,'glutThread'):
if thread.get_ident() == _self._pymol.glutThread:
if swap_buffers:
r = _cmd.refresh_now(_self._COb)
else:
r = _cmd.refresh(_self._COb)
else:
r = _cmd.refresh_later(_self._COb)
else:
r = _cmd.refresh_later(_self._COb)
finally:
_self.unlock(-1,_self)
return r
# stereo (platform dependent )
def _sgi_stereo(flag): # SGI-SPECIFIC - bad bad bad
import sys
# WARNING: internal routine, subject to change
if sys.platform[0:4]=='irix':
if os.path.exists("/usr/gfx/setmon"):
if flag:
mode = os.environ.get('PYMOL_SGI_STEREO','1024x768_96s')
os.system("/usr/gfx/setmon -n "+mode)
else:
mode = os.environ.get('PYMOL_SGI_MONO','72hz')
os.system("/usr/gfx/setmon -n "+mode)
# color alias interpretation
def _interpret_color(_self,color):
# WARNING: internal routine, subject to change
_validate_color_sc(_self)
new_color = _self.color_sc.interpret(color)
if new_color:
if is_string(new_color):
return new_color
else:
_self.color_sc.auto_err(color,'color')
else:
return color
def _validate_color_sc(_self=cmd):
# WARNING: internal routine, subject to change
if _self.color_sc == None: # update color shortcuts if needed
lst = _self.get_color_indices()
lst.extend([('default',-1),('auto',-2),('current',-3),('atomic',-4)])
_self.color_sc = Shortcut(map(lambda x:x[0],lst))
color_dict = {}
for a in lst: color_dict[a[0]]=a[1]
def _invalidate_color_sc(_self=cmd):
# WARNING: internal routine, subject to change
_self.color_sc = None
def _get_color_sc(_self=cmd):
# WARNING: internal routine, subject to change
_validate_color_sc(_self=_self)
return _self.color_sc
def _get_feedback(_self=cmd): # INTERNAL
# WARNING: internal routine, subject to change
l = []
if _self.lock_attempt(_self):
try:
r = _cmd.get_feedback(_self._COb)
while r:
l.append(r)
r = _cmd.get_feedback(_self._COb)
finally:
_self.unlock(-1,_self)
else:
l = None
return l
get_feedback = _get_feedback # for legacy compatibility
def _fake_drag(_self=cmd): # internal
_self.lock(_self)
try:
_cmd.fake_drag(_self._COb)
finally:
_self.unlock(-1,_self)
return 1
def _sdof(tx,ty,tz,rx,ry,rz,_self=cmd):
_cmd._sdof(_self._COb,tx,ty,tz,rx,ry,rz)
# testing tools
# for comparing floating point numbers calculated using
# different FPUs and which may show some wobble...
def _dump_floats(lst,format="%7.3f",cnt=9):
# WARNING: internal routine, subject to change
c = cnt
for a in lst:
print format%a,
c = c -1
if c<=0:
print
c=cnt
if c!=cnt:
print
def _dump_ufloats(lst,format="%7.3f",cnt=9):
# WARNING: internal routine, subject to change
c = cnt
for a in lst:
print format%abs(a),
c = c -1
if c<=0:
print
c=cnt
if c!=cnt:
print
# HUH?
def _adjust_coord(a,i,x):
a.coord[i]=a.coord[i]+x
return None
| gpl-2.0 |
rh-lab-q/bkrdoc | bkrdoc/analysis/doc_information_representation.py | 1 | 1389 | #!/usr/bin/python
__author__ = 'Jiri_Kulda'
from bkrdoc.analysis import Option
class DocumentationInformation(object):
"""
This class contains data to describe every BeakerLib command
:param cmd_name: Command name
:param topic_object: Instance of Topic class
:param action: BeakerLib command action
:param importance: BeakerLib command importance
:param options: Instance of Option class
"""
command_name = ""
topic = ""
options = Option
action = []
importance = ""
def __init__(self, cmd_name, topic_object, action, importance, options=None):
if options is None:
self.options = Option()
else:
self.options = options
self.command_name = cmd_name
self.topic = topic_object
self.action = action
self.importance = importance
def get_topic(self):
return self.topic.get_topic()
def get_topic_subject(self):
return self.topic.get_subject()
def get_action(self):
return self.action
def get_importance(self):
return self.importance
def get_status(self):
return self.options.get_status()
def get_option(self):
return self.options.get_option()
def set_status(self, status):
self.options.set_status(status)
def get_command_name(self):
return self.command_name
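# Illustrative sketch (added for documentation): building and querying a
# DocumentationInformation record. ``SimpleTopic`` is a hypothetical stand-in
# for the real Topic class used elsewhere in bkrdoc; only the two accessors
# exercised below are assumed to exist on it.
def _example_documentation_information():  # documentation only
    class SimpleTopic(object):
        def get_topic(self):
            return "FILE"

        def get_subject(self):
            return ["/etc/passwd"]

    info = DocumentationInformation("rlAssertExists", SimpleTopic(),
                                    ["exists"], importance=5)
    return info.get_command_name(), info.get_topic(), info.get_action()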
| gpl-3.0 |
firerszd/kbengine | kbe/res/scripts/common/Lib/binhex.py | 89 | 13708 | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import io
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE = 32768 # Minimal amount we pass the rle-coder
LINELEN = 64
RUNCHAR = b"\x90"
#
# This code is no longer byte-order dependent
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
with io.open(name, 'rb') as fp:
# Quick check for textfile
data = fp.read(512)
if 0 not in data:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return b''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = b''
self.hqxdata = b''
self.linelen = LINELEN - 1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen // 3) * 3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata) - self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last] + b'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + b':\n')
def close(self):
if self.data:
self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = b''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = b''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, name_finfo_dlen_rlen, ofp):
name, finfo, dlen, rlen = name_finfo_dlen_rlen
close_on_error = False
if isinstance(ofp, str):
ofname = ofp
ofp = io.open(ofname, 'wb')
close_on_error = True
try:
ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
except:
if close_on_error:
ofp.close()
raise
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error('Filename too long')
d = bytes([nl]) + name.encode("latin-1") + b'\0'
tp, cr = finfo.Type, finfo.Creator
if isinstance(tp, str):
tp = tp.encode("latin-1")
if isinstance(cr, str):
cr = cr.encode("latin-1")
d2 = tp + cr
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error('Writing data at the wrong time')
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
raise Error('Incorrect data size, diff=%r' % (self.dlen,))
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error('Writing resource data at the wrong time')
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error('Close at the wrong time')
if self.rlen != 0:
raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,))
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""binhex(infilename, outfilename): create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = io.open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while True:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while True:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = b''
wtd = totalwtd
#
# The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd + 2) // 3) * 4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while True:
try:
decdatacur, self.eof = binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error('Premature EOF on binhex file')
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error('Premature EOF on binhex file')
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = b''
self.post_buffer = b''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd - len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = b''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1:] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + b'\0':
mark = mark - 2
elif self.pre_buffer[-2:-1] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if isinstance(ifp, str):
ifp = io.open(ifp, 'rb')
#
# Find initial colon.
#
while True:
ch = ifp.read(1)
if not ch:
raise Error("No binhex data found")
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == b'\r':
continue
if ch == b':':
break
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error('CRC error, computed %x, read %x'
% (self.crc, filecrc))
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1 + 4 + 4 + 2 + 4 + 4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error('Read data at wrong time')
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = b''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error('close_data at wrong time')
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error('Read resource data at wrong time')
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""hexbin(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
ofp = io.open(out, 'wb')
# XXXX Do translation on non-mac systems
while True:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while True:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close()
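# Illustrative sketch (added for documentation): the round trip promised by
# the module docstring. The file names are hypothetical; on non-Mac platforms
# the resource-fork half of the conversion is simply empty (see ``openrsrc``).
def _example_roundtrip():  # documentation only, never executed on import
    binhex('report.txt', 'report.hqx')       # encode data fork (+ empty rsrc)
    hexbin('report.hqx', 'report_copy.txt')  # decode back to a plain file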
| lgpl-3.0 |
havard024/prego | crm/lib/python2.7/site-packages/django/template/debug.py | 110 | 3602 | from django.template.base import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
from django.utils.timezone import template_localtime
class DebugLexer(Lexer):
def __init__(self, template_string, origin):
super(DebugLexer, self).__init__(template_string, origin)
def tokenize(self):
"Return a list of tokens from a given template_string"
result, upto = [], 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
upto = start
result.append(self.create_token(self.template_string[start:end], (start, end), True))
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
return result
def create_token(self, token_string, source, in_tag):
token = super(DebugLexer, self).create_token(token_string, in_tag)
token.source = self.origin, source
return token
class DebugParser(Parser):
def __init__(self, lexer):
super(DebugParser, self).__init__(lexer)
self.command_stack = []
def enter_command(self, command, token):
self.command_stack.append( (command, token.source) )
def exit_command(self):
self.command_stack.pop()
def error(self, token, msg):
return self.source_error(token.source, msg)
def source_error(self, source, msg):
e = TemplateSyntaxError(msg)
e.django_template_source = source
return e
def create_nodelist(self):
return DebugNodeList()
def create_variable_node(self, contents):
return DebugVariableNode(contents)
def extend_nodelist(self, nodelist, node, token):
node.source = token.source
super(DebugParser, self).extend_nodelist(nodelist, node, token)
def unclosed_block_tag(self, parse_until):
command, source = self.command_stack.pop()
msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
raise self.source_error(source, msg)
def compile_function_error(self, token, e):
if not hasattr(e, 'django_template_source'):
e.django_template_source = token.source
class DebugNodeList(NodeList):
def render_node(self, node, context):
try:
return node.render(context)
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
class DebugVariableNode(VariableNode):
def render(self, context):
try:
output = self.filter_expression.resolve(context)
output = template_localtime(output, use_tz=context.use_tz)
output = localize(output, use_l10n=context.use_l10n)
output = force_text(output)
except UnicodeDecodeError:
return ''
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = self.source
raise
if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
return escape(output)
else:
return output
| mit |
preo/dnspython | dns/rdtypes/IN/A.py | 8 | 1906 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.ipv4
import dns.rdata
import dns.tokenizer
class A(dns.rdata.Rdata):
"""A record.
@ivar address: an IPv4 address
@type address: string (in the standard "dotted quad" format)"""
__slots__ = ['address']
def __init__(self, rdclass, rdtype, address):
super(A, self).__init__(rdclass, rdtype)
# check that it's OK
junk = dns.ipv4.inet_aton(address)
self.address = address
def to_text(self, origin=None, relativize=True, **kw):
return self.address
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_identifier()
tok.get_eol()
return cls(rdclass, rdtype, address)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(dns.ipv4.inet_aton(self.address))
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
address = dns.ipv4.inet_ntoa(wire[current : current + rdlen])
return cls(rdclass, rdtype, address)
from_wire = classmethod(from_wire)
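# Illustrative sketch (added for documentation): constructing an A rdata
# directly and inspecting its text and packed forms. The numeric constants
# 1, 1 correspond to rdata class IN and rdata type A; 192.0.2.1 is a
# documentation-range address.
def _example_a_record():  # documentation only
    rdata = A(1, 1, '192.0.2.1')
    text = rdata.to_text()                      # -> '192.0.2.1'
    packed = dns.ipv4.inet_aton(rdata.address)  # 4-byte wire representation
    return text, packed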
| isc |
bowang/tensorflow | tensorflow/python/lib/io/file_io.py | 32 | 16702 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""File IO methods that wrap the C++ FileSystem API.
The C++ FileSystem API is SWIG wrapped in file_io.i. These functions call those
to accomplish basic File IO operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import uuid
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
class FileIO(object):
"""FileIO class that exposes methods to read / write to / from files.
The constructor takes the following arguments:
name: name of the file
mode: one of 'r', 'w', 'a', 'r+', 'w+', 'a+'. Append 'b' for bytes mode.
Can be used as an iterator to iterate over lines in the file.
The default buffer size used for the BufferedInputStream used for reading
the file line by line is 1024 * 512 bytes.
"""
def __init__(self, name, mode):
self.__name = name
self.__mode = mode
self._read_buf = None
self._writable_file = None
self._binary_mode = "b" in mode
mode = mode.replace("b", "")
if mode not in ("r", "w", "a", "r+", "w+", "a+"):
raise errors.InvalidArgumentError(
None, None, "mode is not 'r' or 'w' or 'a' or 'r+' or 'w+' or 'a+'")
self._read_check_passed = mode in ("r", "r+", "a+", "w+")
self._write_check_passed = mode in ("a", "w", "r+", "a+", "w+")
@property
def name(self):
"""Returns the file name."""
return self.__name
@property
def mode(self):
"""Returns the mode in which the file was opened."""
return self.__mode
def _preread_check(self):
if not self._read_buf:
if not self._read_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for reading")
with errors.raise_exception_on_not_ok_status() as status:
self._read_buf = pywrap_tensorflow.CreateBufferedInputStream(
compat.as_bytes(self.__name), 1024 * 512, status)
def _prewrite_check(self):
if not self._writable_file:
if not self._write_check_passed:
raise errors.PermissionDeniedError(None, None,
"File isn't open for writing")
with errors.raise_exception_on_not_ok_status() as status:
self._writable_file = pywrap_tensorflow.CreateWritableFile(
compat.as_bytes(self.__name), compat.as_bytes(self.__mode), status)
def _prepare_value(self, val):
if self._binary_mode:
return compat.as_bytes(val)
else:
return compat.as_str_any(val)
def size(self):
"""Returns the size of the file."""
return stat(self.__name).length
def write(self, file_content):
"""Writes file_content to the file. Appends to the end of the file."""
self._prewrite_check()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.AppendToFile(
compat.as_bytes(file_content), self._writable_file, status)
def read(self, n=-1):
"""Returns the contents of a file as a string.
Starts reading from current position in file.
Args:
n: Read 'n' bytes if n != -1. If n = -1, reads to end of file.
Returns:
'n' bytes of the file (or whole file) in bytes mode or 'n' bytes of the
string if in string (regular) mode.
"""
self._preread_check()
with errors.raise_exception_on_not_ok_status() as status:
if n == -1:
length = self.size() - self.tell()
else:
length = n
return self._prepare_value(
pywrap_tensorflow.ReadFromStream(self._read_buf, length, status))
@deprecation.deprecated_args(
None,
"position is deprecated in favor of the offset argument.",
"position")
def seek(self, offset=None, whence=0, position=None):
# TODO(jhseu): Delete later. Used to omit `position` from docs.
# pylint: disable=g-doc-args
"""Seeks to the offset in the file.
Args:
offset: The byte count relative to the whence argument.
whence: Valid values for whence are:
0: start of the file (default)
1: relative to the current position of the file
2: relative to the end of file. offset is usually negative.
"""
# pylint: enable=g-doc-args
self._preread_check()
# We needed to make offset a keyword argument for backwards-compatibility.
# This check exists so that we can convert back to having offset be a
# positional argument.
# TODO(jhseu): Make `offset` a positional argument after `position` is
# deleted.
if offset is None and position is None:
raise TypeError("seek(): offset argument required")
if offset is not None and position is not None:
raise TypeError("seek(): offset and position may not be set "
"simultaneously.")
if position is not None:
offset = position
with errors.raise_exception_on_not_ok_status() as status:
if whence == 0:
pass
elif whence == 1:
offset += self.tell()
elif whence == 2:
offset += self.size()
else:
raise errors.InvalidArgumentError(
None, None,
"Invalid whence argument: {}. Valid values are 0, 1, or 2."
.format(whence))
ret_status = self._read_buf.Seek(offset)
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def readline(self):
r"""Reads the next line from the file. Leaves the '\n' at the end."""
self._preread_check()
return self._prepare_value(self._read_buf.ReadLineAsString())
def readlines(self):
"""Returns all lines from the file in a list."""
self._preread_check()
lines = []
while True:
s = self.readline()
if not s:
break
lines.append(s)
return lines
def tell(self):
"""Returns the current position in the file."""
self._preread_check()
return self._read_buf.Tell()
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def __iter__(self):
return self
def next(self):
retval = self.readline()
if not retval:
raise StopIteration()
return retval
def __next__(self):
return self.next()
def flush(self):
"""Flushes the Writable file.
This only ensures that the data has made its way out of the process without
any guarantees on whether it's written to disk. This means that the
data would survive an application crash but not necessarily an OS crash.
"""
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Flush()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
def close(self):
"""Closes FileIO. Should be called for the WritableFile to be flushed."""
self._read_buf = None
if self._writable_file:
with errors.raise_exception_on_not_ok_status() as status:
ret_status = self._writable_file.Close()
pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
self._writable_file = None
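# Illustrative sketch (added for documentation): typical FileIO usage through
# the context-manager interface defined above. The path below is a
# hypothetical example; any registered filesystem scheme (local, GCS, HDFS,
# ...) can be used in its place.
def _example_file_io_usage():  # documentation only, not exercised at import
  with FileIO("/tmp/example.txt", mode="w") as f:
    f.write("hello\n")
  with FileIO("/tmp/example.txt", mode="r") as f:
    first_line = f.readline()  # keeps the trailing '\n'
    rest = f.read()            # reads to the end of the file
  return first_line, rest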
def file_exists(filename):
"""Determines whether a path exists or not.
Args:
filename: string, a path
Returns:
True if the path exists, whether its a file or a directory.
False if the path does not exist and there are no filesystem errors.
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API.
"""
try:
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.FileExists(compat.as_bytes(filename), status)
except errors.NotFoundError:
return False
return True
def delete_file(filename):
"""Deletes the file located at 'filename'.
Args:
filename: string, a filename
Raises:
errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
NotFoundError if the file does not exist.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteFile(compat.as_bytes(filename), status)
def read_file_to_string(filename, binary_mode=False):
"""Reads the entire contents of a file to a string.
Args:
filename: string, path to a file
binary_mode: whether to open the file in binary mode or not. This changes
the type of the object returned.
Returns:
contents of the file as a string or bytes.
Raises:
errors.OpError: Raises variety of errors that are subtypes e.g.
NotFoundError etc.
"""
if binary_mode:
f = FileIO(filename, mode="rb")
else:
f = FileIO(filename, mode="r")
return f.read()
def write_string_to_file(filename, file_content):
"""Writes a string to a given file.
Args:
filename: string, path to a file
file_content: string, contents that need to be written to the file
Raises:
errors.OpError: If there are errors during the operation.
"""
with FileIO(filename, mode="w") as f:
f.write(file_content)
def get_matching_files(filename):
"""Returns a list of files that match the given pattern(s).
Args:
filename: string or iterable of strings. The glob pattern(s).
Returns:
A list of strings containing filenames that match the given pattern(s).
Raises:
errors.OpError: If there are filesystem / directory listing errors.
"""
with errors.raise_exception_on_not_ok_status() as status:
if isinstance(filename, six.string_types):
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for matching_filename in pywrap_tensorflow.GetMatchingFiles(
compat.as_bytes(filename), status)
]
else:
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for single_filename in filename
for matching_filename in pywrap_tensorflow.GetMatchingFiles(
compat.as_bytes(single_filename), status)
]
def create_dir(dirname):
"""Creates a directory with the name 'dirname'.
Args:
dirname: string, name of the directory to be created
Notes:
The parent directories need to exist. Use recursive_create_dir instead if
there is the possibility that the parent dirs don't exist.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.CreateDir(compat.as_bytes(dirname), status)
def recursive_create_dir(dirname):
"""Creates a directory and all parent/intermediate directories.
It succeeds if dirname already exists and is writable.
Args:
dirname: string, name of the directory to be created
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.RecursivelyCreateDir(compat.as_bytes(dirname), status)
def copy(oldpath, newpath, overwrite=False):
"""Copies data from oldpath to newpath.
Args:
oldpath: string, name of the file whose contents need to be copied
newpath: string, name of the file to which to copy to
overwrite: boolean, if false its an error for newpath to be occupied by an
existing file.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.CopyFile(
compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
def rename(oldname, newname, overwrite=False):
"""Rename or move a file / directory.
Args:
oldname: string, pathname for a file
newname: string, pathname to which the file needs to be moved
overwrite: boolean, if false it's an error for `newname` to be occupied by
an existing file.
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.RenameFile(
compat.as_bytes(oldname), compat.as_bytes(newname), overwrite, status)
def atomic_write_string_to_file(filename, contents, overwrite=True):
"""Writes to `filename` atomically.
This means that when `filename` appears in the filesystem, it will contain
all of `contents`. With write_string_to_file, it is possible for the file
to appear in the filesystem with `contents` only partially written.
Accomplished by writing to a temp file and then renaming it.
Args:
filename: string, pathname for a file
contents: string, contents that need to be written to the file
overwrite: boolean, if false it's an error for `filename` to be occupied by
an existing file.
"""
temp_pathname = filename + ".tmp" + uuid.uuid4().hex
write_string_to_file(temp_pathname, contents)
try:
rename(temp_pathname, filename, overwrite)
except errors.OpError:
delete_file(temp_pathname)
raise
def delete_recursively(dirname):
"""Deletes everything under dirname recursively.
Args:
dirname: string, a path to a directory
Raises:
errors.OpError: If the operation fails.
"""
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteRecursively(compat.as_bytes(dirname), status)
def is_directory(dirname):
"""Returns whether the path is a directory or not.
Args:
dirname: string, path to a potential directory
Returns:
True, if the path is a directory; False otherwise
"""
status = c_api_util.ScopedTFStatus()
return pywrap_tensorflow.IsDirectory(compat.as_bytes(dirname), status)
def list_directory(dirname):
"""Returns a list of entries contained within a directory.
The list is in arbitrary order. It does not contain the special entries "."
and "..".
Args:
dirname: string, path to a directory
Returns:
[filename1, filename2, ... filenameN] as strings
Raises:
errors.NotFoundError if directory doesn't exist
"""
if not is_directory(dirname):
raise errors.NotFoundError(None, None, "Could not find directory")
with errors.raise_exception_on_not_ok_status() as status:
# Convert each element to string, since the return values of the
# vector of string should be interpreted as strings, not bytes.
return [
compat.as_str_any(filename)
for filename in pywrap_tensorflow.GetChildren(
compat.as_bytes(dirname), status)
]
def walk(top, in_order=True):
"""Recursive directory tree generator for directories.
Args:
top: string, a Directory name
in_order: bool, Traverse in order if True, post order if False.
Errors that happen while listing directories are ignored.
Yields:
Each yield is a 3-tuple: the pathname of a directory, followed by lists of
all its subdirectories and leaf files.
(dirname, [subdirname, subdirname, ...], [filename, filename, ...])
as strings
"""
top = compat.as_str_any(top)
try:
listing = list_directory(top)
except errors.NotFoundError:
return
files = []
subdirs = []
for item in listing:
full_path = os.path.join(top, item)
if is_directory(full_path):
subdirs.append(item)
else:
files.append(item)
here = (top, subdirs, files)
if in_order:
yield here
for subdir in subdirs:
for subitem in walk(os.path.join(top, subdir), in_order):
yield subitem
if not in_order:
yield here
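# Illustrative sketch (added for documentation): consuming walk(). The
# directory name is a hypothetical example; each yielded item is the
# (dirname, subdirnames, filenames) triple described in the docstring above.
def _example_walk_usage():  # documentation only
  all_files = []
  for dirname, subdirs, files in walk("/tmp/example_dir"):
    for filename in files:
      all_files.append(os.path.join(dirname, filename))
  return all_files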
def stat(filename):
"""Returns file statistics for a given path.
Args:
filename: string, path to a file
Returns:
FileStatistics struct that contains information about the path
Raises:
errors.OpError: If the operation fails.
"""
file_statistics = pywrap_tensorflow.FileStatistics()
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.Stat(compat.as_bytes(filename), file_statistics, status)
return file_statistics
| apache-2.0 |
petrutlucian94/cinder | cinder/tests/unit/api/contrib/test_extended_snapshot_attributes.py | 21 | 4243 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
from oslo_serialization import jsonutils
import webob
from cinder.api.contrib import extended_snapshot_attributes
from cinder import context
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
return {'id': UUID1,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake',
'progress': '0%',
'expected_attrs': ['metadata']}
def fake_snapshot_get(self, context, snapshot_id):
param = _get_default_snapshot_param()
return param
def fake_snapshot_get_all(self, context, search_opts=None):
param = _get_default_snapshot_param()
return [param]
class ExtendedSnapshotAttributesTest(test.TestCase):
content_type = 'application/json'
prefix = 'os-extended-snapshot-attributes:'
def setUp(self):
super(ExtendedSnapshotAttributesTest, self).setUp()
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app())
return res
def _get_snapshot(self, body):
return jsonutils.loads(body).get('snapshot')
def _get_snapshots(self, body):
return jsonutils.loads(body).get('snapshots')
def assertSnapshotAttributes(self, snapshot, project_id, progress):
self.assertEqual(project_id,
snapshot.get('%sproject_id' % self.prefix))
self.assertEqual(progress, snapshot.get('%sprogress' % self.prefix))
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_show(self, snapshot_get_by_id, volume_get_by_id,
snapshot_metadata_get):
ctx = context.RequestContext('fake', 'fake', auth_token=True)
snapshot = _get_default_snapshot_param()
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
fake_volume_obj = fake_volume.fake_volume_obj(ctx)
snapshot_get_by_id.return_value = snapshot_obj
volume_get_by_id.return_value = fake_volume_obj
url = '/v2/fake/snapshots/%s' % UUID1
res = self._make_request(url)
self.assertEqual(200, res.status_int)
self.assertSnapshotAttributes(self._get_snapshot(res.body),
project_id='fake',
progress='0%')
def test_detail(self):
url = '/v2/fake/snapshots/detail'
res = self._make_request(url)
self.assertEqual(200, res.status_int)
for snapshot in self._get_snapshots(res.body):
self.assertSnapshotAttributes(snapshot,
project_id='fake',
progress='0%')
class ExtendedSnapshotAttributesXmlTest(ExtendedSnapshotAttributesTest):
content_type = 'application/xml'
ext = extended_snapshot_attributes
prefix = '{%s}' % ext.Extended_snapshot_attributes.namespace
def _get_snapshot(self, body):
return etree.XML(body)
def _get_snapshots(self, body):
return etree.XML(body).getchildren()
| apache-2.0 |
sahiljain/catapult | third_party/google-endpoints/future/builtins/newnext.py | 70 | 2014 | '''
This module provides a newnext() function in Python 2 that mimics the
behaviour of ``next()`` in Python 3, falling back to Python 2's behaviour for
compatibility if this fails.
``newnext(iterator)`` calls the iterator's ``__next__()`` method if it exists. If this
doesn't exist, it falls back to calling a ``next()`` method.
For example:
>>> class Odds(object):
... def __init__(self, start=1):
... self.value = start - 2
... def __next__(self): # note the Py3 interface
... self.value += 2
... return self.value
... def __iter__(self):
... return self
...
>>> iterator = Odds()
>>> next(iterator)
1
>>> next(iterator)
3
If you are defining your own custom iterator class as above, it is preferable
to explicitly decorate the class with the @implements_iterator decorator from
``future.utils`` as follows:
>>> @implements_iterator
... class Odds(object):
... # etc
... pass
This next() function is primarily for consuming iterators defined in Python 3
code elsewhere that we would like to run on Python 2 or 3.
'''
_builtin_next = next
_SENTINEL = object()
def newnext(iterator, default=_SENTINEL):
"""
next(iterator[, default])
Return the next item from the iterator. If default is given and the iterator
is exhausted, it is returned instead of raising StopIteration.
"""
# args = []
# if default is not _SENTINEL:
# args.append(default)
try:
try:
return iterator.__next__()
except AttributeError:
try:
return iterator.next()
except AttributeError:
raise TypeError("'{0}' object is not an iterator".format(
iterator.__class__.__name__))
except StopIteration as e:
if default is _SENTINEL:
raise e
else:
return default
__all__ = ['newnext']
| bsd-3-clause |
jbbskinny/sympy | sympy/solvers/tests/test_numeric.py | 71 | 2053 | from sympy import Eq, Matrix, pi, sin, sqrt, Symbol, Integral, Piecewise, symbols
from mpmath import mnorm, mpf
from sympy.solvers import nsolve
from sympy.utilities.lambdify import lambdify
from sympy.utilities.pytest import raises, XFAIL
def test_nsolve():
# onedimensional
x = Symbol('x')
assert nsolve(sin(x), 2) - pi.evalf() < 1e-15
assert nsolve(Eq(2*x, 2), x, -10) == nsolve(2*x - 2, -10)
# Testing checks on number of inputs
raises(TypeError, lambda: nsolve(Eq(2*x, 2)))
raises(TypeError, lambda: nsolve(Eq(2*x, 2), x, 1, 2))
# issue 4829
assert nsolve(x**2/(1 - x)/(1 - 2*x)**2 - 100, x, 0) # doesn't fail
# multidimensional
x1 = Symbol('x1')
x2 = Symbol('x2')
f1 = 3 * x1**2 - 2 * x2**2 - 1
f2 = x1**2 - 2 * x1 + x2**2 + 2 * x2 - 8
f = Matrix((f1, f2)).T
F = lambdify((x1, x2), f.T, modules='mpmath')
for x0 in [(-1, 1), (1, -2), (4, 4), (-4, -4)]:
x = nsolve(f, (x1, x2), x0, tol=1.e-8)
assert mnorm(F(*x), 1) <= 1.e-10
# The Chinese mathematician Zhu Shijie was the very first to solve this
# nonlinear system 700 years ago (z was added to make it 3-dimensional)
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
f1 = -x + 2*y
f2 = (x**2 + x*(y**2 - 2) - 4*y) / (x + 4)
f3 = sqrt(x**2 + y**2)*z
f = Matrix((f1, f2, f3)).T
F = lambdify((x, y, z), f.T, modules='mpmath')
def getroot(x0):
root = nsolve(f, (x, y, z), x0)
assert mnorm(F(*root), 1) <= 1.e-8
return root
assert list(map(round, getroot((1, 1, 1)))) == [2.0, 1.0, 0.0]
assert nsolve([Eq(
f1), Eq(f2), Eq(f3)], [x, y, z], (1, 1, 1)) # just see that it works
a = Symbol('a')
assert nsolve(1/(0.001 + a)**3 - 6/(0.9 - a)**3, a, 0.3).ae(
mpf('0.31883011387318591'))
def test_issue_6408():
x = Symbol('x')
assert nsolve(Piecewise((x, x < 1), (x**2, True)), x, 2) == 0.0
@XFAIL
def test_issue_6408_fail():
x, y = symbols('x y')
assert nsolve(Integral(x*y, (x, 0, 5)), y, 2) == 0.0
| bsd-3-clause |
Jgarcia-IAS/SAT | openerp/addons/hr_payroll/wizard/__init__.py | 442 | 1159 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
import hr_payroll_contribution_register_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wang1352083/pythontool | python-2.7.12-lib/test/test_pyexpat.py | 3 | 26380 | # XXX TypeErrors on calling handlers, or on bad return values from a
# handler, are obscure and unhelpful.
import StringIO, sys
import unittest
from xml.parsers import expat
from test import test_support
from test.test_support import sortdict, run_unittest
class SetAttributeTest(unittest.TestCase):
def setUp(self):
self.parser = expat.ParserCreate(namespace_separator='!')
def test_buffer_text(self):
self.assertIs(self.parser.buffer_text, False)
for x in 0, 1, 2, 0:
self.parser.buffer_text = x
self.assertIs(self.parser.buffer_text, bool(x))
def test_namespace_prefixes(self):
self.assertIs(self.parser.namespace_prefixes, False)
for x in 0, 1, 2, 0:
self.parser.namespace_prefixes = x
self.assertIs(self.parser.namespace_prefixes, bool(x))
def test_returns_unicode(self):
self.assertIs(self.parser.returns_unicode, test_support.have_unicode)
for x in 0, 1, 2, 0:
self.parser.returns_unicode = x
self.assertIs(self.parser.returns_unicode, bool(x))
def test_ordered_attributes(self):
self.assertIs(self.parser.ordered_attributes, False)
for x in 0, 1, 2, 0:
self.parser.ordered_attributes = x
self.assertIs(self.parser.ordered_attributes, bool(x))
def test_specified_attributes(self):
self.assertIs(self.parser.specified_attributes, False)
for x in 0, 1, 2, 0:
self.parser.specified_attributes = x
self.assertIs(self.parser.specified_attributes, bool(x))
def test_invalid_attributes(self):
with self.assertRaises(AttributeError):
self.parser.foo = 1
with self.assertRaises(AttributeError):
self.parser.foo
data = '''\
<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<?xml-stylesheet href="stylesheet.css"?>
<!-- comment data -->
<!DOCTYPE quotations SYSTEM "quotations.dtd" [
<!ELEMENT root ANY>
<!NOTATION notation SYSTEM "notation.jpeg">
<!ENTITY acirc "&#226;">
<!ENTITY external_entity SYSTEM "entity.file">
<!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
%unparsed_entity;
]>
<root attr1="value1" attr2="value2ὀ">
<myns:subelement xmlns:myns="http://www.python.org/namespace">
Contents of subelements
</myns:subelement>
<sub2><![CDATA[contents of CDATA section]]></sub2>
&external_entity;
</root>
'''
# Produce UTF-8 output
class ParseTest(unittest.TestCase):
class Outputter:
def __init__(self):
self.out = []
def StartElementHandler(self, name, attrs):
self.out.append('Start element: ' + repr(name) + ' ' +
sortdict(attrs))
def EndElementHandler(self, name):
self.out.append('End element: ' + repr(name))
def CharacterDataHandler(self, data):
data = data.strip()
if data:
self.out.append('Character data: ' + repr(data))
def ProcessingInstructionHandler(self, target, data):
self.out.append('PI: ' + repr(target) + ' ' + repr(data))
def StartNamespaceDeclHandler(self, prefix, uri):
self.out.append('NS decl: ' + repr(prefix) + ' ' + repr(uri))
def EndNamespaceDeclHandler(self, prefix):
self.out.append('End of NS decl: ' + repr(prefix))
def StartCdataSectionHandler(self):
self.out.append('Start of CDATA section')
def EndCdataSectionHandler(self):
self.out.append('End of CDATA section')
def CommentHandler(self, text):
self.out.append('Comment: ' + repr(text))
def NotationDeclHandler(self, *args):
name, base, sysid, pubid = args
self.out.append('Notation declared: %s' %(args,))
def UnparsedEntityDeclHandler(self, *args):
entityName, base, systemId, publicId, notationName = args
self.out.append('Unparsed entity decl: %s' %(args,))
def NotStandaloneHandler(self, userData):
self.out.append('Not standalone')
return 1
def ExternalEntityRefHandler(self, *args):
context, base, sysId, pubId = args
self.out.append('External entity ref: %s' %(args[1:],))
return 1
def DefaultHandler(self, userData):
pass
def DefaultHandlerExpand(self, userData):
pass
handler_names = [
'StartElementHandler', 'EndElementHandler',
'CharacterDataHandler', 'ProcessingInstructionHandler',
'UnparsedEntityDeclHandler', 'NotationDeclHandler',
'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler',
'CommentHandler', 'StartCdataSectionHandler',
'EndCdataSectionHandler',
'DefaultHandler', 'DefaultHandlerExpand',
#'NotStandaloneHandler',
'ExternalEntityRefHandler'
]
def test_utf8(self):
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
for name in self.handler_names:
setattr(parser, name, getattr(out, name))
parser.returns_unicode = 0
parser.Parse(data, 1)
# Verify output
op = out.out
self.assertEqual(op[0], 'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'')
self.assertEqual(op[1], "Comment: ' comment data '")
self.assertEqual(op[2], "Notation declared: ('notation', None, 'notation.jpeg', None)")
self.assertEqual(op[3], "Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')")
self.assertEqual(op[4], "Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}")
self.assertEqual(op[5], "NS decl: 'myns' 'http://www.python.org/namespace'")
self.assertEqual(op[6], "Start element: 'http://www.python.org/namespace!subelement' {}")
self.assertEqual(op[7], "Character data: 'Contents of subelements'")
self.assertEqual(op[8], "End element: 'http://www.python.org/namespace!subelement'")
self.assertEqual(op[9], "End of NS decl: 'myns'")
self.assertEqual(op[10], "Start element: 'sub2' {}")
self.assertEqual(op[11], 'Start of CDATA section')
self.assertEqual(op[12], "Character data: 'contents of CDATA section'")
self.assertEqual(op[13], 'End of CDATA section')
self.assertEqual(op[14], "End element: 'sub2'")
self.assertEqual(op[15], "External entity ref: (None, 'entity.file', None)")
self.assertEqual(op[16], "End element: 'root'")
def test_unicode(self):
# Try the parse again, this time producing Unicode output
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
parser.returns_unicode = 1
for name in self.handler_names:
setattr(parser, name, getattr(out, name))
parser.Parse(data, 1)
op = out.out
self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'')
self.assertEqual(op[1], "Comment: u' comment data '")
self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)")
self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')")
self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}")
self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'")
self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}")
self.assertEqual(op[7], "Character data: u'Contents of subelements'")
self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'")
self.assertEqual(op[9], "End of NS decl: u'myns'")
self.assertEqual(op[10], "Start element: u'sub2' {}")
self.assertEqual(op[11], 'Start of CDATA section')
self.assertEqual(op[12], "Character data: u'contents of CDATA section'")
self.assertEqual(op[13], 'End of CDATA section')
self.assertEqual(op[14], "End element: u'sub2'")
self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)")
self.assertEqual(op[16], "End element: u'root'")
def test_parse_file(self):
# Try parsing a file
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
parser.returns_unicode = 1
for name in self.handler_names:
setattr(parser, name, getattr(out, name))
file = StringIO.StringIO(data)
parser.ParseFile(file)
op = out.out
self.assertEqual(op[0], 'PI: u\'xml-stylesheet\' u\'href="stylesheet.css"\'')
self.assertEqual(op[1], "Comment: u' comment data '")
self.assertEqual(op[2], "Notation declared: (u'notation', None, u'notation.jpeg', None)")
self.assertEqual(op[3], "Unparsed entity decl: (u'unparsed_entity', None, u'entity.file', None, u'notation')")
self.assertEqual(op[4], "Start element: u'root' {u'attr1': u'value1', u'attr2': u'value2\\u1f40'}")
self.assertEqual(op[5], "NS decl: u'myns' u'http://www.python.org/namespace'")
self.assertEqual(op[6], "Start element: u'http://www.python.org/namespace!subelement' {}")
self.assertEqual(op[7], "Character data: u'Contents of subelements'")
self.assertEqual(op[8], "End element: u'http://www.python.org/namespace!subelement'")
self.assertEqual(op[9], "End of NS decl: u'myns'")
self.assertEqual(op[10], "Start element: u'sub2' {}")
self.assertEqual(op[11], 'Start of CDATA section')
self.assertEqual(op[12], "Character data: u'contents of CDATA section'")
self.assertEqual(op[13], 'End of CDATA section')
self.assertEqual(op[14], "End element: u'sub2'")
self.assertEqual(op[15], "External entity ref: (None, u'entity.file', None)")
self.assertEqual(op[16], "End element: u'root'")
# Issue 4877: expat.ParseFile causes segfault on a closed file.
fp = open(test_support.TESTFN, 'wb')
try:
fp.close()
parser = expat.ParserCreate()
with self.assertRaises(ValueError):
parser.ParseFile(fp)
finally:
test_support.unlink(test_support.TESTFN)
def test_parse_again(self):
parser = expat.ParserCreate()
file = StringIO.StringIO(data)
parser.ParseFile(file)
# Issue 6676: ensure a meaningful exception is raised when attempting
# to parse more than one XML document per xmlparser instance,
# a limitation of the Expat library.
with self.assertRaises(expat.error) as cm:
parser.ParseFile(file)
self.assertEqual(expat.ErrorString(cm.exception.code),
expat.errors.XML_ERROR_FINISHED)
class NamespaceSeparatorTest(unittest.TestCase):
def test_legal(self):
# Tests that make sure we get errors when the namespace_separator value
# is illegal, and that we don't for good values:
expat.ParserCreate()
expat.ParserCreate(namespace_separator=None)
expat.ParserCreate(namespace_separator=' ')
def test_illegal(self):
try:
expat.ParserCreate(namespace_separator=42)
self.fail()
except TypeError, e:
self.assertEqual(str(e),
'ParserCreate() argument 2 must be string or None, not int')
try:
expat.ParserCreate(namespace_separator='too long')
self.fail()
except ValueError, e:
self.assertEqual(str(e),
'namespace_separator must be at most one character, omitted, or None')
def test_zero_length(self):
# ParserCreate() needs to accept a namespace_separator of zero length
# to satisfy the requirements of RDF applications that are required
# to simply glue together the namespace URI and the localname. Though
# considered a wart of the RDF specifications, it needs to be supported.
#
# See XML-SIG mailing list thread starting with
# http://mail.python.org/pipermail/xml-sig/2001-April/005202.html
#
expat.ParserCreate(namespace_separator='') # too short
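        # Illustrative sketch (not part of the original test): with an empty
        # separator expat simply concatenates the namespace URI and the local
        # name, e.g.
        #
        #     seen = []
        #     p = expat.ParserCreate(namespace_separator='')
        #     p.StartElementHandler = lambda name, attrs: seen.append(name)
        #     p.Parse("<e xmlns='http://ns/'/>", 1)
        #     # seen == ['http://ns/e']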
class InterningTest(unittest.TestCase):
def test(self):
# Test the interning machinery.
p = expat.ParserCreate()
L = []
def collector(name, *args):
L.append(name)
p.StartElementHandler = collector
p.EndElementHandler = collector
p.Parse("<e> <e/> <e></e> </e>", 1)
tag = L[0]
self.assertEqual(len(L), 6)
for entry in L:
# L should have the same string repeated over and over.
self.assertTrue(tag is entry)
class BufferTextTest(unittest.TestCase):
def setUp(self):
self.stuff = []
self.parser = expat.ParserCreate()
self.parser.buffer_text = 1
self.parser.CharacterDataHandler = self.CharacterDataHandler
def check(self, expected, label):
self.assertEqual(self.stuff, expected,
"%s\nstuff = %r\nexpected = %r"
% (label, self.stuff, map(unicode, expected)))
def CharacterDataHandler(self, text):
self.stuff.append(text)
def StartElementHandler(self, name, attrs):
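        # The test documents below tag elements with a buffer-text="yes|no"
        # attribute; toggling parser.buffer_text mid-parse lets the tests
        # exercise both buffered and unbuffered character-data chunking.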
self.stuff.append("<%s>" % name)
bt = attrs.get("buffer-text")
if bt == "yes":
self.parser.buffer_text = 1
elif bt == "no":
self.parser.buffer_text = 0
def EndElementHandler(self, name):
self.stuff.append("</%s>" % name)
def CommentHandler(self, data):
self.stuff.append("<!--%s-->" % data)
def setHandlers(self, handlers=[]):
for name in handlers:
setattr(self.parser, name, getattr(self, name))
def test_default_to_disabled(self):
parser = expat.ParserCreate()
self.assertFalse(parser.buffer_text)
def test_buffering_enabled(self):
# Make sure buffering is turned on
self.assertTrue(self.parser.buffer_text)
self.parser.Parse("<a>1<b/>2<c/>3</a>", 1)
self.assertEqual(self.stuff, ['123'],
"buffered text not properly collapsed")
def test1(self):
# XXX This test exposes more detail of Expat's text chunking than we
# XXX like, but it tests what we need to concisely.
self.setHandlers(["StartElementHandler"])
self.parser.Parse("<a>1<b buffer-text='no'/>2\n3<c buffer-text='yes'/>4\n5</a>", 1)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "2", "\n", "3", "<c>", "4\n5"],
"buffering control not reacting as expected")
def test2(self):
self.parser.Parse("<a>1<b/><2><c/> \n 3</a>", 1)
self.assertEqual(self.stuff, ["1<2> \n 3"],
"buffered text not properly collapsed")
def test3(self):
self.setHandlers(["StartElementHandler"])
self.parser.Parse("<a>1<b/>2<c/>3</a>", 1)
self.assertEqual(self.stuff, ["<a>", "1", "<b>", "2", "<c>", "3"],
"buffered text not properly split")
def test4(self):
self.setHandlers(["StartElementHandler", "EndElementHandler"])
self.parser.CharacterDataHandler = None
self.parser.Parse("<a>1<b/>2<c/>3</a>", 1)
self.assertEqual(self.stuff,
["<a>", "<b>", "</b>", "<c>", "</c>", "</a>"])
def test5(self):
self.setHandlers(["StartElementHandler", "EndElementHandler"])
self.parser.Parse("<a>1<b></b>2<c/>3</a>", 1)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3", "</a>"])
def test6(self):
self.setHandlers(["CommentHandler", "EndElementHandler",
"StartElementHandler"])
self.parser.Parse("<a>1<b/>2<c></c>345</a> ", 1)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "345", "</a>"],
"buffered text not properly split")
def test7(self):
self.setHandlers(["CommentHandler", "EndElementHandler",
"StartElementHandler"])
self.parser.Parse("<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", 1)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3",
"<!--abc-->", "4", "<!--def-->", "5", "</a>"],
"buffered text not properly split")
# Test handling of exception from callback:
class HandlerExceptionTest(unittest.TestCase):
def StartElementHandler(self, name, attrs):
raise RuntimeError(name)
def test(self):
parser = expat.ParserCreate()
parser.StartElementHandler = self.StartElementHandler
try:
parser.Parse("<a><b><c/></b></a>", 1)
self.fail()
except RuntimeError, e:
self.assertEqual(e.args[0], 'a',
"Expected RuntimeError for element 'a', but" + \
" found %r" % e.args[0])
# Test Current* members:
class PositionTest(unittest.TestCase):
def StartElementHandler(self, name, attrs):
self.check_pos('s')
def EndElementHandler(self, name):
self.check_pos('e')
def check_pos(self, event):
pos = (event,
self.parser.CurrentByteIndex,
self.parser.CurrentLineNumber,
self.parser.CurrentColumnNumber)
self.assertTrue(self.upto < len(self.expected_list),
'too many parser events')
expected = self.expected_list[self.upto]
self.assertEqual(pos, expected,
'Expected position %s, got position %s' %(pos, expected))
self.upto += 1
def test(self):
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self.StartElementHandler
self.parser.EndElementHandler = self.EndElementHandler
self.upto = 0
self.expected_list = [('s', 0, 1, 0), ('s', 5, 2, 1), ('s', 11, 3, 2),
('e', 15, 3, 6), ('e', 17, 4, 1), ('e', 22, 5, 0)]
xml = '<a>\n <b>\n <c/>\n </b>\n</a>'
self.parser.Parse(xml, 1)
class sf1296433Test(unittest.TestCase):
def test_parse_only_xml_data(self):
# http://python.org/sf/1296433
#
xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * 1025)
# this one doesn't crash
#xml = "<?xml version='1.0'?><s>%s</s>" % ('a' * 10000)
class SpecificException(Exception):
pass
def handler(text):
raise SpecificException
parser = expat.ParserCreate()
parser.CharacterDataHandler = handler
self.assertRaises(Exception, parser.Parse, xml)
class ChardataBufferTest(unittest.TestCase):
"""
test setting of chardata buffer size
"""
def test_1025_bytes(self):
self.assertEqual(self.small_buffer_test(1025), 2)
def test_1000_bytes(self):
self.assertEqual(self.small_buffer_test(1000), 1)
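    # (small_buffer_test below parses with a 1024-byte text buffer, so 1025
    # characters of data overflow it and the handler fires twice, while 1000
    # characters fit in a single flush.)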
def test_wrong_size(self):
parser = expat.ParserCreate()
parser.buffer_text = 1
with self.assertRaises(ValueError):
parser.buffer_size = -1
with self.assertRaises(ValueError):
parser.buffer_size = 0
with self.assertRaises(TypeError):
parser.buffer_size = 512.0
with self.assertRaises(TypeError):
parser.buffer_size = sys.maxint+1
def test_unchanged_size(self):
xml1 = ("<?xml version='1.0' encoding='iso8859'?><s>%s" % ('a' * 512))
xml2 = 'a'*512 + '</s>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_size = 512
parser.buffer_text = 1
# Feed 512 bytes of character data: the handler should be called
# once.
self.n = 0
parser.Parse(xml1)
self.assertEqual(self.n, 1)
# Reassign to buffer_size, but assign the same size.
parser.buffer_size = parser.buffer_size
self.assertEqual(self.n, 1)
# Try parsing rest of the document
parser.Parse(xml2)
self.assertEqual(self.n, 2)
def test_disabling_buffer(self):
xml1 = "<?xml version='1.0' encoding='iso8859'?><a>%s" % ('a' * 512)
xml2 = ('b' * 1024)
xml3 = "%s</a>" % ('c' * 1024)
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 1024
self.assertEqual(parser.buffer_size, 1024)
# Parse one chunk of XML
self.n = 0
parser.Parse(xml1, 0)
self.assertEqual(parser.buffer_size, 1024)
self.assertEqual(self.n, 1)
# Turn off buffering and parse the next chunk.
parser.buffer_text = 0
self.assertFalse(parser.buffer_text)
self.assertEqual(parser.buffer_size, 1024)
for i in range(10):
parser.Parse(xml2, 0)
self.assertEqual(self.n, 11)
parser.buffer_text = 1
self.assertTrue(parser.buffer_text)
self.assertEqual(parser.buffer_size, 1024)
parser.Parse(xml3, 1)
self.assertEqual(self.n, 12)
def make_document(self, bytes):
return ("<?xml version='1.0'?><tag>" + bytes * 'a' + '</tag>')
def counting_handler(self, text):
self.n += 1
def small_buffer_test(self, buffer_len):
xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * buffer_len)
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_size = 1024
parser.buffer_text = 1
self.n = 0
parser.Parse(xml)
return self.n
def test_change_size_1(self):
xml1 = "<?xml version='1.0' encoding='iso8859'?><a><s>%s" % ('a' * 1024)
xml2 = "aaa</s><s>%s</s></a>" % ('a' * 1025)
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 1024
self.assertEqual(parser.buffer_size, 1024)
self.n = 0
parser.Parse(xml1, 0)
parser.buffer_size *= 2
self.assertEqual(parser.buffer_size, 2048)
parser.Parse(xml2, 1)
self.assertEqual(self.n, 2)
def test_change_size_2(self):
xml1 = "<?xml version='1.0' encoding='iso8859'?><a>a<s>%s" % ('a' * 1023)
xml2 = "aaa</s><s>%s</s></a>" % ('a' * 1025)
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 2048
self.assertEqual(parser.buffer_size, 2048)
self.n=0
parser.Parse(xml1, 0)
parser.buffer_size //= 2
self.assertEqual(parser.buffer_size, 1024)
parser.Parse(xml2, 1)
self.assertEqual(self.n, 4)
class MalformedInputText(unittest.TestCase):
def test1(self):
xml = "\0\r\n"
parser = expat.ParserCreate()
try:
parser.Parse(xml, True)
self.fail()
except expat.ExpatError as e:
self.assertEqual(str(e), 'unclosed token: line 2, column 0')
def test2(self):
xml = "<?xml version\xc2\x85='1.0'?>\r\n"
parser = expat.ParserCreate()
try:
parser.Parse(xml, True)
self.fail()
except expat.ExpatError as e:
self.assertEqual(str(e), 'XML declaration not well-formed: line 1, column 14')
class ForeignDTDTests(unittest.TestCase):
"""
Tests for the UseForeignDTD method of expat parser objects.
"""
def test_use_foreign_dtd(self):
"""
If UseForeignDTD is passed True and a document without an external
entity reference is parsed, ExternalEntityRefHandler is first called
with None for the public and system ids.
"""
handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse("<?xml version='1.0'?><element/>")
self.assertEqual(handler_call_args, [(None, None)])
# test UseForeignDTD() is equal to UseForeignDTD(True)
handler_call_args[:] = []
parser = expat.ParserCreate()
parser.UseForeignDTD()
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse("<?xml version='1.0'?><element/>")
self.assertEqual(handler_call_args, [(None, None)])
def test_ignore_use_foreign_dtd(self):
"""
If UseForeignDTD is passed True and a document with an external
entity reference is parsed, ExternalEntityRefHandler is called with
the public and system ids from the document.
"""
handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse(
"<?xml version='1.0'?><!DOCTYPE foo PUBLIC 'bar' 'baz'><element/>")
self.assertEqual(handler_call_args, [("bar", "baz")])
def test_main():
run_unittest(SetAttributeTest,
ParseTest,
NamespaceSeparatorTest,
InterningTest,
BufferTextTest,
HandlerExceptionTest,
PositionTest,
sf1296433Test,
ChardataBufferTest,
MalformedInputText,
ForeignDTDTests)
if __name__ == "__main__":
test_main()
| mit |
PabloPiaggi/lammps | tools/eff/lmp2radii.py | 54 | 3001 | #!/usr/local/bin/python-2.5/bin/python
Info="""
Module name: lmp2radii.py
Author: (c) Andres Jaramillo-Botero
California Institute of Technology
ajaramil@wag.caltech.edu
Project: pEFF
Version: August 2009
Extracts the electron radii from a lammps trajectory dump of style custom:
dump 1 all custom period dump_file id type q spin eradius x y z...
NOTE: The radius must be the 5th column per trajectory entry in the dump file
"""
# import essentials:
import sys, os
from math import log10
from shutil import rmtree
from getopt import gnu_getopt as getopt
import numpy
def printHelp():
print Info
print "Usage: python lmp2radii.pyx test.lammpstrj\n"
return
def makeradii(infile):
print "Reading %s ... [WAIT]"%infile,
fin = open(infile,'r')
lines = fin.xreadlines()
print 7*"\b"+"[DONE]"
frame=0
radii=[]
# grep the number of frames and atoms/frame
os.system("grep TIMESTEP %s | wc -l > frames; grep -m 1 -A 1 ATOMS %s > atoms"%(infile,infile))
tmp=open("frames",'r')
frames=int(tmp.readline().split()[0])
tmp.close()
tmp=open("atoms",'r')
atoms=int(tmp.readlines()[1].split()[0])
tmp.close()
os.system("rm -rf frames atoms lines")
arry=numpy.zeros((atoms,frames),dtype=float)
framecnt=0
header=9
ecount=0
print "Extracting electron radii per frame from %s ... "%(infile),
for i,line in enumerate(lines):
lo=(atoms+header)*framecnt+header
hi=lo+atoms
if (i<lo):
continue
elif (i >= lo) and (i < hi):
lparse=line.split()
id=int(lparse[0])
r=float(lparse[4])
if (r!=0.0):
arry[id-1][framecnt]=r
if (framecnt==0): ecount+=1
if (i==lo+1):
sys.stdout.write("%d/%d%s"%(framecnt+1,frames,(int(log10(framecnt+1))+3+int(log10(frames)))*"\b"))
sys.stdout.flush()
if (i == hi+1):
framecnt+=1
print
print "Writing radii/frame table to %s ... "%(infile+'.out'),
sys.stdout.flush()
fout=open(infile+'.out','w')
for i in range(frames):
fout.writelines('\tF'+str(i))
fout.writelines("\n")
e=1
for a in range(atoms):
if arry[a][0] == 0.0: continue
else:
sys.stdout.write("%d/%d%s"%(e,ecount,(int(log10(e))+int(log10(ecount))+3)*"\b"))
sys.stdout.flush()
e+=1
fout.writelines("%d\t"%(a+1))
for f in range(frames):
fout.writelines("%f\t"%(arry[a][f]))
fout.writelines("\n")
print
print "Done !! (generated radii/frame table) \n"
fout.close()
fin.close()
if __name__ == '__main__':
# set defaults
# check for input:
opts, argv = getopt(sys.argv[1:], 'h')
# if no input, print help and exit
if len(argv) != 1:
printHelp()
sys.exit(1)
else:
infile=argv[0]
# read options
for opt, arg in opts:
if opt == '-h': # -h: print help
printHelp()
makeradii(infile)
| gpl-2.0 |
ronakkhunt/kuma | vendor/packages/nose/loader.py | 43 | 25526 | """
Test Loader
-----------
nose's test loader implements the same basic functionality as its
superclass, unittest.TestLoader, but extends it by more liberal
interpretations of what may be a test and how a test may be named.
"""
from __future__ import generators
import logging
import os
import sys
import unittest
import types
from inspect import isfunction
from nose.pyversion import unbound_method, ismethod
from nose.case import FunctionTestCase, MethodTestCase
from nose.failure import Failure
from nose.config import Config
from nose.importer import Importer, add_path, remove_path
from nose.selector import defaultSelector, TestAddress
from nose.util import func_lineno, getpackage, isclass, isgenerator, \
ispackage, regex_last_key, resolve_name, transplant_func, \
transplant_class, test_address
from nose.suite import ContextSuiteFactory, ContextList, LazySuite
from nose.pyversion import sort_list, cmp_to_key
log = logging.getLogger(__name__)
#log.setLevel(logging.DEBUG)
# for efficiency and easier mocking
op_normpath = os.path.normpath
op_abspath = os.path.abspath
op_join = os.path.join
op_isdir = os.path.isdir
op_isfile = os.path.isfile
__all__ = ['TestLoader', 'defaultTestLoader']
class TestLoader(unittest.TestLoader):
"""Test loader that extends unittest.TestLoader to:
* Load tests from test-like functions and classes that are not
unittest.TestCase subclasses
* Find and load test modules in a directory
* Support tests that are generators
* Support easy extensions of or changes to that behavior through plugins
"""
config = None
importer = None
workingDir = None
selector = None
suiteClass = None
def __init__(self, config=None, importer=None, workingDir=None,
selector=None):
"""Initialize a test loader.
Parameters (all optional):
* config: provide a `nose.config.Config`_ or other config class
instance; if not provided a `nose.config.Config`_ with
default values is used.
* importer: provide an importer instance that implements
`importFromPath`. If not provided, a
`nose.importer.Importer`_ is used.
* workingDir: the directory to which file and module names are
relative. If not provided, assumed to be the current working
directory.
* selector: a selector class or instance. If a class is
provided, it will be instantiated with one argument, the
current config. If not provided, a `nose.selector.Selector`_
is used.
"""
if config is None:
config = Config()
if importer is None:
importer = Importer(config=config)
if workingDir is None:
workingDir = config.workingDir
if selector is None:
selector = defaultSelector(config)
elif isclass(selector):
selector = selector(config)
self.config = config
self.importer = importer
self.workingDir = op_normpath(op_abspath(workingDir))
self.selector = selector
if config.addPaths:
add_path(workingDir, config)
self.suiteClass = ContextSuiteFactory(config=config)
self._visitedPaths = set([])
unittest.TestLoader.__init__(self)
def getTestCaseNames(self, testCaseClass):
"""Override to select with selector, unless
config.getTestCaseNamesCompat is True
"""
if self.config.getTestCaseNamesCompat:
return unittest.TestLoader.getTestCaseNames(self, testCaseClass)
def wanted(attr, cls=testCaseClass, sel=self.selector):
item = getattr(cls, attr, None)
if isfunction(item):
item = unbound_method(cls, item)
elif not ismethod(item):
return False
return sel.wantMethod(item)
cases = filter(wanted, dir(testCaseClass))
# add runTest if nothing else picked
if not cases and hasattr(testCaseClass, 'runTest'):
cases = ['runTest']
if self.sortTestMethodsUsing:
sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
return cases
def _haveVisited(self, path):
# For cases where path is None, we always pretend we haven't visited
# them.
if path is None:
return False
return path in self._visitedPaths
def _addVisitedPath(self, path):
if path is not None:
self._visitedPaths.add(path)
def loadTestsFromDir(self, path):
"""Load tests from the directory at path. This is a generator
-- each suite of tests from a module or other file is yielded
and is expected to be executed before the next file is
examined.
"""
log.debug("load from dir %s", path)
plugins = self.config.plugins
plugins.beforeDirectory(path)
if self.config.addPaths:
paths_added = add_path(path, self.config)
entries = os.listdir(path)
sort_list(entries, regex_last_key(self.config.testMatch))
for entry in entries:
# this hard-coded initial-dot test will be removed:
# http://code.google.com/p/python-nose/issues/detail?id=82
if entry.startswith('.'):
continue
entry_path = op_abspath(op_join(path, entry))
is_file = op_isfile(entry_path)
wanted = False
if is_file:
is_dir = False
wanted = self.selector.wantFile(entry_path)
else:
is_dir = op_isdir(entry_path)
if is_dir:
# this hard-coded initial-underscore test will be removed:
# http://code.google.com/p/python-nose/issues/detail?id=82
if entry.startswith('_'):
continue
wanted = self.selector.wantDirectory(entry_path)
is_package = ispackage(entry_path)
# Python 3.3 now implements PEP 420: Implicit Namespace Packages.
# As a result, it's now possible that parent paths that have a
# segment with the same basename as our package ends up
# in module.__path__. So we have to keep track of what we've
# visited, and not-revisit them again.
if wanted and not self._haveVisited(entry_path):
self._addVisitedPath(entry_path)
if is_file:
plugins.beforeContext()
if entry.endswith('.py'):
yield self.loadTestsFromName(
entry_path, discovered=True)
else:
yield self.loadTestsFromFile(entry_path)
plugins.afterContext()
elif is_package:
# Load the entry as a package: given the full path,
# loadTestsFromName() will figure it out
yield self.loadTestsFromName(
entry_path, discovered=True)
else:
# Another test dir in this one: recurse lazily
yield self.suiteClass(
lambda: self.loadTestsFromDir(entry_path))
tests = []
for test in plugins.loadTestsFromDir(path):
tests.append(test)
# TODO: is this try/except needed?
try:
if tests:
yield self.suiteClass(tests)
except (KeyboardInterrupt, SystemExit):
raise
except:
yield self.suiteClass([Failure(*sys.exc_info())])
# pop paths
if self.config.addPaths:
for p in paths_added:
remove_path(p)
plugins.afterDirectory(path)
def loadTestsFromFile(self, filename):
"""Load tests from a non-module file. Default is to raise a
ValueError; plugins may implement `loadTestsFromFile` to
provide a list of tests loaded from the file.
"""
log.debug("Load from non-module file %s", filename)
try:
tests = [test for test in
self.config.plugins.loadTestsFromFile(filename)]
if tests:
# Plugins can yield False to indicate that they were
# unable to load tests from a file, but it was not an
# error -- the file just had no tests to load.
tests = filter(None, tests)
return self.suiteClass(tests)
else:
# Nothing was able to even try to load from this file
open(filename, 'r').close() # trigger os error
raise ValueError("Unable to load tests from file %s"
% filename)
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
return self.suiteClass(
[Failure(exc[0], exc[1], exc[2],
address=(filename, None, None))])
def loadTestsFromGenerator(self, generator, module):
"""Lazy-load tests from a generator function. The generator function
may yield either:
* a callable, or
* a function name resolvable within the same module
"""
def generate(g=generator, m=module):
try:
for test in g():
test_func, arg = self.parseGeneratedTest(test)
if not callable(test_func):
test_func = getattr(m, test_func)
yield FunctionTestCase(test_func, arg=arg, descriptor=g)
except KeyboardInterrupt:
raise
except:
exc = sys.exc_info()
yield Failure(exc[0], exc[1], exc[2],
address=test_address(generator))
return self.suiteClass(generate, context=generator, can_split=False)
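    # Illustrative sketch (not from the nose sources) of a generator test this
    # method loads; each yielded tuple is split by parseGeneratedTest() into
    # (test_func, args):
    #
    #     def test_evens():
    #         def check(n):
    #             assert n % 2 == 0
    #         for n in (2, 4, 6):
    #             yield check, n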
def loadTestsFromGeneratorMethod(self, generator, cls):
"""Lazy-load tests from a generator method.
This is more complicated than loading from a generator function,
since a generator method may yield:
* a function
* a bound or unbound method, or
* a method name
"""
# convert the unbound generator method
# into a bound method so it can be called below
if hasattr(generator, 'im_class'):
cls = generator.im_class
inst = cls()
method = generator.__name__
generator = getattr(inst, method)
def generate(g=generator, c=cls):
try:
for test in g():
test_func, arg = self.parseGeneratedTest(test)
if not callable(test_func):
test_func = unbound_method(c, getattr(c, test_func))
if ismethod(test_func):
yield MethodTestCase(test_func, arg=arg, descriptor=g)
elif callable(test_func):
# In this case we're forcing the 'MethodTestCase'
# to run the inline function as its test call,
# but using the generator method as the 'method of
# record' (so no need to pass it as the descriptor)
yield MethodTestCase(g, test=test_func, arg=arg)
else:
yield Failure(
TypeError,
"%s is not a callable or method" % test_func)
except KeyboardInterrupt:
raise
except:
exc = sys.exc_info()
yield Failure(exc[0], exc[1], exc[2],
address=test_address(generator))
return self.suiteClass(generate, context=generator, can_split=False)
def loadTestsFromModule(self, module, path=None, discovered=False):
"""Load all tests from module and return a suite containing
them. If the module has been discovered and is not test-like,
the suite will be empty by default, though plugins may add
their own tests.
"""
log.debug("Load from module %s", module)
tests = []
test_classes = []
test_funcs = []
# For *discovered* modules, we only load tests when the module looks
# testlike. For modules we've been directed to load, we always
# look for tests. (discovered is set to True by loadTestsFromDir)
if not discovered or self.selector.wantModule(module):
for item in dir(module):
test = getattr(module, item, None)
# print "Check %s (%s) in %s" % (item, test, module.__name__)
if isclass(test):
if self.selector.wantClass(test):
test_classes.append(test)
elif isfunction(test) and self.selector.wantFunction(test):
test_funcs.append(test)
sort_list(test_classes, lambda x: x.__name__)
sort_list(test_funcs, func_lineno)
tests = map(lambda t: self.makeTest(t, parent=module),
test_classes + test_funcs)
# Now, descend into packages
# FIXME can or should this be lazy?
# is this syntax 2.2 compatible?
module_paths = getattr(module, '__path__', [])
if path:
path = os.path.normcase(os.path.realpath(path))
for module_path in module_paths:
log.debug("Load tests from module path %s?", module_path)
log.debug("path: %s os.path.realpath(%s): %s",
path, os.path.normcase(module_path),
os.path.realpath(os.path.normcase(module_path)))
if (self.config.traverseNamespace or not path) or \
os.path.realpath(
os.path.normcase(module_path)).startswith(path):
# Egg files can be on sys.path, so make sure the path is a
# directory before trying to load from it.
if os.path.isdir(module_path):
tests.extend(self.loadTestsFromDir(module_path))
for test in self.config.plugins.loadTestsFromModule(module, path):
tests.append(test)
return self.suiteClass(ContextList(tests, context=module))
def loadTestsFromName(self, name, module=None, discovered=False):
"""Load tests from the entity with the given name.
The name may indicate a file, directory, module, or any object
within a module. See `nose.util.split_test_name` for details on
test name parsing.
"""
# FIXME refactor this method into little bites?
log.debug("load from %s (%s)", name, module)
suite = self.suiteClass
# give plugins first crack
plug_tests = self.config.plugins.loadTestsFromName(name, module)
if plug_tests:
return suite(plug_tests)
addr = TestAddress(name, workingDir=self.workingDir)
if module:
# Two cases:
# name is class.foo
# The addr will be incorrect, since it thinks class.foo is
# a dotted module name. It's actually a dotted attribute
# name. In this case we want to use the full submitted
# name as the name to load from the module.
# name is module:class.foo
# The addr will be correct. The part we want is the part after
# the :, which is in addr.call.
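            # e.g. (hypothetical names) loadTestsFromName('TestFoo.test_bar',
            # module=mod) uses the whole string, while
            # loadTestsFromName('pkg.mod:TestFoo.test_bar') puts only
            # 'TestFoo.test_bar' into addr.call.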
if addr.call:
name = addr.call
parent, obj = self.resolve(name, module)
if (isclass(parent)
and getattr(parent, '__module__', None) != module.__name__
and not isinstance(obj, Failure)):
parent = transplant_class(parent, module.__name__)
obj = getattr(parent, obj.__name__)
log.debug("parent %s obj %s module %s", parent, obj, module)
if isinstance(obj, Failure):
return suite([obj])
else:
return suite(ContextList([self.makeTest(obj, parent)],
context=parent))
else:
if addr.module:
try:
if addr.filename is None:
module = resolve_name(addr.module)
else:
self.config.plugins.beforeImport(
addr.filename, addr.module)
# FIXME: to support module.name names,
# do what resolve-name does and keep trying to
# import, popping tail of module into addr.call,
# until we either get an import or run out of
# module parts
try:
module = self.importer.importFromPath(
addr.filename, addr.module)
finally:
self.config.plugins.afterImport(
addr.filename, addr.module)
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
return suite([Failure(exc[0], exc[1], exc[2],
address=addr.totuple())])
if addr.call:
return self.loadTestsFromName(addr.call, module)
else:
return self.loadTestsFromModule(
module, addr.filename,
discovered=discovered)
elif addr.filename:
path = addr.filename
if addr.call:
package = getpackage(path)
if package is None:
return suite([
Failure(ValueError,
"Can't find callable %s in file %s: "
"file is not a python module" %
(addr.call, path),
address=addr.totuple())])
return self.loadTestsFromName(addr.call, module=package)
else:
if op_isdir(path):
# In this case we *can* be lazy since we know
# that each module in the dir will be fully
# loaded before its tests are executed; we
# also know that we're not going to be asked
# to load from . and ./some_module.py *as part
# of this named test load*
return LazySuite(
lambda: self.loadTestsFromDir(path))
elif op_isfile(path):
return self.loadTestsFromFile(path)
else:
return suite([
Failure(OSError, "No such file %s" % path,
address=addr.totuple())])
else:
# just a function? what to do? I think it can only be
# handled when module is not None
return suite([
Failure(ValueError, "Unresolvable test name %s" % name,
address=addr.totuple())])
def loadTestsFromNames(self, names, module=None):
"""Load tests from all names, returning a suite containing all
tests.
"""
plug_res = self.config.plugins.loadTestsFromNames(names, module)
if plug_res:
suite, names = plug_res
if suite:
return self.suiteClass([
self.suiteClass(suite),
unittest.TestLoader.loadTestsFromNames(self, names, module)
])
return unittest.TestLoader.loadTestsFromNames(self, names, module)
def loadTestsFromTestCase(self, testCaseClass):
"""Load tests from a unittest.TestCase subclass.
"""
cases = []
plugins = self.config.plugins
for case in plugins.loadTestsFromTestCase(testCaseClass):
cases.append(case)
# For efficiency in the most common case, just call and return from
# super. This avoids having to extract cases and rebuild a context
# suite when there are no plugin-contributed cases.
if not cases:
return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
cases.extend(
[case for case in
super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
return self.suiteClass(cases)
def loadTestsFromTestClass(self, cls):
"""Load tests from a test class that is *not* a unittest.TestCase
subclass.
In this case, we can't depend on the class's `__init__` taking method
name arguments, so we have to compose a MethodTestCase for each
method in the class that looks testlike.
"""
def wanted(attr, cls=cls, sel=self.selector):
item = getattr(cls, attr, None)
if isfunction(item):
item = unbound_method(cls, item)
elif not ismethod(item):
return False
return sel.wantMethod(item)
cases = [self.makeTest(getattr(cls, case), cls)
for case in filter(wanted, dir(cls))]
for test in self.config.plugins.loadTestsFromTestClass(cls):
cases.append(test)
return self.suiteClass(ContextList(cases, context=cls))
def makeTest(self, obj, parent=None):
try:
return self._makeTest(obj, parent)
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
try:
addr = test_address(obj)
except KeyboardInterrupt:
raise
except:
addr = None
return Failure(exc[0], exc[1], exc[2], address=addr)
def _makeTest(self, obj, parent=None):
"""Given a test object and its parent, return a test case
or test suite.
"""
plug_tests = []
try:
addr = test_address(obj)
except KeyboardInterrupt:
raise
except:
addr = None
for test in self.config.plugins.makeTest(obj, parent):
plug_tests.append(test)
# TODO: is this try/except needed?
try:
if plug_tests:
return self.suiteClass(plug_tests)
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
return Failure(exc[0], exc[1], exc[2], address=addr)
if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
# This is a Python 3.x 'unbound method'. Wrap it with its
# associated class..
obj = unbound_method(parent, obj)
if isinstance(obj, unittest.TestCase):
return obj
elif isclass(obj):
if parent and obj.__module__ != parent.__name__:
obj = transplant_class(obj, parent.__name__)
if issubclass(obj, unittest.TestCase):
return self.loadTestsFromTestCase(obj)
else:
return self.loadTestsFromTestClass(obj)
elif ismethod(obj):
if parent is None:
parent = obj.__class__
if issubclass(parent, unittest.TestCase):
return parent(obj.__name__)
else:
if isgenerator(obj):
return self.loadTestsFromGeneratorMethod(obj, parent)
else:
return MethodTestCase(obj)
elif isfunction(obj):
if parent and obj.__module__ != parent.__name__:
obj = transplant_func(obj, parent.__name__)
if isgenerator(obj):
return self.loadTestsFromGenerator(obj, parent)
else:
return FunctionTestCase(obj)
else:
return Failure(TypeError,
"Can't make a test from %s" % obj,
address=addr)
def resolve(self, name, module):
"""Resolve name within module
"""
obj = module
parts = name.split('.')
for part in parts:
parent, obj = obj, getattr(obj, part, None)
if obj is None:
# no such test
obj = Failure(ValueError, "No such test %s" % name)
return parent, obj
def parseGeneratedTest(self, test):
"""Given the yield value of a test generator, return a func and args.
This is used in the two loadTestsFromGenerator* methods.
"""
if not isinstance(test, tuple): # yield test
test_func, arg = (test, tuple())
elif len(test) == 1: # yield (test,)
test_func, arg = (test[0], tuple())
else: # yield test, foo, bar, ...
assert len(test) > 1 # sanity check
test_func, arg = (test[0], test[1:])
return test_func, arg
defaultTestLoader = TestLoader
| mpl-2.0 |
JohnCEarls/MasterDirac | masterdirac/controller/data.py | 1 | 7002 | import json
import boto
import boto.utils
import boto.sqs
from boto.sqs.message import Message
import logging
from collections import deque
import serverinterface
import masterdirac.models.server as svr_mdl
import masterdirac.models.run as run_mdl
import servermanager as sm
from datetime import datetime, timedelta
import multiprocessing
class Interface(serverinterface.ServerInterface):
def __init__(self, init_message, master_name):
super( Interface, self ).__init__(init_message, master_name )
self.logger = logging.getLogger(self.unique_id)
self.logger.info("Data Interface created")
self.num_nodes = init_message['num-nodes']
self.set_status( svr_mdl.INIT )
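    # State flow, as inferred from the handlers below: INIT -> send_init() ->
    # WAITING -> send_run() -> RUNNING -> 'run-complete' -> WAITING again;
    # restart() moves the server to RESTARTING with a two-minute timeout before
    # hard_restart(), and a dead cluster or a 'terminated' response ends in
    # TERMINATED.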
def handle_state(self):
self.get_responses()
self.check_response()
if not self.cluster_active:
#cluster down
self.set_status( svr_mdl.TERMINATED )
state = self.status
self.logger.debug("handle_state[%s]" % state)
if state == svr_mdl.INIT:
self.send_init()
elif state == svr_mdl.WAITING:
if self._run_id is None:
self.restart()
elif run_mdl.get_ANRun( self._run_id )['status'] != run_mdl.ACTIVE:
self._run_id = None
self.restart()
elif state == svr_mdl.TERMINATED:
self.delete_queues()
elif state==svr_mdl.RESTARTING and self._restart_timeout < datetime.now():
self.hard_restart()
def send_init(self):
self.logger.debug("Attempting to send init")
active_run = self._get_active_run()
if active_run is None:
self.logger.debug("No active runs")
return
self._run_id = active_run['run_id']
intercomm_settings = active_run['intercomm_settings']
aws_locations = (
intercomm_settings[ 'sqs_from_data_to_agg' ],
intercomm_settings[ 'sqs_from_data_to_agg_truth'],
intercomm_settings[ 'sqs_from_data_to_gpu' ],
intercomm_settings[ 's3_from_data_to_gpu'] )
dest_data = active_run['dest_data']
source_files = (
dest_data[ 'working_bucket' ],
dest_data[ 'dataframe_file' ],
dest_data[ 'meta_file' ] )
network_config = active_run['network_config']
network_settings = (
network_config[ 'network_table' ],
network_config[ 'network_source']
)
run_settings = active_run['run_settings']
block_sizes = (
run_settings[ 'sample_block_size'],
run_settings[ 'pairs_block_size'],
run_settings[ 'nets_block_size'] )
gpu_mem_max = 2*1024*1024*1024
self._aws_locations = aws_locations
data_sqs_queue, data_sqs_queue_truth, gpu_sqs_queue, working_bucket = aws_locations
self._source_files = source_files
ds_bucket, data_file, meta_file = source_files
network_table, network_source = network_settings
sample_block_size, pairs_block_size, nets_block_size = block_sizes
data_message = {'message-type':'init-settings',
'data_sqs_queue': data_sqs_queue,
'data_sqs_queue_truth': data_sqs_queue_truth,
'gpu_sqs_queue': gpu_sqs_queue,
'working_bucket': working_bucket,
'ds_bucket': ds_bucket,
'data_file': data_file,
'meta_file': meta_file,
'network_table': network_table,
'network_source': network_source,
'sample_block_size': sample_block_size,
'pairs_block_size': pairs_block_size,
'nets_block_size': nets_block_size,
'gpu_mem_max': gpu_mem_max
}
js_mess = json.dumps( data_message )
self.logger.debug("DataInit message [%s]" % js_mess)
self._send_command( js_mess )
self.set_status(svr_mdl.WAITING)
@property
def server_id(self):
return "data"
@property
def unique_id(self):
return self.cluster_name
def send_run(self, run_id, strain, num_runs, shuffle, k):
data_message = {'message-type':'run-instructions',
'strain': strain,
'shuffle': shuffle,
'k': k,
'num-runs': num_runs,
'run-id': run_id
}
if run_id == self._run_id:
js_mess = json.dumps( data_message )
self.logger.debug("Sending run message[%s]", js_mess)
self._send_command( js_mess )
self.set_status( svr_mdl.RUNNING )
return True
else:
self.logger.warning("Rec'd a send run for a run that is not initialized")
return False
def restart( self ):
data_message = {'message-type':'restart-notice'}
js_mess = json.dumps( data_message )
self.logger.debug("Sending run message[%s]", js_mess)
self._send_command( js_mess )
self.set_status( svr_mdl.RESTARTING )
self._restart_timeout = datetime.now() + timedelta( minutes=2 )
def check_response(self):
while len(self.status_queue) > 0:
message = self.status_queue.popleft()
self._handle_response( message )
def _handle_response(self, message ):
if message['message-type'] == 'run-complete':
self.logger.debug("Response: %s" % json.dumps(message))
self.set_status( svr_mdl.WAITING )
elif message['message-type'] == 'terminated':
self.logger.info("%s terminated" % self.unique_id)
self.set_status(svr_mdl.TERMINATED)
elif message['message-type'] == 'restarting':
self.logger.info("%s restarting" % self.unique_id)
self.set_status(svr_mdl.INIT)
else:
self.logger.error("Error[Unexpected Response] : %s" %\
json.dumps( message ))
Exception("Unexpected Response")
def _get_active_run( self ):
for run in run_mdl.get_ANRun():
if run['master_name'] == self._master_name:
if run['status'] == run_mdl.ACTIVE:
return run
return None
def busy(self):
"""
Is it currently working in a run
"""
self.check_response()
return self.status != svr_mdl.WAITING
def _restart(self):
self.logger.warning("Restart unimplemented in data node")
def hard_restart(self):
worker_id = self.worker_id
restart_process = multiprocessing.Process( target = sm.restart_data,
args=( worker_id, ),
name=worker_id)
restart_process.start()
| agpl-3.0 |
cloudera/Impala | tests/custom_cluster/test_krpc_metrics.py | 2 | 3403 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import time
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.impala_cluster import ImpalaCluster
from tests.common.skip import SkipIf, SkipIfBuildType
from tests.verifiers.mem_usage_verifier import MemUsageVerifier
class TestKrpcMetrics(CustomClusterTestSuite):
"""Test for KRPC metrics that require special arguments during cluster startup."""
RPCZ_URL = 'http://localhost:25000/rpcz?json'
TEST_QUERY = 'select count(*) from tpch_parquet.lineitem l1 \
join tpch_parquet.lineitem l2 where l1.l_orderkey = l2.l_orderkey;'
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def setup_class(cls):
if cls.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
super(TestKrpcMetrics, cls).setup_class()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args('-datastream_service_queue_mem_limit=1B \
-datastream_service_num_svc_threads=1')
def test_krpc_queue_overflow_rpcz(self, vector):
"""Test that rejected RPCs show up on the /rpcz debug web page.
"""
def get_rpc_overflows():
rpcz = self.get_debug_page(self.RPCZ_URL)
assert len(rpcz['services']) > 0
for s in rpcz['services']:
if s['service_name'] == 'impala.DataStreamService':
return int(s['rpcs_queue_overflow'])
assert False, "Could not find DataStreamService metrics"
before = get_rpc_overflows()
assert before == 0
self.client.execute(self.TEST_QUERY)
after = get_rpc_overflows()
assert before < after
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args('-datastream_service_queue_mem_limit=1B \
-datastream_service_num_svc_threads=1')
def test_krpc_queue_overflow_metrics(self, vector):
"""Test that rejected RPCs show up on the /metrics debug web page.
"""
metric_name = 'rpc.impala.DataStreamService.rpcs_queue_overflow'
before = self.get_metric(metric_name)
assert before == 0
self.client.execute(self.TEST_QUERY)
after = self.get_metric(metric_name)
assert before < after
@pytest.mark.execute_serially
def test_krpc_service_queue_metrics(self, vector):
"""Test that memory usage metrics for the data stream service queue show up on the
/metrics debug web page.
"""
self.client.execute(self.TEST_QUERY)
assert self.get_metric('mem-tracker.DataStreamService.current_usage_bytes') >= 0
assert self.get_metric('mem-tracker.DataStreamService.peak_usage_bytes') > 0
| apache-2.0 |
Gravecorp/Gap | packages/IronPython.StdLib.2.7.3/content/Lib/encodings/undefined.py | 860 | 1299 | """ Python 'undefined' Codec
This codec will always raise a ValueError exception when being
used. It is intended for use by the site.py file to switch off
automatic string to Unicode coercion.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
def decode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
raise UnicodeError("undefined encoding")
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
raise UnicodeError("undefined encoding")
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='undefined',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| mpl-2.0 |
jgonthier/psi4 | psi4/driver/qcdb/libmintsgshell.py | 3 | 12927 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import math
#MAX_IOFF = 30000
#extern size_t ioff[MAX_IOFF];
#
#MAX_DF = 500
#extern double df[MAX_DF];
#
#MAX_BC = 20
#extern double bc[MAX_BC][MAX_BC];
#
#MAX_FAC = 100
#extern double fac[MAX_FAC];
#
#
#MAX_DF = 500
#extern double df[MAX_DF];
#
## Globals
#size_t ioff[MAX_IOFF];
#double df[MAX_DF];
#double bc[MAX_BC][MAX_BC];
#double fac[MAX_FAC];
#
#def Wavefunction_initialize_singletons():
# done = False
#
# if done:
# return
#
# ioff[0] = 0;
# for (size_t i=1; i<MAX_IOFF; ++i)
# ioff[i] = ioff[i-1] + i;
#
# df[0] = 1.0;
# df[1] = 1.0;
# df[2] = 1.0;
# for (int i=3; i<MAX_DF; ++i)
# df[i] = (i-1)*df[i-2];
#
# for (int i=0; i<MAX_BC; ++i)
# for (int j=0; j<=i; ++j)
# bc[i][j] = combinations(i, j);
#
# fac[0] = 1.0;
# for (int i=1; i<MAX_FAC; ++i)
# fac[i] = i*fac[i-1];
#
# done = True
def df(n):
"""Gives the double factorial of *n*"""
return 1.0 if n <= 0 else 1.0 * n * df(n - 2)
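# e.g. df(5) == 15.0 (5*3*1), df(6) == 48.0 (6*4*2), and df(n) == 1.0 for n <= 0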
def INT_NCART(am):
"""Gives the number of cartesian functions for an angular momentum.
#define INT_NCART(am) ((am>=0) ? ((((am)+2)*((am)+1))>>1) : 0)
"""
return (((abs(am) + 2) * (abs(am) + 1)) >> 1)
def INT_NPURE(am):
"""Gives the number of spherical functions for an angular momentum.
#define INT_NPURE(am) (2*(am)+1)
"""
return 2 * abs(am) + 1
def INT_NFUNC(pu, am):
"""Gives the number of functions for an angular momentum based on pu.
#define INT_NFUNC(pu,am) ((pu)?INT_NPURE(am):INT_NCART(am))
"""
return INT_NCART(am) if pu in {'Cartesian', False} else INT_NPURE(am)
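# e.g. for a d shell (am=2): INT_NCART(2) == 6, INT_NPURE(2) == 5, and
# INT_NFUNC('Pure', 2) == 5 while INT_NFUNC('Cartesian', 2) == 6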
def INT_CARTINDEX(am, i, j):
"""Returns offset index for cartesian function.
#define INT_CARTINDEX(am,i,j) (((i) == (am))? 0 : (((((am) - (i) + 1)*((am) - (i)))>>1) + (am) - (i) - (j)))
"""
return 0 if (i == am) else ((((am - i + 1) * (am - i)) >> 1) + am - i - j)
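# e.g. for am=2 with i, j the x and y exponents, the cartesians xx, xy, xz,
# yy, yz, zz map to offsets 0-5: INT_CARTINDEX(2, 2, 0) == 0,
# INT_CARTINDEX(2, 1, 1) == 1, ..., INT_CARTINDEX(2, 0, 0) == 5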
def INT_ICART(a, b, c):
"""Given a, b, and c, return a cartesian offset.
#define INT_ICART(a, b, c) (((((((a)+(b)+(c)+1)<<1)-(a))*((a)+1))>>1)-(b)-1)
"""
return ((((((a + b + c + 1) << 1) - a) * (a + 1)) >> 1) - b - 1)
def INT_IPURE(l, m):
"""Given l and m, return a pure function offset.
#define INT_IPURE(l, m) ((l)+(m))
"""
return l + m
# Lookup array that when you index the angular momentum it returns the corresponding letter
PrimitiveType = ['Normalized', 'Unnormalized']
GaussianType = ['Cartesian', 'Pure'] # Cartesian = 0, Pure = 1
class ShellInfo(object):
"""This class has the same behavior as GaussianShell, but implements everything using
slower data structures, which are easier to construct. These are used to build the
basis set, which builds more efficient pointer-based GaussianShell objects.
@param am Angular momentum.
@param c An array of contraction coefficients.
@param e An array of exponent values.
@param pure Pure spherical harmonics, or Cartesian.
@param nc The atomic center that this shell is located on. Must map
back to the correct atom in the owning BasisSet molecule. Used
in integral derivatives for indexing.
@param center The x, y, z position of the shell. This is passed to
reduce the number of calls to the molecule.
@param start The starting index of the first function this shell
provides. Used to provide starting positions in matrices.
@param pt Is the shell already normalized?
@param rpowers For an ECP, the array of radial powers.
"""
def __init__(self, am, c, e, pure, nc, center, start, pt='Normalized', rpowers=None):
# Angular momentum
self.l = am
# Flag for pure angular momentum (Cartesian = 0, Pure = 1)
self.puream = pure
# Exponents (of length nprimitives_)
self.PYexp = e
# Contraction coefficients (of length nprimitives_)
self.PYcoef = c
# ERD normalized contraction coefficients (of length nprimitives_)
self.PYerd_coef = []
# Original (un-normalized) contraction coefficients (of length nprimitives)
self.PYoriginal_coef = [c[n] for n in range(len(c))]
# Atom number this shell goes to. Needed when indexing integral derivatives.
self.nc = nc
# Atomic center number in the Molecule
self.center = center
#
self.start = start
# How many cartesian functions? (1=s, 3=p, 6=d, ...)
self.PYncartesian = INT_NCART(self.l)
        # How many functions? (1=s, 3=p, 5/6=d, ...) depends on the value of puream
self.PYnfunction = INT_NFUNC(self.puream, self.l)
# These are the radial factors for ECPs. They are not defined for regular shells.
self.rpowers = rpowers
# Compute the normalization constants
if pt == 'Unnormalized':
self.normalize_shell()
self.erd_normalize_shell()
else:
self.PYerd_coef = [0.0] * self.nprimitive()
def primitive_normalization(self, p):
"""Normalizes a single primitive.
@param p The primitive index to normalize.
@return Normalization constant to be applied to the primitive.
"""
tmp1 = self.l + 1.5
g = 2.0 * self.PYexp[p]
z = pow(g, tmp1)
return math.sqrt((pow(2.0, self.l) * z) / (math.pi * math.sqrt(math.pi) * df(2 * self.l)))
def contraction_normalization(self):
"""Normalizes an entire contraction set. Applies the normalization to the coefficients
"""
e_sum = 0.0
for i in range(self.nprimitive()):
for j in range(self.nprimitive()):
g = self.PYexp[i] + self.PYexp[j]
z = pow(g, self.l + 1.5)
e_sum += self.PYcoef[i] * self.PYcoef[j] / z
tmp = ((2.0 * math.pi / (2.0 / math.sqrt(math.pi))) * df(2 * self.l)) / pow(2.0, self.l)
try:
norm = math.sqrt(1.0 / (tmp * e_sum))
except ZeroDivisionError:
# This is likely an ECP with no local function.
pass
else:
# Normalize, as usual.
self.PYcoef = [i * norm for i in self.PYcoef]
def normalize_shell(self):
"""Handles calling primitive_normalization and
contraction_normalization for you.
"""
for i in range(self.nprimitive()):
normalization = self.primitive_normalization(i)
self.PYcoef[i] *= normalization
self.contraction_normalization()
def erd_normalize_shell(self):
"""Compute the normalization coefficients for Electronic
Repulsion Direct integral evaluation.
"""
tsum = 0.0
for j in range(self.nprimitive()):
for k in range(j + 1):
a1 = self.PYexp[j]
a2 = self.PYexp[k]
temp = self.PYoriginal_coef[j] * self.PYoriginal_coef[k]
temp2 = self.l + 1.5
temp3 = 2.0 * math.sqrt(a1 * a2) / (a1 + a2)
temp3 = pow(temp3, temp2)
temp *= temp3
tsum += temp
if j != k:
tsum += temp
prefac = pow(2.0, 2 * self.l) / df(2 * self.l) if self.l > 1 else 1.0
norm = math.sqrt(prefac / tsum)
self.PYerd_coef = [j * norm for j in self.PYoriginal_coef]
def copy(self, nc=None, c=None):
"""Return a copy of the ShellInfo"""
if nc is not None and c is not None:
return ShellInfo(self.l, self.PYoriginal_coef, self.PYexp,
self.puream, nc, c,
self.start, 'Unnormalized', self.rpowers)
else:
return ShellInfo(self.l, self.PYoriginal_coef, self.PYexp,
self.puream, self.nc, self.center,
self.start, 'Unnormalized', self.rpowers)
# better to just deepcopy?
def nprimitive(self):
"""Return the number of primitive Gaussians"""
return len(self.PYexp)
def nfunction(self):
"""Return the total number of basis functions"""
return INT_NFUNC(self.puream, self.l)
def ncartesian(self):
"""Return the total number of functions if this shell was Cartesian"""
return self.PYncartesian
def am(self):
"""Return the angular momentum of the given contraction"""
return self.l
def amchar(self):
"""Return the character symbol for the angular momentum of the given contraction"""
return 'spdfghiklmnopqrtuvwxyz'[self.l]
def AMCHAR(self):
"""Return the character symbol for the angular momentum of the given contraction (upper case)"""
return self.amchar().upper()
def is_cartesian(self):
"""Returns true if contraction is Cartesian"""
return self.puream == 'Cartesian'
def is_pure(self):
"""Returns true if contraction is pure"""
return self.puream == 'Pure'
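    # Note: __init__ stores the shell origin in the instance attribute
    # ``self.center``, which shadows the center() accessor below on instances;
    # code that needs the coordinates should read the attribute directly.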
def center(self):
"""Returns the center of the Molecule this shell is on"""
return self.center
def ncenter(self):
"""Returns the atom number this shell is on. Used by integral derivatives for indexing."""
return self.nc
def exp(self, prim):
"""Returns the exponent of the given primitive"""
return self.PYexp[prim]
def coef(self, pi):
"""Return coefficient of pi'th primitive"""
return self.PYcoef[pi]
def erd_coef(self, pi):
"""Return ERD normalized coefficient of pi'th primitive"""
return self.PYerd_coef[pi]
def original_coef(self, pi):
"""Return unnormalized coefficient of pi'th primitive"""
return self.PYoriginal_coef[pi]
def rpower(self, pi):
"""Return r exponent (for ECP) of pi'th primitive"""
return self.rpowers[pi] if self.rpowers else None
    def exps(self):
        """Return the list of exponents for all primitives"""
        return self.PYexp
    def coefs(self):
        """Return the list of contraction coefficients for all primitives"""
        return self.PYcoef
    def original_coefs(self):
        """Return the list of unnormalized contraction coefficients for all primitives"""
        return self.PYoriginal_coef
def aslist(self):
"""Return minimal list of shell info"""
if self.rpowers and self.rpowers[0] is not None:
# This is an ECP, so we tack the radial powers onto the end of the list
info = [self.l] + [(self.PYexp[K], self.PYoriginal_coef[K], self.rpower(K)) for K in range(self.nprimitive())]
else:
# This is a regular shell, with only coefficients and exponents to worry about
info = [self.l] + [(self.PYexp[K], self.PYoriginal_coef[K]) for K in range(self.nprimitive())]
return info
def pyprint(self, outfile=None):
"""Print out the shell"""
text = """ %c %3d 1.00\n""" % (self.AMCHAR(), self.nprimitive())
for K in range(self.nprimitive()):
text += """ %20.8f %20.8f\n""" % (self.PYexp[K], self.PYoriginal_coef[K])
if outfile is None:
return text
else:
with open(outfile, mode='w') as handle:
handle.write(text)
def pyprint_gamess(self, outfile=None):
"""Print out the shell in Gamess format"""
text = """%c %3d\n""" % (self.AMCHAR(), self.nprimitive())
for K in range(self.nprimitive()):
text += """%3d %15.8f %15.8f\n""" % (K + 1, self.PYexp[K], self.PYoriginal_coef[K])
if outfile is None:
return text
else:
with open(outfile, mode='w') as handle:
handle.write(text)
def __str__(self):
"""String representation of shell"""
return self.pyprint(outfile=None)
def function_index(self):
"""Return the basis function index where this shell starts."""
return self.start
def set_function_index(self, i):
"""Set basis function index where this shell starts."""
self.start = i
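# Minimal usage sketch (added for illustration; the exponent and coefficient
# below are placeholders, not taken from any real basis set).
if __name__ == '__main__':
    _demo = ShellInfo(0, [1.0], [0.5], 'Pure', 0, (0.0, 0.0, 0.0), 0,
                      pt='Unnormalized')
    print(_demo.amchar())      # 's'
    print(_demo.nprimitive())  # 1 primitive Gaussian
    print(_demo.nfunction())   # 1 basis function for an s shell
    print(_demo.aslist())      # [0, (0.5, 1.0)]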
| lgpl-3.0 |
projectweekend/THPL-Data-API | app/utils/testing.py | 1 | 2164 | import json
from falcon.testing import TestBase
from app import api
from app.config import API_KEY, CONTENT_TYPE_METHODS
HEADERS = {
'Content-Type': 'application/json',
'X-API-KEY': API_KEY
}
class APITestCase(TestBase):
def setUp(self):
super(APITestCase, self).setUp()
def _simulate_request(self, method, path, data, token=None, **kwargs):
headers = HEADERS.copy()
if token:
headers['Authorization'] = token
if 'headers' in kwargs:
more_headers = kwargs.pop('headers')
headers.update(more_headers)
# Content-Type is only sent on POST, PUT, PATCH
if method not in CONTENT_TYPE_METHODS:
headers.pop('Content-Type')
self.api = api
result = self.simulate_request(
path=path,
method=method,
headers=headers,
body=json.dumps(data),
**kwargs)
try:
return json.loads(result[0].decode('utf-8'))
except IndexError:
return None
def simulate_get(self, path, token=None, **kwargs):
return self._simulate_request(
method='GET',
path=path,
data=None,
token=token,
**kwargs)
def simulate_post(self, path, data, token=None, **kwargs):
return self._simulate_request(
method='POST',
path=path,
data=data,
token=token,
**kwargs)
def simulate_put(self, path, data, token=None, **kwargs):
return self._simulate_request(
method='PUT',
path=path,
data=data,
token=token,
**kwargs)
def simulate_patch(self, path, data, token=None, **kwargs):
return self._simulate_request(
method='PATCH',
path=path,
data=data,
token=token,
**kwargs)
def simulate_delete(self, path, token=None, **kwargs):
return self._simulate_request(
method='DELETE',
path=path,
data=None,
token=token,
**kwargs)
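# Illustrative subclass (a sketch only; the '/things' route and the shape of its
# response are assumptions about the application, not part of this module).
class ExampleThingsTestCase(APITestCase):
    def test_list_things(self):
        body = self.simulate_get('/things', token='example-token')
        # _simulate_request returns the decoded JSON body, or None when the
        # response body is empty, so either outcome is acceptable here.
        self.assertTrue(body is None or isinstance(body, (dict, list)))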
| mit |
sanjeevtripurari/hue | desktop/core/ext-py/PyYAML-3.09/tests/lib/test_structure.py | 60 | 6768 |
import yaml, canonical
import pprint
def _convert_structure(loader):
if loader.check_event(yaml.ScalarEvent):
event = loader.get_event()
if event.tag or event.anchor or event.value:
return True
else:
return None
elif loader.check_event(yaml.SequenceStartEvent):
loader.get_event()
sequence = []
while not loader.check_event(yaml.SequenceEndEvent):
sequence.append(_convert_structure(loader))
loader.get_event()
return sequence
elif loader.check_event(yaml.MappingStartEvent):
loader.get_event()
mapping = []
while not loader.check_event(yaml.MappingEndEvent):
key = _convert_structure(loader)
value = _convert_structure(loader)
mapping.append((key, value))
loader.get_event()
return mapping
elif loader.check_event(yaml.AliasEvent):
loader.get_event()
return '*'
else:
loader.get_event()
return '?'
def test_structure(data_filename, structure_filename, verbose=False):
nodes1 = []
nodes2 = eval(open(structure_filename, 'rb').read())
try:
loader = yaml.Loader(open(data_filename, 'rb'))
while loader.check_event():
if loader.check_event(yaml.StreamStartEvent, yaml.StreamEndEvent,
yaml.DocumentStartEvent, yaml.DocumentEndEvent):
loader.get_event()
continue
nodes1.append(_convert_structure(loader))
if len(nodes1) == 1:
nodes1 = nodes1[0]
assert nodes1 == nodes2, (nodes1, nodes2)
finally:
if verbose:
print "NODES1:"
pprint.pprint(nodes1)
print "NODES2:"
pprint.pprint(nodes2)
test_structure.unittest = ['.data', '.structure']
def _compare_events(events1, events2, full=False):
assert len(events1) == len(events2), (len(events1), len(events2))
for event1, event2 in zip(events1, events2):
assert event1.__class__ == event2.__class__, (event1, event2)
if isinstance(event1, yaml.AliasEvent) and full:
assert event1.anchor == event2.anchor, (event1, event2)
if isinstance(event1, (yaml.ScalarEvent, yaml.CollectionStartEvent)):
if (event1.tag not in [None, u'!'] and event2.tag not in [None, u'!']) or full:
assert event1.tag == event2.tag, (event1, event2)
if isinstance(event1, yaml.ScalarEvent):
assert event1.value == event2.value, (event1, event2)
def test_parser(data_filename, canonical_filename, verbose=False):
events1 = None
events2 = None
try:
events1 = list(yaml.parse(open(data_filename, 'rb')))
events2 = list(yaml.canonical_parse(open(canonical_filename, 'rb')))
_compare_events(events1, events2)
finally:
if verbose:
print "EVENTS1:"
pprint.pprint(events1)
print "EVENTS2:"
pprint.pprint(events2)
test_parser.unittest = ['.data', '.canonical']
def test_parser_on_canonical(canonical_filename, verbose=False):
events1 = None
events2 = None
try:
events1 = list(yaml.parse(open(canonical_filename, 'rb')))
events2 = list(yaml.canonical_parse(open(canonical_filename, 'rb')))
_compare_events(events1, events2, full=True)
finally:
if verbose:
print "EVENTS1:"
pprint.pprint(events1)
print "EVENTS2:"
pprint.pprint(events2)
test_parser_on_canonical.unittest = ['.canonical']
def _compare_nodes(node1, node2):
assert node1.__class__ == node2.__class__, (node1, node2)
assert node1.tag == node2.tag, (node1, node2)
if isinstance(node1, yaml.ScalarNode):
assert node1.value == node2.value, (node1, node2)
else:
assert len(node1.value) == len(node2.value), (node1, node2)
for item1, item2 in zip(node1.value, node2.value):
if not isinstance(item1, tuple):
item1 = (item1,)
item2 = (item2,)
for subnode1, subnode2 in zip(item1, item2):
_compare_nodes(subnode1, subnode2)
def test_composer(data_filename, canonical_filename, verbose=False):
nodes1 = None
nodes2 = None
try:
nodes1 = list(yaml.compose_all(open(data_filename, 'rb')))
nodes2 = list(yaml.canonical_compose_all(open(canonical_filename, 'rb')))
assert len(nodes1) == len(nodes2), (len(nodes1), len(nodes2))
for node1, node2 in zip(nodes1, nodes2):
_compare_nodes(node1, node2)
finally:
if verbose:
print "NODES1:"
pprint.pprint(nodes1)
print "NODES2:"
pprint.pprint(nodes2)
test_composer.unittest = ['.data', '.canonical']
def _make_loader():
global MyLoader
class MyLoader(yaml.Loader):
def construct_sequence(self, node):
return tuple(yaml.Loader.construct_sequence(self, node))
def construct_mapping(self, node):
pairs = self.construct_pairs(node)
pairs.sort()
return pairs
def construct_undefined(self, node):
return self.construct_scalar(node)
MyLoader.add_constructor(u'tag:yaml.org,2002:map', MyLoader.construct_mapping)
MyLoader.add_constructor(None, MyLoader.construct_undefined)
def _make_canonical_loader():
global MyCanonicalLoader
class MyCanonicalLoader(yaml.CanonicalLoader):
def construct_sequence(self, node):
return tuple(yaml.CanonicalLoader.construct_sequence(self, node))
def construct_mapping(self, node):
pairs = self.construct_pairs(node)
pairs.sort()
return pairs
def construct_undefined(self, node):
return self.construct_scalar(node)
MyCanonicalLoader.add_constructor(u'tag:yaml.org,2002:map', MyCanonicalLoader.construct_mapping)
MyCanonicalLoader.add_constructor(None, MyCanonicalLoader.construct_undefined)
def test_constructor(data_filename, canonical_filename, verbose=False):
_make_loader()
_make_canonical_loader()
native1 = None
native2 = None
try:
native1 = list(yaml.load_all(open(data_filename, 'rb'), Loader=MyLoader))
native2 = list(yaml.load_all(open(canonical_filename, 'rb'), Loader=MyCanonicalLoader))
assert native1 == native2, (native1, native2)
finally:
if verbose:
print "NATIVE1:"
pprint.pprint(native1)
print "NATIVE2:"
pprint.pprint(native2)
test_constructor.unittest = ['.data', '.canonical']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
| apache-2.0 |
TangHao1987/intellij-community | python/lib/Lib/encodings/undefined.py | 860 | 1299 | """ Python 'undefined' Codec
This codec will always raise a ValueError exception when being
used. It is intended for use by the site.py file to switch off
automatic string to Unicode coercion.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
def decode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
raise UnicodeError("undefined encoding")
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
raise UnicodeError("undefined encoding")
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='undefined',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
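# Usage sketch (added for illustration): exercising the Codec directly, apart
# from however the encodings package registers it.
if __name__ == '__main__':
    try:
        Codec().encode(u'anything')
    except UnicodeError as exc:
        print(exc)  # "undefined encoding"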
| apache-2.0 |
sometallgit/AutoUploader | Python27/Lib/email/charset.py | 75 | 16044 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec',
]
import codecs
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Defaults
CHARSETS = {
# input header enc body enc output conv
'iso-8859-1': (QP, QP, None),
'iso-8859-2': (QP, QP, None),
'iso-8859-3': (QP, QP, None),
'iso-8859-4': (QP, QP, None),
# iso-8859-5 is Cyrillic, and not especially used
# iso-8859-6 is Arabic, also not particularly used
# iso-8859-7 is Greek, QP will not make it readable
# iso-8859-8 is Hebrew, QP will not make it readable
'iso-8859-9': (QP, QP, None),
'iso-8859-10': (QP, QP, None),
# iso-8859-11 is Thai, QP will not make it readable
'iso-8859-13': (QP, QP, None),
'iso-8859-14': (QP, QP, None),
'iso-8859-15': (QP, QP, None),
'iso-8859-16': (QP, QP, None),
    'windows-1252': (QP, QP, None),
'viscii': (QP, QP, None),
'us-ascii': (None, None, None),
'big5': (BASE64, BASE64, None),
'gb2312': (BASE64, BASE64, None),
'euc-jp': (BASE64, None, 'iso-2022-jp'),
'shift_jis': (BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (BASE64, None, None),
'koi8-r': (BASE64, BASE64, None),
'utf-8': (SHORTEST, BASE64, 'utf-8'),
# We're making this one up to represent raw unencoded 8-bit
'8bit': (None, BASE64, 'utf-8'),
}
# Aliases for other commonly-used names for character sets. Map
# them to the real ones used in email.
ALIASES = {
'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
    'latin_10': 'iso-8859-16',
    'latin-10': 'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii',
}
# Map charsets to their Unicode codec strings.
CODEC_MAP = {
'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
# Let that stuff pass through without conversion to/from Unicode.
'us-ascii': None,
}
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc is either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
module expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
# unicode because its .lower() is locale insensitive. If the argument
# is already a unicode, we leave it at that, but ensure that the
# charset is ASCII, as the standard (RFC XXX) requires.
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower().encode('ascii')
# Set the input charset after filtering through the aliases and/or codecs
if not (input_charset in ALIASES or input_charset in CHARSETS):
try:
input_charset = codecs.lookup(input_charset).name
except LookupError:
pass
self.input_charset = ALIASES.get(input_charset, input_charset)
# We can try to guess which encoding and conversion to use by the
# charset_map dictionary. Try that first, but let the user override
# it.
henc, benc, conv = CHARSETS.get(self.input_charset,
(SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
# Set the attributes, allowing the arguments to override the default.
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
# Now set the codecs. If one isn't defined for input_charset,
# guess and try a Unicode codec with the same name as input_codec.
self.input_codec = CODEC_MAP.get(self.input_charset,
self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset,
self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
assert self.body_encoding != SHORTEST
if self.body_encoding == QP:
return 'quoted-printable'
elif self.body_encoding == BASE64:
return 'base64'
else:
return encode_7or8bit
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec != self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
if isinstance(s, unicode) or self.input_codec is None:
return s
try:
return unicode(s, self.input_codec, 'replace')
except LookupError:
# Input codec not installed on system, so return the original
# string unchanged.
return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
if to_output:
codec = self.output_codec
else:
codec = self.input_codec
if not isinstance(ustr, unicode) or codec is None:
return ustr
try:
return ustr.encode(codec, 'replace')
except LookupError:
# Output codec not installed
return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
# The len(s) of a 7bit encoding is len(s)
if self.header_encoding == BASE64:
return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == QP:
return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
return min(lenb64, lenqp) + len(cset) + MISC_LEN
else:
return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.header_encoding == BASE64:
return email.base64mime.header_encode(s, cset)
elif self.header_encoding == QP:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
if lenb64 < lenqp:
return email.base64mime.header_encode(s, cset)
else:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
else:
return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (module conversions)
if self.body_encoding is BASE64:
return email.base64mime.body_encode(s)
elif self.body_encoding is QP:
return email.quoprimime.body_encode(s)
else:
return s
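# Usage sketch (added for illustration, not part of the module): a Charset
# selects the transfer encodings to use for its input character set.
if __name__ == '__main__':
    cs = Charset('utf-8')
    print(cs.get_body_encoding())     # 'base64'
    print(cs.header_encode('hello'))  # shortest of quoted-printable/base64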
| mit |
n0trax/ansible | lib/ansible/utils/module_docs_fragments/cloudstack.py | 175 | 2631 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard cloudstack documentation fragment
DOCUMENTATION = '''
options:
api_key:
description:
- API key of the CloudStack API.
required: false
default: null
api_secret:
description:
- Secret key of the CloudStack API.
required: false
default: null
api_url:
description:
- URL of the CloudStack API e.g. https://cloud.example.com/client/api.
required: false
default: null
api_http_method:
description:
- HTTP method used.
required: false
default: 'get'
choices: [ 'get', 'post' ]
api_timeout:
description:
- HTTP timeout.
required: false
default: 10
api_region:
description:
- Name of the ini section in the C(cloustack.ini) file.
required: false
default: cloudstack
requirements:
- "python >= 2.6"
- "cs >= 0.6.10"
notes:
- Ansible uses the C(cs) library's configuration method if credentials are not
provided by the arguments C(api_url), C(api_key), C(api_secret).
Configuration is read from several locations, in the following order.
- The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and
C(CLOUDSTACK_METHOD). C(CLOUDSTACK_TIMEOUT) environment variables.
- A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file,
- A C(cloudstack.ini) file in the current working directory.
- A C(.cloudstack.ini) file in the users home directory.
Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini).
Use the argument C(api_region) to select the section name, default section is C(cloudstack).
See https://github.com/exoscale/cs for more information.
- A detailed guide about cloudstack modules can be found on http://docs.ansible.com/ansible/guide_cloudstack.html
- This module supports check mode.
'''
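# Illustrative cloudstack.ini matching the notes above (endpoint and credentials
# are hypothetical; select a section with the api_region argument):
#
#   [cloudstack]
#   endpoint = https://cloud.example.com/client/api
#   key = api key
#   secret = api secret
#   timeout = 30
#
#   [otherregion]
#   endpoint = https://cloud.example.org/client/api
#   key = api key
#   secret = api secret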
| gpl-3.0 |
pdellaert/ansible | test/units/modules/network/fortios/test_fortios_system_proxy_arp.py | 21 | 8167 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_proxy_arp
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_proxy_arp.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_system_proxy_arp_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
expected_data = {
'end-ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
}
set_method_mock.assert_called_with('system', 'proxy-arp', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_proxy_arp_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
expected_data = {
'end-ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
}
set_method_mock.assert_called_with('system', 'proxy-arp', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_proxy_arp_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'proxy-arp', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_proxy_arp_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'proxy-arp', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_proxy_arp_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
expected_data = {
'end-ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
}
set_method_mock.assert_called_with('system', 'proxy-arp', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_proxy_arp_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_proxy_arp': {
'random_attribute_not_valid': 'tag',
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
expected_data = {
'end-ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
}
set_method_mock.assert_called_with('system', 'proxy-arp', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
rmotr/whooshercises | tests.py | 1 | 6306 | import toml
import unittest
from document_utils import (
    build_document_from_toml_doc, build_document_from_module
)
from exceptions import InvalidTagsException, InvalidExerciseModule
from whoosh_exercises import build_query
from utils import format_result
COMPLETE_DESCRIPTION = """
Write a method that will take a string as input, and return a new
string with the same letters in reverse order.
Don't use String's reverse method; that would be too simple.
""".strip()
COMPLETE_DOCUMENTATION = '''
title = "Reverse a string"
level = "intro"
dificulty = "easy"
tags = ["strings", "iterators", "for-loops", "loops"]
description = """
{}
"""
'''.format(COMPLETE_DESCRIPTION)
INVALID_TAGS_DOCUMENTATION = """
title = "Reverse a string"
level = "intro"
dificulty = "easy"
tags = ["strings", "this, is, invalid", "for-loops", "loops"]
"""
class TomlDocumentsTestCase(unittest.TestCase):
def test_tags_are_built_correctly(self):
doc = build_document_from_toml_doc(COMPLETE_DOCUMENTATION)
self.assertEqual(doc['tags'], "strings,iterators,for-loops,loops")
self.assertEqual(doc['original']['tags'], ["strings", "iterators", "for-loops", "loops"])
def test_title_is_correct(self):
doc = build_document_from_toml_doc(COMPLETE_DOCUMENTATION)
self.assertEqual(doc['title'], "Reverse a string")
self.assertEqual(doc['original']['title'], "Reverse a string")
def test_description_is_correct(self):
doc = build_document_from_toml_doc(COMPLETE_DOCUMENTATION)
self.assertEqual(doc['description'].strip(),
COMPLETE_DESCRIPTION.strip())
self.assertEqual(doc['original']['description'].strip(),
COMPLETE_DESCRIPTION.strip())
def test_level_is_correct(self):
doc = build_document_from_toml_doc(COMPLETE_DOCUMENTATION)
self.assertEqual(doc['level'], 'intro')
self.assertEqual(doc['original']['level'].strip(), 'intro')
def test_tags_cant_contain_commas(self):
with self.assertRaises(InvalidTagsException):
doc = build_document_from_toml_doc(INVALID_TAGS_DOCUMENTATION)
def test_document_exception_contains_document_title(self):
with self.assertRaises(InvalidTagsException) as e:
doc = build_document_from_toml_doc(INVALID_TAGS_DOCUMENTATION)
self.assertEqual(e.exception.document_title, "Reverse a string")
# This should be mocked
from assignments import reverse
class TomlFromModulesTestCase(unittest.TestCase):
def test_package_is_correct(self):
doc = build_document_from_module(reverse)
self.assertEqual(doc['package'], 'assignments.reverse')
    def test_path_is_correct(self):
doc = build_document_from_module(reverse)
self.assertTrue('assignments/reverse.py' in doc['path'])
def test_tags_are_built_correctly(self):
doc = build_document_from_module(reverse)
self.assertEqual(doc['tags'], "strings,iterators,for-loops,loops")
def test_title_is_correct(self):
doc = build_document_from_module(reverse)
self.assertEqual(doc['title'], "Reverse a string")
def test_level_is_correct(self):
doc = build_document_from_module(reverse)
self.assertEqual(doc['level'], 'intro')
def test_raise_if_module_is_invalid(self):
with self.assertRaises(InvalidExerciseModule):
doc = build_document_from_module("invalid")
from whoosh.query import *
class BuildQueryTestCase(unittest.TestCase):
def test_build_query_only_with_title_query(self):
query = build_query(title='test-title')
self.assertEqual(query, And([Term('title', 'test-title')]))
def test_build_query_only_with_one_tag(self):
query = build_query(tags=['test-tag'])
self.assertEqual(query, And([Term('tags', 'test-tag')]))
def test_build_query_only_with_multiple_tag(self):
query = build_query(tags=['test-tag-1', 'test-tag-2', 'test-tag-3'])
self.assertEqual(query, And([
Term('tags', 'test-tag-1'),
Term('tags', 'test-tag-2'),
Term('tags', 'test-tag-3'),
]))
def test_build_query_only_with_level(self):
query = build_query(level='test-level')
self.assertEqual(query, And([Term('level', 'test-level')]))
def test_build_query_with_title_and_level(self):
query = build_query(title='test-title', level='test-level')
self.assertEqual(query, And([
Term('title', 'test-title'),
Term('level', 'test-level'),
]))
def test_build_query_with_title_and_one_tag(self):
query = build_query(title='test-title', tags=['test-tag'])
self.assertEqual(query, And([
Term('title', 'test-title'),
Term('tags', 'test-tag'),
]))
    def test_build_query_with_title_and_multiple_tags(self):
query = build_query(
title='test-title',
tags=['test-tag-1', 'test-tag-2', 'test-tag-3'])
self.assertEqual(query, And([
Term('title', 'test-title'),
Term('tags', 'test-tag-1'),
Term('tags', 'test-tag-2'),
Term('tags', 'test-tag-3'),
]))
class FormatResultTestCase(unittest.TestCase):
def setUp(self):
self.result = {
'level': 'intro',
'package': 'assignments.factorial',
'path': '/home/santiago/code/python/rmotr/whooshercises/assignments/factorial.py',
'tags': 'factorial,iterators,for-loops,loops,math',
'title': 'Factorial of a number'
}
def test_format_result_case_1(self):
self.assertEqual(
format_result(self.result, "(%P) - %t"),
"(/home/santiago/code/python/rmotr/whooshercises/assignments/factorial.py) - Factorial of a number"
)
def test_format_result_case_2(self):
self.assertEqual(
format_result(self.result, "%t - %p"),
"Factorial of a number - assignments.factorial"
)
    def test_format_result_case_3(self):
self.assertEqual(
format_result(self.result, "Title: %t - Package: %p"),
"Title: Factorial of a number - Package: assignments.factorial"
)
if __name__ == '__main__':
unittest.main()
| mit |
ratoaq2/deluge | deluge/core/filtermanager.py | 1 | 9416 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from __future__ import unicode_literals
import logging
from functools import cmp_to_key
import deluge.component as component
from deluge.common import PY2, TORRENT_STATE
log = logging.getLogger(__name__)
STATE_SORT = ['All', 'Active'] + TORRENT_STATE
# Special purpose filters:
def filter_keywords(torrent_ids, values):
# Cleanup
keywords = ','.join([v.lower() for v in values])
keywords = keywords.split(',')
for keyword in keywords:
torrent_ids = filter_one_keyword(torrent_ids, keyword)
return torrent_ids
def filter_one_keyword(torrent_ids, keyword):
"""
search torrent on keyword.
searches title,state,tracker-status,tracker,files
"""
all_torrents = component.get('TorrentManager').torrents
for torrent_id in torrent_ids:
torrent = all_torrents[torrent_id]
if keyword in torrent.filename.lower():
yield torrent_id
elif keyword in torrent.state.lower():
yield torrent_id
elif torrent.trackers and keyword in torrent.trackers[0]['url']:
yield torrent_id
elif keyword in torrent_id:
yield torrent_id
# Want to find broken torrents (search on "error", or "unregistered")
elif keyword in torrent.tracker_status.lower():
yield torrent_id
else:
for t_file in torrent.get_files():
if keyword in t_file['path'].lower():
yield torrent_id
break
def filter_by_name(torrent_ids, search_string):
all_torrents = component.get('TorrentManager').torrents
try:
search_string, match_case = search_string[0].split('::match')
except ValueError:
search_string = search_string[0]
match_case = False
if match_case is False:
search_string = search_string.lower()
for torrent_id in torrent_ids:
        torrent_name = all_torrents[torrent_id].get_name()
        if match_case is False:
            torrent_name = torrent_name.lower()
if search_string in torrent_name:
yield torrent_id
def tracker_error_filter(torrent_ids, values):
filtered_torrent_ids = []
tm = component.get('TorrentManager')
# If this is a tracker_host, then we need to filter on it
if values[0] != 'Error':
for torrent_id in torrent_ids:
if values[0] == tm[torrent_id].get_status(['tracker_host'])['tracker_host']:
filtered_torrent_ids.append(torrent_id)
return filtered_torrent_ids
# Check torrent's tracker_status for 'Error:' and return those torrent_ids
for torrent_id in torrent_ids:
if 'Error:' in tm[torrent_id].get_status(['tracker_status'])['tracker_status']:
filtered_torrent_ids.append(torrent_id)
return filtered_torrent_ids
class FilterManager(component.Component):
"""FilterManager
"""
def __init__(self, core):
component.Component.__init__(self, 'FilterManager')
log.debug('FilterManager init..')
self.core = core
self.torrents = core.torrentmanager
self.registered_filters = {}
self.register_filter('keyword', filter_keywords)
self.register_filter('name', filter_by_name)
self.tree_fields = {}
self.register_tree_field('state', self._init_state_tree)
def _init_tracker_tree():
return {'Error': 0}
self.register_tree_field('tracker_host', _init_tracker_tree)
self.register_filter('tracker_host', tracker_error_filter)
def _init_users_tree():
return {'': 0}
self.register_tree_field('owner', _init_users_tree)
def filter_torrent_ids(self, filter_dict):
"""
returns a list of torrent_id's matching filter_dict.
core filter method
"""
if not filter_dict:
return self.torrents.get_torrent_list()
# Sanitize input: filter-value must be a list of strings
for key, value in filter_dict.items():
if isinstance(value, str if not PY2 else basestring):
filter_dict[key] = [value]
# Optimized filter for id
if 'id' in filter_dict:
torrent_ids = list(filter_dict['id'])
del filter_dict['id']
else:
torrent_ids = self.torrents.get_torrent_list()
# Return if there's nothing more to filter
if not filter_dict:
return torrent_ids
# Special purpose, state=Active.
if 'state' in filter_dict:
# We need to make sure this is a list for the logic below
filter_dict['state'] = list(filter_dict['state'])
if 'state' in filter_dict and 'Active' in filter_dict['state']:
filter_dict['state'].remove('Active')
if not filter_dict['state']:
del filter_dict['state']
torrent_ids = self.filter_state_active(torrent_ids)
if not filter_dict:
return torrent_ids
# Registered filters
        for field, values in list(filter_dict.items()):
if field in self.registered_filters:
# Filters out doubles
torrent_ids = list(set(self.registered_filters[field](torrent_ids, values)))
del filter_dict[field]
if not filter_dict:
return torrent_ids
torrent_keys, plugin_keys = self.torrents.separate_keys(list(filter_dict), torrent_ids)
# Leftover filter arguments, default filter on status fields.
for torrent_id in list(torrent_ids):
status = self.core.create_torrent_status(torrent_id, torrent_keys, plugin_keys)
for field, values in filter_dict.items():
if field in status and status[field] in values:
continue
elif torrent_id in torrent_ids:
torrent_ids.remove(torrent_id)
return torrent_ids
def get_filter_tree(self, show_zero_hits=True, hide_cat=None):
"""
returns {field: [(value,count)] }
for use in sidebar.
"""
torrent_ids = self.torrents.get_torrent_list()
tree_keys = list(self.tree_fields)
if hide_cat:
for cat in hide_cat:
tree_keys.remove(cat)
torrent_keys, plugin_keys = self.torrents.separate_keys(tree_keys, torrent_ids)
items = dict((field, self.tree_fields[field]()) for field in tree_keys)
for torrent_id in list(torrent_ids):
status = self.core.create_torrent_status(torrent_id, torrent_keys, plugin_keys) # status={key:value}
for field in tree_keys:
value = status[field]
items[field][value] = items[field].get(value, 0) + 1
if 'tracker_host' in items:
items['tracker_host']['All'] = len(torrent_ids)
items['tracker_host']['Error'] = len(tracker_error_filter(torrent_ids, ('Error',)))
if not show_zero_hits:
for cat in ['state', 'owner', 'tracker_host']:
if cat in tree_keys:
self._hide_state_items(items[cat])
# Return a dict of tuples:
sorted_items = {field: sorted(items[field].items()) for field in tree_keys}
if 'state' in tree_keys:
            sorted_items['state'].sort(key=cmp_to_key(self._sort_state_items))
return sorted_items
def _init_state_tree(self):
init_state = {}
init_state['All'] = len(self.torrents.get_torrent_list())
for state in TORRENT_STATE:
init_state[state] = 0
init_state['Active'] = len(self.filter_state_active(self.torrents.get_torrent_list()))
return init_state
def register_filter(self, filter_id, filter_func, filter_value=None):
self.registered_filters[filter_id] = filter_func
def deregister_filter(self, filter_id):
del self.registered_filters[filter_id]
def register_tree_field(self, field, init_func=lambda: {}):
self.tree_fields[field] = init_func
def deregister_tree_field(self, field):
if field in self.tree_fields:
del self.tree_fields[field]
def filter_state_active(self, torrent_ids):
for torrent_id in list(torrent_ids):
status = self.torrents[torrent_id].get_status(['download_payload_rate', 'upload_payload_rate'])
if status['download_payload_rate'] or status['upload_payload_rate']:
pass
else:
torrent_ids.remove(torrent_id)
return torrent_ids
    def _hide_state_items(self, state_items):
        """Remove zero-hit entries (other than 'All') when zero hits are hidden."""
        for (value, count) in list(state_items.items()):
            if value != 'All' and count == 0:
                del state_items[value]
def _sort_state_items(self, x, y):
if x[0] in STATE_SORT:
ix = STATE_SORT.index(x[0])
else:
ix = 99
if y[0] in STATE_SORT:
iy = STATE_SORT.index(y[0])
else:
iy = 99
return ix - iy
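# Usage sketch (illustrative only; it needs a running core with the
# TorrentManager component registered, so it is left as comments):
#
#   fm = component.get('FilterManager')
#   active_ids = fm.filter_torrent_ids({'state': 'Active', 'keyword': 'linux'})
#   tree = fm.get_filter_tree()
#   # tree resembles {'state': [('All', 12), ('Active', 3), ...],
#   #                 'tracker_host': [('All', 12), ('Error', 0), ...], ...}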
| gpl-3.0 |
mapennell/ansible | lib/ansible/plugins/action/debug.py | 14 | 1807 | # Copyright 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
class ActionModule(ActionBase):
''' Print statements during execution '''
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
if 'msg' in self._task.args:
if 'fail' in self._task.args and boolean(self._task.args['fail']):
result = dict(failed=True, msg=self._task.args['msg'])
else:
result = dict(msg=self._task.args['msg'])
# FIXME: move the LOOKUP_REGEX somewhere else
elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
results = self._templar.template(self._task.args['var'], convert_bare=True)
result = dict()
result[self._task.args['var']] = results
else:
result = dict(msg='here we are')
# force flag to make debug output module always verbose
result['verbose_always'] = True
return result
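# Illustrative playbook usage of this action (values are placeholders):
#
#   - debug: msg="Deploying to {{ inventory_hostname }}"
#   - debug: var=ansible_default_ipv4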
| gpl-3.0 |
laszlocsomor/tensorflow | tensorflow/contrib/lite/testing/generate_examples.py | 3 | 43693 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory> zipped
bazel run //tensorflow/contrib/lite/testing:generate_examples
third_party/tensorflow/contrib/lite/testing/generated_examples zipped
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import itertools
import os
import re
import sys
import tempfile
import traceback
import zipfile
import numpy as np
from six import StringIO
# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# pylint: disable=g-import-not-at-top
import tensorflow as tf
from google.protobuf import text_format
# TODO(aselle): switch to TensorFlow's resource_loader
from tensorflow.contrib.lite.testing import generate_examples_report as report_lib
from tensorflow.python.framework import graph_util as tf_graph_util
parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
parser.add_argument("output_path",
help="Directory where the outputs will be go.")
# TODO(ahentz): remove this flag
parser.add_argument("type", help="zipped")
parser.add_argument("--zip_to_output",
type=str,
help="Particular zip to output.",
required=False)
parser.add_argument("--toco",
type=str,
help="Path to toco tool.",
required=True)
parser.add_argument(
"--known_bugs_are_errors",
action="store_true",
help=("If a particular model is affected by a known bug,"
" count it as a toco error."))
parser.add_argument(
"--ignore_toco_errors",
action="store_true",
help="Raise an exception if any toco error is encountered.")
parser.add_argument(
"--save_graphdefs",
action="store_true",
help="Include intermediate graphdefs in the output zip files.")
RANDOM_SEED = 342
TEST_INPUT_DEPTH = 3
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
KNOWN_BUGS = {
# TOCO doesn't support scalars as input.
r"relu.*input_shape=\[\]": "67587484",
r"sigmoid.*input_shape=\[\]": "67645668",
# Concat doesn't work with a single input tensor
r"concat.*num_tensors=1": "67378344",
# Transposition in MatMul is not supported.
r"fully_connected.*transpose_.=True": "67586970",
# Softmax graphs are too complex.
r"softmax.*dim=0": "67749831",
r"softmax.*input_shape=\[1,3,4,3\]": "67749831",
# SpaceToDepth only supports float32.
r"space_to_depth.*(float16|int32|uint8|int64)": "68018134",
}
def toco_options(data_types,
input_arrays,
output_arrays,
shapes,
drop_control_dependency):
"""Create TOCO options to process a model.
Args:
data_types: input and inference types used by TOCO.
input_arrays: names of the input tensors
output_arrays: name of the output tensors
shapes: shapes of the input tensors
drop_control_dependency: whether to ignore control dependency nodes.
Returns:
the options in a string.
"""
shape_str = ":".join([",".join(str(y) for y in x) for x in shapes])
inference_type = "FLOAT"
# TODO(ahentz): if we get multi-input quantization to work we need this
# to change
if data_types[0] == "QUANTIZED_UINT8":
inference_type = "QUANTIZED_UINT8"
s = (" --input_types=%s" % ",".join(data_types) +
" --inference_type=%s" % inference_type +
" --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" +
" --input_arrays=%s" % ",".join(input_arrays) +
" --input_shapes=%s" % shape_str +
" --output_arrays=%s" % ",".join(output_arrays))
if drop_control_dependency:
s += " --drop_control_dependency"
return s
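# Hedged illustrative sketch (added for readability; not used by the generator):
# roughly what toco_options() returns for a hypothetical single float32 input
# named "input" with shape [1, 3, 4, 3] and a single output named "output".
# The result should resemble:
#   --input_types=FLOAT --inference_type=FLOAT --input_format=TENSORFLOW_GRAPHDEF
#   --output_format=TFLITE --input_arrays=input --input_shapes=1,3,4,3
#   --output_arrays=output
def _example_toco_options():  # illustrative only, never called
  return toco_options(
      data_types=["FLOAT"],
      input_arrays=["input"],
      output_arrays=["output"],
      shapes=[[1, 3, 4, 3]],
      drop_control_dependency=False)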
def write_toco_options(filename,
data_types,
input_arrays,
output_arrays,
shapes,
drop_control_dependency=False):
"""Create TOCO options to process a model.
Args:
filename: Filename to write the options to.
data_types: input and inference types used by TOCO.
input_arrays: names of the input tensors
output_arrays: names of the output tensors
shapes: shapes of the input tensors
drop_control_dependency: whether to ignore control dependency nodes.
"""
with open(filename, "w") as fp:
fp.write(
toco_options(
data_types=data_types,
input_arrays=input_arrays,
output_arrays=output_arrays,
shapes=shapes,
drop_control_dependency=drop_control_dependency))
def write_examples(fp, examples):
"""Given a list `examples`, write a text format representation.
  The file format is CSV-like with a simple repeated pattern. We would like
  to use proto here, but we can't yet due to interfacing with the Android
  team using this format.
Args:
fp: File-like object to write to.
    examples: Example dictionaries consisting of keys "inputs" and "outputs"
"""
def write_tensor(fp, x):
"""Write tensor in file format supported by TFLITE example."""
fp.write("dtype,%s\n" % x.dtype)
fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
# Output 9 digits after the point to ensure the precision is good enough.
values = ["{:.9f}".format(value) for value in list(x.flatten())]
fp.write("values," + ",".join(values) + "\n")
fp.write("test_cases,%d\n" % len(examples))
for example in examples:
fp.write("inputs,%d\n" % len(example["inputs"]))
for i in example["inputs"]:
write_tensor(fp, i)
fp.write("outputs,%d\n" % len(example["outputs"]))
for i in example["outputs"]:
write_tensor(fp, i)
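# Hedged illustrative sketch of the text that write_examples() produces; the
# 1x2 zero/one tensors below are invented purely to show the layout.
def _example_write_examples_output():  # illustrative only, never called
  buf = StringIO()
  example = {"inputs": [np.zeros((1, 2), dtype=np.float32)],
             "outputs": [np.ones((1, 2), dtype=np.float32)]}
  write_examples(buf, [example])
  # buf.getvalue() is expected to look roughly like:
  #   test_cases,1
  #   inputs,1
  #   dtype,float32
  #   shape,1,2
  #   values,0.000000000,0.000000000
  #   outputs,1
  #   dtype,float32
  #   shape,1,2
  #   values,1.000000000,1.000000000
  return buf.getvalue()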
def write_test_cases(fp, model_name, examples):
"""Given a dictionary of `examples`, write a text format representation.
The file format is protocol-buffer-like, even though we don't use proto due
to the needs of the Android team.
Args:
fp: File-like object to write to.
    model_name: Filename the model was written to (only its basename is used).
    examples: Example dictionaries consisting of keys "inputs" and "outputs"
"""
fp.write("load_model: %s\n" % os.path.basename(model_name))
for example in examples:
fp.write("reshape {\n")
for t in example["inputs"]:
fp.write(" input: \"" + ",".join(map(str, t.shape)) + "\"\n")
fp.write("}\n")
fp.write("invoke {\n")
for t in example["inputs"]:
values = ["{:.9f}".format(value) for value in list(t.flatten())]
fp.write(" input: \"" + ",".join(values) + "\"\n")
for t in example["outputs"]:
values = ["{:.9f}".format(value) for value in list(t.flatten())]
fp.write(" output: \"" + ",".join(values) + "\"\n")
fp.write("}\n")
_TF_TYPE_INFO = {
tf.float32: (np.float32, "FLOAT"),
tf.float16: (np.float16, "FLOAT"),
tf.int32: (np.int32, "INT32"),
tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
tf.int64: (np.int64, "INT64"),
}
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
"""Build tensor data spreading the range [min_value, max_value)."""
if dtype in _TF_TYPE_INFO:
dtype = _TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value-min_value)*np.random.random_sample(shape)+min_value
elif dtype in (tf.int32, tf.uint8, tf.int64):
value = np.random.random_integers(min_value, max_value, shape)
return value.astype(dtype)
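# Hedged usage sketch for create_tensor_data(); the shapes and bounds are
# arbitrary examples and the function below is never called by the generator.
def _example_create_tensor_data():  # illustrative only, never called
  floats = create_tensor_data(tf.float32, [2, 3], min_value=-1, max_value=1)
  ints = create_tensor_data(tf.int32, [4], min_value=0, max_value=9)
  return floats, ints  # a float32 array in [-1, 1) and an int32 array in [0, 9]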
def freeze_graph(session, outputs):
"""Freeze the current graph.
Args:
session: Tensorflow sessions containing the graph
outputs: List of output tensors
Returns:
The frozen graph_def.
"""
return tf_graph_util.convert_variables_to_constants(
session, session.graph.as_graph_def(), [x.op.name for x in outputs])
def make_control_dep_tests(zip_path):
"""Make a set of tests that use control dependencies."""
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)
with tf.control_dependencies([assert_op]):
out = tf.nn.conv2d(input_tensor, filter_value,
strides=(1, 1, 1, 1), padding="SAME")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs,
drop_control_dependency=True)
def toco_convert(graph_def_str, input_tensors, output_tensors,
drop_control_dependency=False):
"""Convert a model's graph def into a tflite model.
NOTE: this currently shells out to the toco binary, but we would like
  to convert to Python API tooling in the future.
Args:
graph_def_str: Graph def proto in serialized string format.
input_tensors: List of input tensor tuples `(name, shape, type)`
output_tensors: List of output tensors (names)
drop_control_dependency: whether to ignore control dependency nodes.
Returns:
output tflite model, log_txt from conversion
or None, log_txt if it did not convert properly.
"""
data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors]
opts = toco_options(
data_types=data_types,
input_arrays=[x[0] for x in input_tensors],
shapes=[x[1] for x in input_tensors],
output_arrays=output_tensors,
drop_control_dependency=drop_control_dependency)
with tempfile.NamedTemporaryFile() as graphdef_file, \
tempfile.NamedTemporaryFile() as output_file, \
tempfile.NamedTemporaryFile("w+") as stdout_file:
graphdef_file.write(graph_def_str)
graphdef_file.flush()
# TODO(aselle): Switch this to subprocess at some point.
cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" %
(bin_path, graphdef_file.name, output_file.name, opts,
stdout_file.name))
exit_code = os.system(cmd)
log = (
cmd + "exited with code %d" % exit_code + "\n------------------\n" +
stdout_file.read())
return (None if exit_code != 0 else output_file.read()), log
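# Hedged illustrative sketch of how toco_convert() is driven by build_example()
# below: inputs are (name, shape, dtype) tuples and outputs are plain tensor
# names. `sess`, `input_tensor` and `output_tensor` are placeholders for
# whatever graph the caller built; nothing runs unless a toco binary is set.
def _example_toco_convert(sess, input_tensor, output_tensor):  # illustrative only
  return toco_convert(
      sess.graph_def.SerializeToString(),
      [(input_tensor.name.split(":")[0], input_tensor.get_shape(),
        input_tensor.dtype)],
      [output_tensor.name.split(":")[0]])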
def make_zip_of_tests(zip_path,
test_parameters,
make_graph,
make_test_inputs,
drop_control_dependency=False):
"""Helper to make a zip file of a bunch of TensorFlow models.
  This does a cartesian product of the dictionary of test_parameters and
  calls make_graph() for each item in the cartesian product set.
If the graph is built successfully, then make_test_inputs() is called to
build expected input/output value pairs. The model is then converted to tflite
with toco, and the examples are serialized with the tflite model into a zip
  file (several files per item in the cartesian product set).
Args:
zip_path: Path of zip file to write
test_parameters: Dictionary mapping to lists for each parameter.
e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
make_graph: function that takes current parameters and returns tuple
`[input1, input2, ...], [output1, output2, ...]`
make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
`output_tensors` and returns tuple `(input_values, output_values)`.
drop_control_dependency: whether to ignore control dependency nodes.
Raises:
RuntimeError: if there are toco errors that can't be ignored.
"""
# TODO(aselle): Make this allow multiple inputs outputs.
archive = zipfile.PyZipFile(zip_path, "w")
zip_manifest = []
convert_report = []
toco_errors = 0
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
label = zip_path.replace(".zip", "") + (",".join(
"%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
if label[0] == "/":
label = label[1:]
param_dict = dict(zip(keys, curr))
def build_example(label, param_dict_real):
"""Build the model with parameter values set in param_dict_real.
Args:
label: Label of the model (i.e. the filename in the zip).
param_dict_real: Parameter dictionary (arguments to the factories
make_graph and make_test_inputs)
Returns:
(tflite_model_binary, report) where tflite_model_binary is the
serialized flatbuffer as a string and report is a dictionary with
keys `toco_log` (log of toco conversion), `tf_log` (log of tf
conversion), `toco` (a string of success status of the conversion),
`tf` (a string success status of the conversion).
"""
np.random.seed(RANDOM_SEED)
report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}
# Build graph
report["tf_log"] = ""
report["toco_log"] = ""
tf.reset_default_graph()
with tf.device("/cpu:0"):
try:
inputs, outputs = make_graph(param_dict_real)
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
sess = tf.Session()
try:
baseline_inputs, baseline_outputs = (make_test_inputs(
param_dict_real, sess, inputs, outputs))
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
report["toco"] = report_lib.FAILED
report["tf"] = report_lib.SUCCESS
# Convert graph to toco
tflite_model_binary, toco_log = toco_convert(
sess.graph_def.SerializeToString(),
[(input_tensor.name.split(":")[0], input_tensor.get_shape(),
input_tensor.dtype) for input_tensor in inputs],
[out.name.split(":")[0]
for out in outputs], drop_control_dependency)
report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None
else report_lib.FAILED)
report["toco_log"] = toco_log
if FLAGS.save_graphdefs:
archive.writestr(label + ".pb",
text_format.MessageToString(sess.graph_def),
zipfile.ZIP_DEFLATED)
if tflite_model_binary:
archive.writestr(label + ".bin", tflite_model_binary,
zipfile.ZIP_DEFLATED)
example = {"inputs": baseline_inputs, "outputs": baseline_outputs}
example_fp = StringIO()
write_examples(example_fp, [example])
archive.writestr(label + ".inputs",
example_fp.getvalue(), zipfile.ZIP_DEFLATED)
example_fp2 = StringIO()
write_test_cases(example_fp2, label + ".bin", [example])
archive.writestr(label + "_tests.txt",
example_fp2.getvalue(), zipfile.ZIP_DEFLATED)
zip_manifest.append(label + "\n")
return tflite_model_binary, report
_, report = build_example(label, param_dict)
if report["toco"] == report_lib.FAILED:
ignore_error = False
if not FLAGS.known_bugs_are_errors:
for pattern, bug_number in KNOWN_BUGS.items():
if re.search(pattern, label):
print("Ignored TOCO error due to bug %s" % bug_number)
ignore_error = True
if not ignore_error:
toco_errors += 1
print("-----------------\ntoco error!\n%s\n-----------------\n" %
report["toco_log"])
convert_report.append((param_dict, report))
report_io = StringIO()
report_lib.make_report_table(report_io, zip_path, convert_report)
archive.writestr("report.html", report_io.getvalue())
archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED)
# Log statistics of what succeeded
total_conversions = len(convert_report)
tf_success = sum(1 for x in convert_report
if x[1]["tf"] == report_lib.SUCCESS)
toco_success = sum(1 for x in convert_report
if x[1]["toco"] == report_lib.SUCCESS)
percent = 0
if tf_success > 0:
percent = float(toco_success) / float(tf_success) * 100.
tf.logging.info(("Archive %s Considered %d graphs, %d TF evaluated graphs "
" and %d TOCO converted graphs (%.1f%%"), zip_path,
total_conversions, tf_success, toco_success, percent)
if not FLAGS.ignore_toco_errors and toco_errors > 0:
raise RuntimeError(
"Found %d errors while generating toco models" % toco_errors)
def make_pool_tests(pool_op_in):
"""Make a set of tests to do average pooling.
Args:
pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool`.
Returns:
A function representing the true generator (after curried pool_op_in).
"""
pool_op = pool_op_in
def f(zip_path):
"""Actual function that generates examples.
Args:
zip_path: path to write zip to.
"""
    # Choose a set of parameters
test_parameters = [{
"ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
"strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
# TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]).
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = pool_op(
input_tensor,
ksize=parameters["ksize"],
strides=parameters["strides"],
data_format=parameters["data_format"],
padding=parameters["padding"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
return f
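# Hedged usage sketch: make_pool_tests() curries the pooling op so that the
# dispatch table in main() below can map a zip name to a generator, e.g.
# make_pool_tests(tf.nn.avg_pool). The returned callable is later invoked with
# the output zip path; the path mentioned below is a placeholder.
def _example_pool_dispatch():  # illustrative only, never called
  generate_avg_pool_zip = make_pool_tests(tf.nn.avg_pool)
  # Invoking it would build the graphs and write the archive, e.g.:
  #   generate_avg_pool_zip("/tmp/avg_pool.zip")
  return generate_avg_pool_zip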
def make_relu_tests(zip_path):
"""Make a set of tests to do relu."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.relu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_relu1_tests(zip_path):
"""Make a set of tests to do relu1."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
# Note that the following is not supported:
# out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))
out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_relu6_tests(zip_path):
"""Make a set of tests to do relu6."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.nn.relu6(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
# This function tests various TensorFlow functions that generate Const ops,
# including `tf.ones`, `tf.zeros` and random functions.
def make_constant_tests(zip_path):
"""Make a set of tests to do constant ops."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],
}]
def build_graph(parameters):
# Since Toco & Tflite can't have a single constant op in the entire graph,
# this test adds a zero tesnor with a constant op tensor.
input1 = tf.placeholder(dtype=parameters["dtype"], name="input1",
shape=parameters["input_shape"])
out = tf.ones(parameters["input_shape"], dtype=parameters["dtype"]) + input1
return [input1], [out]
def build_inputs(parameters, sess, inputs, outputs):
input1 = np.zeros(parameters["input_shape"],
dtype=_TF_TYPE_INFO[parameters["dtype"]][0])
return [input1], sess.run(outputs, feed_dict={inputs[0]: input1})
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_add_tests(zip_path):
"""Make a set of tests to do add with and without broadcast."""
# These parameters are split because we don't support broadcasting.
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
}, {
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
}, {
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
}]
def build_graph(parameters):
input1 = tf.placeholder(dtype=parameters["dtype"], name="input1",
shape=parameters["input_shape_1"])
input2 = tf.placeholder(dtype=parameters["dtype"], name="input2",
shape=parameters["input_shape_2"])
out = tf.add(input1, input2)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input1 = create_tensor_data(parameters["dtype"],
parameters["input_shape_1"])
input2 = create_tensor_data(parameters["dtype"],
parameters["input_shape_2"])
return [input1, input2], sess.run(
outputs, feed_dict={
inputs[0]: input1,
inputs[1]: input2
})
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_mul_tests(zip_path):
"""Make a set of tests to do mul with and without broadcast."""
# These parameters are split because we don't support broadcasting.
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
}, {
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
}, {
"dtype": [tf.float32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
}]
def build_graph(parameters):
input1 = tf.placeholder(dtype=parameters["dtype"], name="input1",
shape=parameters["input_shape_1"])
input2 = tf.placeholder(dtype=parameters["dtype"], name="input2",
shape=parameters["input_shape_2"])
out = tf.multiply(input1, input2)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input1 = create_tensor_data(parameters["dtype"],
parameters["input_shape_1"])
input2 = create_tensor_data(parameters["dtype"],
parameters["input_shape_2"])
return [input1, input2], sess.run(
outputs, feed_dict={inputs[0]: input1,
inputs[1]: input2})
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_global_batch_norm_tests(zip_path):
"""Make a set of tests to do batch_norm_with_global_normalization."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
"epsilon": [0.1, 0.0001],
"scale_after": [True, False],
}]
def build_graph(parameters):
"""Build the global batch norm testing graph."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
x_norm = tf.nn.batch_norm_with_global_normalization(
x, mean, variance, scale, offset,
parameters["epsilon"], parameters["scale_after"])
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_fused_batch_norm_tests(zip_path):
"""Make a set of tests to do fused_batch_norm."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2]],
"epsilon": [0.001, 0.1],
}]
def build_graph(parameters):
"""Build the testing graph for fused batch normalization."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
[x_norm, _, _] = tf.nn.fused_batch_norm(
x, scale, offset, mean, variance,
parameters["epsilon"], data_format="NHWC", is_training=False)
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_conv_tests(zip_path):
"""Make a set of tests to do convolution."""
test_parameters = [{
"input_shape": [[1, 3, 4, 3]],
"filter_shape": [[1, 1, 3, 2]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
}, {
"input_shape": [[2, 14, 14, 2]],
"filter_shape": [[6, 6, 2, 2]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_values = create_tensor_data(np.float32, parameters["filter_shape"])
out = tf.nn.conv2d(input_tensor, filter_values,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(np.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_depthwiseconv_tests(zip_path):
"""Make a set of tests to do convolution."""
# Tensorflow only supports equal strides
test_parameters = [{
"input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"channel_multiplier": [1, 2],
"rate": [[1, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
}, {
"input_shape": [[1, 3, 4, 3]],
"filter_size": [[1, 1]],
"strides": [[1, 1, 2, 1]], # TF needs [1, x, x, 1]
"channel_multiplier": [2],
"rate": [[2, 2]], # Only [1, 1] is supported
"padding": ["SAME"],
"data_format": ["NHWC"],
}]
def build_graph(parameters):
"""Build a depthwise conv graph given `parameters`."""
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]]
filter_values = create_tensor_data(np.float32, filter_shape)
out = tf.nn.depthwise_conv2d(
input_tensor, filter_values,
strides=parameters["strides"],
rate=parameters["rate"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(np.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_concatenation_tests(zip_path):
"""Make a set of tests to do concatenatinon."""
test_parameters = [{
"base_shape": [[1, 3, 4, 3], [3, 4]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3],
}]
def get_shape(parameters, delta):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < len(shape):
shape[axis] += delta
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(dtype=tf.float32, name=("input%d" % n),
shape=get_shape(parameters, n))
all_tensors.append(input_tensor)
out = tf.concat(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for n in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(np.float32,
get_shape(parameters, n))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_fully_connected_tests(zip_path):
"""Make a set of tests to do fully_connected."""
test_parameters = [{
"shape1": [[3, 3]],
"shape2": [[3, 3]],
"transpose_a": [True, False],
"transpose_b": [True, False],
}, {
"shape1": [[4, 4], [1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
}, {
"shape1": [[40, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
}]
def build_graph(parameters):
input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1",
shape=parameters["shape1"])
input_tensor2 = create_tensor_data(np.float32, parameters["shape2"])
out = tf.matmul(input_tensor1, input_tensor2,
transpose_a=parameters["transpose_a"],
transpose_b=parameters["transpose_b"])
return [input_tensor1], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values1 = create_tensor_data(np.float32, shape=parameters["shape1"])
return [input_values1], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values1])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_l2norm_tests(zip_path):
"""Make a set of tests to do l2norm."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
"dim": [0, 1, 2, 3, [2, 3], -2],
"epsilon": [None, 1e-12, 1e-3],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
if parameters["epsilon"]:
out = tf.nn.l2_normalize(
input_tensor, parameters["dim"], epsilon=parameters["epsilon"])
else:
out = tf.nn.l2_normalize(input_tensor, parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_local_response_norm_tests(zip_path):
"""Make a set of tests to do local_response_norm."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
"depth_radius": [None, 0, 1, 3, 4, 5],
"bias": [None, 0.1, 0.3, -0.1],
"alpha": [None, 1, 2, -3],
"beta": [None, 0.5, 0.25, 2],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.local_response_normalization(
input_tensor, depth_radius=parameters["depth_radius"],
bias=parameters["bias"], alpha=parameters["alpha"],
beta=parameters["beta"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_reshape_tests(zip_path):
"""Make a set of tests to do reshape."""
  # All shapes below are suitable for tensors with 420 elements.
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],
"output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.reshape(input_tensor, shape=parameters["output_shape"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_resize_bilinear_tests(zip_path):
"""Make a set of tests to do resize_bilinear."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [None, True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.image.resize_bilinear(input_tensor, size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_sigmoid_tests(zip_path):
"""Make a set of tests to do sigmoid."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.sigmoid(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_softmax_tests(zip_path):
"""Make a set of tests to do softmax."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [2, 3]],
"dim": [-1, 0],
}, {
"dtype": [tf.float32],
"input_shape": [[4, 7]],
"dim": [-1, 1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_space_to_depth_tests(zip_path):
"""Make a set of tests to do space_to_depth."""
test_parameters = [{
"dtype": [tf.float32, tf.float16, tf.int32, tf.uint8, tf.int64],
"input_shape": [[2, 12, 24, 1]],
"block_size": [2, 3, 4],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.space_to_depth(input_tensor, block_size=parameters["block_size"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
def make_l2_pool(input_tensor, ksize, strides, padding, data_format):
"""Given an input perform a sequence of TensorFlow ops to produce l2pool."""
return tf.sqrt(tf.nn.avg_pool(
tf.square(input_tensor), ksize=ksize, strides=strides,
padding=padding, data_format=data_format))
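# Hedged note on make_l2_pool(): it composes L2 pooling from existing ops as
# sqrt(avg_pool(x^2)), i.e. the root-mean-square of each pooling window. A
# hypothetical call mirroring the pool test parameters would look like:
def _example_l2_pool(input_tensor):  # illustrative only, never called
  return make_l2_pool(input_tensor, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                      padding="SAME", data_format="NHWC")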
# Toco binary path provided by the generate rule.
bin_path = None
def main(unused_args):
global bin_path
def mkdir_if_not_exist(x):
if not os.path.isdir(x):
os.mkdir(x)
if not os.path.isdir(x):
raise RuntimeError("Failed to create dir %r" % x)
if FLAGS.type == "zipped":
opstest_path = os.path.join(FLAGS.output_path)
mkdir_if_not_exist(opstest_path)
def _path(filename):
return os.path.join(opstest_path, filename)
dispatch = {
"control_dep.zip": make_control_dep_tests,
"add.zip": make_add_tests,
"conv.zip": make_conv_tests,
"constant.zip": make_constant_tests,
"depthwiseconv.zip": make_depthwiseconv_tests,
"concat.zip": make_concatenation_tests,
"fully_connected.zip": make_fully_connected_tests,
"global_batch_norm.zip": make_global_batch_norm_tests,
"fused_batch_norm.zip": make_fused_batch_norm_tests,
"l2norm.zip": make_l2norm_tests,
"local_response_norm.zip": make_local_response_norm_tests,
"mul.zip": make_mul_tests,
"relu.zip": make_relu_tests,
"relu1.zip": make_relu1_tests,
"relu6.zip": make_relu6_tests,
"l2_pool.zip": make_pool_tests(make_l2_pool),
"avg_pool.zip": make_pool_tests(tf.nn.avg_pool),
"max_pool.zip": make_pool_tests(tf.nn.max_pool),
"reshape.zip": make_reshape_tests,
"resize_bilinear.zip": make_resize_bilinear_tests,
"sigmoid.zip": make_sigmoid_tests,
"softmax.zip": make_softmax_tests,
"space_to_depth.zip": make_space_to_depth_tests,
}
out = FLAGS.zip_to_output
bin_path = FLAGS.toco
if out in dispatch:
dispatch[out](_path(out))
else:
raise RuntimeError("Invalid zip to output %r" % out)
else:
raise RuntimeError("Invalid argument for type of generation.")
if __name__ == "__main__":
FLAGS, unparsed = parser.parse_known_args()
if unparsed:
print("Usage: %s <path out> zipped <zip file to generate>")
else:
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
raphaottoni/pinterest-crawler | sumarizeActivity.py | 1 | 1121 | #!/usr/bin/python
import os
import re
import csv
import logging
#loggin setup
logging.basicConfig(filename="timeline.log", filemode="a", level=logging.INFO, format="[ %(asctime)s ] %(levelname)s : %(message)s")
saida = open("timelineUsersPinterest.txt","w")
for arq in os.listdir("./profiles"):
try:
logging.info("["+arq+"]: iniciando" )
for board in os.listdir("./profiles/"+arq+"/boards/"):
try:
        File = open("./profiles/"+arq+"/boards/"+board+"/attributes", "r")
#csvFile = csv.reader(File,delimiter=';')
#csvFile.next()
File.next()
for row in File:
total = row.count(";")
if (total > 4):
row = row.replace(";","",(total - 4))
category = row.split(";")[1]
#category = row[1]
photos = open('./profiles/'+arq+'/boards/'+board+'/timeline',"r")
csvPhotos = csv.reader(photos,delimiter=";")
for photo in csvPhotos:
saida.write(arq+";"+category+";"+photo[0]+";"+photo[1]+"\n")
      except Exception:
        logging.error("[" + arq + "]: does not have the following board attribute: " + board)
  except Exception:
    logging.error("[" + arq + "]: no longer exists")
| mit |
Hutspace/odekro | mzalendo/core/management/commands/core_output_mp_scorecard_csv.py | 1 | 2405 | from optparse import make_option
from pprint import pprint
import csv
import StringIO
import re
from django.core.management.base import BaseCommand
from django.conf import settings
from core import models
from scorecards.models import Entry
class Command(BaseCommand):
help = 'Output CSV of all MPs and their scorecard ratings'
# option_list = BaseCommand.option_list + (
# make_option(
# '--delete',
# action='store_true',
# dest='delete',
# default=False,
# help='Delete found duplicates'),
# )
def handle(self, **options):
"""Create a CSV line for each MP"""
# gather all the data before creating the CSV
scorecard_field_names_set = set()
politician_data = []
mps = models.Person.objects.all().is_politician()
for mp in mps:
data = {}
data['Name'] = mp.name.encode('utf-8')
try:
data['Constituency'] = mp.constituencies()[0].name.encode('utf-8')
except IndexError:
data['Constituency'] = u'N/A' # some mps don't have constituencies
            # we want all scorecards - the person model has an override on the 'scorecards'
# method that mixes in the constituency ones
for scorecard_entry in mp.scorecards():
if scorecard_entry.disabled: continue # don't want these - their rating is bogus
category_name = scorecard_entry.category.name
rating = scorecard_entry.score_as_word()
scorecard_field_names_set.add(category_name)
data[ category_name ] = rating
politician_data.append(data)
csv_output = StringIO.StringIO()
csv_fieldnames = [ 'Name', 'Constituency' ] + sorted( list(scorecard_field_names_set) )
writer = csv.DictWriter( csv_output, csv_fieldnames )
# Needs Python 2.7
# writer.writeheader()
# Silly dance for Python 2.6.6's csv.DictWriter which bizarrely does not have writeheader
fieldname_dict = {}
for key in csv_fieldnames:
fieldname_dict[key] = key
writer.writerow( fieldname_dict )
for data in politician_data:
writer.writerow( data )
print csv_output.getvalue()
| agpl-3.0 |
mancoast/CPythonPyc_test | cpython/240_test_logging.py | 8 | 16322 | #!/usr/bin/env python
#
# Copyright 2001-2004 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import select
import os, sys, string, struct, types, cPickle, cStringIO
import socket, threading, time
import logging, logging.handlers, logging.config
BANNER = "-- %-10s %-6s ---------------------------------------------------\n"
FINISH_UP = "Finish up, it's closing time. Messages should bear numbers 0 through 24."
#----------------------------------------------------------------------------
# Log receiver
#----------------------------------------------------------------------------
TIMEOUT = 10
from SocketServer import ThreadingTCPServer, StreamRequestHandler
class LogRecordStreamHandler(StreamRequestHandler):
"""
Handler for a streaming logging request. It basically logs the record
using whatever logging policy is configured locally.
"""
def handle(self):
"""
Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while 1:
try:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unPickle(chunk)
record = logging.makeLogRecord(obj)
self.handleLogRecord(record)
except:
raise
def unPickle(self, data):
return cPickle.loads(data)
def handleLogRecord(self, record):
logname = "logrecv.tcp." + record.name
#If the end-of-messages sentinel is seen, tell the server to terminate
if record.msg == FINISH_UP:
self.server.abort = 1
record.msg = record.msg + " (via " + logname + ")"
logger = logging.getLogger(logname)
logger.handle(record)
# The server sets socketDataProcessed when it's done.
socketDataProcessed = threading.Event()
class LogRecordSocketReceiver(ThreadingTCPServer):
"""
A simple-minded TCP socket-based logging receiver suitable for test
purposes.
"""
allow_reuse_address = 1
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = 0
self.timeout = 1
def serve_until_stopped(self):
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
abort = self.abort
#notify the main thread that we're about to exit
socketDataProcessed.set()
def process_request(self, request, client_address):
#import threading
t = threading.Thread(target = self.finish_request,
args = (request, client_address))
t.start()
def runTCP(tcpserver):
tcpserver.serve_until_stopped()
#----------------------------------------------------------------------------
# Test 0
#----------------------------------------------------------------------------
msgcount = 0
def nextmessage():
global msgcount
rv = "Message %d" % msgcount
msgcount = msgcount + 1
return rv
def test0():
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
#These should log
ERR.log(logging.FATAL, nextmessage())
ERR.error(nextmessage())
INF.log(logging.FATAL, nextmessage())
INF.error(nextmessage())
INF.warn(nextmessage())
INF.info(nextmessage())
INF_UNDEF.log(logging.FATAL, nextmessage())
INF_UNDEF.error(nextmessage())
INF_UNDEF.warn (nextmessage())
INF_UNDEF.info (nextmessage())
INF_ERR.log(logging.FATAL, nextmessage())
INF_ERR.error(nextmessage())
INF_ERR_UNDEF.log(logging.FATAL, nextmessage())
INF_ERR_UNDEF.error(nextmessage())
DEB.log(logging.FATAL, nextmessage())
DEB.error(nextmessage())
DEB.warn (nextmessage())
DEB.info (nextmessage())
DEB.debug(nextmessage())
UNDEF.log(logging.FATAL, nextmessage())
UNDEF.error(nextmessage())
UNDEF.warn (nextmessage())
UNDEF.info (nextmessage())
GRANDCHILD.log(logging.FATAL, nextmessage())
CHILD.log(logging.FATAL, nextmessage())
#These should not log
ERR.warn(nextmessage())
ERR.info(nextmessage())
ERR.debug(nextmessage())
INF.debug(nextmessage())
INF_UNDEF.debug(nextmessage())
INF_ERR.warn(nextmessage())
INF_ERR.info(nextmessage())
INF_ERR.debug(nextmessage())
INF_ERR_UNDEF.warn(nextmessage())
INF_ERR_UNDEF.info(nextmessage())
INF_ERR_UNDEF.debug(nextmessage())
INF.info(FINISH_UP)
#----------------------------------------------------------------------------
# Test 1
#----------------------------------------------------------------------------
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 10
TACITURN = 9
TERSE = 8
EFFUSIVE = 7
SOCIABLE = 6
VERBOSE = 5
TALKATIVE = 4
GARRULOUS = 3
CHATTERBOX = 2
BORING = 1
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
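#
# A minimal, hedged sketch (not exercised by the tests below) of how one of the
# custom levels above is registered and used; test1() does this for the whole
# table, this just isolates the two calls involved.
#
def _example_custom_level():  # illustrative only, never invoked
    logging.addLevelName(VERBOSE, my_logging_levels[VERBOSE])
    logging.getLogger("example").log(VERBOSE, "a message at the Verbose level")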
#
# Now, to demonstrate filtering: suppose for some perverse reason we want to
# print out all messages except GARRULOUS ones. Let's create a filter for
# this purpose...
#
class SpecificLevelFilter(logging.Filter):
def __init__(self, lvl):
self.level = lvl
def filter(self, record):
return self.level != record.levelno
class GarrulousFilter(SpecificLevelFilter):
def __init__(self):
SpecificLevelFilter.__init__(self, GARRULOUS)
#
# Now, let's demonstrate filtering at the logger. This time, use a filter
# which excludes SOCIABLE and TACITURN messages. Note that GARRULOUS events
# are still excluded.
#
class VerySpecificFilter(logging.Filter):
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
def message(s):
sys.stdout.write("%s\n" % s)
SHOULD1 = "This should only be seen at the '%s' logging level (or lower)"
def test1():
#
# Now, tell the logging system to associate names with our levels.
#
for lvl in my_logging_levels.keys():
logging.addLevelName(lvl, my_logging_levels[lvl])
#
# Now, define a test function which logs an event at each of our levels.
#
def doLog(log):
for lvl in LEVEL_RANGE:
log.log(lvl, SHOULD1, logging.getLevelName(lvl))
log = logging.getLogger("")
hdlr = log.handlers[0]
#
# Set the logging level to each different value and call the utility
# function to log events.
# In the output, you should see that each time round the loop, the number of
# logging events which are actually output decreases.
#
for lvl in LEVEL_RANGE:
message("-- setting logging level to '%s' -----" %
logging.getLevelName(lvl))
log.setLevel(lvl)
doLog(log)
#
# Now, we demonstrate level filtering at the handler level. Tell the
# handler defined above to filter at level 'SOCIABLE', and repeat the
# above loop. Compare the output from the two runs.
#
hdlr.setLevel(SOCIABLE)
message("-- Filtering at handler level to SOCIABLE --")
for lvl in LEVEL_RANGE:
message("-- setting logging level to '%s' -----" %
logging.getLevelName(lvl))
log.setLevel(lvl)
doLog(log)
hdlr.setLevel(0) #turn off level filtering at the handler
garr = GarrulousFilter()
hdlr.addFilter(garr)
message("-- Filtering using GARRULOUS filter --")
for lvl in LEVEL_RANGE:
message("-- setting logging level to '%s' -----" %
logging.getLevelName(lvl))
log.setLevel(lvl)
doLog(log)
spec = VerySpecificFilter()
log.addFilter(spec)
message("-- Filtering using specific filter for SOCIABLE, TACITURN --")
for lvl in LEVEL_RANGE:
message("-- setting logging level to '%s' -----" %
logging.getLevelName(lvl))
log.setLevel(lvl)
doLog(log)
log.removeFilter(spec)
hdlr.removeFilter(garr)
#Undo the one level which clashes...for regression tests
logging.addLevelName(logging.DEBUG, "DEBUG")
#----------------------------------------------------------------------------
# Test 2
#----------------------------------------------------------------------------
MSG = "-- logging %d at INFO, messages should be seen every 10 events --"
def test2():
logger = logging.getLogger("")
sh = logger.handlers[0]
sh.close()
logger.removeHandler(sh)
mh = logging.handlers.MemoryHandler(10,logging.WARNING, sh)
logger.setLevel(logging.DEBUG)
logger.addHandler(mh)
message("-- logging at DEBUG, nothing should be seen yet --")
logger.debug("Debug message")
message("-- logging at INFO, nothing should be seen yet --")
logger.info("Info message")
message("-- logging at WARNING, 3 messages should be seen --")
logger.warn("Warn message")
for i in xrange(102):
message(MSG % i)
logger.info("Info index = %d", i)
mh.close()
logger.removeHandler(mh)
logger.addHandler(sh)
#----------------------------------------------------------------------------
# Test 3
#----------------------------------------------------------------------------
FILTER = "a.b"
def doLog3():
logging.getLogger("a").info("Info 1")
logging.getLogger("a.b").info("Info 2")
logging.getLogger("a.c").info("Info 3")
logging.getLogger("a.b.c").info("Info 4")
logging.getLogger("a.b.c.d").info("Info 5")
logging.getLogger("a.bb.c").info("Info 6")
logging.getLogger("b").info("Info 7")
logging.getLogger("b.a").info("Info 8")
logging.getLogger("c.a.b").info("Info 9")
logging.getLogger("a.bb").info("Info 10")
def test3():
root = logging.getLogger()
root.setLevel(logging.DEBUG)
hand = root.handlers[0]
message("Unfiltered...")
doLog3()
message("Filtered with '%s'..." % FILTER)
filt = logging.Filter(FILTER)
hand.addFilter(filt)
doLog3()
hand.removeFilter(filt)
#----------------------------------------------------------------------------
# Test Harness
#----------------------------------------------------------------------------
def banner(nm, typ):
sep = BANNER % (nm, typ)
sys.stdout.write(sep)
sys.stdout.flush()
def test_main_inner():
rootLogger = logging.getLogger("")
rootLogger.setLevel(logging.DEBUG)
hdlr = logging.StreamHandler(sys.stdout)
fmt = logging.Formatter(logging.BASIC_FORMAT)
hdlr.setFormatter(fmt)
rootLogger.addHandler(hdlr)
#Set up a handler such that all events are sent via a socket to the log
#receiver (logrecv).
#The handler will only be added to the rootLogger for some of the tests
shdlr = logging.handlers.SocketHandler('localhost',
logging.handlers.DEFAULT_TCP_LOGGING_PORT)
#Configure the logger for logrecv so events do not propagate beyond it.
#The sockLogger output is buffered in memory until the end of the test,
#and printed at the end.
sockOut = cStringIO.StringIO()
sockLogger = logging.getLogger("logrecv")
sockLogger.setLevel(logging.DEBUG)
sockhdlr = logging.StreamHandler(sockOut)
sockhdlr.setFormatter(logging.Formatter(
"%(name)s -> %(levelname)s: %(message)s"))
sockLogger.addHandler(sockhdlr)
sockLogger.propagate = 0
#Set up servers
threads = []
tcpserver = LogRecordSocketReceiver()
#sys.stdout.write("About to start TCP server...\n")
threads.append(threading.Thread(target=runTCP, args=(tcpserver,)))
for thread in threads:
thread.start()
try:
banner("log_test0", "begin")
rootLogger.addHandler(shdlr)
test0()
shdlr.close()
rootLogger.removeHandler(shdlr)
banner("log_test0", "end")
banner("log_test1", "begin")
test1()
banner("log_test1", "end")
banner("log_test2", "begin")
test2()
banner("log_test2", "end")
banner("log_test3", "begin")
test3()
banner("log_test3", "end")
finally:
#wait for TCP receiver to terminate
socketDataProcessed.wait()
for thread in threads:
thread.join()
banner("logrecv output", "begin")
sys.stdout.write(sockOut.getvalue())
sockOut.close()
sockLogger.removeHandler(sockhdlr)
sockhdlr.close()
banner("logrecv output", "end")
sys.stdout.flush()
try:
hdlr.close()
except:
pass
rootLogger.removeHandler(hdlr)
def test_main():
import locale
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first so we can restore it at the end.
try:
original_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, '')
except (ValueError, locale.Error):
# this happens on a Solaris box which only supports "C" locale
# or a Mac OS X box which supports very little locale stuff at all
original_locale = None
try:
test_main_inner()
finally:
if original_locale is not None:
locale.setlocale(locale.LC_ALL, original_locale)
if __name__ == "__main__":
sys.stdout.write("test_logging\n")
test_main()
| gpl-3.0 |
ueshin/apache-spark | python/pyspark/sql/streaming.py | 15 | 49462 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import json
from py4j.java_gateway import java_import
from pyspark import since, keyword_only
from pyspark.sql.column import _to_seq
from pyspark.sql.readwriter import OptionUtils, to_str
from pyspark.sql.types import StructType, StructField, StringType
from pyspark.sql.utils import ForeachBatchFunction, StreamingQueryException
__all__ = ["StreamingQuery", "StreamingQueryManager", "DataStreamReader", "DataStreamWriter"]
class StreamingQuery(object):
"""
A handle to a query that is executing continuously in the background as new data arrives.
All these methods are thread-safe.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
def __init__(self, jsq):
self._jsq = jsq
@property
@since(2.0)
def id(self):
"""Returns the unique id of this query that persists across restarts from checkpoint data.
That is, this id is generated when a query is started for the first time, and
will be the same every time it is restarted from checkpoint data.
There can only be one query with the same id active in a Spark cluster.
Also see, `runId`.
"""
return self._jsq.id().toString()
@property
@since(2.1)
def runId(self):
"""Returns the unique id of this query that does not persist across restarts. That is, every
query that is started (or restarted from checkpoint) will have a different runId.
"""
return self._jsq.runId().toString()
@property
@since(2.0)
def name(self):
"""Returns the user-specified name of the query, or null if not specified.
This name can be specified in the `org.apache.spark.sql.streaming.DataStreamWriter`
as `dataframe.writeStream.queryName("query").start()`.
This name, if set, must be unique across all active queries.
"""
return self._jsq.name()
@property
@since(2.0)
def isActive(self):
"""Whether this streaming query is currently active or not.
"""
return self._jsq.isActive()
@since(2.0)
def awaitTermination(self, timeout=None):
"""Waits for the termination of `this` query, either by :func:`query.stop()` or by an
exception. If the query has terminated with an exception, then the exception will be thrown.
If `timeout` is set, it returns whether the query has terminated or not within the
`timeout` seconds.
If the query has terminated, then all subsequent calls to this method will either return
immediately (if the query was terminated by :func:`stop()`), or throw the exception
immediately (if the query has terminated with exception).
throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
"""
if timeout is not None:
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError("timeout must be a positive integer or float. Got %s" % timeout)
return self._jsq.awaitTermination(int(timeout * 1000))
else:
return self._jsq.awaitTermination()
@property
@since(2.1)
def status(self):
"""
Returns the current status of the query.
"""
return json.loads(self._jsq.status().json())
@property
@since(2.1)
def recentProgress(self):
"""Returns an array of the most recent [[StreamingQueryProgress]] updates for this query.
The number of progress updates retained for each stream is configured by Spark session
configuration `spark.sql.streaming.numRecentProgressUpdates`.
"""
return [json.loads(p.json()) for p in self._jsq.recentProgress()]
@property
def lastProgress(self):
"""
Returns the most recent :class:`StreamingQueryProgress` update of this streaming query or
None if there were no progress updates
.. versionadded:: 2.1.0
Returns
-------
dict
"""
lastProgress = self._jsq.lastProgress()
if lastProgress:
return json.loads(lastProgress.json())
else:
return None
def processAllAvailable(self):
"""Blocks until all available data in the source has been processed and committed to the
sink. This method is intended for testing.
.. versionadded:: 2.0.0
Notes
-----
In the case of continually arriving data, this method may block forever.
        Additionally, this method is only guaranteed to block until data that has been
        synchronously appended to a stream source prior to invocation has been processed
        (i.e. `getOffset` must immediately reflect the addition).
"""
return self._jsq.processAllAvailable()
@since(2.0)
def stop(self):
"""Stop this streaming query.
"""
self._jsq.stop()
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
.. versionadded:: 2.1.0
Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
Examples
--------
>>> sq = sdf.writeStream.format('memory').queryName('query_explain').start()
>>> sq.processAllAvailable() # Wait a bit to generate the runtime plans.
>>> sq.explain()
== Physical Plan ==
...
>>> sq.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> sq.stop()
"""
# Cannot call `_jsq.explain(...)` because it will print in the JVM process.
# We should print it in the Python process.
print(self._jsq.explainInternal(extended))
def exception(self):
"""
.. versionadded:: 2.1.0
Returns
-------
:class:`StreamingQueryException`
the StreamingQueryException if the query was terminated by an exception, or None.
"""
if self._jsq.exception().isDefined():
je = self._jsq.exception().get()
msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info
stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace()))
return StreamingQueryException(msg, stackTrace, je.getCause())
else:
return None
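# A short sketch of the error-handling pattern these methods enable (assuming a query `sq`):
#   if not sq.isActive:
#       err = sq.exception()   # StreamingQueryException if the query failed, else None
#       if err is not None:
#           print(err)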
class StreamingQueryManager(object):
"""A class to manage all the :class:`StreamingQuery` StreamingQueries active.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
def __init__(self, jsqm):
self._jsqm = jsqm
@property
def active(self):
"""Returns a list of active queries associated with this SQLContext
.. versionadded:: 2.0.0
Examples
--------
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sqm = spark.streams
>>> # get the list of active streaming queries
>>> [q.name for q in sqm.active]
['this_query']
>>> sq.stop()
"""
return [StreamingQuery(jsq) for jsq in self._jsqm.active()]
def get(self, id):
"""Returns an active query from this SQLContext or throws exception if an active query
with this name doesn't exist.
.. versionadded:: 2.0.0
Examples
--------
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sq.name
'this_query'
>>> sq = spark.streams.get(sq.id)
>>> sq.isActive
True
>>> sq = sqlContext.streams.get(sq.id)
>>> sq.isActive
True
>>> sq.stop()
"""
return StreamingQuery(self._jsqm.get(id))
@since(2.0)
def awaitAnyTermination(self, timeout=None):
"""Wait until any of the queries on the associated SQLContext has terminated since the
creation of the context, or since :func:`resetTerminated()` was called. If any query was
terminated with an exception, then the exception will be thrown.
If `timeout` is set, it returns whether the query has terminated or not within the
`timeout` seconds.
If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will
either return immediately (if the query was terminated by :func:`query.stop()`),
or throw the exception immediately (if the query was terminated with exception). Use
:func:`resetTerminated()` to clear past terminations and wait for new terminations.
In the case where multiple queries have terminated since :func:`resetTerminated()`
was called, if any query has terminated with an exception, then :func:`awaitAnyTermination()`
will throw any of the exceptions. For correctly documenting exceptions across multiple
queries, users need to stop all of them after any of them terminates with exception, and
then check the `query.exception()` for each query.
throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
"""
if timeout is not None:
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError("timeout must be a positive integer or float. Got %s" % timeout)
return self._jsqm.awaitAnyTermination(int(timeout * 1000))
else:
return self._jsqm.awaitAnyTermination()
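# A minimal sketch of manager-level waiting (assuming queries started on this SparkSession):
#   spark.streams.awaitAnyTermination(timeout=10)  # True once any query terminated within 10 seconds
#   spark.streams.resetTerminated()                # forget past terminations before waiting again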
def resetTerminated(self):
"""Forget about past terminated queries so that :func:`awaitAnyTermination()` can be used
again to wait for new terminations.
.. versionadded:: 2.0.0
Examples
--------
>>> spark.streams.resetTerminated()
"""
self._jsqm.resetTerminated()
class DataStreamReader(OptionUtils):
"""
Interface used to load a streaming :class:`DataFrame <pyspark.sql.DataFrame>` from external
storage systems (e.g. file systems, key-value stores, etc).
Use :attr:`SparkSession.readStream <pyspark.sql.SparkSession.readStream>` to access this.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
def __init__(self, spark):
self._jreader = spark._ssql_ctx.readStream()
self._spark = spark
def _df(self, jdf):
from pyspark.sql.dataframe import DataFrame
return DataFrame(jdf, self._spark)
def format(self, source):
"""Specifies the input data source format.
.. versionadded:: 2.0.0
Parameters
----------
source : str
name of the data source, e.g. 'json', 'parquet'.
Notes
-----
This API is evolving.
Examples
--------
>>> s = spark.readStream.format("text")
"""
self._jreader = self._jreader.format(source)
return self
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
.. versionadded:: 2.0.0
Parameters
----------
schema : :class:`pyspark.sql.types.StructType` or str
a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
(For example ``col0 INT, col1 DOUBLE``).
Notes
-----
This API is evolving.
Examples
--------
>>> s = spark.readStream.schema(sdf_schema)
>>> s = spark.readStream.schema("col0 INT, col1 DOUBLE")
"""
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
if isinstance(schema, StructType):
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
elif isinstance(schema, str):
self._jreader = self._jreader.schema(schema)
else:
raise TypeError("schema should be StructType or string")
return self
def option(self, key, value):
"""Adds an input option for the underlying data source.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Examples
--------
>>> s = spark.readStream.option("x", 1)
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
def options(self, **options):
"""Adds input options for the underlying data source.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Examples
--------
>>> s = spark.readStream.options(x="1", y=2)
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
def load(self, path=None, format=None, schema=None, **options):
"""Loads a data stream from a data source and returns it as a
:class:`DataFrame <pyspark.sql.DataFrame>`.
.. versionadded:: 2.0.0
Parameters
----------
path : str, optional
optional string for file-system backed data sources.
format : str, optional
optional string for format of the data source. Default to 'parquet'.
schema : :class:`pyspark.sql.types.StructType` or str, optional
optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
**options : dict
all other string options
Notes
-----
This API is evolving.
Examples
--------
>>> json_sdf = spark.readStream.format("json") \\
... .schema(sdf_schema) \\
... .load(tempfile.mkdtemp())
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
True
"""
if format is not None:
self.format(format)
if schema is not None:
self.schema(schema)
self.options(**options)
if path is not None:
if type(path) != str or len(path.strip()) == 0:
raise ValueError("If the path is provided for stream, it needs to be a " +
"non-empty string. List of paths are not supported.")
return self._df(self._jreader.load(path))
else:
return self._df(self._jreader.load())
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
multiLine=None, allowUnquotedControlChars=None, lineSep=None, locale=None,
dropFieldIfAllNull=None, encoding=None, pathGlobFilter=None,
recursiveFileLookup=None, allowNonNumericNumbers=None):
"""
Loads a JSON file stream and returns the results as a :class:`DataFrame`.
`JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
If the ``schema`` parameter is not specified, this function goes
through the input once to determine the input schema.
.. versionadded:: 2.0.0
Parameters
----------
path : str
string representing the path to the JSON dataset.
schema : :class:`pyspark.sql.types.StructType` or str, optional
an optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
Other Parameters
----------------
Extra options
For the extra options, refer to
`Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
in the version you use.
.. # noqa
Notes
-----
This API is evolving.
Examples
--------
>>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
True
"""
self._set_opts(
schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
timestampFormat=timestampFormat, multiLine=multiLine,
allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, locale=locale,
dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding,
pathGlobFilter=pathGlobFilter, recursiveFileLookup=recursiveFileLookup,
allowNonNumericNumbers=allowNonNumericNumbers)
if isinstance(path, str):
return self._df(self._jreader.json(path))
else:
raise TypeError("path can be only a single string")
def orc(self, path, mergeSchema=None, pathGlobFilter=None, recursiveFileLookup=None):
"""Loads a ORC file stream, returning the result as a :class:`DataFrame`.
.. versionadded:: 2.3.0
Other Parameters
----------------
Extra options
For the extra options, refer to
`Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-orc.html#data-source-option>`_
in the version you use.
.. # noqa
Examples
--------
>>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp())
>>> orc_sdf.isStreaming
True
>>> orc_sdf.schema == sdf_schema
True
"""
self._set_opts(mergeSchema=mergeSchema, pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup)
if isinstance(path, str):
return self._df(self._jreader.orc(path))
else:
raise TypeError("path can be only a single string")
def parquet(self, path, mergeSchema=None, pathGlobFilter=None, recursiveFileLookup=None,
datetimeRebaseMode=None, int96RebaseMode=None):
"""
Loads a Parquet file stream, returning the result as a :class:`DataFrame`.
.. versionadded:: 2.0.0
Parameters
----------
path : str
the path in any Hadoop supported file system
Other Parameters
----------------
Extra options
For the extra options, refer to
`Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#data-source-option>`_.
in the version you use.
.. # noqa
Examples
--------
>>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp())
>>> parquet_sdf.isStreaming
True
>>> parquet_sdf.schema == sdf_schema
True
"""
self._set_opts(mergeSchema=mergeSchema, pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup,
datetimeRebaseMode=datetimeRebaseMode, int96RebaseMode=int96RebaseMode)
if isinstance(path, str):
return self._df(self._jreader.parquet(path))
else:
raise TypeError("path can be only a single string")
def text(self, path, wholetext=False, lineSep=None, pathGlobFilter=None,
recursiveFileLookup=None):
"""
Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a
string column named "value", and followed by partitioned columns if there
are any.
The text files must be encoded as UTF-8.
By default, each line in the text file is a new row in the resulting DataFrame.
.. versionadded:: 2.0.0
Parameters
----------
path : str
string for the input path.
Other Parameters
----------------
Extra options
For the extra options, refer to
`Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-text.html#data-source-option>`_
in the version you use.
.. # noqa
Notes
-----
This API is evolving.
Examples
--------
>>> text_sdf = spark.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
>>> "value" in str(text_sdf.schema)
True
"""
self._set_opts(
wholetext=wholetext, lineSep=lineSep, pathGlobFilter=pathGlobFilter,
recursiveFileLookup=recursiveFileLookup)
if isinstance(path, str):
return self._df(self._jreader.text(path))
else:
raise TypeError("path can be only a single string")
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None,
enforceSchema=None, emptyValue=None, locale=None, lineSep=None,
pathGlobFilter=None, recursiveFileLookup=None, unescapedQuoteHandling=None):
r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`.
This function will go through the input once to determine the input schema if
``inferSchema`` is enabled. To avoid going through the entire data once, disable
``inferSchema`` option or specify the schema explicitly using ``schema``.
Parameters
----------
path : str
string for the input path.
schema : :class:`pyspark.sql.types.StructType` or str, optional
an optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
.. versionadded:: 2.0.0
Other Parameters
----------------
Extra options
For the extra options, refer to
`Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
in the version you use.
.. # noqa
Notes
-----
This API is evolving.
Examples
--------
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema)
>>> csv_sdf.isStreaming
True
>>> csv_sdf.schema == sdf_schema
True
"""
self._set_opts(
schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment,
header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
maxCharsPerColumn=maxCharsPerColumn,
maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, enforceSchema=enforceSchema,
emptyValue=emptyValue, locale=locale, lineSep=lineSep,
pathGlobFilter=pathGlobFilter, recursiveFileLookup=recursiveFileLookup,
unescapedQuoteHandling=unescapedQuoteHandling)
if isinstance(path, str):
return self._df(self._jreader.csv(path))
else:
raise TypeError("path can be only a single string")
def table(self, tableName):
"""Define a Streaming DataFrame on a Table. The DataSource corresponding to the table should
support streaming mode.
.. versionadded:: 3.1.0
Parameters
----------
tableName : str
string, for the name of the table.
Returns
-------
:class:`DataFrame`
Notes
-----
This API is evolving.
Examples
--------
>>> spark.readStream.table('input_table') # doctest: +SKIP
"""
if isinstance(tableName, str):
return self._df(self._jreader.table(tableName))
else:
raise TypeError("tableName can be only a single string")
class DataStreamWriter(object):
"""
Interface used to write a streaming :class:`DataFrame <pyspark.sql.DataFrame>` to external
storage systems (e.g. file systems, key-value stores, etc).
Use :attr:`DataFrame.writeStream <pyspark.sql.DataFrame.writeStream>`
to access this.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
def __init__(self, df):
self._df = df
self._spark = df.sql_ctx
self._jwrite = df._jdf.writeStream()
def _sq(self, jsq):
from pyspark.sql.streaming import StreamingQuery
return StreamingQuery(jsq)
def outputMode(self, outputMode):
"""Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
.. versionadded:: 2.0.0
Options include:
* `append`: Only the new rows in the streaming DataFrame/Dataset will be written to
the sink
* `complete`: All the rows in the streaming DataFrame/Dataset will be written to the sink
every time there are some updates
* `update`: only the rows that were updated in the streaming DataFrame/Dataset will be
written to the sink every time there are some updates. If the query doesn't contain
aggregations, it will be equivalent to `append` mode.
Notes
-----
This API is evolving.
Examples
--------
>>> writer = sdf.writeStream.outputMode('append')
"""
if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0:
raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode)
self._jwrite = self._jwrite.outputMode(outputMode)
return self
def format(self, source):
"""Specifies the underlying output data source.
.. versionadded:: 2.0.0
Parameters
----------
source : str
string, name of the data source, which for now can be 'parquet'.
Notes
-----
This API is evolving.
Examples
--------
>>> writer = sdf.writeStream.format('json')
"""
self._jwrite = self._jwrite.format(source)
return self
def option(self, key, value):
"""Adds an output option for the underlying data source.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
def options(self, **options):
"""Adds output options for the underlying data source.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
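# A minimal configuration sketch combining option()/options() (hypothetical paths and values;
# most sinks also need a `checkpointLocation` option, see start() below):
#   writer = (sdf.writeStream.format('parquet')
#             .option('checkpointLocation', '/tmp/checkpoint')
#             .options(path='/tmp/out'))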
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
.. versionadded:: 2.0.0
Parameters
----------
cols : str or list
name of columns
Notes
-----
This API is evolving.
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
def queryName(self, queryName):
"""Specifies the name of the :class:`StreamingQuery` that can be started with
:func:`start`. This name must be unique among all the currently active queries
in the associated SparkSession.
.. versionadded:: 2.0.0
Parameters
----------
queryName : str
unique name for the query
Notes
-----
This API is evolving.
Examples
--------
>>> writer = sdf.writeStream.queryName('streaming_query')
"""
if not queryName or type(queryName) != str or len(queryName.strip()) == 0:
raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName)
self._jwrite = self._jwrite.queryName(queryName)
return self
@keyword_only
def trigger(self, *, processingTime=None, once=None, continuous=None):
"""Set the trigger for the stream query. If this is not set it will run the query as fast
as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.
.. versionadded:: 2.0.0
Parameters
----------
processingTime : str, optional
a processing time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a microbatch query periodically based on the
processing time. Only one trigger can be set.
once : bool, optional
if set to True, set a trigger that processes only one batch of data in a
streaming query then terminates the query. Only one trigger can be set.
continuous : str, optional
a time interval as a string, e.g. '5 seconds', '1 minute'.
Set a trigger that runs a continuous query with a given checkpoint
interval. Only one trigger can be set.
Notes
-----
This API is evolving.
Examples
--------
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
>>> # trigger the query for just once batch of data
>>> writer = sdf.writeStream.trigger(once=True)
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(continuous='5 seconds')
"""
params = [processingTime, once, continuous]
if params.count(None) == 3:
raise ValueError('No trigger provided')
elif params.count(None) < 2:
raise ValueError('Multiple triggers not allowed.')
jTrigger = None
if processingTime is not None:
if type(processingTime) != str or len(processingTime.strip()) == 0:
raise ValueError('Value for processingTime must be a non empty string. Got: %s' %
processingTime)
interval = processingTime.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime(
interval)
elif once is not None:
if once is not True:
raise ValueError('Value for once must be True. Got: %s' % once)
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once()
else:
if type(continuous) != str or len(continuous.strip()) == 0:
raise ValueError('Value for continuous must be a non empty string. Got: %s' %
continuous)
interval = continuous.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous(
interval)
self._jwrite = self._jwrite.trigger(jTrigger)
return self
def foreach(self, f):
"""
Sets the output of the streaming query to be processed using the provided writer ``f``.
This is often used to write the output of a streaming query to arbitrary storage systems.
The processing logic can be specified in two ways.
#. A **function** that takes a row as input.
This is a simple way to express your processing logic. Note that this does
not allow you to deduplicate generated data when failures cause reprocessing of
some input data. That would require you to specify the processing logic in the next
way.
#. An **object** with a ``process`` method and optional ``open`` and ``close`` methods.
The object can have the following methods.
* ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing
(for example, open a connection, start a transaction, etc). Additionally, you can
use the `partition_id` and `epoch_id` to deduplicate regenerated data
(discussed later).
* ``process(row)``: *Non-optional* method that processes each :class:`Row`.
* ``close(error)``: *Optional* method that finalizes and cleans up (for example,
close connection, commit transaction, etc.) after all rows have been processed.
The object will be used by Spark in the following way.
* A single copy of this object is responsible for all the data generated by a
single task in a query. In other words, one instance is responsible for
processing one partition of the data generated in a distributed manner.
* This object must be serializable because each task will get a fresh
serialized-deserialized copy of the provided object. Hence, it is strongly
recommended that any initialization for writing data (e.g. opening a
connection or starting a transaction) is done after the `open(...)`
method has been called, which signifies that the task is ready to generate data.
* The lifecycle of the methods are as follows.
For each partition with ``partition_id``:
... For each batch/epoch of streaming data with ``epoch_id``:
....... Method ``open(partitionId, epochId)`` is called.
....... If ``open(...)`` returns true, for each row in the partition and
batch/epoch, method ``process(row)`` is called.
....... Method ``close(errorOrNull)`` is called with error (if any) seen while
processing rows.
Important points to note:
* The `partitionId` and `epochId` can be used to deduplicate generated data when
failures cause reprocessing of some input data. This depends on the execution
mode of the query. If the streaming query is being executed in the micro-batch
mode, then every partition represented by a unique tuple (partition_id, epoch_id)
is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used
to deduplicate and/or transactionally commit data and achieve exactly-once
guarantees. However, if the streaming query is being executed in the continuous
mode, then this guarantee does not hold and therefore should not be used for
deduplication.
* The ``close()`` method (if it exists) will be called if the ``open()`` method exists and
returns successfully (irrespective of the return value), except if the Python process
crashes in the middle.
.. versionadded:: 2.4.0
Notes
-----
This API is evolving.
Examples
--------
>>> # Print every row using a function
>>> def print_row(row):
... print(row)
...
>>> writer = sdf.writeStream.foreach(print_row)
>>> # Print every row using a object with process() method
>>> class RowPrinter:
... def open(self, partition_id, epoch_id):
... print("Opened %d, %d" % (partition_id, epoch_id))
... return True
... def process(self, row):
... print(row)
... def close(self, error):
... print("Closed with error: %s" % str(error))
...
>>> writer = sdf.writeStream.foreach(RowPrinter())
"""
from pyspark.rdd import _wrap_function
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.taskcontext import TaskContext
if callable(f):
# The provided object is a callable function that is supposed to be called on each row.
# Construct a function that takes an iterator and calls the provided function on each
# row.
def func_without_process(_, iterator):
for x in iterator:
f(x)
return iter([])
func = func_without_process
else:
# The provided object is not a callable function. Then it is expected to have a
# 'process(row)' method, and optional 'open(partition_id, epoch_id)' and
# 'close(error)' methods.
if not hasattr(f, 'process'):
raise AttributeError("Provided object does not have a 'process' method")
if not callable(getattr(f, 'process')):
raise TypeError("Attribute 'process' in provided object is not callable")
def doesMethodExist(method_name):
exists = hasattr(f, method_name)
if exists and not callable(getattr(f, method_name)):
raise TypeError(
"Attribute '%s' in provided object is not callable" % method_name)
return exists
open_exists = doesMethodExist('open')
close_exists = doesMethodExist('close')
def func_with_open_process_close(partition_id, iterator):
epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId')
if epoch_id:
epoch_id = int(epoch_id)
else:
raise RuntimeError("Could not get batch id from TaskContext")
# Check if the data should be processed
should_process = True
if open_exists:
should_process = f.open(partition_id, epoch_id)
error = None
try:
if should_process:
for x in iterator:
f.process(x)
except Exception as ex:
error = ex
finally:
if close_exists:
f.close(error)
if error:
raise error
return iter([])
func = func_with_open_process_close
serializer = AutoBatchedSerializer(PickleSerializer())
wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer)
jForeachWriter = \
self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter(
wrapped_func, self._df._jdf.schema())
self._jwrite.foreach(jForeachWriter)
return self
def foreachBatch(self, func):
"""
Sets the output of the streaming query to be processed using the provided
function. This is supported only in the micro-batch execution modes (that is, when the
trigger is not continuous). In every micro-batch, the provided function will be called
with (i) the output rows as a DataFrame and (ii) the batch identifier.
The batchId can be used to deduplicate and transactionally write the output
(that is, the provided Dataset) to external systems. The output DataFrame is guaranteed
to be exactly the same for the same batchId (assuming all operations are deterministic in the
query).
.. versionadded:: 2.4.0
Notes
-----
This API is evolving.
Examples
--------
>>> def func(batch_df, batch_id):
... batch_df.collect()
...
>>> writer = sdf.writeStream.foreachBatch(func)
"""
from pyspark.java_gateway import ensure_callback_server_started
gw = self._spark._sc._gateway
java_import(gw.jvm, "org.apache.spark.sql.execution.streaming.sources.*")
wrapped_func = ForeachBatchFunction(self._spark, func)
gw.jvm.PythonForeachBatchHelper.callForeachBatch(self._jwrite, wrapped_func)
ensure_callback_server_started(gw)
return self
def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None,
**options):
"""Streams the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
.. versionadded:: 2.0.0
Parameters
----------
path : str, optional
the path in a Hadoop supported file system
format : str, optional
the format used to save
outputMode : str, optional
specifies how data of a streaming DataFrame/Dataset is written to a
streaming sink.
* `append`: Only the new rows in the streaming DataFrame/Dataset will be written to the
sink
* `complete`: All the rows in the streaming DataFrame/Dataset will be written to the
sink every time there are some updates
* `update`: only the rows that were updated in the streaming DataFrame/Dataset will be
written to the sink every time there are some updates. If the query doesn't contain
aggregations, it will be equivalent to `append` mode.
partitionBy : str or list, optional
names of partitioning columns
queryName : str, optional
unique name for the query
**options : dict
All other string options. You may want to provide a `checkpointLocation`
for most streams, however it is not required for a `memory` stream.
Notes
-----
This API is evolving.
Examples
--------
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sq.isActive
True
>>> sq.name
'this_query'
>>> sq.stop()
>>> sq.isActive
False
>>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start(
... queryName='that_query', outputMode="append", format='memory')
>>> sq.name
'that_query'
>>> sq.isActive
True
>>> sq.stop()
"""
self.options(**options)
if outputMode is not None:
self.outputMode(outputMode)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if queryName is not None:
self.queryName(queryName)
if path is None:
return self._sq(self._jwrite.start())
else:
return self._sq(self._jwrite.start(path))
def toTable(self, tableName, format=None, outputMode=None, partitionBy=None, queryName=None,
**options):
"""
Starts the execution of the streaming query, which will continually output results to the
given table as new data arrives.
The returned :class:`StreamingQuery` object can be used to interact with the stream.
.. versionadded:: 3.1.0
Parameters
----------
tableName : str
string, for the name of the table.
format : str, optional
the format used to save.
outputMode : str, optional
specifies how data of a streaming DataFrame/Dataset is written to a
streaming sink.
* `append`: Only the new rows in the streaming DataFrame/Dataset will be written to the
sink
* `complete`: All the rows in the streaming DataFrame/Dataset will be written to the
sink every time there are some updates
* `update`: only the rows that were updated in the streaming DataFrame/Dataset will be
written to the sink every time there are some updates. If the query doesn't contain
aggregations, it will be equivalent to `append` mode.
partitionBy : str or list, optional
names of partitioning columns
queryName : str, optional
unique name for the query
**options : dict
All other string options. You may want to provide a `checkpointLocation`.
Notes
-----
This API is evolving.
For a v1 table, partitioning columns provided by `partitionBy` will be respected no matter
whether the table exists or not. A new table will be created if the table does not exist.
For a v2 table, `partitionBy` will be ignored if the table already exists. `partitionBy` will
be respected only if the v2 table does not exist. Besides, the v2 table created by this API
lacks some functionalities (e.g., customized properties, options, and serde info). If you
need them, please create the v2 table manually before the execution to avoid creating a
table with incomplete information.
Examples
--------
>>> sdf.writeStream.format('parquet').queryName('query').toTable('output_table')
... # doctest: +SKIP
>>> sdf.writeStream.trigger(processingTime='5 seconds').toTable(
... 'output_table',
... queryName='that_query',
... outputMode="append",
... format='parquet',
... checkpointLocation='/tmp/checkpoint') # doctest: +SKIP
"""
self.options(**options)
if outputMode is not None:
self.outputMode(outputMode)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if queryName is not None:
self.queryName(queryName)
return self._sq(self._jwrite.toTable(tableName))
def _test():
import doctest
import os
import tempfile
from pyspark.sql import SparkSession, SQLContext
import pyspark.sql.streaming
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.streaming.__dict__.copy()
try:
spark = SparkSession.builder.getOrCreate()
except py4j.protocol.Py4JError: # noqa: F821
spark = SparkSession(sc) # noqa: F821
globs['tempfile'] = tempfile
globs['os'] = os
globs['spark'] = spark
globs['sqlContext'] = SQLContext.getOrCreate(spark.sparkContext)
globs['sdf'] = \
spark.readStream.format('text').load('python/test_support/sql/streaming')
globs['sdf_schema'] = StructType([StructField("data", StringType(), True)])
globs['df'] = \
globs['spark'].readStream.format('text').load('python/test_support/sql/streaming')
(failure_count, test_count) = doctest.testmod(
pyspark.sql.streaming, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['spark'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
ryfeus/lambda-packs | Keras_tensorflow/source/numpy/matrixlib/tests/test_regression.py | 146 | 1144 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_, assert_equal
rlevel = 1
class TestRegression(TestCase):
def test_kron_matrix(self, level=rlevel):
# Ticket #71
x = np.matrix('[1 0; 1 0]')
assert_equal(type(np.kron(x, x)), type(x))
def test_matrix_properties(self,level=rlevel):
# Ticket #125
a = np.matrix([1.0], dtype=float)
assert_(type(a.real) is np.matrix)
assert_(type(a.imag) is np.matrix)
c, d = np.matrix([0.0]).nonzero()
assert_(type(c) is np.ndarray)
assert_(type(d) is np.ndarray)
def test_matrix_multiply_by_1d_vector(self, level=rlevel):
# Ticket #473
def mul():
np.mat(np.eye(2))*np.ones(2)
self.assertRaises(ValueError, mul)
def test_matrix_std_argmax(self,level=rlevel):
# Ticket #83
x = np.asmatrix(np.random.uniform(0, 1, (3, 3)))
self.assertEqual(x.std().shape, ())
self.assertEqual(x.argmax().shape, ())
if __name__ == "__main__":
run_module_suite()
| mit |
yongshengwang/builthue | desktop/core/ext-py/ctypes-1.0.2/ctypes/test/test_functions.py | 14 | 12359 | """
Here is probably the place to write the docs, since the test-cases
show how the types behave.
Later...
"""
from ctypes import *
import sys, unittest
try:
WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
WINFUNCTYPE = CFUNCTYPE
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
if sys.platform == "win32":
windll = WinDLL(_ctypes_test.__file__)
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class RECT(Structure):
_fields_ = [("left", c_int), ("top", c_int),
("right", c_int), ("bottom", c_int)]
class FunctionTestCase(unittest.TestCase):
def test_mro(self):
# in Python 2.3, this raises TypeError: MRO conflict among base classes,
# in Python 2.2 it works.
#
# But in early versions of _ctypes.c, the result of tp_new
# wasn't checked, and it even crashed Python.
# Found by Greg Chapman.
try:
class X(object, Array):
_length_ = 5
_type_ = "i"
except TypeError:
pass
from _ctypes import _Pointer
try:
class X(object, _Pointer):
pass
except TypeError:
pass
from _ctypes import _SimpleCData
try:
class X(object, _SimpleCData):
_type_ = "i"
except TypeError:
pass
try:
class X(object, Structure):
_fields_ = []
except TypeError:
pass
def test_wchar_parm(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(1, u"x", 3, 4, 5.0, 6.0)
self.failUnlessEqual(result, 139)
self.failUnlessEqual(type(result), int)
def test_wchar_result(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_wchar
result = f(0, 0, 0, 0, 0, 0)
self.failUnlessEqual(result, u'\x00')
def test_voidresult(self):
f = dll._testfunc_v
f.restype = None
f.argtypes = [c_int, c_int, POINTER(c_int)]
result = c_int()
self.failUnlessEqual(None, f(1, 2, byref(result)))
self.failUnlessEqual(result.value, 3)
def test_intresult(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_int
result = f(1, 2, 3, 4, 5.0, 6.0)
self.failUnlessEqual(result, 21)
self.failUnlessEqual(type(result), int)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.failUnlessEqual(result, -21)
self.failUnlessEqual(type(result), int)
# If we declare the function to return a short,
# is the high part split off?
f.restype = c_short
result = f(1, 2, 3, 4, 5.0, 6.0)
self.failUnlessEqual(result, 21)
self.failUnlessEqual(type(result), int)
result = f(1, 2, 3, 0x10004, 5.0, 6.0)
self.failUnlessEqual(result, 21)
self.failUnlessEqual(type(result), int)
# You cannot assign character format codes as restype any longer
self.assertRaises(TypeError, setattr, f, "restype", "i")
def test_floatresult(self):
f = dll._testfunc_f_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_float
result = f(1, 2, 3, 4, 5.0, 6.0)
self.failUnlessEqual(result, 21)
self.failUnlessEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.failUnlessEqual(result, -21)
self.failUnlessEqual(type(result), float)
def test_doubleresult(self):
f = dll._testfunc_d_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_double
result = f(1, 2, 3, 4, 5.0, 6.0)
self.failUnlessEqual(result, 21)
self.failUnlessEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.failUnlessEqual(result, -21)
self.failUnlessEqual(type(result), float)
def test_longlongresult(self):
try:
c_longlong
except NameError:
return
f = dll._testfunc_q_bhilfd
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
result = f(1, 2, 3, 4, 5.0, 6.0)
self.failUnlessEqual(result, 21)
f = dll._testfunc_q_bhilfdq
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double, c_longlong]
result = f(1, 2, 3, 4, 5.0, 6.0, 21)
self.failUnlessEqual(result, 42)
def test_stringresult(self):
f = dll._testfunc_p_p
f.argtypes = None
f.restype = c_char_p
result = f("123")
self.failUnlessEqual(result, "123")
result = f(None)
self.failUnlessEqual(result, None)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.failUnlessEqual(pointer(v).contents.value, 42)
result = f(pointer(v))
self.failUnlessEqual(type(result), POINTER(c_int))
self.failUnlessEqual(result.contents.value, 42)
# This one works...
result = f(pointer(v))
self.failUnlessEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(p)
self.failUnlessEqual(result.contents.value, 99)
arg = byref(v)
result = f(arg)
self.failIfEqual(result.contents, v.value)
self.assertRaises(ArgumentError, f, byref(c_short(22)))
# It is dangerous, however, because you don't control the lifetime
# of the pointer:
result = f(byref(c_int(99)))
self.failIfEqual(result.contents, 99)
def test_errors(self):
f = dll._testfunc_p_p
f.restype = c_int
class X(Structure):
_fields_ = [("y", c_int)]
self.assertRaises(TypeError, f, X()) #cannot convert parameter
################################################################
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(2**18, cb)
self.failUnlessEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.failUnlessEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(-10, cb)
self.failUnlessEqual(result, -18)
AnotherCallback = WINFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, -10, cb)
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.failUnlessEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.failUnlessEqual(result, -18)
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.failUnless(isinstance(value, (int, long)))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.failUnlessEqual(13577625587, f(1000000000000, cb))
def test_errors(self):
self.assertRaises(AttributeError, getattr, dll, "_xxx_yyy")
self.assertRaises(ValueError, c_int.in_dll, dll, "_xxx_yyy")
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.failUnlessEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.failUnlessEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(inp)
self.failUnlessEqual((s2h.x, s2h.y), (99*2, 88*3))
if sys.platform == "win32":
def test_struct_return_2H_stdcall(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
windll.s_ret_2h_func.restype = S2H
windll.s_ret_2h_func.argtypes = [S2H]
s2h = windll.s_ret_2h_func(S2H(99, 88))
self.failUnlessEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(inp)
self.failUnlessEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
if sys.platform == "win32":
def test_struct_return_8H_stdcall(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
windll.s_ret_8i_func.restype = S8I
windll.s_ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = windll.s_ret_8i_func(inp)
self.failUnlessEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_sf1651235(self):
# see http://www.python.org/sf/1651235
proto = CFUNCTYPE(c_int, RECT, POINT)
def callback(*args):
return 0
callback = proto(callback)
self.failUnlessRaises(ArgumentError, lambda: callback((1, 2, 3, 4), POINT()))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
bchareyre/ratchet | py/tests/core.py | 5 | 8085 | # encoding: utf-8
# 2009 © Václav Šmilauer <eudoxos@arcig.cz>
"""
Core functionality (Scene in c++), such as accessing bodies, materials, interactions. Specific functionality tests should go to engines.py or elsewhere, not here.
"""
import unittest
import random
from yade.wrapper import *
from yade._customConverters import *
from yade import utils
from yade import *
from math import *
try:
from minieigen import *
except ImportError:
from miniEigen import *
## TODO tests
class TestForce(unittest.TestCase): pass
class TestTags(unittest.TestCase): pass
class TestInteractions(unittest.TestCase):
def setUp(self): O.reset()
def testEraseBodiesInInteraction(self):
O.reset()
id1 = O.bodies.append(utils.sphere([0.5,0.5,0.0+0.095],.1))
id2 = O.bodies.append(utils.sphere([0.5,0.5,0.0+0.250],.1))
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_L3Geom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_L3Geom_FrictPhys_ElPerfPl()]
),
NewtonIntegrator(damping=0.1,gravity=(0,0,-9.81))
]
O.dt=.5e-4*utils.PWaveTimeStep()
O.step()
O.bodies.erase(id1)
O.step()
class TestLoop(unittest.TestCase):
def setUp(self): O.reset()
def testSubstepping(self):
'Loop: substepping'
O.engines=[ForceResetter(),PyRunner(initRun=True,iterPeriod=1,command='pass')]
# value outside the loop
self.assert_(O.subStep==-1)
# O.subStep is meaningful when substepping
O.subStepping=True
O.step(); self.assert_(O.subStep==0)
O.step(); self.assert_(O.subStep==1)
# when substepping is turned off in the middle of the loop, the next step finishes the loop
O.subStepping=False
O.step(); self.assert_(O.subStep==-1)
# subStep==0 inside the loop without substepping
O.engines=[PyRunner(initRun=True,iterPeriod=1,command='if O.subStep!=0: raise RuntimeError("O.subStep!=0 inside the loop with O.subStepping==False!")')]
O.step()
def testEnginesModificationInsideLoop(self):
'Loop: O.engines can be modified inside the loop transparently.'
O.engines=[
PyRunner(initRun=True,iterPeriod=1,command='from yade import *; O.engines=[ForceResetter(),GravityEngine(),NewtonIntegrator()]'), # change engines here
ForceResetter() # useless engine
]
O.subStepping=True
# run prologue and the first engine, which modifies O.engines
O.step(); O.step(); self.assert_(O.subStep==1)
self.assert_(len(O.engines)==3) # gives modified engine sequence transparently
self.assert_(len(O._nextEngines)==3)
self.assert_(len(O._currEngines)==2)
O.step(); O.step(); # run the 2nd ForceResetter, and epilogue
self.assert_(O.subStep==-1)
# start the next step, nextEngines should replace engines automatically
O.step()
self.assert_(O.subStep==0)
self.assert_(len(O._nextEngines)==0)
self.assert_(len(O.engines)==3)
self.assert_(len(O._currEngines)==3)
def testDead(self):
'Loop: dead engines are not run'
O.engines=[PyRunner(dead=True,initRun=True,iterPeriod=1,command='pass')]
O.step(); self.assert_(O.engines[0].nDone==0)
class TestIO(unittest.TestCase):
def testSaveAllClasses(self):
'I/O: All classes can be saved and loaded with boost::serialization'
import yade.system
failed=set()
for c in yade.system.childClasses('Serializable'):
O.reset()
try:
O.miscParams=[eval(c)()]
O.saveTmp(quiet=True)
O.loadTmp(quiet=True)
except (RuntimeError,ValueError):
failed.add(c)
failed=list(failed); failed.sort()
self.assert_(len(failed)==0,'Failed classes were: '+' '.join(failed))
class TestMaterialStateAssociativity(unittest.TestCase):
def setUp(self): O.reset()
def testThrowsAtBadCombination(self):
"Material+State: throws when body has material and state that don't work together."
b=Body()
b.mat=CpmMat()
b.state=State() #should be CpmState()
O.bodies.append(b)
self.assertRaises(RuntimeError,lambda: O.step()) # throws runtime_error
def testThrowsAtNullState(self):
"Material+State: throws when body has material but NULL state."
b=Body()
b.mat=Material()
b.state=None # → shared_ptr<State>() by boost::python
O.bodies.append(b)
self.assertRaises(RuntimeError,lambda: O.step())
def testMaterialReturnsState(self):
"Material+State: CpmMat returns CpmState when asked for newAssocState"
self.assert_(CpmMat().newAssocState().__class__==CpmState)
class TestBodies(unittest.TestCase):
def setUp(self):
O.reset()
self.count=100
O.bodies.append([utils.sphere([random.random(),random.random(),random.random()],random.random()) for i in range(0,self.count)])
random.seed()
def testIterate(self):
"Bodies: Iteration"
counted=0
for b in O.bodies: counted+=1
self.assert_(counted==self.count)
def testLen(self):
"Bodies: len(O.bodies)"
self.assert_(len(O.bodies)==self.count)
def testErase(self):
"Bodies: erased bodies are None in python"
O.bodies.erase(0)
self.assert_(O.bodies[0]==None)
def testNegativeIndex(self):
"Bodies: Negative index counts backwards (like python sequences)."
self.assert_(O.bodies[-1]==O.bodies[self.count-1])
def testErasedIterate(self):
"Bodies: Iterator silently skips erased ones"
removed,counted=0,0
for i in range(0,10):
id=random.randint(0,self.count-1)
if O.bodies[id]: O.bodies.erase(id);removed+=1
for b in O.bodies: counted+=1
self.assert_(counted==self.count-removed)
def testErasedAndNewlyCreatedSphere(self):
"Bodies: The bug is described in LP:1001194. If the new body was created after deletion of previous, it has no bounding box"
O.reset()
id1 = O.bodies.append(utils.sphere([0.0, 0.0, 0.0],0.5))
id2 = O.bodies.append(utils.sphere([0.0, 2.0, 0.0],0.5))
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_L3Geom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_L3Geom_FrictPhys_ElPerfPl()]
),
NewtonIntegrator(damping=0.1,gravity=(0,0,-9.81))
]
O.dt=.5e-4*utils.PWaveTimeStep()
#Before first step the bodies should not have bounds
self.assert_(O.bodies[id1].bound==None and O.bodies[id2].bound==None)
O.run(1, True)
#After first step the bodies should have bounds
self.assert_(O.bodies[id1].bound!=None and O.bodies[id2].bound!=None)
#Add 3rd body
id3 = O.bodies.append(utils.sphere([0.0, 4.0, 0.0],0.5))
O.run(1, True)
self.assert_(O.bodies[id1].bound!=None and O.bodies[id2].bound!=None and O.bodies[id3].bound!=None)
#Remove 3rd body
O.bodies.erase(id3)
O.run(1, True)
#Add 4th body
id4 = O.bodies.append(utils.sphere([0.0, 6.0, 0.0],0.5))
O.run(1, True)
self.assert_(O.bodies[id1].bound!=None and O.bodies[id2].bound!=None and O.bodies[id4].bound!=None)
class TestMaterials(unittest.TestCase):
def setUp(self):
# common setup for all tests in this class
O.reset()
O.materials.append([
FrictMat(young=1,label='materialZero'),
ElastMat(young=100,label='materialOne')
])
O.bodies.append([
utils.sphere([0,0,0],.5,material=0),
utils.sphere([1,1,1],.5,material=0),
utils.sphere([1,1,1],.5,material=1)
])
def testShared(self):
"Material: shared_ptr's makes change in material immediate everywhere"
O.bodies[0].mat.young=23423333
self.assert_(O.bodies[0].mat.young==O.bodies[1].mat.young)
def testSharedAfterReload(self):
"Material: shared_ptr's are preserved when saving/loading"
O.saveTmp(quiet=True); O.loadTmp(quiet=True)
O.bodies[0].mat.young=9087438484
self.assert_(O.bodies[0].mat.young==O.bodies[1].mat.young)
def testLen(self):
"Material: len(O.materials)"
self.assert_(len(O.materials)==2)
def testNegativeIndex(self):
"Material: negative index counts backwards."
self.assert_(O.materials[-1]==O.materials[1])
def testIterate(self):
"Material: iteration over O.materials"
counted=0
for m in O.materials: counted+=1
self.assert_(counted==len(O.materials))
def testAccess(self):
"Material: find by index or label; KeyError raised for invalid label."
self.assertRaises(KeyError,lambda: O.materials['nonexistent label'])
self.assert_(O.materials['materialZero']==O.materials[0])
| gpl-2.0 |