repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
windskyer/nova | nova/tests/unit/scheduler/filters/test_num_instances_filters.py | 59 | 2920 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import num_instances_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestNumInstancesFilter(test.NoDBTestCase):
    """Exercise NumInstancesFilter and AggregateNumInstancesFilter."""

    @staticmethod
    def _fake_host(num_instances=None):
        # Build a fake host state; omit 'num_instances' entirely when no
        # count is given so the filter falls back to its default handling.
        if num_instances is None:
            props = {}
        else:
            props = {'num_instances': num_instances}
        return fakes.FakeHostState('host1', 'node1', props)

    def test_filter_num_instances_passes(self):
        # Four running instances is below the cap of five: host passes.
        self.flags(max_instances_per_host=5)
        self.filt_cls = num_instances_filter.NumInstancesFilter()
        host = self._fake_host(4)
        self.assertTrue(self.filt_cls.host_passes(host, {}))

    def test_filter_num_instances_fails(self):
        # Five running instances hits the cap of five: host is rejected.
        self.flags(max_instances_per_host=5)
        self.filt_cls = num_instances_filter.NumInstancesFilter()
        host = self._fake_host(5)
        self.assertFalse(self.filt_cls.host_passes(host, {}))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_filter_aggregate_num_instances_value(self, agg_mock):
        # The aggregate filter should prefer a per-aggregate cap over the
        # max_instances_per_host flag when one is defined.
        self.flags(max_instances_per_host=4)
        self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
        host = self._fake_host(5)
        filter_properties = {'context': mock.sentinel.ctx}
        # No aggregate defined for that host.
        agg_mock.return_value = set([])
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(host, 'max_instances_per_host')
        # Aggregate defined for that host.
        agg_mock.return_value = set(['6'])
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_filter_aggregate_num_instances_value_error(self, agg_mock):
        # A non-numeric aggregate value must not crash the filter; the
        # host passes under the flag value in that case.
        self.flags(max_instances_per_host=6)
        self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
        host = self._fake_host()
        filter_properties = {'context': mock.sentinel.ctx}
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(host, 'max_instances_per_host')
| gpl-2.0 |
sbalde/edxplatform | common/djangoapps/heartbeat/views.py | 199 | 1440 | from xmodule.modulestore.django import modulestore
from dogapi import dog_stats_api
from util.json_request import JsonResponse
from django.db import connection
from django.db.utils import DatabaseError
from xmodule.exceptions import HeartbeatFailure
@dog_stats_api.timed('edxapp.heartbeat')
def heartbeat(request):
    """
    Simple view that a loadbalancer can check to verify that the app is up. Returns a json doc
    of service id: status or message. If the status for any service is anything other than True,
    it returns HTTP code 503 (Service Unavailable); otherwise, it returns 200.
    """
    # This refactoring merely delegates to the default modulestore (which if it's mixed modulestore will
    # delegate to all configured modulestores) and a quick test of sql. A later refactoring may allow
    # any service to register itself as participating in the heartbeat. It's important that all implementation
    # do as little as possible but give a sound determination that they are ready.
    try:
        output = modulestore().heartbeat()
    except HeartbeatFailure as fail:
        return JsonResponse({fail.service: unicode(fail)}, status=503)

    cursor = connection.cursor()
    try:
        # Cheap liveness probe: any trivial statement proves the DB answers.
        cursor.execute("SELECT CURRENT_DATE")
        cursor.fetchone()
        output['SQL'] = True
    except DatabaseError as fail:
        return JsonResponse({'SQL': unicode(fail)}, status=503)
    finally:
        # Fix: the cursor was previously leaked on every request (and on
        # DatabaseError); always release it.
        cursor.close()
    return JsonResponse(output)
| agpl-3.0 |
miracle2k/stgit | stgit/commands/delete.py | 1 | 3073 |
__copyright__ = """
Copyright (C) 2005, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from stgit.argparse import opt
from stgit.commands import common
from stgit.lib import transaction
from stgit import argparse
# Command metadata consumed by stgit's command-dispatch framework.
help = 'Delete patches'
kind = 'patch'
usage = ['[options] <patch1> [<patch2>] [<patch3>..<patch4>]']
description = """
Delete the patches passed as arguments."""

# Tab-completion hints: patch names may be applied or unapplied.
args = [argparse.patch_range(argparse.applied_patches,
                             argparse.unapplied_patches)]
# Command-line options understood by 'stg delete'.
options = [
    opt('--spill', action = 'store_true',
        short = 'Spill patch contents to worktree and index', long = """
Delete the patches, but do not touch the index and worktree.
This only works with applied patches at the top of the stack.
The effect is to "spill" the patch contents into the index and
worktree. This can be useful e.g. if you want to split a patch
into several smaller pieces."""),
    opt('-b', '--branch', args = [argparse.stg_branches],
        short = 'Use BRANCH instead of the default branch')]

# The command must run inside a repository that carries an stgit stack.
directory = common.DirectoryHasRepositoryLib()
def func(parser, options, args):
    """Delete one or more patches, re-pushing any that sat on top of them."""
    stack = directory.repository.get_stack(options.branch)
    # Index/worktree can only be manipulated for the current branch.
    iw = None if options.branch else stack.repository.default_iw

    if not args:
        parser.error('No patches specified')
    patches = set(common.parse_patches(args, list(stack.patchorder.all),
                                       len(stack.patchorder.applied)))

    if options.spill:
        topmost = set(stack.patchorder.applied[-len(patches):])
        if topmost != patches:
            parser.error('Can only spill topmost applied patches')
        iw = None  # don't touch index+worktree

    def allow_conflicts(trans):
        # Allow conflicts if the topmost patch stays the same.
        applied_before = stack.patchorder.applied
        if applied_before:
            return (trans.applied
                    and trans.applied[-1] == applied_before[-1])
        return not trans.applied

    trans = transaction.StackTransaction(stack, 'delete',
                                         allow_conflicts = allow_conflicts)
    try:
        # Patches that were applied above the deleted ones get re-pushed.
        for patch_name in trans.delete_patches(lambda pn: pn in patches):
            trans.push_patch(patch_name, iw)
    except transaction.TransactionHalted:
        pass
    return trans.run(iw)
| gpl-2.0 |
frantastic9911/omaha | third_party/gmock/scripts/gmock_doctor.py | 64 | 17418 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts gcc errors in code using Google Mock to plain English."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import re
import sys
_VERSION = '1.0.3'
# Google Mock names that live in the 'testing' namespace.  The NUS
# diagnoser only reports "not declared in this scope" errors for symbols
# in this list, to avoid false positives on user identifiers.
_COMMON_GMOCK_SYMBOLS = [
    # Matchers
    '_',
    'A',
    'AddressSatisfies',
    'AllOf',
    'An',
    'AnyOf',
    'ContainerEq',
    'Contains',
    'ContainsRegex',
    'DoubleEq',
    'ElementsAre',
    'ElementsAreArray',
    'EndsWith',
    'Eq',
    'Field',
    'FloatEq',
    'Ge',
    'Gt',
    'HasSubstr',
    'IsInitializedProto',
    'Le',
    'Lt',
    'MatcherCast',
    'Matches',
    'MatchesRegex',
    'NanSensitiveDoubleEq',
    'NanSensitiveFloatEq',
    'Ne',
    'Not',
    'NotNull',
    'Pointee',
    'Property',
    'Ref',
    'ResultOf',
    'SafeMatcherCast',
    'StartsWith',
    'StrCaseEq',
    'StrCaseNe',
    'StrEq',
    'StrNe',
    'Truly',
    'TypedEq',
    'Value',

    # Actions
    'Assign',
    'ByRef',
    'DeleteArg',
    'DoAll',
    'DoDefault',
    'IgnoreResult',
    'Invoke',
    'InvokeArgument',
    'InvokeWithoutArgs',
    'Return',
    'ReturnNew',
    'ReturnNull',
    'ReturnRef',
    'SaveArg',
    'SetArgReferee',
    'SetArgumentPointee',
    'SetArrayArgument',
    'SetErrnoAndReturn',
    'Throw',
    'WithArg',
    'WithArgs',
    'WithoutArgs',

    # Cardinalities
    'AnyNumber',
    'AtLeast',
    'AtMost',
    'Between',
    'Exactly',

    # Sequences
    'InSequence',
    'Sequence',

    # Misc
    'DefaultValue',
    'Mock',
    ]

# Regex for matching source file path and line number in gcc's errors.
# The named groups feed the '%(file)s:%(line)s:' prefix built by
# _GenericDiagnoser.
_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):\s+'
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, regex, diagnosis, msg):
  """Pattern-matches one disease against gcc's output.

  Args:
    short_name: Short name of the disease.
    long_name: Long name of the disease.
    regex: Regex for matching the symptoms.
    diagnosis: Pattern for formatting the diagnosis.
    msg: Gcc's error messages.

  Yields:
    One (short name, long name, formatted diagnosis) tuple per match.
  """
  # Prefix every diagnosis with the file/line captured by _FILE_LINE_RE.
  template = '%(file)s:%(line)s:' + diagnosis
  for match in _FindAllMatches(regex, msg):
    yield (short_name, long_name, template % match.groupdict())
def _NeedToReturnReferenceDiagnoser(msg):
  """Diagnoses the NRR disease, given the error messages by gcc.

  NRR strikes when Return() is used in a mock function whose return type
  is a reference; ReturnRef() is required instead.
  """
  regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
           + _FILE_LINE_RE + r'instantiated from here\n'
           r'.*gmock-actions\.h.*error: creating array with negative size')
  # Fix: the message previously read "an Return() action".
  diagnosis = """
You are using a Return() action in a function that returns a reference.
Please use ReturnRef() instead."""
  return _GenericDiagnoser('NRR', 'Need to Return Reference',
                           regex, diagnosis, msg)
def _NeedToReturnSomethingDiagnoser(msg):
  """Diagnoses the NRS disease: a void-returning action was supplied
  where a value-returning one is needed."""
  pattern = (_FILE_LINE_RE +
             r'(instantiated from here\n.'
             r'*gmock.*actions\.h.*error: void value not ignored)'
             r'|(error: control reaches end of non-void function)')
  advice = """
You are using an action that returns void, but it needs to return
*something*. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
  return _GenericDiagnoser('NRS', 'Need to Return Something',
                           pattern, advice, msg)
def _NeedToReturnNothingDiagnoser(msg):
  """Diagnoses the NRN disease: a value-returning action was supplied
  where a void-returning one is needed."""
  pattern = (_FILE_LINE_RE + r'instantiated from here\n'
             r'.*gmock-actions\.h.*error: instantiation of '
             r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
             r'as type \'void\'')
  advice = """
You are using an action that returns *something*, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
  return _GenericDiagnoser('NRN', 'Need to Return Nothing',
                           pattern, advice, msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
  """Diagnoses the IBRA disease: a mocked function takes an argument
  whose type is only forward-declared."""
  pattern = (_FILE_LINE_RE + r'instantiated from here\n'
             r'.*gmock-printers\.h.*error: invalid application of '
             r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
  advice = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
  return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
                           pattern, advice, msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
  """Diagnoses the OFM disease: an overloaded function was handed to
  Truly() without disambiguation."""
  pattern = (_FILE_LINE_RE + r'error: no matching function for '
             r'call to \'Truly\(<unresolved overloaded function type>\)')
  advice = """
The argument you gave to Truly() is an overloaded function. Please tell
gcc which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
  return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
                           pattern, advice, msg)
def _OverloadedFunctionActionDiagnoser(msg):
  """Diagnoses the OFA disease: an overloaded free function was handed
  to Invoke() without disambiguation."""
  pattern = (_FILE_LINE_RE + r'error: no matching function for call to \'Invoke\('
             r'<unresolved overloaded function type>')
  advice = """
You are passing an overloaded function to Invoke(). Please tell gcc
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
  return _GenericDiagnoser('OFA', 'Overloaded Function Action',
                           pattern, advice, msg)
def _OverloadedMethodActionDiagnoser1(msg):
  """Diagnoses the OMA disease (first gcc message shape): an overloaded
  method was handed to Invoke() without disambiguation."""
  pattern = (_FILE_LINE_RE + r'error: '
             r'.*no matching function for call to \'Invoke\(.*, '
             r'unresolved overloaded function type>')
  advice = """
The second argument you gave to Invoke() is an overloaded method. Please
tell gcc which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
  return _GenericDiagnoser('OMA', 'Overloaded Method Action',
                           pattern, advice, msg)
def _MockObjectPointerDiagnoser(msg):
  """Diagnoses the MOP disease: a *pointer* to a mock was passed to
  ON_CALL()/EXPECT_CALL() instead of the mock object itself."""
  pattern = (_FILE_LINE_RE + r'error: request for member '
             r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
             r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
  advice = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
  return _GenericDiagnoser('MOP', 'Mock Object Pointer',
                           pattern, advice, msg)
def _OverloadedMethodActionDiagnoser2(msg):
  """Diagnoses the OMA disease (second gcc message shape): an overloaded
  method was handed to Invoke() without disambiguation."""
  pattern = (_FILE_LINE_RE + r'error: no matching function for '
             r'call to \'Invoke\(.+, <unresolved overloaded function type>\)')
  advice = """
The second argument you gave to Invoke() is an overloaded method. Please
tell gcc which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
  return _GenericDiagnoser('OMA', 'Overloaded Method Action',
                           pattern, advice, msg)
def _NeedToUseSymbolDiagnoser(msg):
  """Diagnoses the NUS disease: a Google Mock symbol is used without a
  'using testing::...' declaration.

  Only symbols listed in _COMMON_GMOCK_SYMBOLS are reported, to avoid
  flagging unrelated user identifiers.
  """
  pattern = (_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
             r'(was not declared in this scope|has not been declared)')
  advice = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
  for match in _FindAllMatches(pattern, msg):
    fields = match.groupdict()
    if fields['symbol'] in _COMMON_GMOCK_SYMBOLS:
      yield ('NUS', 'Need to Use Symbol', advice % fields)
def _NeedToUseReturnNullDiagnoser(msg):
  """Diagnoses the NRNULL disease: Return(NULL) used where the compiler
  cannot deduce the pointer type; ReturnNull() is the cure."""
  pattern = ('instantiated from \'testing::internal::ReturnAction<R>'
             '::operator testing::Action<Func>\(\) const.*\n' +
             _FILE_LINE_RE + r'instantiated from here\n'
             r'.*error: no matching function for call to \'implicit_cast\('
             r'long int&\)')
  advice = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into the right type. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
  return _GenericDiagnoser('NRNULL', 'Need to use ReturnNull',
                           pattern, advice, msg)
# Diagnosis text shared by the three Type-in-Templated-Base diagnosers;
# '%(type)s' is substituted with the offending type's name.
_TTB_DIAGNOSIS = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
def _TypeInTemplatedBaseDiagnoser1(msg):
  """Diagnoses the TTB disease when the offending type appears as the
  mock function's return type.

  Handles the distinct message shapes emitted by gcc 4.3.1 and 4.4.0.
  """
  gcc_4_3_1_regex = (
      r'In member function \'int .*\n' + _FILE_LINE_RE +
      r'error: a function call cannot appear in a constant-expression')
  gcc_4_4_0_regex = (
      r'error: a function call cannot appear in a constant-expression'
      + _FILE_LINE_RE + r'error: template argument 1 is invalid\n')
  advice = _TTB_DIAGNOSIS % {'type': 'Foo'}
  results = list(_GenericDiagnoser('TTB', 'Type in Template Base',
                                   gcc_4_3_1_regex, advice, msg))
  results.extend(_GenericDiagnoser('TTB', 'Type in Template Base',
                                   gcc_4_4_0_regex, advice, msg))
  return results
def _TypeInTemplatedBaseDiagnoser2(msg):
  """Diagnoses the TTB disease when the offending type appears as the
  mock function's sole parameter type."""
  pattern = (_FILE_LINE_RE +
             r'error: \'(?P<type>.+)\' was not declared in this scope\n'
             r'.*error: template argument 1 is invalid\n')
  return _GenericDiagnoser('TTB', 'Type in Template Base',
                           pattern, _TTB_DIAGNOSIS, msg)
def _TypeInTemplatedBaseDiagnoser3(msg):
  """Diagnoses the TTB disease when the offending type appears as one
  parameter of a mock function that has several."""
  pattern = (r'error: expected `;\' before \'::\' token\n'
             + _FILE_LINE_RE +
             r'error: \'(?P<type>.+)\' was not declared in this scope\n'
             r'.*error: template argument 1 is invalid\n'
             r'.*error: \'.+\' was not declared in this scope')
  return _GenericDiagnoser('TTB', 'Type in Template Base',
                           pattern, _TTB_DIAGNOSIS, msg)
def _WrongMockMethodMacroDiagnoser(msg):
  """Diagnoses the WMM disease: MOCK_METHODn used with the wrong n for
  the method's actual argument count."""
  pattern = (_FILE_LINE_RE +
             r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
             r'.*\n'
             r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
  advice = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
  return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
                           pattern, advice, msg)
def _WrongParenPositionDiagnoser(msg):
  """Diagnoses the WPP disease: the closing parenthesis of
  ON_CALL/EXPECT_CALL was placed after the expectation clause."""
  pattern = (_FILE_LINE_RE +
             r'error:.*testing::internal::MockSpec<.* has no member named \''
             r'(?P<method>\w+)\'')
  advice = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
  return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
                           pattern, advice, msg)
# Every known diagnoser, tried in turn by Diagnose().  Kept in
# alphabetical order; add new diagnosers here as they are written.
_DIAGNOSERS = [
    _IncompleteByReferenceArgumentDiagnoser,
    _MockObjectPointerDiagnoser,
    _NeedToReturnNothingDiagnoser,
    _NeedToReturnReferenceDiagnoser,
    _NeedToReturnSomethingDiagnoser,
    _NeedToUseReturnNullDiagnoser,
    _NeedToUseSymbolDiagnoser,
    _OverloadedFunctionActionDiagnoser,
    _OverloadedFunctionMatcherDiagnoser,
    _OverloadedMethodActionDiagnoser1,
    _OverloadedMethodActionDiagnoser2,
    _TypeInTemplatedBaseDiagnoser1,
    _TypeInTemplatedBaseDiagnoser2,
    _TypeInTemplatedBaseDiagnoser3,
    _WrongMockMethodMacroDiagnoser,
    _WrongParenPositionDiagnoser,
    ]
def Diagnose(msg):
  """Runs every diagnoser over msg and returns the de-duplicated,
  formatted diagnoses in first-seen order."""
  results = []
  for diagnoser in _DIAGNOSERS:
    for short_name, long_name, text in diagnoser(msg):
      formatted = '[%s - %s]\n%s' % (short_name, long_name, text)
      if formatted not in results:
        results.append(formatted)
  return results
def main():
  """Entry point: reads gcc output from stdin and prints diagnoses."""
  print ('Google Mock Doctor v%s - '
         'diagnoses problems in code using Google Mock.' % _VERSION)

  if sys.stdin.isatty():
    # Interactive use: prompt the user to paste the compiler output.
    print ('Please copy and paste the compiler errors here. Press c-D when '
           'you are done:')
  else:
    # Piped use, e.g. `make 2>&1 | gmock_doctor.py`.
    print 'Waiting for compiler errors on stdin . . .'

  # Read everything up to EOF; diagnosers match across multiple lines.
  msg = sys.stdin.read().strip()
  diagnoses = Diagnose(msg)
  count = len(diagnoses)
  if not count:
    # Nothing recognized: echo the raw errors between cut marks and ask
    # the user to report the case.
    print '\nGcc complained:'
    print '8<------------------------------------------------------------'
    print msg
    print '------------------------------------------------------------>8'
    print """
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and gcc's error messages to
googlemock@googlegroups.com, you can be helped and I can get smarter --
win-win for us!"""
  else:
    print '------------------------------------------------------------'
    # Trailing comma: Python 2 print continues on the same line.
    print 'Your code appears to have the following',
    if count > 1:
      print '%s diseases:' % (count,)
    else:
      print 'disease:'
    i = 0
    for d in diagnoses:
      i += 1
      if count > 1:
        # Number each diagnosis when there are several.
        print '\n#%s:' % (i,)
      print d
    print """
How did I do? If you think I'm wrong or unhelpful, please send your
source code and gcc's error messages to googlemock@googlegroups.com. Then
you can be helped and I can get smarter -- I promise I won't be upset!"""


if __name__ == '__main__':
  main()
| apache-2.0 |
allenai/deep_qa | deep_qa/models/reading_comprehension/gated_attention_reader.py | 3 | 14808 | from typing import Dict
from overrides import overrides
from keras.layers import Input, Dropout, Concatenate
from ...data.instances.reading_comprehension.mc_question_passage_instance import McQuestionPassageInstance
from ...common.checks import ConfigurationError
from ...layers.backend import BatchDot
from ...layers.attention import Attention, MaskedSoftmax, GatedAttention
from ...layers import OptionAttentionSum, Overlap, L1Normalize
from ...layers import VectorMatrixSplit, BiGRUIndexSelector
from ...common.params import Params
from ...training.text_trainer import TextTrainer
from ...training.models import DeepQaModel
class GatedAttentionReader(TextTrainer):
"""
This TextTrainer implements the Gated Attention Reader model described in
"Gated-Attention Readers for Text Comprehension" by Dhingra et. al 2016. It encodes
the document with a variable number of gated attention layers, and then encodes
the query. It takes the dot product of these two final encodings to generate an
attention over the words in the document, and it then selects the option with the
highest summed or mean weight as the answer.
Parameters
----------
multiword_option_mode: str, optional (default="mean")
Describes how to calculate the probability of options
that contain multiple words. If "mean", the probability of
the option is taken to be the mean of the probabilities of
its constituent words. If "sum", the probability of the option
is taken to be the sum of the probabilities of its constituent
words.
num_gated_attention_layers: int, optional (default=3)
The number of gated attention layers to pass the document
embedding through. Must be at least 1.
cloze_token: str, optional (default=None)
If not None, the string that represents the cloze token in a cloze question.
Used to calculate the attention over the document, as the model does it
differently for cloze vs non-cloze datasets.
gating_function: str, optional (default="*")
The gating function to use in the Gated Attention layer. ``"*"`` is for
elementwise multiplication, ``"+"`` is for elementwise addition, and
``"|"`` is for concatenation.
gated_attention_dropout: float, optional (default=0.3)
The proportion of units to drop out after each gated attention layer.
qd_common_feature: boolean, optional (default=True)
Whether to use the question-document common word feature. This feature simply
indicates, for each word in the document, whether it appears in the query
and has been shown to improve reading comprehension performance.
"""
    def __init__(self, params: Params):
        """
        Pop this model's parameters off of ``params`` and delegate the
        remainder (embedding, encoder, trainer settings) to ``TextTrainer``.
        """
        # Padding lengths (in words); any left as None are filled in later
        # from the data (see _set_padding_lengths).
        self.max_question_length = params.pop('max_question_length', None)
        self.max_passage_length = params.pop('max_passage_length', None)
        self.max_option_length = params.pop('max_option_length', None)
        self.num_options = params.pop('num_options', None)
        # either "mean" or "sum"
        self.multiword_option_mode = params.pop('multiword_option_mode', "mean")
        # number of gated attention layers to use
        self.num_gated_attention_layers = params.pop('num_gated_attention_layers', 3)
        # gating function to use, either "*", "+", or "|"
        self.gating_function = params.pop('gating_function', "*")
        # dropout proportion after each gated attention layer.
        self.gated_attention_dropout = params.pop('gated_attention_dropout', 0.3)
        # If you are using the model on a cloze (fill in the blank) dataset,
        # indicate what token indicates the blank.
        self.cloze_token = params.pop('cloze_token', None)
        # Resolved to a vocabulary index in _build_model, once the data
        # indexer is available.
        self.cloze_token_index = None
        # use the question document common word feature
        self.use_qd_common_feature = params.pop('qd_common_feature', True)
        super(GatedAttentionReader, self).__init__(params)
    @overrides
    def _build_model(self):
        """
        Build the gated-attention Keras graph and wrap it in a DeepQaModel.

        The basic outline here is that we'll pass the questions and the
        document / passage (think of this as a collection of possible answer
        choices) into a word embedding layer.  The returned model takes
        (question, document, options) word-index inputs and produces
        L1-normalized per-option probabilities.
        """
        # get the index of the cloze token, if applicable
        if self.cloze_token is not None:
            self.cloze_token_index = self.data_indexer.get_word_index(self.cloze_token)

        # First we create input layers and pass the question and document
        # through embedding layers.

        # shape: (batch size, question_length)
        question_input_shape = self._get_sentence_shape(self.max_question_length)
        question_input = Input(shape=question_input_shape,
                               dtype='int32', name="question_input")
        # if using character embeddings, split off the question word indices.
        if len(question_input_shape) > 1:
            question_indices = VectorMatrixSplit(split_axis=-1)(question_input)[0]
        else:
            question_indices = question_input

        # shape: (batch size, document_length)
        document_input_shape = self._get_sentence_shape(self.max_passage_length)
        document_input = Input(shape=self._get_sentence_shape(self.max_passage_length),
                               dtype='int32',
                               name="document_input")
        # if using character embeddings, split off the document word indices.
        if len(document_input_shape) > 1:
            document_indices = VectorMatrixSplit(split_axis=-1)(document_input)[0]
        else:
            document_indices = document_input

        # shape: (batch size, number of options, num words in option)
        options_input_shape = ((self.num_options,) +
                               self._get_sentence_shape(self.max_option_length))
        options_input = Input(shape=options_input_shape,
                              dtype='int32', name="options_input")
        # if using character embeddings, split off the option word indices.
        if len(options_input_shape) > 2:
            options_indices = VectorMatrixSplit(split_axis=-1)(options_input)[0]
        else:
            options_indices = options_input

        # shape: (batch size, question_length, embedding size)
        question_embedding = self._embed_input(question_input)
        # shape: (batch size, document_length, embedding size)
        document_embedding = self._embed_input(document_input)

        # We pass the question and document embedding through a variable
        # number of gated-attention layers.
        if self.num_gated_attention_layers < 1:
            raise ConfigurationError("Need at least one gated attention layer.")
        # All layers except the final one; the final layer (below) uses a
        # separate encoder and optionally the question-document word feature.
        for i in range(self.num_gated_attention_layers-1):
            # Note that the size of the last dimension of the input
            # is not necessarily the embedding size in the second gated
            # attention layer and beyond.

            # We encode the question embeddings with a seq2seq encoder.
            question_encoder = self._get_seq2seq_encoder(name="question_{}".format(i))
            # shape: (batch size, question_length, 2*seq2seq hidden size)
            encoded_question = question_encoder(question_embedding)

            # We encode the document embeddings with a seq2seq encoder.
            # Note that this is not the same encoder as used for the question.
            document_encoder = self._get_seq2seq_encoder(name="document_{}".format(i))
            # shape: (batch size, document_length, 2*seq2seq hidden size)
            encoded_document = document_encoder(document_embedding)

            # (batch size, document length, question length)
            qd_attention = BatchDot()([encoded_document, encoded_question])
            # (batch size, document length, question length)
            normalized_qd_attention = MaskedSoftmax()(qd_attention)

            gated_attention_layer = GatedAttention(self.gating_function,
                                                   name="gated_attention_{}".format(i))
            # shape: (batch size, document_length, 2*seq2seq hidden size)
            document_embedding = gated_attention_layer([encoded_document,
                                                        encoded_question,
                                                        normalized_qd_attention])
            gated_attention_dropout = Dropout(self.gated_attention_dropout)
            # shape: (batch size, document_length, 2*seq2seq hidden size)
            document_embedding = gated_attention_dropout(document_embedding)

        # Last Layer
        if self.use_qd_common_feature:
            # get the one-hot features for common occurrence
            # shape (batch size, document_indices, 2)
            qd_common_feature = Overlap()([document_indices,
                                           question_indices])
            # We concatenate qd_common_feature with the document embeddings.
            # shape: (batch size, document_length, (2*seq2seq hidden size) + 2)
            document_embedding = Concatenate()([document_embedding, qd_common_feature])
        # We encode the document embeddings with a final seq2seq encoder.
        document_encoder = self._get_seq2seq_encoder(name="document_final")
        # shape: (batch size, document_length, 2*seq2seq hidden size)
        final_encoded_document = document_encoder(document_embedding)

        if self.cloze_token is None:
            # Get a final encoding of the question from a biGRU that does not return
            # the sequence, and use it to calculate attention over the document.
            final_question_encoder = self._get_encoder(name="question_final")
            # shape: (batch size, 2*seq2seq hidden size)
            final_encoded_question = final_question_encoder(question_embedding)
        else:
            # We get a final encoding of the question by concatenating the forward
            # and backward GRU at the index of the cloze token.
            final_question_encoder = self._get_seq2seq_encoder(name="question_final")
            # each are shape (batch size, question_length, seq2seq hidden size)
            encoded_question_f, encoded_question_b = final_question_encoder(question_embedding)
            # extract the gru outputs at the cloze token from the forward and
            # backwards passes
            index_selector = BiGRUIndexSelector(self.cloze_token_index)
            final_encoded_question = index_selector([question_indices,
                                                     encoded_question_f,
                                                     encoded_question_b])

        # take the softmax of the document_embedding after it has been passed
        # through gated attention layers to get document probabilities
        # shape: (batch size, document_length)
        document_probabilities = Attention(name='question_document_softmax')([final_encoded_question,
                                                                              final_encoded_document])
        # We sum together the weights of words that match each option
        # and use the multiword_option_mode to determine how to calculate
        # the total probability of the option.
        options_sum_layer = OptionAttentionSum(self.multiword_option_mode,
                                               name="options_probability_sum")
        # shape: (batch size, num_options)
        options_probabilities = options_sum_layer([document_indices,
                                                   document_probabilities,
                                                   options_indices])
        # We normalize the option_probabilities by dividing it by its L1 norm.
        l1_norm_layer = L1Normalize()
        # shape: (batch size, num_options)
        option_normalized_probabilities = l1_norm_layer(options_probabilities)
        return DeepQaModel(input=[question_input, document_input, options_input],
                           output=option_normalized_probabilities)
@overrides
def _instance_type(self):
    """Specify which ``Instance`` subclass this model reads and trains on."""
    # The Gated Attention Reader consumes multiple-choice question/passage
    # instances (question, passage, answer options).
    return McQuestionPassageInstance
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
    """
    Extend the superclass padding lengths with this model's question,
    passage and option limits, and return the combined dictionary.
    """
    lengths = super(GatedAttentionReader, self).get_padding_lengths()
    lengths.update({
            'num_question_words': self.max_question_length,
            'num_passage_words': self.max_passage_length,
            'num_option_words': self.max_option_length,
            'num_options': self.num_options,
    })
    return lengths
@overrides
def _set_padding_lengths(self, padding_lengths: Dict[str, int]):
    """
    Adopt the dataset-derived padding lengths for any limit the user did
    not set explicitly (i.e. any attribute still ``None``).
    """
    # TODO(nelson): superclass complains that there is no
    # num_sentence_words key, so we set it to None here.
    # We should probably patch up / organize the API.
    padding_lengths["num_sentence_words"] = None
    super(GatedAttentionReader, self)._set_padding_lengths(padding_lengths)
    # Only fill in limits that were left unset; explicit user settings win.
    limit_map = (('max_question_length', 'num_question_words'),
                 ('max_passage_length', 'num_passage_words'),
                 ('max_option_length', 'num_option_words'),
                 ('num_options', 'num_options'))
    for attribute, key in limit_map:
        if getattr(self, attribute) is None:
            setattr(self, attribute, padding_lengths[key])
@overrides
def _set_padding_lengths_from_model(self):
    """
    Recover the padding limits from the input shapes of an already-built
    model (e.g. one loaded from disk).
    """
    # Input order is (question, document, options); each shape starts with
    # the batch dimension.  get_input_shape_at is a shape accessor, so a
    # single lookup is equivalent to the repeated calls it replaces.
    question_shape, document_shape, options_shape = self.model.get_input_shape_at(0)
    self._set_text_lengths_from_model_input(document_shape[1:])
    self.max_question_length = question_shape[1]
    self.max_passage_length = document_shape[1]
    self.num_options = options_shape[1]
    self.max_option_length = options_shape[2]
@classmethod
def _get_custom_objects(cls):
    """
    Return the mapping of custom Keras layer names to classes needed to
    deserialize a saved GatedAttentionReader model.
    """
    custom_objects = super(GatedAttentionReader, cls)._get_custom_objects()
    custom_objects.update({
            "Attention": Attention,
            "BatchDot": BatchDot,
            "BiGRUIndexSelector": BiGRUIndexSelector,
            "GatedAttention": GatedAttention,
            "L1Normalize": L1Normalize,
            "MaskedSoftmax": MaskedSoftmax,
            "OptionAttentionSum": OptionAttentionSum,
            "Overlap": Overlap,
            "VectorMatrixSplit": VectorMatrixSplit,
    })
    return custom_objects
| apache-2.0 |
FedoraScientific/salome-smesh | src/Tools/blocFissure/CasTests/fissure_Coude_4.py | 1 | 3081 | # -*- coding: utf-8 -*-
from fissure_Coude import fissure_Coude
class fissure_Coude_4(fissure_Coude):
"""
probleme de fissure du Coude : ASCOU09A
adaptation maillage
"""
# ---------------------------------------------------------------------------
def setParamGeometrieSaine(self):
"""
Paramètres géométriques du tuyau coudé sain:
angleCoude
r_cintr
l_tube_p1
l_tube_p2
epais
de
"""
self.geomParams = dict(angleCoude = 40,
r_cintr = 654,
l_tube_p1 = 1700,
l_tube_p2 = 1700,
epais = 62.5,
de = 912.4)
# ---------------------------------------------------------------------------
def setParamMaillageSain(self):
self.meshParams = dict(n_long_p1 = 16,
n_ep = 5,
n_long_coude = 30,
n_circ_g = 50,
n_circ_d = 20,
n_long_p2 = 12)
# ---------------------------------------------------------------------------
def setParamShapeFissure(self):
"""
paramètres de la fissure
profondeur : 0 < profondeur <= épaisseur
azimut : entre 0 et 360°
alpha : 0 < alpha < angleCoude
longueur : <=2*profondeur ==> ellipse, >2*profondeur = fissure longue
orientation : 0° : longitudinale, 90° : circonférentielle, autre : uniquement fissures elliptiques
externe : True : fissure face externe, False : fissure face interne
"""
print "setParamShapeFissure", self.nomCas
self.shapeFissureParams = dict(nomRep = '.',
nomFicSain = self.nomCas,
nomFicFissure = 'fissure_' + self.nomCas,
profondeur = 10,
azimut = 90,
alpha = 20,
longueur = 240,
orientation = 90,
lgInfluence = 30,
elliptique = False,
convexe = True,
externe = True)
# ---------------------------------------------------------------------------
def setReferencesMaillageFissure(self):
self.referencesMaillageFissure = dict(Entity_Quad_Pyramid = 948,
Entity_Quad_Triangle = 1562,
Entity_Quad_Edge = 1192,
Entity_Quad_Penta = 732,
Entity_Quad_Hexa = 22208,
Entity_Node = 133418,
Entity_Quad_Tetra = 18759,
Entity_Quad_Quadrangle = 11852)
| lgpl-2.1 |
jaeilepp/mne-python | mne/connectivity/effective.py | 3 | 6612 | # Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from ..externals.six.moves import zip
import copy
import numpy as np
from ..utils import logger, verbose
from .spectral import spectral_connectivity
@verbose
def phase_slope_index(data, indices=None, sfreq=2 * np.pi,
                      mode='multitaper', fmin=None, fmax=np.inf,
                      tmin=None, tmax=None, mt_bandwidth=None,
                      mt_adaptive=False, mt_low_bias=True,
                      cwt_frequencies=None, cwt_n_cycles=7, block_size=1000,
                      n_jobs=1, verbose=None):
    """Compute the Phase Slope Index (PSI) connectivity measure.

    The PSI is an effective connectivity measure, i.e., a measure which can
    give an indication of the direction of the information flow (causality).
    For two time series, and one computes the PSI between the first and the
    second time series as follows

    indices = (np.array([0]), np.array([1]))
    psi = phase_slope_index(data, indices=indices, ...)

    A positive value means that time series 0 is ahead of time series 1 and
    a negative value means the opposite.

    The PSI is computed from the coherency (see spectral_connectivity), details
    can be found in [1].

    References
    ----------
    [1] Nolte et al. "Robustly Estimating the Flow Direction of Information in
        Complex Physical Systems", Physical Review Letters, vol. 100, no. 23,
        pp. 1-4, Jun. 2008.

    Parameters
    ----------
    data : array-like, shape=(n_epochs, n_signals, n_times)
        Can also be a list/generator of array, shape =(n_signals, n_times);
        list/generator of SourceEstimate; or Epochs.
        The data from which to compute connectivity. Note that it is also
        possible to combine multiple signals by providing a list of tuples,
        e.g., data = [(arr_0, stc_0), (arr_1, stc_1), (arr_2, stc_2)],
        corresponds to 3 epochs, and arr_* could be an array with the same
        number of time points as stc_*.
    indices : tuple of arrays | None
        Two arrays with indices of connections for which to compute
        connectivity. If None, all connections are computed.
    sfreq : float
        The sampling frequency.
    mode : str
        Spectrum estimation mode can be either: 'multitaper', 'fourier', or
        'cwt_morlet'.
    fmin : float | tuple of floats
        The lower frequency of interest. Multiple bands are defined using
        a tuple, e.g., (8., 20.) for two bands with 8Hz and 20Hz lower freq.
        If None the frequency corresponding to an epoch length of 5 cycles
        is used.
    fmax : float | tuple of floats
        The upper frequency of interest. Multiple bands are defined using
        a tuple, e.g. (13., 30.) for two band with 13Hz and 30Hz upper freq.
    tmin : float | None
        Time to start connectivity estimation.
    tmax : float | None
        Time to end connectivity estimation.
    mt_bandwidth : float | None
        The bandwidth of the multitaper windowing function in Hz.
        Only used in 'multitaper' mode.
    mt_adaptive : bool
        Use adaptive weights to combine the tapered spectra into PSD.
        Only used in 'multitaper' mode.
    mt_low_bias : bool
        Only use tapers with more than 90% spectral concentration within
        bandwidth. Only used in 'multitaper' mode.
    cwt_frequencies : array
        Array of frequencies of interest. Only used in 'cwt_morlet' mode.
    cwt_n_cycles: float | array of float
        Number of cycles. Fixed number or one per frequency. Only used in
        'cwt_morlet' mode.
    block_size : int
        How many connections to compute at once (higher numbers are faster
        but require more memory).
    n_jobs : int
        How many epochs to process in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    psi : array
        Computed connectivity measure(s). The shape of each array is either
        (n_signals, n_signals, n_bands) mode: 'multitaper' or 'fourier'
        (n_signals, n_signals, n_bands, n_times) mode: 'cwt_morlet'
        when "indices" is None, or
        (n_con, n_bands) mode: 'multitaper' or 'fourier'
        (n_con, n_bands, n_times) mode: 'cwt_morlet'
        when "indices" is specified and "n_con = len(indices[0])".
    freqs : array
        Frequency points at which the connectivity was computed.
    times : array
        Time points for which the connectivity was computed.
    n_epochs : int
        Number of epochs used for computation.
    n_tapers : int
        The number of DPSS tapers used. Only defined in 'multitaper' mode.
        Otherwise None is returned.
    """
    logger.info('Estimating phase slope index (PSI)')
    # estimate the coherency
    cohy, freqs_, times, n_epochs, n_tapers = spectral_connectivity(
        data, method='cohy', indices=indices, sfreq=sfreq, mode=mode,
        fmin=fmin, fmax=fmax, fskip=0, faverage=False, tmin=tmin, tmax=tmax,
        mt_bandwidth=mt_bandwidth, mt_adaptive=mt_adaptive,
        mt_low_bias=mt_low_bias, cwt_frequencies=cwt_frequencies,
        cwt_n_cycles=cwt_n_cycles, block_size=block_size, n_jobs=n_jobs,
        verbose=verbose)
    logger.info('Computing PSI from estimated Coherency')
    # compute PSI in the requested bands
    if fmin is None:
        fmin = -np.inf  # set it to -inf, so we can adjust it later
    bands = list(zip(np.asarray((fmin,)).ravel(), np.asarray((fmax,)).ravel()))
    n_bands = len(bands)
    # In 'cwt_morlet' mode the last axis is time, so frequency sits one
    # axis earlier.
    freq_dim = -2 if mode == 'cwt_morlet' else -1
    # allocate space for output
    out_shape = list(cohy.shape)
    out_shape[freq_dim] = n_bands
    # NOTE: use the builtin `float`; the `np.float` alias was removed in
    # NumPy 1.24.
    psi = np.zeros(out_shape, dtype=float)
    # allocate accumulator
    acc_shape = copy.copy(out_shape)
    acc_shape.pop(freq_dim)
    acc = np.empty(acc_shape, dtype=np.complex128)
    freqs = list()
    # Index templates: full slices everywhere except the frequency axis,
    # which is set per iteration below.  NumPy requires a *tuple* (not a
    # list) for multi-axis indexing, hence the tuple(...) at the use sites.
    idx_fi = [slice(None)] * cohy.ndim
    idx_fj = [slice(None)] * cohy.ndim
    for band_idx, band in enumerate(bands):
        freq_idx = np.where((freqs_ > band[0]) & (freqs_ < band[1]))[0]
        freqs.append(freqs_[freq_idx])
        acc.fill(0.)
        # Accumulate conj(C(f)) * C(f+1) over neighboring frequency bins;
        # the imaginary part of the sum is the phase slope estimate.
        for fi, fj in zip(freq_idx, freq_idx[1:]):
            idx_fi[freq_dim] = fi
            idx_fj[freq_dim] = fj
            acc += np.conj(cohy[tuple(idx_fi)]) * cohy[tuple(idx_fj)]
        idx_fi[freq_dim] = band_idx
        psi[tuple(idx_fi)] = np.imag(acc)
    logger.info('[PSI Estimation Done]')
    return psi, freqs, times, n_epochs, n_tapers
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/manifold/spectral_embedding_.py | 39 | 20835 | """Spectral Embedding"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state, check_array, check_symmetric
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
An array of bool value indicating the indexes of the nodes
belonging to the largest connected components of the given query
node
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=np.bool)
nodes_to_explore = np.zeros(n_node, dtype=np.bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
neighbors = graph[i].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
    """ Return whether the graph is connected (True) or Not (False)

    Parameters
    ----------
    graph : array-like or sparse matrix, shape: (n_samples, n_samples)
        adjacency matrix of the graph, non-zero weight means an edge
        between the nodes

    Returns
    -------
    is_connected : bool
        True means the graph is fully connected and False means not
    """
    if not sparse.isspmatrix(graph):
        # dense graph: flood-fill from node 0 and check the component
        # covers every node
        return _graph_connected_component(graph, 0).sum() == graph.shape[0]
    # sparse graph: count the connected components directly
    n_connected_components, _ = connected_components(graph)
    return n_connected_components == 1
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
norm_laplacian : bool
Whether the value of the diagonal should be changed or not
Returns
-------
laplacian : array or sparse matrix
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
# We need all entries in the diagonal to values
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Read more in the :ref:`User Guide <spectral_embedding>`.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional, default 8
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    norm_laplacian : bool, optional, default=True
        If True, then compute normalized Laplacian.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
    will simply uncover the connected components of the graph.

    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    adjacency = check_symmetric(adjacency)
    # pyamg is optional; its absence is only an error if the caller
    # explicitly asked for the 'amg' solver.
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")
    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)
    random_state = check_random_state(random_state)
    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1
    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=norm_laplacian, return_diag=True)
    # arpack path: also taken for non-lobpcg solvers when the problem is
    # dense or too small for lobpcg to be reliable (see note below).
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
       (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol, v0=v0)
            # NOTE(review): the reversed slice assumes eigsh's output
            # ordering puts the wanted eigenvectors last — confirm before
            # touching this line.
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1
    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        # Seed the first column with the degree vector (the known trivial
        # eigenvector direction).
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            # Deliberate bare ValueError: signals total failure of the
            # decomposition to the caller.
            raise ValueError
    elif eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError
    # Fix the eigenvectors' sign so repeated runs give identical output.
    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
class SpectralEmbedding(BaseEstimator):
    """Spectral embedding for non-linear dimensionality reduction.

    Forms an affinity matrix given by the specified function and
    applies spectral decomposition to the corresponding graph laplacian.
    The resulting transformation is given by the value of the
    eigenvectors for each data point.

    Read more in the :ref:`User Guide <spectral_embedding>`.

    Parameters
    -----------
    n_components : integer, default: 2
        The dimension of the projected subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None, default : None
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition when eigen_solver == 'amg'.

    affinity : string or callable, default : "nearest_neighbors"
        How to construct the affinity matrix.
         - 'nearest_neighbors' : construct affinity matrix by knn graph
         - 'rbf' : construct affinity matrix by rbf kernel
         - 'precomputed' : interpret X as precomputed affinity matrix
         - callable : use passed in function as affinity
           the function takes in data matrix (n_samples, n_features)
           and return affinity matrix (n_samples, n_samples).

    gamma : float, optional, default : 1/n_features
        Kernel coefficient for rbf kernel.

    n_neighbors : int, default : max(n_samples/10 , 1)
        Number of nearest neighbors for nearest_neighbors graph building.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    embedding_ : array, shape = (n_samples, n_components)
        Spectral embedding of the training matrix.

    affinity_matrix_ : array, shape = (n_samples, n_samples)
        Affinity_matrix constructed from samples or precomputed.

    References
    ----------
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - On Spectral Clustering: Analysis and an algorithm, 2011
      Andrew Y. Ng, Michael I. Jordan, Yair Weiss
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    """
    def __init__(self, n_components=2, affinity="nearest_neighbors",
                 gamma=None, random_state=None, eigen_solver=None,
                 n_neighbors=None, n_jobs=1):
        # Per sklearn convention, __init__ only stores parameters;
        # validation happens in fit().
        self.n_components = n_components
        self.affinity = affinity
        self.gamma = gamma
        self.random_state = random_state
        self.eigen_solver = eigen_solver
        self.n_neighbors = n_neighbors
        self.n_jobs = n_jobs

    @property
    def _pairwise(self):
        # Tells sklearn utilities that X is a sample-by-sample matrix
        # when a precomputed affinity is used.
        return self.affinity == "precomputed"

    def _get_affinity_matrix(self, X, Y=None):
        """Calculate the affinity matrix from data
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        affinity_matrix, shape (n_samples, n_samples)
        """
        if self.affinity == 'precomputed':
            self.affinity_matrix_ = X
            return self.affinity_matrix_
        if self.affinity == 'nearest_neighbors':
            if sparse.issparse(X):
                # NOTE: this mutates self.affinity so the estimator keeps
                # using 'rbf' afterwards.
                warnings.warn("Nearest neighbors affinity currently does "
                              "not support sparse input, falling back to "
                              "rbf affinity")
                self.affinity = "rbf"
            else:
                self.n_neighbors_ = (self.n_neighbors
                                     if self.n_neighbors is not None
                                     else max(int(X.shape[0] / 10), 1))
                self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_,
                                                         include_self=True,
                                                         n_jobs=self.n_jobs)
                # currently only symmetric affinity_matrix supported
                self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
                                               self.affinity_matrix_.T)
                return self.affinity_matrix_
        if self.affinity == 'rbf':
            self.gamma_ = (self.gamma
                           if self.gamma is not None else 1.0 / X.shape[1])
            self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
            return self.affinity_matrix_
        # Fallthrough: affinity is a callable (validated in fit()).
        self.affinity_matrix_ = self.affinity(X)
        return self.affinity_matrix_

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = check_array(X, ensure_min_samples=2, estimator=self)
        random_state = check_random_state(self.random_state)
        # Validate the affinity parameter before doing any work.
        if isinstance(self.affinity, six.string_types):
            if self.affinity not in set(("nearest_neighbors", "rbf",
                                         "precomputed")):
                raise ValueError(("%s is not a valid affinity. Expected "
                                  "'precomputed', 'rbf', 'nearest_neighbors' "
                                  "or a callable.") % self.affinity)
        elif not callable(self.affinity):
            raise ValueError(("'affinity' is expected to be an affinity "
                              "name or a callable. Got: %s") % self.affinity)
        affinity_matrix = self._get_affinity_matrix(X)
        self.embedding_ = spectral_embedding(affinity_matrix,
                                             n_components=self.n_components,
                                             eigen_solver=self.eigen_solver,
                                             random_state=random_state)
        return self

    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self.fit(X)
        return self.embedding_
| bsd-3-clause |
ampax/edx-platform-backup | cms/djangoapps/course_creators/models.py | 183 | 4070 | """
Table for storing information about whether or not Studio users have course creation privileges.
"""
from django.db import models
from django.db.models.signals import post_init, post_save
from django.dispatch import receiver, Signal
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.translation import ugettext as _
# A signal that will be sent when users should be added or removed from the creator group
update_creator_state = Signal(providing_args=["caller", "user", "state"])
# A signal that will be sent when admin should be notified of a pending user request
send_admin_notification = Signal(providing_args=["user"])
# A signal that will be sent when user should be notified of change in course creator privileges
send_user_notification = Signal(providing_args=["user", "state"])
class CourseCreator(models.Model):
    """
    Creates the database table model.

    Tracks, per Studio user, whether they may create courses; the `state`
    field follows the workflow unrequested -> pending -> granted/denied.
    """
    # State machine constants (stored values for the `state` column).
    UNREQUESTED = 'unrequested'
    PENDING = 'pending'
    GRANTED = 'granted'
    DENIED = 'denied'

    # Second value is the "human-readable" version.
    STATES = (
        (UNREQUESTED, _(u'unrequested')),
        (PENDING, _(u'pending')),
        (GRANTED, _(u'granted')),
        (DENIED, _(u'denied')),
    )

    # One row per Studio user (unique=True enforces the 1:1 relationship).
    user = models.ForeignKey(User, help_text=_("Studio user"), unique=True)
    # Updated by the post_save handler whenever `state` changes (auto_now_add
    # only sets it on creation).
    state_changed = models.DateTimeField('state last updated', auto_now_add=True,
                                         help_text=_("The date when state was last updated"))
    state = models.CharField(max_length=24, blank=False, choices=STATES, default=UNREQUESTED,
                             help_text=_("Current course creator state"))
    note = models.CharField(max_length=512, blank=True, help_text=_("Optional notes about this user (for example, "
                                                                    "why course creation access was denied)"))

    def __unicode__(self):
        # Python 2 string representation, e.g. "user | pending [2014-...]".
        return u"{0} | {1} [{2}]".format(self.user, self.state, self.state_changed)
@receiver(post_init, sender=CourseCreator)
def post_init_callback(sender, **kwargs):
    """
    Remember the state a CourseCreator row had when it was instantiated.

    The saved value lets the post_save handler detect state transitions.
    """
    course_creator = kwargs['instance']
    course_creator.orig_state = course_creator.state
@receiver(post_save, sender=CourseCreator)
def post_save_callback(sender, **kwargs):
    """
    Extend to update state_changed time and fire event to update course creator group, if appropriate.

    Compares the saved state against `orig_state` (recorded by
    post_init_callback) and, on a transition, fires the module-level
    signals and persists a fresh `state_changed` timestamp.
    """
    instance = kwargs['instance']
    # We only wish to modify the state_changed time if the state has been modified. We don't wish to
    # modify it for changes to the notes field.
    if instance.state != instance.orig_state:
        # True when the transition enters or leaves the GRANTED state.
        granted_state_change = instance.state == CourseCreator.GRANTED or instance.orig_state == CourseCreator.GRANTED
        # If either old or new state is 'granted', we must manipulate the course creator
        # group maintained by authz. That requires staff permissions (stored admin).
        if granted_state_change:
            # `admin` is attached by the caller before save(), not a model field.
            assert hasattr(instance, 'admin'), 'Must have stored staff user to change course creator group'
            update_creator_state.send(
                sender=sender,
                caller=instance.admin,
                user=instance.user,
                state=instance.state
            )

        # If user has been denied access, granted access, or previously granted access has been
        # revoked, send a notification message to the user.
        if instance.state == CourseCreator.DENIED or granted_state_change:
            send_user_notification.send(
                sender=sender,
                user=instance.user,
                state=instance.state
            )

        # If the user has gone into the 'pending' state, send a notification to interested admin.
        if instance.state == CourseCreator.PENDING:
            send_admin_notification.send(
                sender=sender,
                user=instance.user
            )

        # Persist the new timestamp and re-sync orig_state; the nested save()
        # re-enters this handler, but state == orig_state then, so it no-ops.
        instance.state_changed = timezone.now()
        instance.orig_state = instance.state
        instance.save()
| agpl-3.0 |
CPB9/mcc | thirdparty/gtest/scripts/fuse_gtest_files.py | 2577 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files, relative to the gtest root.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files, relative to the output directory.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """

  if not os.path.isfile(os.path.join(directory, relative_path)):
    # This is a command-line tool, so report the error and exit the whole
    # program with status 1 instead of raising an exception.
    print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                      directory)
    print ('Please either specify a valid project root directory '
           'or omit it on the command line.')
    sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure (via VerifyFileExists, which
  calls sys.exit(1) when a seed file is missing).
  """

  VerifyFileExists(gtest_root, GTEST_H_SEED)
  VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.  Prompts the user
  on stdin before overwriting an existing file, and creates any missing
  parent directories for the output file.
  """

  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    print ('%s already exists in directory %s - overwrite it? (y/N) ' %
           (relative_path, output_dir))
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      print 'ABORTED.'
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure (VerifyOutputFile exits when
  the user declines to overwrite an existing output file).
  """

  VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
  VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir."""

  # NOTE: file() and the sets module are Python 2 only; this script
  # predates Python 3.
  output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  processed_files = sets.Set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file, writing to output_file."""

    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return

    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.
    for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        # It's '#include "gtest/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        # Otherwise we copy the line unchanged to the output file.
        output_file.write(line)

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.

  gtest_root is the Google Test root directory; output_file is an already
  open, writable file object that receives the fused source (the caller is
  responsible for closing it).
  """

  processed_files = sets.Set()

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file, writing to output_file."""

    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return

    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.
    for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
          # It's '#include "gtest/gtest-spi.h"'.  This file is not
          # #included by "gtest/gtest.h", so we need to process it.
          ProcessFile(GTEST_SPI_H_SEED)
        else:
          # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
          # We treat it as '#include "gtest/gtest.h"', as all other
          # gtest headers are being fused into gtest.h and cannot be
          # #included directly.

          # There is no need to #include "gtest/gtest.h" more than once.
          # (Idiomatic 'not in' instead of 'not ... in ...'.)
          if GTEST_H_SEED not in processed_files:
            processed_files.add(GTEST_H_SEED)
            output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
      else:
        m = INCLUDE_SRC_FILE_REGEX.match(line)
        if m:
          # It's '#include "src/foo"' - let's process it recursively.
          ProcessFile(m.group(1))
        else:
          output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""

  # Open the output file here and delegate the actual fusing so the
  # streaming variant (FuseGTestAllCcToFile) can also be used directly.
  output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
  FuseGTestAllCcToFile(gtest_root, output_file)
  output_file.close()
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc.

  Validates both directories first (each validation aborts the program on
  failure), then generates the two fused files.
  """

  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Parses sys.argv and runs the fusing; prints usage on bad arguments."""

  argc = len(sys.argv)
  if argc == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(sys.argv[1], sys.argv[2])
  else:
    # Wrong number of arguments: show the module docstring as usage text.
    print __doc__
    sys.exit(1)
# Command-line entry point.
if __name__ == '__main__':
  main()
| mpl-2.0 |
alimony/django | tests/template_tests/syntax_tests/test_url.py | 71 | 11755 | from django.template import RequestContext, TemplateSyntaxError
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.urls import NoReverseMatch, resolve
from ..utils import setup
@override_settings(ROOT_URLCONF='template_tests.urls')
class UrlTagTests(SimpleTestCase):
    """Tests for the {% url %} template tag: positional/keyword reversing,
    unicode url names, variable url names, failure modes, the `as var`
    form, and namespace resolution via request.resolver_match/current_app.
    """

    # Successes

    @setup({'url01': '{% url "client" client.id %}'})
    def test_url01(self):
        output = self.engine.render_to_string('url01', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/')

    @setup({'url02': '{% url "client_action" id=client.id action="update" %}'})
    def test_url02(self):
        output = self.engine.render_to_string('url02', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02a': '{% url "client_action" client.id "update" %}'})
    def test_url02a(self):
        output = self.engine.render_to_string('url02a', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02b': "{% url 'client_action' id=client.id action='update' %}"})
    def test_url02b(self):
        output = self.engine.render_to_string('url02b', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url02c': "{% url 'client_action' client.id 'update' %}"})
    def test_url02c(self):
        output = self.engine.render_to_string('url02c', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')

    @setup({'url03': '{% url "index" %}'})
    def test_url03(self):
        output = self.engine.render_to_string('url03')
        self.assertEqual(output, '/')

    @setup({'url04': '{% url "named.client" client.id %}'})
    def test_url04(self):
        output = self.engine.render_to_string('url04', {'client': {'id': 1}})
        self.assertEqual(output, '/named-client/1/')

    # Non-ASCII url names and arguments must be reversed and percent-encoded.
    @setup({'url05': '{% url "метка_оператора" v %}'})
    def test_url05(self):
        output = self.engine.render_to_string('url05', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url06': '{% url "метка_оператора_2" tag=v %}'})
    def test_url06(self):
        output = self.engine.render_to_string('url06', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url08': '{% url "метка_оператора" v %}'})
    def test_url08(self):
        output = self.engine.render_to_string('url08', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url09': '{% url "метка_оператора_2" tag=v %}'})
    def test_url09(self):
        output = self.engine.render_to_string('url09', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')

    @setup({'url10': '{% url "client_action" id=client.id action="two words" %}'})
    def test_url10(self):
        output = self.engine.render_to_string('url10', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/two%20words/')

    @setup({'url11': '{% url "client_action" id=client.id action="==" %}'})
    def test_url11(self):
        output = self.engine.render_to_string('url11', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/==/')

    @setup({'url12': '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'})
    def test_url12(self):
        # Output is autoescaped, so & and ' appear HTML-entity-encoded.
        self.assertEqual(output, '/client/1/!$&amp;&#39;()*+,;=~:@,/')

    @setup({'url13': '{% url "client_action" id=client.id action=arg|join:"-" %}'})
    def test_url13(self):
        output = self.engine.render_to_string('url13', {'client': {'id': 1}, 'arg': ['a', 'b']})
        self.assertEqual(output, '/client/1/a-b/')

    @setup({'url14': '{% url "client_action" client.id arg|join:"-" %}'})
    def test_url14(self):
        output = self.engine.render_to_string('url14', {'client': {'id': 1}, 'arg': ['a', 'b']})
        self.assertEqual(output, '/client/1/a-b/')

    @setup({'url15': '{% url "client_action" 12 "test" %}'})
    def test_url15(self):
        output = self.engine.render_to_string('url15')
        self.assertEqual(output, '/client/12/test/')

    @setup({'url18': '{% url "client" "1,2" %}'})
    def test_url18(self):
        output = self.engine.render_to_string('url18')
        self.assertEqual(output, '/client/1,2/')

    # The url name itself can come from a template variable.
    @setup({'url19': '{% url named_url client.id %}'})
    def test_url19(self):
        output = self.engine.render_to_string(
            'url19', {'client': {'id': 1}, 'named_url': 'client'}
        )
        self.assertEqual(output, '/client/1/')

    @setup({'url20': '{% url url_name_in_var client.id %}'})
    def test_url20(self):
        output = self.engine.render_to_string('url20', {'client': {'id': 1}, 'url_name_in_var': 'named.client'})
        self.assertEqual(output, '/named-client/1/')

    @setup({'url21': '{% autoescape off %}'
                     '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'
                     '{% endautoescape %}'})
    def test_url21(self):
        output = self.engine.render_to_string('url21', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/!$&\'()*+,;=~:@,/')

    # Failures

    @setup({'url-fail01': '{% url %}'})
    def test_url_fail01(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail01')

    @setup({'url-fail02': '{% url "no_such_view" %}'})
    def test_url_fail02(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail02')

    @setup({'url-fail03': '{% url "client" %}'})
    def test_url_fail03(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail03')

    @setup({'url-fail04': '{% url "view" id, %}'})
    def test_url_fail04(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail04')

    @setup({'url-fail05': '{% url "view" id= %}'})
    def test_url_fail05(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail05')

    @setup({'url-fail06': '{% url "view" a.id=id %}'})
    def test_url_fail06(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail06')

    @setup({'url-fail07': '{% url "view" a.id!id %}'})
    def test_url_fail07(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail07')

    @setup({'url-fail08': '{% url "view" id="unterminatedstring %}'})
    def test_url_fail08(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail08')

    @setup({'url-fail09': '{% url "view" id=", %}'})
    def test_url_fail09(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail09')

    @setup({'url-fail11': '{% url named_url %}'})
    def test_url_fail11(self):
        # Variable url name that is undefined in the context.
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail11')

    @setup({'url-fail12': '{% url named_url %}'})
    def test_url_fail12(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail12', {'named_url': 'no_such_view'})

    @setup({'url-fail13': '{% url named_url %}'})
    def test_url_fail13(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail13', {'named_url': 'template_tests.views.client'})

    @setup({'url-fail14': '{% url named_url id, %}'})
    def test_url_fail14(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail14', {'named_url': 'view'})

    @setup({'url-fail15': '{% url named_url id= %}'})
    def test_url_fail15(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail15', {'named_url': 'view'})

    @setup({'url-fail16': '{% url named_url a.id=id %}'})
    def test_url_fail16(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail16', {'named_url': 'view'})

    @setup({'url-fail17': '{% url named_url a.id!id %}'})
    def test_url_fail17(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail17', {'named_url': 'view'})

    @setup({'url-fail18': '{% url named_url id="unterminatedstring %}'})
    def test_url_fail18(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail18', {'named_url': 'view'})

    @setup({'url-fail19': '{% url named_url id=", %}'})
    def test_url_fail19(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail19', {'named_url': 'view'})

    # {% url ... as var %}

    @setup({'url-asvar01': '{% url "index" as url %}'})
    def test_url_asvar01(self):
        # The `as var` form renders nothing itself.
        output = self.engine.render_to_string('url-asvar01')
        self.assertEqual(output, '')

    @setup({'url-asvar02': '{% url "index" as url %}{{ url }}'})
    def test_url_asvar02(self):
        output = self.engine.render_to_string('url-asvar02')
        self.assertEqual(output, '/')

    @setup({'url-asvar03': '{% url "no_such_view" as url %}{{ url }}'})
    def test_url_asvar03(self):
        # With `as var`, reverse failures are silenced and var stays empty.
        output = self.engine.render_to_string('url-asvar03')
        self.assertEqual(output, '')

    # Namespaced url names resolved via request.resolver_match/current_app.

    @setup({'url-namespace01': '{% url "app:named.client" 42 %}'})
    def test_url_namespace01(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        template = self.engine.get_template('url-namespace01')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns1/named-client/42/')

    @setup({'url-namespace02': '{% url "app:named.client" 42 %}'})
    def test_url_namespace02(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns2/')
        template = self.engine.get_template('url-namespace02')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace03': '{% url "app:named.client" 42 %}'})
    def test_url_namespace03(self):
        # No resolver_match: falls back to the default namespace instance.
        request = RequestFactory().get('/')
        template = self.engine.get_template('url-namespace03')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace-no-current-app': '{% url "app:named.client" 42 %}'})
    def test_url_namespace_no_current_app(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        request.current_app = None
        template = self.engine.get_template('url-namespace-no-current-app')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')

    @setup({'url-namespace-explicit-current-app': '{% url "app:named.client" 42 %}'})
    def test_url_namespace_explicit_current_app(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        request.current_app = 'app'
        template = self.engine.get_template('url-namespace-explicit-current-app')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')
| bsd-3-clause |
davidwilson-85/easymap | graphic_output/Pillow-4.2.1/Tests/test_file_wmf.py | 1 | 1215 | from helper import unittest, PillowTestCase, hopper
from PIL import Image
class TestFileWmf(PillowTestCase):
    """Tests WMF/EMF image loading (rendering is Windows-only) and the
    unsupported-save error path."""

    def test_load_raw(self):

        # Test basic EMF open and rendering
        im = Image.open('Tests/images/drawing.emf')
        if hasattr(Image.core, "drawwmf"):
            # Currently, support for WMF/EMF is Windows-only
            im.load()
            # Compare to reference rendering
            imref = Image.open('Tests/images/drawing_emf_ref.png')
            imref.load()
            self.assert_image_similar(im, imref, 0)

        # Test basic WMF open and rendering
        im = Image.open('Tests/images/drawing.wmf')
        if hasattr(Image.core, "drawwmf"):
            # Currently, support for WMF/EMF is Windows-only
            im.load()
            # Compare to reference rendering
            imref = Image.open('Tests/images/drawing_wmf_ref.png')
            imref.load()
            self.assert_image_similar(im, imref, 2.0)

    def test_save(self):
        # Saving to WMF/EMF is not supported: Pillow must raise IOError.
        im = hopper()

        for ext in [".wmf", ".emf"]:
            tmpfile = self.tempfile("temp"+ext)
            self.assertRaises(IOError, lambda: im.save(tmpfile))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
abrt/faf | src/pyfaf/storage/migrations/versions/a2b6d12819f9_drop_yum_type.py | 1 | 2158 | # Copyright (C) 2019 ABRT Team
# Copyright (C) 2019 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
"""
drop_yum_type
Revision ID: a2b6d12819f9
Revises: e5d5cefb8ca4
Create Date: 2019-02-08 11:41:56.967881
"""
from alembic.op import execute, get_bind
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# revision identifiers, used by Alembic.
revision = 'a2b6d12819f9'
down_revision = 'e5d5cefb8ca4'

# Enum members for repo.type: the new set drops the legacy 'yum' value.
new_values = ['dnf', 'koji', 'rpmmetadata']
old_values = new_values + ['yum']

# old/new describe the real 'repo_type' PostgreSQL enum before and after
# the migration; tmp ('_repo_type') is a scratch type used for the swap.
old_type = sa.Enum(*old_values, name='repo_type')
new_type = sa.Enum(*new_values, name='repo_type')
tmp_type = sa.Enum(*new_values, name='_repo_type')
def upgrade() -> None:
    """Remove 'yum' from the repo.type enum, remapping those rows to 'dnf'."""
    bind = get_bind()
    # Rewrite legacy rows first so every value fits the narrower enum.
    execute('UPDATE repo SET type=\'dnf\' WHERE type=\'yum\'')
    # PostgreSQL cannot drop a member from an enum in place: move the column
    # onto a temporary enum, recreate 'repo_type' without 'yum', move back.
    tmp_type.create(bind, checkfirst=False)
    execute('ALTER TABLE repo ALTER COLUMN type TYPE _repo_type USING '
            'type::text::_repo_type')
    old_type.drop(bind, checkfirst=False)
    new_type.create(bind, checkfirst=False)
    execute('ALTER TABLE repo ALTER COLUMN type TYPE repo_type USING '
            'type::text::repo_type')
    tmp_type.drop(bind, checkfirst=False)
def downgrade() -> None:
    """Recreate the repo.type enum with the 'yum' member restored.

    Note: rows converted from 'yum' to 'dnf' by upgrade() are not restored
    (there is no UPDATE here) -- the original values are irrecoverable.
    """
    bind = get_bind()
    # Same swap dance as upgrade(), in reverse: via the temporary enum,
    # drop the narrow type and recreate the one that includes 'yum'.
    tmp_type.create(bind, checkfirst=False)
    execute('ALTER TABLE repo ALTER COLUMN type TYPE _repo_type USING '
            'type::text::_repo_type')
    new_type.drop(bind, checkfirst=False)
    old_type.create(bind, checkfirst=False)
    execute('ALTER TABLE repo ALTER COLUMN type TYPE repo_type USING '
            'type::text::repo_type')
    tmp_type.drop(bind, checkfirst=False)
| gpl-3.0 |
rcurtin/shogun | tests/integration/python_static/test_one.py | 21 | 2421 | #!/usr/bin/env python
"""
Test one data file
"""
from numpy import *
import sys
import kernel
import distance
import classifier
import clustering
import distribution
import regression
import preprocessor
from sg import sg
# Module-name fragments this driver can dispatch to; a data file's name
# must contain one of these (see _get_name_fun).
SUPPORTED=['kernel', 'distance', 'classifier', 'clustering', 'distribution',
	'regression', 'preprocessor']
def _get_name_fun (fnam):
	"""Map a data file name to the dotted name of its test function.

	Returns '<module>.test' for the first SUPPORTED fragment found in
	fnam, or None (after printing a message) if none matches.
	"""
	module=None

	for supported in SUPPORTED:
		if fnam.find(supported)>-1:
			module=supported
			break

	if module is None:
		print 'Module required for %s not supported yet!' % fnam
		return None

	return module+'.test'
def _test_mfile (fnam):
	"""Parse the .m data file fnam and run the matching test function.

	Returns the boolean result of the test function, or False when the
	file cannot be opened or no test module matches the file name.
	"""
	try:
		mfile=open(fnam, mode='r')
	except IOError, e:
		print e
		return False

	indata={}

	name_fun=_get_name_fun(fnam)
	if name_fun is None:
		return False

	# Each line has the form "param=value;"; matrices use [..] or {..}.
	for line in mfile:
		line=line.strip(" \t\n;")
		param = line.split('=')[0].strip()

		if param=='name':
			name=line.split('=')[1].strip().split("'")[1]
			indata[param]=name
		elif param=='kernel_symdata' or param=='kernel_data':
			indata[param]=_read_matrix(line)
		elif param.startswith('kernel_matrix') or \
			param.startswith('distance_matrix'):
			indata[param]=_read_matrix(line)
		elif param.find('data_train')>-1 or param.find('data_test')>-1:
			# data_{train,test} might also be prepended by *subkernel*
			indata[param]=_read_matrix(line)
		elif param=='clustering_centers' or param=='clustering_pairs':
			indata[param]=_read_matrix(line)
		else:
			# NOTE(review): eval() on file-derived text -- acceptable only
			# because these are trusted, repo-local test data files.
			if (line.find("'")==-1):
				indata[param]=eval(line.split('=')[1])
			else:
				indata[param]=line.split('=')[1].strip().split("'")[1]

	mfile.close()
	fun=eval(name_fun)

	# seed random to constant value used at data file's creation
	sg('init_random', indata['init_random'])
	random.seed(indata['init_random'])

	return fun(indata)
def _read_matrix (line):
try:
str_line=(line.split('[')[1]).split(']')[0]
except IndexError:
str_line=(line.split('{')[1]).split('}')[0]
lines=str_line.split(';')
lis2d=list()
for x in lines:
lis=list()
for y in x.split(','):
y=y.replace("'","").strip()
if(y.isalpha()):
lis.append(y)
else:
if y.find('.')!=-1:
lis.append(float(y))
else:
try:
lis.append(int(y))
except ValueError: # not int, RAWDNA?
lis.append(y)
lis2d.append(lis)
return array(lis2d)
# Run the first .m data file found on the command line and exit with its
# status: 0 when the test function succeeded, 1 otherwise.
for filename in sys.argv:
	if (filename.endswith('.m')):
		res=_test_mfile(filename)
		if res:
			sys.exit(0)
		else:
			sys.exit(1)
| gpl-3.0 |
imperodesign/paas-tools | deis/prd/controller/api/south_migrations/0016_drop_allauth.py | 2 | 12814 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: remove the django-allauth tables and the
    stale 'account' content types left behind after dropping the app."""

    def forwards(self, orm):
        "Drop django-allauth tables."
        tables_to_drop = [
            'account_emailaddress',
            'account_emailconfirmation',
        ]
        for table in tables_to_drop:
            # NOTE(review): South's frozen orm is keyed by 'app.Model'
            # labels; confirm that membership by raw table name actually
            # matches here -- TODO verify against South's orm __contains__.
            if table in orm:
                db.delete_table(table)
        # Imported locally so the migration does not depend on the
        # contenttypes app at module-import time.
        from django.contrib.contenttypes.models import ContentType
        ContentType.objects.filter(app_label='account').delete()

    def backwards(self, orm):
        # Dropped table contents cannot be recovered, so this migration
        # is deliberately irreversible.
        raise RuntimeError('Cannot reverse this migration')

    # Frozen model definitions South uses to build the fake orm above;
    # autogenerated -- do not edit by hand.
    models = {
        u'api.app': {
            'Meta': {'object_name': 'App'},
            'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Cluster']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'structure': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
        },
        u'api.build': {
            'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Build'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'dockerfile': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'procfile': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
            'sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
        },
        u'api.cluster': {
            'Meta': {'object_name': 'Cluster'},
            'auth': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'hosts': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'type': ('django.db.models.fields.CharField', [], {'default': "u'coreos'", 'max_length': '16'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
        },
        u'api.config': {
            'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Config'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
            'cpu': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'memory': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'tags': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
            'values': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'})
        },
        u'api.container': {
            'Meta': {'ordering': "[u'created']", 'object_name': 'Container'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'num': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'release': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Release']"}),
            'state': ('django_fsm.FSMField', [], {'default': "u'initialized'", 'max_length': '50'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
        },
        u'api.domain': {
            'Meta': {'object_name': 'Domain'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'domain': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'api.key': {
            'Meta': {'unique_together': "((u'owner', u'id'),)", 'object_name': 'Key'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'public': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
        },
        u'api.push': {
            'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Push'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'receive_repo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'receive_user': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sha': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'ssh_connection': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'ssh_original_command': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
        },
        u'api.release': {
            'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'version'),)", 'object_name': 'Release'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
            'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Build']"}),
            'config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Config']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'default': "u'deis/helloworld'", 'max_length': '256'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
            'version': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['api']
| mit |
switchboardOp/ansible | lib/ansible/module_utils/crypto.py | 4 | 1667 | # -*- coding: utf-8 -*-
#
# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
try:
from OpenSSL import crypto
except ImportError:
# An error will be raised in the calling class to let the end
# user know that OpenSSL couldn't be found.
pass
import hashlib
def get_fingerprint(path):
    """Generate fingerprints of the public key of the private key at *path*.

    Returns a dict mapping each available hashlib algorithm name to a
    colon-separated hex digest (e.g.mimicking OpenSSH fingerprint output).
    The dict is empty when the installed PyOpenSSL is too old (< 16.0) to
    dump public keys.
    """
    fingerprint = {}
    # Use a context manager so the key file is closed even if parsing fails;
    # the original left the file handle dangling.
    with open(path, 'r') as f:
        privatekey = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())
    try:
        publickey = crypto.dump_publickey(crypto.FILETYPE_ASN1, privatekey)
        # hashlib.algorithms exists on Python 2.7 only; fall back to the
        # Python 3 equivalent so this works on both interpreters.
        algorithms = getattr(hashlib, 'algorithms', None) or hashlib.algorithms_guaranteed
        for algo in algorithms:
            hash_func = getattr(hashlib, algo)
            pubkey_digest = hash_func(publickey).hexdigest()
            fingerprint[algo] = ':'.join(pubkey_digest[i:i + 2] for i in range(0, len(pubkey_digest), 2))
    except AttributeError:
        # If PyOpenSSL < 16.0 crypto.dump_publickey() will fail.
        # By doing this we prevent the code from raising an error
        # yet we return no value in the fingerprint hash.
        pass
    return fingerprint
| gpl-3.0 |
lucius-feng/tg2 | tests/test_middlewares.py | 2 | 3091 | from webtest import TestApp
from tg.support.middlewares import StatusCodeRedirect
from tg.support.middlewares import DBSessionRemoverMiddleware
from tg.support.middlewares import MingSessionRemoverMiddleware
def FakeApp(environ, start_response):
    """Minimal WSGI generator app used as the wrapped application in tests.

    Any path under /error answers 403, everything else 200.  The body is
    'ERROR!!!' only for the error-document path, 'HI' otherwise, always
    followed by a 'MORE' chunk.
    """
    path = environ['PATH_INFO']
    is_error = path.startswith('/error')
    start_response('403 Forbidden' if is_error else '200 Success', [])
    yield b'ERROR!!!' if path == '/error/document' else b'HI'
    yield b'MORE'
class TestStatusCodeRedirectMiddleware(object):
    """StatusCodeRedirect should serve the error document for listed codes
    and pass successful responses through untouched."""
    def setup(self):
        # Intercept only 403 responses from the fake app.
        wrapped = StatusCodeRedirect(FakeApp, [403])
        self.app = TestApp(wrapped)
    def test_error_redirection(self):
        # A 403 response is replaced by the /error/document body.
        resp = self.app.get('/error_test', status=403)
        assert 'ERROR!!!' in resp, resp
    def test_success_passthrough(self):
        # Non-error responses are untouched.
        resp = self.app.get('/success_test')
        assert 'HI' in resp, resp
class FakeDBSession(object):
    """Stand-in for a scoped DB session; records whether it was removed."""
    # Class-level default; an instance attribute shadows it once removed.
    removed = False
    def remove(self):
        """SQLAlchemy-style removal hook."""
        self.removed = True
    def close_all(self):
        """Ming-style removal hook; delegates to remove()."""
        return self.remove()
class FakeAppWithClose(object):
    """WSGI app/iterator hybrid that records whether close() was called.

    Iterating yields '1', '2', '3' and then stops; ``closed`` flips to True
    once the server calls close().  Requesting /crash raises after the
    headers have been sent.
    """
    closed = False
    step = 0

    def __call__(self, environ, start_response):
        start_response('200 Success', [])
        if environ['PATH_INFO'] == '/crash':
            raise Exception('crashed')
        return self

    def __iter__(self):
        return self

    def next(self):
        self.step += 1
        if self.step > 3:
            raise StopIteration()
        return str(self.step)

    # Python 3 iterator protocol: the original only defined the Python 2
    # spelling, so iteration broke under Python 3.
    __next__ = next

    def close(self):
        self.closed = True

    def __repr__(self):
        return '%s - %s' % (self.step, self.closed)
class TestDBSessionRemoverMiddleware(object):
    """DBSessionRemoverMiddleware must close the app iterable and remove
    the session after every request, including crashing ones."""
    def setup(self):
        self.app_with_close = FakeAppWithClose()
        self.session = FakeDBSession()
        self.app = TestApp(DBSessionRemoverMiddleware(self.session, self.app_with_close))
    def test_close_is_called(self):
        r = self.app.get('/nonerror')
        assert self.app_with_close.closed == True, self.app_with_close
    def test_session_is_removed(self):
        r = self.app.get('/nonerror')
        assert self.session.removed == True, self.app_with_close
    def test_session_is_removed_on_crash(self):
        # except Exception (not bare except) so SystemExit/KeyboardInterrupt
        # are not silently swallowed while we ignore the expected crash.
        try:
            r = self.app.get('/crash')
        except Exception:
            pass
        assert self.session.removed == True, self.app_with_close
class TestMingSessionRemoverMiddlewaree(object):
    """Same contract as TestDBSessionRemoverMiddleware but for the Ming
    (close_all based) session remover.

    NOTE(review): the trailing 'ee' in the class name looks like a typo, but
    renaming a test class is safe only repo-wide, so it is kept.
    """
    def setup(self):
        self.app_with_close = FakeAppWithClose()
        self.session = FakeDBSession()
        self.app = TestApp(MingSessionRemoverMiddleware(self.session, self.app_with_close))
    def test_close_is_called(self):
        r = self.app.get('/nonerror')
        assert self.app_with_close.closed == True, self.app_with_close
    def test_session_is_removed(self):
        r = self.app.get('/nonerror')
        assert self.session.removed == True, self.app_with_close
    def test_session_is_removed_on_crash(self):
        # except Exception (not bare except) so SystemExit/KeyboardInterrupt
        # are not silently swallowed while we ignore the expected crash.
        try:
            r = self.app.get('/crash')
        except Exception:
            pass
        assert self.session.removed == True, self.app_with_close
| mit |
Gillu13/scipy | scipy/io/harwell_boeing/hb.py | 83 | 18473 | """
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
from __future__ import division, print_function, absolute_import
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from scipy.io.harwell_boeing._fortran_format_parser import \
FortranFormatParser, IntFormat, ExpFormat
from scipy._lib.six import string_types
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
"HBMatrixType"]
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing header does not follow the format."""
    pass
class LineOverflow(Warning):
    """Warned when a written value overflows its fixed-width header field."""
    pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo(object):
    """Header metadata of a Harwell-Boeing file: title/key, section line
    counts, matrix type/shape and the Fortran formats of each section."""
    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.
        Parameters
        ----------
        m : sparse matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented
        Returns
        -------
        hb_info : HBInfo instance
        """
        pointer = m.indptr
        indices = m.indices
        values = m.data
        nrows, ncols = m.shape
        nnon_zeros = m.nnz
        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))
            # The negative sign forces the format to reserve room for a sign.
            if values.dtype.kind in np.typecodes["AllFloat"]:
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
        else:
            raise NotImplementedError("fmt argument not supported yet.")
        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError("type %s for values not implemented"
                                          % values.dtype)
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")
        def _nlines(fmt, size):
            # Number of text lines needed for `size` items, fmt.repeat per line.
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines
        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)
        total_nlines = pointer_nlines + indices_nlines + values_nlines
        return cls(title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_fmt.fortran_format, indices_fmt.fortran_format,
            values_fmt.fortran_format)
    @classmethod
    def from_file(cls, fid):
        """Create a HBInfo instance from a file object containing a matrix in the
        HB format.
        Parameters
        ----------
        fid : file-like matrix
            File or file-like object containing a matrix in the HB format.
        Returns
        -------
        hb_info : HBInfo instance
        """
        # First line
        line = fid.readline().strip("\n")
        # NOTE(review): requires strictly more than 72 characters, though the
        # message says "at least 72" -- confirm which is intended.
        if not len(line) > 72:
            raise ValueError("Expected at least 72 characters for first line, "
                             "got: \n%s" % line)
        title = line[:72]
        key = line[72:]
        # Second line
        line = fid.readline().strip("\n")
        if not len(line.rstrip()) >= 56:
            raise ValueError("Expected at least 56 characters for second line, "
                             "got: \n%s" % line)
        total_nlines = _expect_int(line[:14])
        pointer_nlines = _expect_int(line[14:28])
        indices_nlines = _expect_int(line[28:42])
        values_nlines = _expect_int(line[42:56])
        # Optional field: number of right-hand-side lines (unsupported).
        rhs_nlines = line[56:72].strip()
        if rhs_nlines == '':
            rhs_nlines = 0
        else:
            rhs_nlines = _expect_int(rhs_nlines)
        if not rhs_nlines == 0:
            raise ValueError("Only files without right hand side supported for "
                             "now.")
        # Third line
        line = fid.readline().strip("\n")
        # NOTE(review): checks >= 70 while the message claims 72 -- confirm.
        if not len(line) >= 70:
            raise ValueError("Expected at least 72 character for third line, got:\n"
                             "%s" % line)
        mxtype_s = line[:3].upper()
        if not len(mxtype_s) == 3:
            raise ValueError("mxtype expected to be 3 characters long")
        mxtype = HBMatrixType.from_fortran(mxtype_s)
        if mxtype.value_type not in ["real", "integer"]:
            raise ValueError("Only real or integer matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.structure == "unsymmetric":
            raise ValueError("Only unsymmetric matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.storage == "assembled":
            raise ValueError("Only assembled matrices supported for now")
        if not line[3:14] == " " * 11:
            raise ValueError("Malformed data for third line: %s" % line)
        nrows = _expect_int(line[14:28])
        ncols = _expect_int(line[28:42])
        nnon_zeros = _expect_int(line[42:56])
        nelementals = _expect_int(line[56:70])
        if not nelementals == 0:
            raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
                             % nelementals)
        # Fourth line
        line = fid.readline().strip("\n")
        ct = line.split()
        if not len(ct) == 3:
            raise ValueError("Expected 3 formats, got %s" % ct)
        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   ct[0], ct[1], ct[2],
                   rhs_nlines, nelementals)
    def __init__(self, title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_format_str, indices_format_str, values_format_str,
            right_hand_sides_nlines=0, nelementals=0):
        """Do not use this directly, but the class ctrs (from_* functions)."""
        # NOTE(review): self.title/self.key are assigned before the None
        # defaults below, so they can stay None while the validated locals
        # get the fallback values -- confirm intended.
        self.title = title
        self.key = key
        if title is None:
            title = "No Title"
        if len(title) > 72:
            raise ValueError("title cannot be > 72 characters")
        if key is None:
            key = "|No Key"
        if len(key) > 8:
            warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)
        self.total_nlines = total_nlines
        self.pointer_nlines = pointer_nlines
        self.indices_nlines = indices_nlines
        self.values_nlines = values_nlines
        parser = FortranFormatParser()
        pointer_format = parser.parse(pointer_format_str)
        if not isinstance(pointer_format, IntFormat):
            raise ValueError("Expected int format for pointer format, got %s"
                             % pointer_format)
        indices_format = parser.parse(indices_format_str)
        if not isinstance(indices_format, IntFormat):
            raise ValueError("Expected int format for indices format, got %s" %
                             indices_format)
        values_format = parser.parse(values_format_str)
        if isinstance(values_format, ExpFormat):
            if mxtype.value_type not in ["real", "complex"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            values_dtype = np.float64
        elif isinstance(values_format, IntFormat):
            if mxtype.value_type not in ["integer"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            # XXX: fortran int -> dtype association ?
            values_dtype = int
        else:
            raise ValueError("Unsupported format for values %r" % (values_format,))
        self.pointer_format = pointer_format
        self.indices_format = indices_format
        self.values_format = values_format
        self.pointer_dtype = np.int32
        self.indices_dtype = np.int32
        self.values_dtype = values_dtype
        # *_nbytes_full: byte count of the full (non-partial) lines of each
        # section, used to bulk-read the data sections.
        self.pointer_nlines = pointer_nlines
        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
        self.indices_nlines = indices_nlines
        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
        self.values_nlines = values_nlines
        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)
        self.nrows = nrows
        self.ncols = ncols
        self.nnon_zeros = nnon_zeros
        self.nelementals = nelementals
        self.mxtype = mxtype
    def dump(self):
        """Gives the header corresponding to this instance as a string."""
        header = [self.title.ljust(72) + self.key.ljust(8)]
        # Line 2: line counts, each right-aligned in a 14-char field.
        header.append("%14d%14d%14d%14d" %
                      (self.total_nlines, self.pointer_nlines,
                       self.indices_nlines, self.values_nlines))
        # Line 3: matrix type and dimensions.
        header.append("%14s%14d%14d%14d%14d" %
                      (self.mxtype.fortran_format.ljust(14), self.nrows,
                       self.ncols, self.nnon_zeros, 0))
        pffmt = self.pointer_format.fortran_format
        iffmt = self.indices_format.fortran_format
        vffmt = self.values_format.fortran_format
        # Line 4: Fortran formats of the three data sections.
        header.append("%16s%16s%20s" %
                      (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
        return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value)
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError as e:
raise e
def _write_data(m, fid, header):
    """Write CSC matrix *m* to file-like *fid* using the header/format
    information in *header* (an HBInfo instance)."""
    def write_array(f, ar, nlines, fmt):
        # ar_nlines is the number of full lines, n is the number of items per
        # line, ffmt the fortran format
        pyfmt = fmt.python_format
        pyfmt_full = pyfmt * fmt.repeat
        # for each array to write, we first write the full lines, and special
        # case for partial line
        full = ar[:(nlines - 1) * fmt.repeat]
        for row in full.reshape((nlines-1, fmt.repeat)):
            f.write(pyfmt_full % tuple(row) + "\n")
        nremain = ar.size - full.size
        if nremain > 0:
            f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
    fid.write(header.dump())
    fid.write("\n")
    # +1 is for fortran one-based indexing
    write_array(fid, m.indptr+1, header.pointer_nlines,
                header.pointer_format)
    write_array(fid, m.indices+1, header.indices_nlines,
                header.indices_format)
    write_array(fid, m.data, header.values_nlines,
                header.values_format)
class HBMatrixType(object):
    """Class to hold the matrix type.

    Holds the three qualifiers (value type, structure, storage) encoded by
    the 3-character Fortran mxtype field, e.g. 'RUA' for
    real/unsymmetric/assembled.
    """
    # q2f* translates qualified names to fortran character
    _q2f_type = {
        "real": "R",
        "complex": "C",
        "pattern": "P",
        "integer": "I",
    }
    _q2f_structure = {
            "symmetric": "S",
            "unsymmetric": "U",
            "hermitian": "H",
            "skewsymmetric": "Z",
            "rectangular": "R"
    }
    _q2f_storage = {
        "assembled": "A",
        "elemental": "E",
    }
    # Inverse maps: fortran character -> qualified name.
    _f2q_type = dict([(j, i) for i, j in _q2f_type.items()])
    _f2q_structure = dict([(j, i) for i, j in _q2f_structure.items()])
    _f2q_storage = dict([(j, i) for i, j in _q2f_storage.items()])
    @classmethod
    def from_fortran(cls, fmt):
        """Build an instance from a 3-character Fortran mxtype string."""
        if not len(fmt) == 3:
            raise ValueError("Fortran format for matrix type should be 3 "
                             "characters long")
        try:
            value_type = cls._f2q_type[fmt[0]]
            structure = cls._f2q_structure[fmt[1]]
            storage = cls._f2q_storage[fmt[2]]
            return cls(value_type, structure, storage)
        except KeyError:
            raise ValueError("Unrecognized format %s" % fmt)
    def __init__(self, value_type, structure, storage="assembled"):
        self.value_type = value_type
        self.structure = structure
        self.storage = storage
        # Validate against the known qualifier tables.
        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)
    @property
    def fortran_format(self):
        # The 3-character code written to/read from the HB header.
        return self._q2f_type[self.value_type] + \
               self._q2f_structure[self.structure] + \
               self._q2f_storage[self.storage]
    def __repr__(self):
        return "HBMatrixType(%s, %s, %s)" % \
               (self.value_type, self.structure, self.storage)
class HBFile(object):
    """Thin wrapper pairing an open file with its parsed/provided HBInfo
    header; exposes read_matrix/write_matrix."""
    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.
        Parameters
        ----------
        file : file-object
            StringIO work as well
        hb_info : HBInfo, optional
            Should be given as an argument for writing, in which case the file
            should be writable.
        """
        self._fid = file
        if hb_info is None:
            # Read mode: parse the header straight from the file.
            self._hb_info = HBInfo.from_file(file)
        else:
            # Write mode: trust the caller-supplied header.
            #raise IOError("file %s is not writable, and hb_info "
            #              "was given." % file)
            self._hb_info = hb_info
    @property
    def title(self):
        return self._hb_info.title
    @property
    def key(self):
        return self._hb_info.key
    @property
    def type(self):
        return self._hb_info.mxtype.value_type
    @property
    def structure(self):
        return self._hb_info.mxtype.structure
    @property
    def storage(self):
        return self._hb_info.mxtype.storage
    def read_matrix(self):
        # Assumes the file position is just past the header.
        return _read_hb_data(self._fid, self._hb_info)
    def write_matrix(self, m):
        return _write_data(m, self._fid, self._hb_info)
def hb_read(file):
    """Read HB-format file.
    Parameters
    ----------
    file : str-like or file-like
        If a string-like object, file is the name of the file to read. If a
        file-like object, the data are read from it.
    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.
    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:
        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    def _get_matrix(fid):
        hb = HBFile(fid)
        return hb.read_matrix()

    if isinstance(file, string_types):
        # A filename was given: open it ourselves; the context manager
        # replaces the original try/finally and guarantees closure.
        with open(file) as fid:
            return _get_matrix(fid)
    else:
        # Already-open file-like object: the caller owns its lifetime.
        return _get_matrix(file)
def hb_write(file, m, hb_info=None):
    """Write HB-format file.
    Parameters
    ----------
    file : str-like or file-like
        if a string-like object, file is the name of the file to read. If a
        file-like object, the data are read from it.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write
    Returns
    -------
    None
    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:
        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    if hb_info is None:
        # Derive the header (formats, line counts) from the matrix itself.
        hb_info = HBInfo.from_data(m)

    def _set_matrix(fid):
        hb = HBFile(fid, hb_info)
        return hb.write_matrix(m)

    if isinstance(file, string_types):
        # A filename was given: open it ourselves; the context manager
        # replaces the original try/finally and guarantees closure.
        with open(file, "w") as fid:
            return _set_matrix(fid)
    else:
        # Already-open file-like object: the caller owns its lifetime.
        return _set_matrix(file)
| bsd-3-clause |
youprofit/kivy | examples/widgets/lists/list_reset_data.py | 48 | 2400 | # -*- coding: utf-8 -*-
from kivy.uix.listview import ListView
from kivy.uix.floatlayout import FloatLayout
from kivy.clock import Clock
from kivy.adapters.listadapter import ListAdapter
from kivy.adapters.models import SelectableDataItem
from kivy.uix.listview import ListItemButton
from random import choice
from string import ascii_uppercase, digits
import random
class DataItem(SelectableDataItem):
    """Selectable list item identified by a short name.

    A random six-character name (uppercase letters and digits) is generated
    unless the caller supplies one via the ``name`` keyword.
    """
    def __init__(self, **kwargs):
        super(DataItem, self).__init__(**kwargs)
        # Honor an explicitly passed name: the original unconditionally
        # overwrote it with a random one, so DataItem(name=...) callers
        # (see update_list_data) never saw their name.
        if 'name' in kwargs:
            self.name = kwargs['name']
        else:
            self.name = ''.join(choice(ascii_uppercase + digits)
                                for x in range(6))
class MainView(FloatLayout):
    """
    Implementation of a ListView using the kv language.
    """
    def __init__(self, **kwargs):
        super(MainView, self).__init__(**kwargs)
        data_items = []
        data_items.append(DataItem())
        data_items.append(DataItem())
        data_items.append(DataItem())
        # Converts a DataItem into the constructor kwargs of a ListItemButton.
        list_item_args_converter = lambda row_index, obj: {'text': obj.name,
                                                           'size_hint_y': None,
                                                           'height': 25}
        self.list_adapter = \
            ListAdapter(data=data_items,
                        args_converter=list_item_args_converter,
                        selection_mode='single',
                        propagate_selection_to_data=False,
                        allow_empty_selection=False,
                        cls=ListItemButton)
        self.list_view = ListView(adapter=self.list_adapter)
        self.add_widget(self.list_view)
        # Alternate between appending a new item and replacing a random one.
        self.toggle = 'adding'
        Clock.schedule_interval(self.update_list_data, 1)
    def update_list_data(self, dt):
        """Clock callback (once per second): add an item or replace one."""
        items = self.list_adapter.data
        if self.toggle == 'adding':
            item = DataItem(name='New ' * random.randint(1, 2))
            items.append(item)
            self.toggle = 'changing'
            print('added ' + item.name)
        else:
            # Replace a random existing item with a freshly generated one.
            random_index = random.randint(0, len(items) - 1)
            item = items[random_index]
            items[random_index] = DataItem()
            self.toggle = 'adding'
            print('changed {0} to {1}'.format(item.name,
                                              items[random_index].name))
# Script entry point: run the demo app full-screen via kivy's runTouchApp.
if __name__ == '__main__':
    from kivy.base import runTouchApp
    runTouchApp(MainView(width=800))
| mit |
pombredanne/progress | progress/bar.py | 510 | 2685 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from . import Progress
from .helpers import WritelnMixin
class Bar(WritelnMixin, Progress):
    """Classic '#'-filled progress bar."""
    # Width of the bar body in characters (excludes message and suffix).
    width = 32
    message = ''
    # %-template expanded against the Progress instance (index/max/percent...).
    suffix = '%(index)d/%(max)d'
    bar_prefix = ' |'
    bar_suffix = '| '
    empty_fill = ' '
    fill = '#'
    hide_cursor = True
    def update(self):
        """Redraw the line reflecting the current progress fraction."""
        filled_length = int(self.width * self.progress)
        empty_length = self.width - filled_length
        message = self.message % self
        bar = self.fill * filled_length
        empty = self.empty_fill * empty_length
        suffix = self.suffix % self
        line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix,
                        suffix])
        self.writeln(line)
class ChargingBar(Bar):
    """Bar variant drawn with Unicode cells and a percent-only suffix."""
    suffix = '%(percent)d%%'
    bar_prefix = ' '
    bar_suffix = ' '
    empty_fill = u'∙'
    fill = u'█'
class FillingSquaresBar(ChargingBar):
    """ChargingBar drawn with empty/filled square glyphs."""
    empty_fill = u'▢'
    fill = u'▣'
class FillingCirclesBar(ChargingBar):
    """ChargingBar drawn with empty/filled circle glyphs."""
    empty_fill = u'◯'
    fill = u'◉'
class IncrementalBar(Bar):
    """Bar with sub-character resolution using partial-block glyphs."""
    # Glyphs from empty to full; the boundary cell picks an intermediate one.
    phases = (u' ', u'▏', u'▎', u'▍', u'▌', u'▋', u'▊', u'▉', u'█')
    def update(self):
        """Redraw the line, drawing the edge cell with a partial glyph."""
        nphases = len(self.phases)
        # Progress measured in 1/nphases-of-a-cell units.
        expanded_length = int(nphases * self.width * self.progress)
        filled_length = int(self.width * self.progress)
        empty_length = self.width - filled_length
        # Which intermediate phase the boundary cell is in (0 => none).
        phase = expanded_length - (filled_length * nphases)
        message = self.message % self
        bar = self.phases[-1] * filled_length
        current = self.phases[phase] if phase > 0 else ''
        empty = self.empty_fill * max(0, empty_length - len(current))
        suffix = self.suffix % self
        line = ''.join([message, self.bar_prefix, bar, current, empty,
                        self.bar_suffix, suffix])
        self.writeln(line)
class ShadyBar(IncrementalBar):
    """IncrementalBar using shaded-block glyphs for the phases."""
    phases = (u' ', u'░', u'▒', u'▓', u'█')
| isc |
t0in4/django | tests/user_commands/tests.py | 205 | 7165 | import os
from django.apps import apps
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
from django.core.management.utils import find_command, popen_wrapper
from django.db import connection
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.test.utils import captured_stderr, captured_stdout, extend_sys_path
from django.utils import translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.six import StringIO
# A minimal set of apps to avoid system checks running on all apps.
@override_settings(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'user_commands',
],
)
class CommandTests(SimpleTestCase):
def test_command(self):
out = StringIO()
management.call_command('dance', stdout=out)
self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue())
def test_command_style(self):
out = StringIO()
management.call_command('dance', style='Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
# Passing options as arguments also works (thanks argparse)
management.call_command('dance', '--style', 'Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
def test_language_preserved(self):
out = StringIO()
with translation.override('fr'):
management.call_command('dance', stdout=out)
self.assertEqual(translation.get_language(), 'fr')
def test_explode(self):
""" Test that an unknown command raises CommandError """
self.assertRaises(CommandError, management.call_command, ('explode',))
def test_system_exit(self):
""" Exception raised in a command should raise CommandError with
call_command, but SystemExit when run from command line
"""
with self.assertRaises(CommandError):
management.call_command('dance', example="raise")
with captured_stderr() as stderr, self.assertRaises(SystemExit):
management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
self.assertIn("CommandError", stderr.getvalue())
def test_deactivate_locale_set(self):
# Deactivate translation when set to true
out = StringIO()
with translation.override('pl'):
management.call_command('leave_locale_alone_false', stdout=out)
self.assertEqual(out.getvalue(), "")
def test_configured_locale_preserved(self):
# Leaves locale from settings when set to false
out = StringIO()
with translation.override('pl'):
management.call_command('leave_locale_alone_true', stdout=out)
self.assertEqual(out.getvalue(), "pl\n")
def test_find_command_without_PATH(self):
"""
find_command should still work when the PATH environment variable
doesn't exist (#22256).
"""
current_path = os.environ.pop('PATH', None)
try:
self.assertIsNone(find_command('_missing_'))
finally:
if current_path is not None:
os.environ['PATH'] = current_path
def test_discover_commands_in_eggs(self):
"""
Test that management commands can also be loaded from Python eggs.
"""
egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
egg_name = '%s/basic.egg' % egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['commandegg']):
cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management'))
self.assertEqual(cmds, ['eggcommand'])
def test_call_command_option_parsing(self):
"""
When passing the long option name to call_command, the available option
key is the option dest name (#22985).
"""
out = StringIO()
management.call_command('dance', stdout=out, opt_3=True)
self.assertIn("option3", out.getvalue())
self.assertNotIn("opt_3", out.getvalue())
self.assertNotIn("opt-3", out.getvalue())
@ignore_warnings(category=RemovedInDjango110Warning)
def test_optparse_compatibility(self):
"""
optparse should be supported during Django 1.8/1.9 releases.
"""
out = StringIO()
management.call_command('optparse_cmd', stdout=out)
self.assertEqual(out.getvalue(), "All right, let's dance Rock'n'Roll.\n")
# Simulate command line execution
with captured_stdout() as stdout, captured_stderr():
management.execute_from_command_line(['django-admin', 'optparse_cmd'])
self.assertEqual(stdout.getvalue(), "All right, let's dance Rock'n'Roll.\n")
def test_calling_a_command_with_only_empty_parameter_should_ends_gracefully(self):
out = StringIO()
management.call_command('hal', "--empty", stdout=out)
self.assertIn("Dave, I can't do that.\n", out.getvalue())
def test_calling_command_with_app_labels_and_parameters_should_be_ok(self):
out = StringIO()
management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self):
out = StringIO()
management.call_command('hal', "--verbosity", "3", "myapp", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self):
out = StringIO()
with self.assertRaises(CommandError):
management.call_command('hal', stdout=out)
def test_output_transaction(self):
out = StringIO()
management.call_command('transaction', stdout=out, no_color=True)
output = out.getvalue().strip()
self.assertTrue(output.startswith(connection.ops.start_transaction_sql()))
self.assertTrue(output.endswith(connection.ops.end_transaction_sql()))
def test_call_command_no_checks(self):
"""
By default, call_command should not trigger the check framework, unless
specifically asked.
"""
self.counter = 0
def patched_check(self_, **kwargs):
self.counter = self.counter + 1
saved_check = BaseCommand.check
BaseCommand.check = patched_check
try:
management.call_command("dance", verbosity=0)
self.assertEqual(self.counter, 0)
management.call_command("dance", verbosity=0, skip_checks=False)
self.assertEqual(self.counter, 1)
finally:
BaseCommand.check = saved_check
class UtilsTests(SimpleTestCase):
    """Tests for the helpers in django.core.management.utils."""
    def test_no_existent_external_program(self):
        # popen_wrapper must raise CommandError when the executable is missing.
        self.assertRaises(CommandError, popen_wrapper, ['a_42_command_that_doesnt_exist_42'])
| bsd-3-clause |
Konstantin-Posudevskiy/pythonnet | src/tests/test_subclass.py | 2 | 7634 | # -*- coding: utf-8 -*-
# FIXME: This test module randomly passes/fails even if all tests are skipped.
# Something fishy is going on with the Test fixtures. Behavior seen on CI on
# both Linux and Windows
# TODO: Remove delay of class creations. Adding SetUp/TearDown may help
"""Test sub-classing managed types"""
import System
import pytest
from Python.Test import (IInterfaceTest, SubClassTest, EventArgsTest,
FunctionsTest)
from System.Collections.Generic import List
from ._compat import range
def interface_test_class_fixture(subnamespace):
    """Delay creation of class until test starts."""
    class InterfaceTestClass(IInterfaceTest):
        """class that implements the test interface"""
        # Per-test CLR namespace so repeated registrations do not clash.
        __namespace__ = "Python.Test." + subnamespace
        def foo(self):
            return "InterfaceTestClass"
        def bar(self, x, i):
            # Joins i copies of x with '/' (distinguishes this implementation).
            return "/".join([x] * i)
    return InterfaceTestClass
def derived_class_fixture(subnamespace):
    """Delay creation of class until test starts."""
    class DerivedClass(SubClassTest):
        """class that derives from a class deriving from IInterfaceTest"""
        # Per-test CLR namespace so repeated registrations do not clash.
        __namespace__ = "Python.Test." + subnamespace
        def foo(self):
            return "DerivedClass"
        def base_foo(self):
            # Call the managed base implementation explicitly.
            return SubClassTest.foo(self)
        def super_foo(self):
            # Same as base_foo but dispatched through Python's MRO.
            return super(DerivedClass, self).foo()
        def bar(self, x, i):
            # '_' separator distinguishes this override from the interface impl.
            return "_".join([x] * i)
        def return_list(self):
            # Returns a .NET generic List[str], not a Python list.
            l = List[str]()
            l.Add("A")
            l.Add("B")
            l.Add("C")
            return l
    return DerivedClass
def derived_event_test_class_fixture(subnamespace):
    """Delay creation of class until test starts."""
    class DerivedEventTest(IInterfaceTest):
        """class that implements IInterfaceTest.TestEvent"""
        # Per-test CLR namespace so repeated registrations do not clash.
        __namespace__ = "Python.Test." + subnamespace
        def __init__(self):
            # Handlers registered via add_TestEvent.
            self.event_handlers = []
        # event handling
        def add_TestEvent(self, handler):
            self.event_handlers.append(handler)
        def remove_TestEvent(self, handler):
            self.event_handlers.remove(handler)
        def OnTestEvent(self, value):
            # Fire the event: wrap the value and invoke every handler.
            args = EventArgsTest(value)
            for handler in self.event_handlers:
                handler(self, args)
    return DerivedEventTest
def test_base_class():
"""Test base class managed type"""
ob = SubClassTest()
assert ob.foo() == "foo"
assert FunctionsTest.test_foo(ob) == "foo"
assert ob.bar("bar", 2) == "bar"
assert FunctionsTest.test_bar(ob, "bar", 2) == "bar"
assert ob.not_overriden() == "not_overriden"
assert list(ob.return_list()) == ["a", "b", "c"]
assert list(SubClassTest.test_list(ob)) == ["a", "b", "c"]
def test_interface():
"""Test python classes can derive from C# interfaces"""
InterfaceTestClass = interface_test_class_fixture(test_interface.__name__)
ob = InterfaceTestClass()
assert ob.foo() == "InterfaceTestClass"
assert FunctionsTest.test_foo(ob) == "InterfaceTestClass"
assert ob.bar("bar", 2) == "bar/bar"
assert FunctionsTest.test_bar(ob, "bar", 2) == "bar/bar"
x = FunctionsTest.pass_through(ob)
assert id(x) == id(ob)
def test_derived_class():
"""Test python class derived from managed type"""
DerivedClass = derived_class_fixture(test_derived_class.__name__)
ob = DerivedClass()
assert ob.foo() == "DerivedClass"
assert ob.base_foo() == "foo"
assert ob.super_foo() == "foo"
assert FunctionsTest.test_foo(ob) == "DerivedClass"
assert ob.bar("bar", 2) == "bar_bar"
assert FunctionsTest.test_bar(ob, "bar", 2) == "bar_bar"
assert ob.not_overriden() == "not_overriden"
assert list(ob.return_list()) == ["A", "B", "C"]
assert list(SubClassTest.test_list(ob)) == ["A", "B", "C"]
x = FunctionsTest.pass_through(ob)
assert id(x) == id(ob)
def test_create_instance():
    """Derived instances can be instantiated from managed code."""
    DerivedClass = derived_class_fixture(test_create_instance.__name__)
    derived = FunctionsTest.create_instance(DerivedClass)
    assert derived.foo() == "DerivedClass"
    assert FunctionsTest.test_foo(derived) == "DerivedClass"
    assert derived.bar("bar", 2) == "bar_bar"
    assert FunctionsTest.test_bar(derived, "bar", 2) == "bar_bar"
    assert derived.not_overriden() == "not_overriden"
    # Managed construction still yields the very same Python object back.
    assert FunctionsTest.pass_through(derived) is derived

    InterfaceTestClass = interface_test_class_fixture(test_create_instance.__name__)
    impl = FunctionsTest.create_instance(InterfaceTestClass)
    assert impl.foo() == "InterfaceTestClass"
    assert FunctionsTest.test_foo(impl) == "InterfaceTestClass"
    assert impl.bar("bar", 2) == "bar/bar"
    assert FunctionsTest.test_bar(impl, "bar", 2) == "bar/bar"
    assert FunctionsTest.pass_through(impl) is impl
def test_events():
    """Events work on managed, interface-only, and derived event sources."""

    class Recorder(object):
        # Remembers the last event payload seen by the handler.
        def handler(self, sender, args):
            self.value = args.value

    recorder = Recorder()

    # Managed subclass: subscribe via the += event syntax.
    source = SubClassTest()
    source.TestEvent += recorder.handler
    assert FunctionsTest.test_event(source, 1) == 1
    assert recorder.value == 1

    # Interface implementation without an event body raises from managed code.
    InterfaceTestClass = interface_test_class_fixture(test_events.__name__)
    iface_only = InterfaceTestClass()
    with pytest.raises(System.NotImplementedException):
        FunctionsTest.test_event(iface_only, 2)

    # Python-side event implementation dispatches through add_TestEvent.
    DerivedEventTest = derived_event_test_class_fixture(test_events.__name__)
    derived = DerivedEventTest()
    derived.add_TestEvent(recorder.handler)
    assert FunctionsTest.test_event(derived, 3) == 3
    assert recorder.value == 3
    assert len(derived.event_handlers) == 1
def test_isinstance_check():
    """isinstance distinguishes native str objects from System.String ones."""
    natives = [str(num) for num in range(1000)]
    managed = [System.String(text) for text in natives]
    # Plain Python strings are not CLR objects...
    for item in natives:
        assert not isinstance(item, System.Object)
        assert not isinstance(item, System.String)
    # ...but their System.String counterparts are.
    for item in managed:
        assert isinstance(item, System.Object)
        assert isinstance(item, System.String)
def test_namespace_and_init():
    """__init__ args/kwargs reach a namespaced System.Object subclass."""
    recorded = []

    class TestX(System.Object):
        __namespace__ = "test_clr_subclass_with_init_args"

        def __init__(self, *args, **kwargs):
            recorded.append((args, kwargs))

    instance = TestX(1, 2, 3, foo="bar")
    assert len(recorded) == 1
    positional, keyword = recorded[0]
    assert positional == (1, 2, 3)
    assert keyword == {"foo": "bar"}
def test_namespace_and_argless_init():
    """A no-argument __init__ runs exactly once on construction."""
    invocations = []

    class TestX(System.Object):
        __namespace__ = "test_clr_subclass_without_init_args"

        def __init__(self):
            invocations.append(True)

    instance = TestX()
    assert len(invocations) == 1
    assert invocations[0] is True
def test_namespace_and_no_init():
    """Class attributes survive CLR subclassing when no __init__ is defined."""

    class TestX(System.Object):
        __namespace__ = "test_clr_subclass_without_init"
        q = 1

    instance = TestX()
    assert instance.q == 1
def test_construction_from_clr():
    """A clr.clrmethod-decorated __init__ is callable from managed code."""
    import clr

    recorded = []

    class TestX(System.Object):
        __namespace__ = "test_clr_subclass_init_from_clr"

        @clr.clrmethod(None, [int, str])
        def __init__(self, i, s):
            recorded.append((i, s))

    # Constructing from Python runs __init__ immediately.
    instance = TestX(1, "foo")
    assert recorded == [(1, "foo")]

    # Constructing through reflection only builds the object; __init__ is
    # NOT invoked automatically...
    recorded = []
    clr_type = instance.GetType()
    reflected = clr_type.GetConstructors()[0].Invoke(None)
    assert recorded == []

    # ...it has to be called explicitly afterwards to initialize the object.
    clr_type.GetMethod("__init__").Invoke(reflected, [1, "foo"])
    assert recorded == [(1, "foo")]
| mit |
cloud-fan/spark | python/pyspark/pandas/data_type_ops/base.py | 1 | 12265 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, TYPE_CHECKING, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
UserDefinedType,
)
from pyspark.pandas.typedef import Dtype, extension_dtypes
from pyspark.pandas.typedef.typehints import extension_object_dtypes_available
if extension_object_dtypes_available:
from pandas import BooleanDtype
if TYPE_CHECKING:
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
    """Return True if `operand` may appear in arithmetic against numerics.

    Plain Python numbers qualify (bools only when `allow_bool` is True);
    pandas-on-Spark operands qualify when their Spark type is numeric (or
    boolean, when `allow_bool` is True) and their dtype is not categorical.
    """
    from pyspark.pandas.base import IndexOpsMixin

    if isinstance(operand, numbers.Number):
        # Python bool is a numbers.Number subclass; accept it only if allowed.
        return allow_bool or not isinstance(operand, bool)
    if not isinstance(operand, IndexOpsMixin):
        return False
    if isinstance(operand.dtype, CategoricalDtype):
        # Categoricals never take part in numeric arithmetic.
        return False
    spark_data_type = operand.spark.data_type
    return isinstance(spark_data_type, NumericType) or (
        allow_bool and isinstance(spark_data_type, BooleanType)
    )
def transform_boolean_operand_to_numeric(
    operand: Any, spark_type: Optional[DataType] = None
) -> Any:
    """Convert a boolean operand into its numeric counterpart.

    A boolean pandas-on-Spark operand is cast to `spark_type`; a bare Python
    bool becomes its int value; any other operand is returned untouched.
    """
    from pyspark.pandas.base import IndexOpsMixin

    is_boolean_index_ops = isinstance(operand, IndexOpsMixin) and isinstance(
        operand.spark.data_type, BooleanType
    )
    if is_boolean_index_ops:
        assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
        return operand.spark.transform(lambda scol: scol.cast(spark_type))
    if isinstance(operand, bool):
        return int(operand)
    return operand
def _as_categorical_type(
    index_ops: Union["Series", "Index"], dtype: CategoricalDtype, spark_type: DataType
) -> Union["Index", "Series"]:
    """Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
    assert isinstance(dtype, CategoricalDtype)
    if dtype.categories is None:
        # No explicit categories: infer them from the data.  factorize() yields
        # both the integer codes column and the unique values in one pass.
        codes, uniques = index_ops.factorize()
        return codes._with_new_scol(
            codes.spark.column,
            field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
        )
    else:
        categories = dtype.categories
        if len(categories) == 0:
            # No valid category exists, so every value maps to the missing
            # code -1.
            scol = F.lit(-1)
        else:
            # Build a category -> code Spark map literal and look each value up
            # in it; values absent from the map fall back to -1 via coalesce.
            kvs = chain(
                *[(F.lit(category), F.lit(code)) for code, category in enumerate(categories)]
            )
            map_scol = F.create_map(*kvs)
            scol = F.coalesce(map_scol.getItem(index_ops.spark.column), F.lit(-1))
        # Codes are never null (missing maps to -1), hence nullable=False.
        return index_ops._with_new_scol(
            scol.cast(spark_type).alias(index_ops._internal.data_fields[0].name),
            field=index_ops._internal.data_fields[0].copy(
                dtype=dtype, spark_type=spark_type, nullable=False
            ),
        )
def _as_bool_type(
    index_ops: Union["Series", "Index"], dtype: Union[str, type, Dtype]
) -> Union["Index", "Series"]:
    """Cast `index_ops` to a Spark BooleanType column for the given `dtype`."""
    from pyspark.pandas.internal import InternalField

    source_col = index_ops.spark.column
    casted = source_col.cast(BooleanType())
    if not isinstance(dtype, extension_dtypes):
        # A plain bool dtype cannot represent missing values: map nulls to
        # False.  Extension dtypes keep nulls as-is.
        casted = F.when(source_col.isNull(), F.lit(False)).otherwise(casted)
    return index_ops._with_new_scol(
        casted.alias(index_ops._internal.data_spark_column_names[0]),
        field=InternalField(dtype=dtype),
    )
def _as_string_type(
    index_ops: Union["Series", "Index"],
    dtype: Union[str, type, Dtype],
    *,
    null_str: str = str(None)
) -> Union["Index", "Series"]:
    """Cast `index_ops` to a Spark StringType column.

    `null_str` is the textual stand-in used for null values when the target
    dtype is not an extension dtype.
    """
    from pyspark.pandas.internal import InternalField

    stringified = index_ops.spark.column.cast(StringType())
    if not isinstance(dtype, extension_dtypes):
        # Non-extension dtypes render missing values as `null_str`.
        stringified = F.when(index_ops.spark.column.isNull(), null_str).otherwise(stringified)
    return index_ops._with_new_scol(
        stringified.alias(index_ops._internal.data_spark_column_names[0]),
        field=InternalField(dtype=dtype),
    )
def _as_other_type(
    index_ops: Union["Series", "Index"], dtype: Union[str, type, Dtype], spark_type: DataType
) -> Union["Index", "Series"]:
    """Cast `index_ops` to a `dtype`/`spark_type` that requires no pre-processing.

    CategoricalDtype, BooleanType, and StringType targets need dedicated
    handling and must not be routed through this helper.
    """
    from pyspark.pandas.internal import InternalField

    assert not (
        isinstance(dtype, CategoricalDtype)
        or isinstance(spark_type, (BooleanType, StringType))
    ), "Pre-processing is needed before the type casting."
    return index_ops._with_new_scol(
        index_ops.spark.column.cast(spark_type).alias(
            index_ops._internal.data_spark_column_names[0]
        ),
        field=InternalField(dtype=dtype),
    )
class DataTypeOps(object, metaclass=ABCMeta):
    """The base class for binary operations of pandas-on-Spark objects (of different data types)."""

    def __new__(cls, dtype: Dtype, spark_type: DataType):
        # Deferred imports avoid circular imports between this base module and
        # the concrete ops modules, which all import from here.
        from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
        from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
        from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
        from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
        from pyspark.pandas.data_type_ops.date_ops import DateOps
        from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps
        from pyspark.pandas.data_type_ops.null_ops import NullOps
        from pyspark.pandas.data_type_ops.num_ops import IntegralOps, FractionalOps
        from pyspark.pandas.data_type_ops.string_ops import StringOps
        from pyspark.pandas.data_type_ops.udt_ops import UDTOps

        # Dispatch to the subclass matching (dtype, spark_type).  Branch order
        # matters: the categorical case is decided by the pandas dtype and must
        # be checked before any of the Spark-type-based branches.
        if isinstance(dtype, CategoricalDtype):
            return object.__new__(CategoricalOps)
        elif isinstance(spark_type, FractionalType):
            return object.__new__(FractionalOps)
        elif isinstance(spark_type, IntegralType):
            return object.__new__(IntegralOps)
        elif isinstance(spark_type, StringType):
            return object.__new__(StringOps)
        elif isinstance(spark_type, BooleanType):
            # Pandas' nullable extension BooleanDtype has its own ops class.
            if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
                return object.__new__(BooleanExtensionOps)
            else:
                return object.__new__(BooleanOps)
        elif isinstance(spark_type, TimestampType):
            return object.__new__(DatetimeOps)
        elif isinstance(spark_type, DateType):
            return object.__new__(DateOps)
        elif isinstance(spark_type, BinaryType):
            return object.__new__(BinaryOps)
        elif isinstance(spark_type, ArrayType):
            return object.__new__(ArrayOps)
        elif isinstance(spark_type, MapType):
            return object.__new__(MapOps)
        elif isinstance(spark_type, StructType):
            return object.__new__(StructOps)
        elif isinstance(spark_type, NullType):
            return object.__new__(NullOps)
        elif isinstance(spark_type, UserDefinedType):
            return object.__new__(UDTOps)
        else:
            raise TypeError("Type %s was not understood." % dtype)

    def __init__(self, dtype: Dtype, spark_type: DataType):
        # The pandas dtype and the corresponding Spark data type this ops
        # instance handles.
        self.dtype = dtype
        self.spark_type = spark_type

    @property
    def pretty_name(self) -> str:
        """Human-readable type name used in error messages; overridden by subclasses."""
        raise NotImplementedError()

    # Binary arithmetic operations.  Subclasses override the operations they
    # support; the defaults below reject the operation with a TypeError that
    # names the offending type via `pretty_name`.
    def add(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Addition can not be applied to %s." % self.pretty_name)

    def sub(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)

    def mul(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)

    def truediv(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("True division can not be applied to %s." % self.pretty_name)

    def floordiv(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Floor division can not be applied to %s." % self.pretty_name)

    def mod(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Modulo can not be applied to %s." % self.pretty_name)

    def pow(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)

    # Reflected (right-hand side) variants of the arithmetic operations.
    def radd(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Addition can not be applied to %s." % self.pretty_name)

    def rsub(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)

    def rmul(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)

    def rtruediv(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("True division can not be applied to %s." % self.pretty_name)

    def rfloordiv(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Floor division can not be applied to %s." % self.pretty_name)

    def rmod(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Modulo can not be applied to %s." % self.pretty_name)

    def rpow(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)

    # Logical operations.  NOTE: despite the dunder names these take explicit
    # (left, right) operands instead of acting on `self`.
    def __and__(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)

    def __or__(self, left, right) -> Union["Series", "Index"]:
        raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)

    def rand(self, left, right) -> Union["Series", "Index"]:
        # Reflected and/or delegate to the left operand's own operator.
        return left.__and__(right)

    def ror(self, left, right) -> Union["Series", "Index"]:
        return left.__or__(right)

    def restore(self, col: pd.Series) -> pd.Series:
        """Restore column when to_pandas."""
        return col

    def prepare(self, col: pd.Series) -> pd.Series:
        """Prepare column when from_pandas."""
        # Normalize NaN to None so missing values are represented uniformly.
        return col.replace({np.nan: None})

    def astype(
        self, index_ops: Union["Index", "Series"], dtype: Union[str, type, Dtype]
    ) -> Union["Index", "Series"]:
        raise TypeError("astype can not be applied to %s." % self.pretty_name)
| apache-2.0 |
HoracioAlvarado/fwd | venv/Lib/site-packages/pip/operations/freeze.py | 24 | 3925 | from __future__ import absolute_import
import logging
import re
import pip
from pip.compat import stdlib_pkgs
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
logger = logging.getLogger(__name__)
# packages to exclude from freeze output
freeze_excludes = stdlib_pkgs + ['setuptools', 'pip', 'distribute']
def freeze(
        requirement=None,
        find_links=None, local_only=None, user_only=None, skip_regex=None,
        default_vcs=None,
        isolated=False,
        wheel_cache=None):
    """Generate the lines of a requirements file for the current environment.

    Yields one line at a time: ``-f`` lines for each find-links URL, lines
    echoed or rewritten from the given ``requirement`` file (if any), and a
    frozen requirement string for every remaining installed distribution.
    """
    find_links = find_links or []
    skip_match = None

    if skip_regex:
        skip_match = re.compile(skip_regex)

    dependency_links = []

    # Collect dependency links declared by installed distributions, plus any
    # find-links URLs that pin a specific egg.
    for dist in pkg_resources.working_set:
        if dist.has_metadata('dependency_links.txt'):
            dependency_links.extend(
                dist.get_metadata_lines('dependency_links.txt')
            )
    for link in find_links:
        if '#egg=' in link:
            dependency_links.append(link)
    for link in find_links:
        yield '-f %s' % link
    # Map requirement name -> frozen requirement for everything installed
    # (minus the excluded baseline packages).
    installations = {}
    for dist in get_installed_distributions(local_only=local_only,
                                            skip=freeze_excludes,
                                            user_only=user_only):
        req = pip.FrozenRequirement.from_dist(
            dist,
            dependency_links
        )
        installations[req.name] = req
    if requirement:
        # Echo the given requirements file, replacing each named requirement
        # with its currently installed (frozen) version.
        with open(requirement) as req_file:
            for line in req_file:
                # Pass comments, blank lines, skip-regex matches, and option
                # lines through unchanged.
                if (not line.strip() or
                        line.strip().startswith('#') or
                        (skip_match and skip_match.search(line)) or
                        line.startswith((
                            '-r', '--requirement',
                            '-Z', '--always-unzip',
                            '-f', '--find-links',
                            '-i', '--index-url',
                            '--extra-index-url'))):
                    yield line.rstrip()
                    continue
                if line.startswith('-e') or line.startswith('--editable'):
                    # Strip the editable flag before parsing the remainder.
                    if line.startswith('-e'):
                        line = line[2:].strip()
                    else:
                        line = line[len('--editable'):].strip().lstrip('=')
                    line_req = InstallRequirement.from_editable(
                        line,
                        default_vcs=default_vcs,
                        isolated=isolated,
                        wheel_cache=wheel_cache,
                    )
                else:
                    line_req = InstallRequirement.from_line(
                        line,
                        isolated=isolated,
                        wheel_cache=wheel_cache,
                    )

                if not line_req.name:
                    logger.info(
                        "Skipping line because it's not clear what it "
                        "would install: %s",
                        line.strip(),
                    )
                    logger.info(
                        "  (add #egg=PackageName to the URL to avoid"
                        " this warning)"
                    )
                elif line_req.name not in installations:
                    logger.warning(
                        "Requirement file contains %s, but that package is"
                        " not installed",
                        line.strip(),
                    )
                else:
                    # Emit the frozen version and drop it from the leftovers.
                    yield str(installations[line_req.name]).rstrip()
                    del installations[line_req.name]

        yield(
            '## The following requirements were added by '
            'pip freeze:'
        )
    # Whatever was not consumed above is emitted in name order.
    for installation in sorted(
            installations.values(), key=lambda x: x.name.lower()):
        yield str(installation).rstrip()
| mit |
valtech-mooc/edx-platform | lms/djangoapps/dashboard/tests/test_sysadmin.py | 13 | 23822 | """
Provide tests for sysadmin dashboard feature in sysadmin.py
"""
import glob
import os
import re
import shutil
import unittest
from util.date_utils import get_time_display, DEFAULT_DATE_TIME_FORMAT
from django.conf import settings
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test.utils import override_settings
from django.utils.timezone import utc as UTC
from django.utils.translation import ugettext as _
import mongoengine
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.django_utils import TEST_DATA_XML_MODULESTORE
from dashboard.models import CourseImportLog
from dashboard.sysadmin import Users
from dashboard.git_import import GitImportError
from datetime import datetime
from external_auth.models import ExternalAuthMap
from student.roles import CourseStaffRole, GlobalStaff
from student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.modulestore.xml import XMLModuleStore
# Connection parameters for the MongoDB database that stores course git
# import logs (see dashboard.models.CourseImportLog).
TEST_MONGODB_LOG = {
    'host': MONGO_HOST,
    'port': MONGO_PORT_NUM,
    'user': '',
    'password': '',
    'db': 'test_xlog',
}

# Copy of the feature flags with SSL certificate authentication enabled,
# used by the external-auth (authmap) tests.
FEATURES_WITH_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITH_SSL_AUTH['AUTH_USE_CERTIFICATES'] = True
class SysadminBaseTestCase(ModuleStoreTestCase):
    """
    Base class with common methods used in XML and Mongo tests
    """

    TEST_REPO = 'https://github.com/mitocw/edx4edx_lite.git'
    TEST_BRANCH = 'testing_do_not_delete'
    TEST_BRANCH_COURSE = SlashSeparatedCourseKey('MITx', 'edx4edx_branch', 'edx4edx')

    def setUp(self):
        """Setup test case by adding primary user."""
        super(SysadminBaseTestCase, self).setUp(create_user=False)
        self.user = UserFactory.create(username='test_user',
                                       email='test_user+sysadmin@edx.org',
                                       password='foo')
        self.client = Client()

    def _setstaff_login(self):
        """Makes the test user staff and logs them in"""
        GlobalStaff().add_users(self.user)
        self.client.login(username=self.user.username, password='foo')

    def _add_edx4edx(self, branch=None):
        """Adds the edx4edx sample course, optionally from a specific git branch."""
        post_dict = {'repo_location': self.TEST_REPO, 'action': 'add_course', }
        if branch:
            post_dict['repo_branch'] = branch
        return self.client.post(reverse('sysadmin_courses'), post_dict)

    def _rm_edx4edx(self):
        """Deletes the sample course from the XML store"""
        def_ms = modulestore()
        course_path = '{0}/edx4edx_lite'.format(
            os.path.abspath(settings.DATA_DIR))
        try:
            # using XML store
            course = def_ms.courses.get(course_path, None)
        except AttributeError:
            # Using mongo store
            course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))

        # Delete git loaded course
        response = self.client.post(
            reverse('sysadmin_courses'),
            {
                'course_id': course.id.to_deprecated_string(),
                'action': 'del_course',
            }
        )
        self.addCleanup(self._rm_glob, '{0}_deleted_*'.format(course_path))

        return response

    def _rm_glob(self, path):
        """
        Remove every directory matching the shell glob `path`.

        The pattern must only expand to directories.
        """
        # Use a distinct loop variable instead of shadowing the `path`
        # parameter, which made the original loop needlessly confusing.
        for expanded_path in glob.glob(path):
            shutil.rmtree(expanded_path)

    def _mkdir(self, path):
        """
        Create directory and add the cleanup for it.
        """
        os.mkdir(path)
        self.addCleanup(shutil.rmtree, path)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'),
                     "ENABLE_SYSADMIN_DASHBOARD not set")
@override_settings(GIT_IMPORT_WITH_XMLMODULESTORE=True)
class TestSysadmin(SysadminBaseTestCase):
    """
    Test sysadmin dashboard features using XMLModuleStore
    """
    MODULESTORE = TEST_DATA_XML_MODULESTORE

    def test_staff_access(self):
        """Test access controls."""
        test_views = ['sysadmin', 'sysadmin_courses', 'sysadmin_staffing', ]
        # Anonymous users are redirected away from every sysadmin page.
        for view in test_views:
            response = self.client.get(reverse(view))
            self.assertEqual(response.status_code, 302)

        self.user.is_staff = False
        self.user.save()

        logged_in = self.client.login(username=self.user.username,
                                      password='foo')
        self.assertTrue(logged_in)

        # Authenticated non-staff users get a 404 instead.
        for view in test_views:
            response = self.client.get(reverse(view))
            self.assertEqual(response.status_code, 404)
        response = self.client.get(reverse('gitlogs'))
        self.assertEqual(response.status_code, 404)

        self.user.is_staff = True
        self.user.save()

        self.client.logout()
        self.client.login(username=self.user.username, password='foo')

        # Staff users can view every page.  The original used
        # assertTrue(status_code, 200), which never compares its arguments.
        for view in test_views:
            response = self.client.get(reverse(view))
            self.assertEqual(response.status_code, 200)
        response = self.client.get(reverse('gitlogs'))
        self.assertEqual(response.status_code, 200)

    def test_user_mod(self):
        """Create and delete a user"""
        self._setstaff_login()
        self.client.login(username=self.user.username, password='foo')

        # Create user tests

        # No uname
        response = self.client.post(reverse('sysadmin'),
                                    {'action': 'create_user',
                                     'student_fullname': 'blah',
                                     'student_password': 'foozor', })
        self.assertIn('Must provide username', response.content.decode('utf-8'))
        # no full name
        response = self.client.post(reverse('sysadmin'),
                                    {'action': 'create_user',
                                     'student_uname': 'test_cuser+sysadmin@edx.org',
                                     'student_password': 'foozor', })
        self.assertIn('Must provide full name', response.content.decode('utf-8'))

        # Test create valid user
        self.client.post(reverse('sysadmin'),
                         {'action': 'create_user',
                          'student_uname': 'test_cuser+sysadmin@edx.org',
                          'student_fullname': 'test cuser',
                          'student_password': 'foozor', })

        self.assertIsNotNone(
            User.objects.get(username='test_cuser+sysadmin@edx.org',
                             email='test_cuser+sysadmin@edx.org'))

        # login as new user to confirm
        self.assertTrue(self.client.login(
            username='test_cuser+sysadmin@edx.org', password='foozor'))

        self.client.logout()
        self.client.login(username=self.user.username, password='foo')

        # Delete user tests

        # Try no username
        response = self.client.post(reverse('sysadmin'),
                                    {'action': 'del_user', })
        self.assertIn('Must provide username', response.content.decode('utf-8'))

        # Try bad usernames
        response = self.client.post(reverse('sysadmin'),
                                    {'action': 'del_user',
                                     'student_uname': 'flabbergast@example.com',
                                     'student_fullname': 'enigma jones', })
        self.assertIn('Cannot find user with email address', response.content.decode('utf-8'))

        response = self.client.post(reverse('sysadmin'),
                                    {'action': 'del_user',
                                     'student_uname': 'flabbergast',
                                     'student_fullname': 'enigma jones', })
        self.assertIn('Cannot find user with username', response.content.decode('utf-8'))

        self.client.post(reverse('sysadmin'),
                         {'action': 'del_user',
                          'student_uname': 'test_cuser+sysadmin@edx.org',
                          'student_fullname': 'test cuser', })
        self.assertEqual(0, len(User.objects.filter(
            username='test_cuser+sysadmin@edx.org',
            email='test_cuser+sysadmin@edx.org')))

        self.assertEqual(1, len(User.objects.all()))

    def test_user_csv(self):
        """Download and validate user CSV"""
        num_test_users = 100
        self._setstaff_login()

        # Stuff full of users to test streaming
        for user_num in xrange(num_test_users):
            Users().create_user('testingman_with_long_name{}'.format(user_num),
                                'test test')

        response = self.client.post(reverse('sysadmin'),
                                    {'action': 'download_users', })

        self.assertIn('attachment', response['Content-Disposition'])
        self.assertEqual('text/csv', response['Content-Type'])
        self.assertIn('test_user', response.content)
        # One CSV header row plus one row per user (the 100 generated here and
        # the primary test user).  The original assertion used
        # assertTrue(x, y), which never compares its arguments.
        self.assertEqual(num_test_users + 2, len(response.content.splitlines()))

        # Clean up
        User.objects.filter(
            username__startswith='testingman_with_long_name').delete()

    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH)
    def test_authmap_repair(self):
        """Run authmap check and repair"""
        self._setstaff_login()

        Users().create_user('test0', 'test test')
        # Will raise exception, so no assert needed
        eamap = ExternalAuthMap.objects.get(external_name='test test')
        mitu = User.objects.get(username='test0')

        self.assertTrue(check_password(eamap.internal_password, mitu.password))
        mitu.set_password('not autogenerated')
        mitu.save()

        self.assertFalse(check_password(eamap.internal_password, mitu.password))

        # Create really non user AuthMap
        ExternalAuthMap(external_id='ll',
                        external_domain='ll',
                        external_credentials='{}',
                        external_email='a@b.c',
                        external_name='c',
                        internal_password='').save()

        response = self.client.post(reverse('sysadmin'),
                                    {'action': 'repair_eamap', })

        self.assertIn('{0} test0'.format('Failed in authenticating'),
                      response.content)
        self.assertIn('fixed password', response.content.decode('utf-8'))

        # The repaired password should authenticate again.
        self.assertTrue(self.client.login(username='test0',
                                          password=eamap.internal_password))

        # Check for all OK
        self._setstaff_login()
        response = self.client.post(reverse('sysadmin'),
                                    {'action': 'repair_eamap', })
        self.assertIn('All ok!', response.content.decode('utf-8'))

    def test_xml_course_add_delete(self):
        """add and delete course from xml module store"""
        self._setstaff_login()

        # Try bad git repo
        response = self.client.post(reverse('sysadmin_courses'), {
            'repo_location': 'github.com/mitocw/edx4edx_lite',
            'action': 'add_course', })
        self.assertIn(_("The git repo location should end with '.git', "
                        "and be a valid url"), response.content.decode('utf-8'))

        response = self.client.post(reverse('sysadmin_courses'), {
            'repo_location': 'http://example.com/not_real.git',
            'action': 'add_course', })
        self.assertIn('Unable to clone or pull repository',
                      response.content.decode('utf-8'))

        # Create git loaded course
        response = self._add_edx4edx()
        def_ms = modulestore()
        self.assertIn('xml', str(def_ms.__class__))
        course = def_ms.courses.get('{0}/edx4edx_lite'.format(
            os.path.abspath(settings.DATA_DIR)), None)
        self.assertIsNotNone(course)

        # Delete a course
        self._rm_edx4edx()
        course = def_ms.courses.get('{0}/edx4edx_lite'.format(
            os.path.abspath(settings.DATA_DIR)), None)
        self.assertIsNone(course)

        # Load a bad git branch
        response = self._add_edx4edx('asdfasdfasdf')
        self.assertIn(GitImportError.REMOTE_BRANCH_MISSING,
                      response.content.decode('utf-8'))

        # Load a course from a git branch
        self._add_edx4edx(self.TEST_BRANCH)
        course = def_ms.courses.get('{0}/edx4edx_lite'.format(
            os.path.abspath(settings.DATA_DIR)), None)
        self.assertIsNotNone(course)
        self.assertEqual(self.TEST_BRANCH_COURSE, course.id)
        self._rm_edx4edx()

        # Try and delete a non-existent course
        response = self.client.post(reverse('sysadmin_courses'),
                                    {'course_id': 'foobar/foo/blah',
                                     'action': 'del_course', })
        self.assertIn('Error - cannot get course with ID',
                      response.content.decode('utf-8'))

    @override_settings(GIT_IMPORT_WITH_XMLMODULESTORE=False)
    def test_xml_safety_flag(self):
        """Make sure the settings flag to disable xml imports is working"""
        self._setstaff_login()
        response = self._add_edx4edx()
        self.assertIn('GIT_IMPORT_WITH_XMLMODULESTORE', response.content)
        def_ms = modulestore()
        course = def_ms.courses.get('{0}/edx4edx_lite'.format(
            os.path.abspath(settings.DATA_DIR)), None)
        self.assertIsNone(course)

    def test_git_pull(self):
        """Make sure we can pull"""
        self._setstaff_login()

        # Importing a second time reuses (and reloads) the existing checkout.
        response = self._add_edx4edx()
        response = self._add_edx4edx()
        self.assertIn(_("The course {0} already exists in the data directory! "
                        "(reloading anyway)").format('edx4edx_lite'),
                      response.content.decode('utf-8'))
        self._rm_edx4edx()

    def test_staff_csv(self):
        """Download and validate staff CSV"""
        self._setstaff_login()
        self._add_edx4edx()
        def_ms = modulestore()
        course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
        CourseStaffRole(course.id).add_users(self.user)

        response = self.client.post(reverse('sysadmin_staffing'),
                                    {'action': 'get_staff_csv', })
        self.assertIn('attachment', response['Content-Disposition'])
        self.assertEqual('text/csv', response['Content-Type'])
        # The CSV must begin with the expected quoted header columns.
        columns = ['course_id', 'role', 'username',
                   'email', 'full_name', ]
        self.assertIn(','.join('"' + c + '"' for c in columns),
                      response.content)

        self._rm_edx4edx()

    def test_enrollment_page(self):
        """
        Adds a course and makes sure that it shows up on the staffing and
        enrollment page
        """
        self._setstaff_login()
        self._add_edx4edx()
        response = self.client.get(reverse('sysadmin_staffing'))
        self.assertIn('edx4edx', response.content)
        self._rm_edx4edx()
@override_settings(MONGODB_LOG=TEST_MONGODB_LOG)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'),
"ENABLE_SYSADMIN_DASHBOARD not set")
class TestSysAdminMongoCourseImport(SysadminBaseTestCase):
"""
Check that importing into the mongo module store works
"""
    @classmethod
    def tearDownClass(cls):
        """Delete mongo log entries after test."""
        super(TestSysAdminMongoCourseImport, cls).tearDownClass()
        try:
            # Drop any CourseImportLog documents the tests created; a
            # missing/unreachable test mongo instance is not an error here.
            mongoengine.connect(TEST_MONGODB_LOG['db'])
            CourseImportLog.objects.all().delete()
        except mongoengine.connection.ConnectionError:
            pass
    def _setstaff_login(self):
        """
        Makes the test user staff and logs them in
        """
        # Unlike the base-class helper (which goes through GlobalStaff), this
        # grants Django's is_staff flag directly.
        self.user.is_staff = True
        self.user.save()

        self.client.login(username=self.user.username, password='foo')
def test_missing_repo_dir(self):
"""
Ensure that we handle a missing repo dir
"""
self._setstaff_login()
if os.path.isdir(getattr(settings, 'GIT_REPO_DIR')):
shutil.rmtree(getattr(settings, 'GIT_REPO_DIR'))
# Create git loaded course
response = self._add_edx4edx()
self.assertIn(GitImportError.NO_DIR,
response.content.decode('UTF-8'))
def test_mongo_course_add_delete(self):
"""
This is the same as TestSysadmin.test_xml_course_add_delete,
but it uses a mongo store
"""
self._setstaff_login()
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
def_ms = modulestore()
self.assertFalse(isinstance(def_ms, XMLModuleStore))
self._add_edx4edx()
course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
self.assertIsNotNone(course)
self._rm_edx4edx()
course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
self.assertIsNone(course)
    def test_course_info(self):
        """
        Check to make sure we are getting git info for courses
        """
        # Regex of first 3 columns of course information table row for
        # test course loaded from git. Would not have sha1 if
        # git_info_for_course failed.
        table_re = re.compile(r"""
            <tr>\s+
            <td>edX\sAuthor\sCourse</td>\s+ # expected test git course name
            <td>MITx/edx4edx/edx4edx</td>\s+ # expected test git course_id
            <td>[a-fA-F\d]{40}</td> # git sha1 hash
            """, re.VERBOSE)

        self._setstaff_login()
        self._mkdir(getattr(settings, 'GIT_REPO_DIR'))

        # Make sure we don't have any git hashes on the page
        response = self.client.get(reverse('sysadmin_courses'))
        self.assertNotRegexpMatches(response.content, table_re)

        # Now add the course and make sure it does match
        response = self._add_edx4edx()
        self.assertRegexpMatches(response.content, table_re)
    def test_gitlogs(self):
        """
        Create a log entry and make sure it exists
        """
        self._setstaff_login()
        self._mkdir(getattr(settings, 'GIT_REPO_DIR'))

        self._add_edx4edx()
        response = self.client.get(reverse('gitlogs'))

        # Check that our earlier import has a log with a link to details
        self.assertIn('/gitlogs/MITx/edx4edx/edx4edx', response.content)

        # The detail page shows the captured import output.
        response = self.client.get(
            reverse('gitlogs_detail', kwargs={
                'course_id': 'MITx/edx4edx/edx4edx'}))

        self.assertIn('======> IMPORTING course',
                      response.content)

        self._rm_edx4edx()
    def test_gitlog_date(self):
        """
        Make sure the date is timezone-aware and being converted/formatted
        properly.
        """

        # Timezones east and west of UTC, to catch conversion errors in
        # either direction.
        tz_names = [
            'America/New_York', # UTC - 5
            'Asia/Pyongyang', # UTC + 9
            'Europe/London', # UTC
            'Canada/Yukon', # UTC - 8
            'Europe/Moscow', # UTC + 4
        ]

        tz_format = DEFAULT_DATE_TIME_FORMAT

        self._setstaff_login()
        self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
        self._add_edx4edx()

        date = CourseImportLog.objects.first().created.replace(tzinfo=UTC)

        # The rendered page must show the timestamp converted to whichever
        # TIME_ZONE is active.
        for timezone in tz_names:
            with (override_settings(TIME_ZONE=timezone)):
                date_text = get_time_display(date, tz_format, settings.TIME_ZONE)
                response = self.client.get(reverse('gitlogs'))
                self.assertIn(date_text, response.content.decode('UTF-8'))

        self._rm_edx4edx()
def test_gitlog_bad_course(self):
"""
Make sure we gracefully handle courses that don't exist.
"""
self._setstaff_login()
response = self.client.get(
reverse('gitlogs_detail', kwargs={
'course_id': 'Not/Real/Testing'}))
self.assertEqual(404, response.status_code)
def test_gitlog_no_logs(self):
"""
Make sure the template behaves well when rendered despite there not being any logs.
(This is for courses imported using methods other than the git_add_course command)
"""
self._setstaff_login()
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
self._add_edx4edx()
# Simulate a lack of git import logs
import_logs = CourseImportLog.objects.all()
import_logs.delete()
response = self.client.get(
reverse('gitlogs_detail', kwargs={
'course_id': 'MITx/edx4edx/edx4edx'
})
)
self.assertIn(
'No git import logs have been recorded for this course.',
response.content
)
self._rm_edx4edx()
def test_gitlog_pagination_out_of_range_invalid(self):
"""
Make sure the pagination behaves properly when the requested page is out
of range.
"""
self._setstaff_login()
mongoengine.connect(TEST_MONGODB_LOG['db'])
for _ in xrange(15):
CourseImportLog(
course_id=SlashSeparatedCourseKey("test", "test", "test"),
location="location",
import_log="import_log",
git_log="git_log",
repo_dir="repo_dir",
created=datetime.now()
).save()
for page, expected in [(-1, 1), (1, 1), (2, 2), (30, 2), ('abc', 1)]:
response = self.client.get(
'{}?page={}'.format(
reverse('gitlogs'),
page
)
)
self.assertIn(
'Page {} of 2'.format(expected),
response.content
)
CourseImportLog.objects.delete()
def test_gitlog_courseteam_access(self):
"""
Ensure course team users are allowed to access only their own course.
"""
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
self._setstaff_login()
self._add_edx4edx()
self.user.is_staff = False
self.user.save()
logged_in = self.client.login(username=self.user.username,
password='foo')
response = self.client.get(reverse('gitlogs'))
# Make sure our non privileged user doesn't have access to all logs
self.assertEqual(response.status_code, 404)
# Or specific logs
response = self.client.get(reverse('gitlogs_detail', kwargs={
'course_id': 'MITx/edx4edx/edx4edx'
}))
self.assertEqual(response.status_code, 404)
# Add user as staff in course team
def_ms = modulestore()
course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
CourseStaffRole(course.id).add_users(self.user)
self.assertTrue(CourseStaffRole(course.id).has_user(self.user))
logged_in = self.client.login(username=self.user.username,
password='foo')
self.assertTrue(logged_in)
response = self.client.get(
reverse('gitlogs_detail', kwargs={
'course_id': 'MITx/edx4edx/edx4edx'
}))
self.assertIn('======> IMPORTING course',
response.content)
self._rm_edx4edx()
| agpl-3.0 |
bygit/shadowsocks | shadowsocks/encrypt.py | 990 | 5180 | #!/usr/bin/env python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
# Registry mapping cipher method name -> (key_len, iv_len, cipher_factory),
# aggregated from every available crypto backend.
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
    """Return ``length`` bytes of cryptographically suitable randomness."""
    return os.urandom(length)
cached_keys = {}
def try_cipher(key, method=None):
    # Instantiate an Encryptor purely for its side effect: Encryptor
    # exits the process (sys.exit) if ``method`` is not supported.
    Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
    """Derive a ``(key, iv)`` pair from ``password``.

    Equivalent to OpenSSL's EVP_BytesToKey() with count 1 and MD5, so
    that we make the same key and iv as the nodejs version.  Results are
    memoized in the module-level ``cached_keys`` dict.

    :param password: pre-shared password (bytes)
    :param key_len: required key length in bytes
    :param iv_len: required iv length in bytes
    :returns: tuple ``(key, iv)`` of byte strings
    """
    cached_key = '%s-%d-%d' % (password, key_len, iv_len)
    r = cached_keys.get(cached_key, None)
    if r:
        return r
    m = []
    total_len = 0
    while total_len < (key_len + iv_len):
        md5 = hashlib.md5()
        # Each round hashes the previous digest concatenated with the
        # password; the first round hashes the password alone.
        if m:
            md5.update(m[-1] + password)
        else:
            md5.update(password)
        digest = md5.digest()
        m.append(digest)
        # Perf fix: track the accumulated length instead of re-joining
        # the whole digest list on every loop iteration (was O(n**2)).
        total_len += len(digest)
    ms = b''.join(m)
    key = ms[:key_len]
    iv = ms[key_len:key_len + iv_len]
    cached_keys[cached_key] = (key, iv)
    return key, iv
class Encryptor(object):
    """Stateful encryptor/decryptor for a single shadowsocks stream.

    A random cipher IV is generated at construction and prefixed to the
    first encrypted chunk; the decipher is created lazily from the IV
    found at the head of the first chunk passed to decrypt().
    """

    def __init__(self, key, method):
        self.key = key
        self.method = method
        self.iv = None
        self.iv_sent = False
        self.cipher_iv = b''
        self.decipher = None
        method = method.lower()
        # method info tuple: (key_len, iv_len, cipher_factory)
        self._method_info = self.get_method_info(method)
        if self._method_info:
            self.cipher = self.get_cipher(key, method, 1,
                                          random_string(self._method_info[1]))
        else:
            # An unsupported cipher is fatal for the whole process.
            logging.error('method %s not supported' % method)
            sys.exit(1)

    def get_method_info(self, method):
        # Look up (key_len, iv_len, factory) for the cipher name; None if
        # the method is unknown.
        method = method.lower()
        m = method_supported.get(method)
        return m

    def iv_len(self):
        # Length of the IV the encrypting side is using.
        return len(self.cipher_iv)

    def get_cipher(self, password, method, op, iv):
        """Build a cipher object; ``op`` is 1 to encrypt, 0 to decrypt."""
        password = common.to_bytes(password)
        m = self._method_info
        if m[0] > 0:
            key, iv_ = EVP_BytesToKey(password, m[0], m[1])
        else:
            # key_length == 0 indicates we should use the key directly
            key, iv = password, b''
        iv = iv[:m[1]]
        if op == 1:
            # this iv is for cipher not decipher
            self.cipher_iv = iv[:m[1]]
        return m[2](method, key, iv, op)

    def encrypt(self, buf):
        # The first non-empty chunk is prefixed with the cipher IV.
        if len(buf) == 0:
            return buf
        if self.iv_sent:
            return self.cipher.update(buf)
        else:
            self.iv_sent = True
            return self.cipher_iv + self.cipher.update(buf)

    def decrypt(self, buf):
        # The decipher is created from the IV found at the head of the
        # first non-empty chunk received.
        if len(buf) == 0:
            return buf
        if self.decipher is None:
            decipher_iv_len = self._method_info[1]
            decipher_iv = buf[:decipher_iv_len]
            self.decipher = self.get_cipher(self.key, self.method, 0,
                                            iv=decipher_iv)
            buf = buf[decipher_iv_len:]
            if len(buf) == 0:
                return buf
        return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
    """One-shot encrypt (op=1) or decrypt (op=0) of ``data``.

    On encryption a random IV is generated and prepended to the output;
    on decryption the IV is taken from the head of ``data``.
    """
    result = []
    method = method.lower()
    (key_len, iv_len, m) = method_supported[method]
    if key_len > 0:
        key, _ = EVP_BytesToKey(password, key_len, iv_len)
    else:
        # key_len == 0: the password is used directly as the key
        key = password
    if op:
        iv = random_string(iv_len)
        result.append(iv)
    else:
        iv = data[:iv_len]
        data = data[iv_len:]
    cipher = m(method, key, iv, op)
    result.append(cipher.update(data))
    return b''.join(result)
# Ciphers exercised by the self-tests below; covers the openssl, sodium,
# rc4-md5 and table backends registered in method_supported.
CIPHERS_TO_TEST = [
    'aes-128-cfb',
    'aes-256-cfb',
    'rc4-md5',
    'salsa20',
    'chacha20',
    'table',
]
def test_encryptor():
    """Round-trip random data through Encryptor for every test cipher."""
    from os import urandom
    plain = urandom(10240)
    for method in CIPHERS_TO_TEST:
        logging.warn(method)
        enc = Encryptor(b'key', method)
        dec = Encryptor(b'key', method)
        roundtripped = dec.decrypt(enc.encrypt(plain))
        assert roundtripped == plain
def test_encrypt_all():
    """Round-trip random data through encrypt_all for every test cipher."""
    from os import urandom
    plain = urandom(10240)
    for method in CIPHERS_TO_TEST:
        logging.warn(method)
        ciphertext = encrypt_all(b'key', method, 1, plain)
        assert encrypt_all(b'key', method, 0, ciphertext) == plain
if __name__ == '__main__':
    # Run the round-trip self-tests when executed directly.
    test_encrypt_all()
    test_encryptor()
| apache-2.0 |
SmartInfrastructures/neutron | neutron/tests/unit/extensions/test_quotasv2.py | 7 | 19779 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_config import cfg
import testtools
from webob import exc
import webtest
from neutron.api import extensions
from neutron.common import config
from neutron.common import constants
from neutron.common import exceptions
from neutron import context
from neutron.db import quota_db
from neutron import quota
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import testlib_api
# Core plugin used by these tests; patched with autospec in setUp().
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'

# Shorthand for building versioned API paths in the requests below.
_get_path = test_base._get_path
class QuotaExtensionTestCase(testlib_api.WebTestCase):
    """Base fixture wiring a mocked core plugin behind the quotas
    extension, exposed through a webtest application (``self.api``)."""

    def setUp(self):
        super(QuotaExtensionTestCase, self).setUp()
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        self.useFixture(tools.AttributeMapMemento())
        # Create the default configurations
        self.config_parse()
        # Update the plugin and extensions path
        self.setup_coreplugin(TARGET_PLUGIN)
        cfg.CONF.set_override(
            'quota_items',
            ['network', 'subnet', 'port', 'extra1'],
            group='QUOTAS')
        # Rebuild the engine so the quota_items override above is picked
        # up from scratch.
        quota.QUOTAS = quota.QuotaEngine()
        quota.register_resources_from_config()
        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
        self.plugin = self._plugin_patcher.start()
        self.plugin.return_value.supported_extension_aliases = ['quotas']
        # QUOTAS will register the items in conf when starting
        # extra1 here is added later, so have to do it manually
        quota.QUOTAS.register_resource_by_name('extra1')
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        app = config.load_paste_app('extensions_test_app')
        ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self.api = webtest.TestApp(ext_middleware)

    def tearDown(self):
        self.api = None
        self.plugin = None
        super(QuotaExtensionTestCase, self).tearDown()

    def _test_quota_default_values(self, expected_values):
        # GET the tenant's own quotas and verify each expected resource
        # limit is reported.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        quota = self.deserialize(res)
        for resource, expected_value in expected_values.items():
            self.assertEqual(expected_value,
                             quota['quota'][resource])
class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
    """Exercises the quotas extension backed by the DB quota driver."""

    fmt = 'json'

    def setUp(self):
        cfg.CONF.set_override(
            'quota_driver',
            'neutron.db.quota_db.DbQuotaDriver',
            group='QUOTAS')
        super(QuotaExtensionDbTestCase, self).setUp()

    def test_quotas_loaded_right(self):
        res = self.api.get(_get_path('quotas', fmt=self.fmt))
        quota = self.deserialize(res)
        self.assertEqual([], quota['quotas'])
        self.assertEqual(200, res.status_int)

    def test_quotas_default_values(self):
        self._test_quota_default_values(
            {'network': 10,
             'subnet': 10,
             'port': 50,
             'extra1': -1})

    def test_quotas_negative_default_value(self):
        cfg.CONF.set_override(
            'quota_port', -666, group='QUOTAS')
        cfg.CONF.set_override(
            'quota_network', -10, group='QUOTAS')
        cfg.CONF.set_override(
            'quota_subnet', -50, group='QUOTAS')
        # Any negative configured default is reported as -1 (unlimited).
        self._test_quota_default_values(
            {'network': -1,
             'subnet': -1,
             'port': -1,
             'extra1': -1})

    def test_show_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])

    def test_show_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)

    def test_show_quotas_with_owner_tenant(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])

    def test_list_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual([], quota['quotas'])

    def test_list_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)

    def test_update_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        quotas = {'quota': {'network': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(403, res.status_int)

    def test_update_quotas_with_non_integer_returns_400(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': 'abc'}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)

    def test_update_quotas_with_negative_integer_returns_400(self):
        # -1 means unlimited, but anything below -1 is invalid.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': -2}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)

    def test_update_quotas_with_out_of_range_integer_returns_400(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_update_quotas_to_unlimited(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': -1}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=False)
        self.assertEqual(200, res.status_int)

    def test_update_quotas_exceeding_current_limit(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': 120}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=False)
        self.assertEqual(200, res.status_int)

    def test_update_quotas_with_non_support_resource_returns_400(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'abc': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)

    def test_update_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        quotas = {'quota': {'network': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env)
        self.assertEqual(200, res.status_int)
        env2 = {'neutron.context': context.Context('', tenant_id)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env2)
        quota = self.deserialize(res)
        self.assertEqual(100, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])

    def test_update_attributes(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        quotas = {'quota': {'extra1': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env)
        self.assertEqual(200, res.status_int)
        env2 = {'neutron.context': context.Context('', tenant_id)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env2)
        quota = self.deserialize(res)
        self.assertEqual(100, quota['quota']['extra1'])

    def test_delete_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                              extra_environ=env)
        self.assertEqual(204, res.status_int)

    def test_delete_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                              extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)

    def test_quotas_loaded_bad_returns_404(self):
        # NOTE(review): the try/except makes this test vacuous -- any
        # exception, including the assertion failure itself, is swallowed.
        # Presumably this tolerates routing setups where the bad path
        # raises instead of returning 404; confirm the intent and either
        # tighten the assertion or document why the swallow is needed.
        try:
            res = self.api.get(_get_path('quotas'), expect_errors=True)
            self.assertEqual(404, res.status_int)
        except Exception:
            pass

    def test_quotas_limit_check(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': 5}}
        res = self.api.put(_get_path('quotas', id=tenant_id,
                                     fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env)
        self.assertEqual(200, res.status_int)
        # A request below the freshly-set limit must not raise.
        quota.QUOTAS.limit_check(context.Context('', tenant_id),
                                 tenant_id,
                                 network=4)

    def test_quotas_limit_check_with_invalid_quota_value(self):
        tenant_id = 'tenant_id1'
        with testtools.ExpectedException(exceptions.InvalidQuotaValue):
            quota.QUOTAS.limit_check(context.Context('', tenant_id),
                                     tenant_id,
                                     network=-2)

    def test_quotas_limit_check_with_not_registered_resource_fails(self):
        tenant_id = 'tenant_id1'
        self.assertRaises(exceptions.QuotaResourceUnknown,
                          quota.QUOTAS.limit_check,
                          context.get_admin_context(load_admin_roles=False),
                          tenant_id,
                          foobar=1)

    def test_quotas_get_tenant_from_request_context(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(quota['tenant']['tenant_id'], tenant_id)

    def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
        env = {'neutron.context': context.Context('', '',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(400, res.status_int)
class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
    """Exercises the quotas extension backed by the read-only ConfDriver."""

    fmt = 'json'

    def setUp(self):
        cfg.CONF.set_override(
            'quota_driver',
            'neutron.quota.ConfDriver',
            group='QUOTAS')
        super(QuotaExtensionCfgTestCase, self).setUp()

    def test_quotas_default_values(self):
        self._test_quota_default_values(
            {'network': 10,
             'subnet': 10,
             'port': 50,
             'extra1': -1})

    def test_quotas_negative_default_value(self):
        cfg.CONF.set_override(
            'quota_port', -666, group='QUOTAS')
        # Negative configured defaults are reported as -1 (unlimited).
        self._test_quota_default_values(
            {'network': 10,
             'subnet': 10,
             'port': -1,
             'extra1': -1})

    def test_show_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)

    def test_show_quotas_without_admin_forbidden(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)

    def test_update_quotas_forbidden(self):
        # The conf driver is read-only: updates are rejected outright.
        tenant_id = 'tenant_id1'
        quotas = {'quota': {'network': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas),
                           expect_errors=True)
        self.assertEqual(403, res.status_int)

    def test_delete_quotas_forbidden(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                              extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
class TestDbQuotaDriver(base.BaseTestCase):
    """Test for neutron.db.quota_db.DbQuotaDriver."""

    def test_get_tenant_quotas_arg(self):
        """_get_quotas should delegate to get_tenant_quotas with the
        default quotas and target tenant, in that order."""
        driver = quota_db.DbQuotaDriver()
        ctx = context.Context('', 'bar')
        tenant_quotas = {'network': 5}
        defaults = {'network': 10}
        with mock.patch.object(
                quota_db.DbQuotaDriver,
                'get_tenant_quotas',
                return_value=tenant_quotas) as mocked_get:
            result = driver._get_quotas(ctx, 'foo', defaults)
            self.assertEqual(result, tenant_quotas)
            mocked_get.assert_called_once_with(ctx, defaults, 'foo')
class TestQuotaDriverLoad(base.BaseTestCase):
    """Check which quota driver class the engine loads per configuration."""

    def setUp(self):
        super(TestQuotaDriverLoad, self).setUp()
        # Make sure QuotaEngine is reinitialized in each test.
        quota.QUOTAS._driver = None

    def _test_quota_driver(self, cfg_driver, loaded_driver,
                           with_quota_db_module=True):
        cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
        with mock.patch.dict(sys.modules, {}):
            # Optionally hide the quota_db module to simulate it being
            # unavailable, which should trigger the ConfDriver fallback.
            hide_quota_db = not with_quota_db_module
            if hide_quota_db and 'neutron.db.quota_db' in sys.modules:
                del sys.modules['neutron.db.quota_db']
            driver = quota.QUOTAS.get_driver()
            self.assertEqual(loaded_driver, driver.__class__.__name__)

    def test_quota_db_driver_with_quotas_table(self):
        self._test_quota_driver(
            'neutron.db.quota_db.DbQuotaDriver', 'DbQuotaDriver', True)

    def test_quota_db_driver_fallback_conf_driver(self):
        self._test_quota_driver(
            'neutron.db.quota_db.DbQuotaDriver', 'ConfDriver', False)

    def test_quota_conf_driver(self):
        self._test_quota_driver(
            'neutron.quota.ConfDriver', 'ConfDriver', True)
| apache-2.0 |
mitsuhiko/sqlalchemy | lib/sqlalchemy/sql/elements.py | 2 | 80420 | # sql/elements.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
import re
import operator
def _clone(element, **kw):
    # Default clone callable used by the traversal utilities; extra
    # keyword arguments supplied by the traversal are ignored.
    return element._clone()
def collate(expression, collation):
    """Return the clause ``expression COLLATE collation``.

    For example, ``collate(mycolumn, 'utf8_bin')`` produces
    ``mycolumn COLLATE utf8_bin``.

    """
    coerced = _literal_as_binds(expression)
    collation_clause = _literal_as_text(collation)
    return BinaryExpression(
        coerced, collation_clause, operators.collate, type_=coerced.type)
def between(ctest, cleft, cright):
    """Return a ``BETWEEN`` predicate clause.

    Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``.

    The same functionality is available via the :meth:`between` method on
    all :class:`.ColumnElement` subclasses.

    """
    return _literal_as_binds(ctest).between(cleft, cright)
def literal(value, type_=None):
    """Return a literal clause, bound to a bind parameter.

    Literal clauses are created automatically when non-
    :class:`.ClauseElement` objects (such as strings, ints, dates, etc.)
    are used in a comparison operation with a :class:`.ColumnElement`
    subclass, such as a :class:`~sqlalchemy.schema.Column` object.  Use
    this function to force the generation of a literal clause, which will
    be created as a :class:`BindParameter` with a bound value.

    :param value: the value to be bound.  Can be any Python object
     supported by the underlying DB-API, or is translatable via the given
     type argument.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
     will provide bind-parameter translation for this literal.

    """
    # An anonymous, unique bind parameter carries the literal value.
    return BindParameter(None, value, type_=type_, unique=True)
def type_coerce(expr, type_):
    """Coerce the given expression into the given type,
    on the Python side only.

    :func:`.type_coerce` is roughly similar to :func:`.cast`, except no
    "CAST" expression is rendered - the given type is only applied towards
    expression typing and against received result values.

    e.g.::

        from sqlalchemy.types import TypeDecorator
        import uuid

        class AsGuid(TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                if value is not None:
                    return str(value)
                else:
                    return None

            def process_result_value(self, value, dialect):
                if value is not None:
                    return uuid.UUID(value)
                else:
                    return None

        conn.execute(
            select([type_coerce(mytable.c.ident, AsGuid)]).\\
                    where(
                        type_coerce(mytable.c.ident, AsGuid) ==
                        uuid.uuid3(uuid.NAMESPACE_URL, 'bar')
                    )
        )

    """
    type_ = type_api.to_instance(type_)

    if hasattr(expr, '__clause_expr__'):
        # Bug fix: propagate ``type_`` through the recursive call.
        # Previously the second argument was dropped, so objects exposing
        # __clause_expr__() silently lost the requested coercion.
        return type_coerce(expr.__clause_expr__(), type_)
    elif isinstance(expr, BindParameter):
        # re-type an existing bound parameter on a copy of it
        bp = expr._clone()
        bp.type = type_
        return bp
    elif not isinstance(expr, Visitable):
        if expr is None:
            return Null()
        else:
            # plain Python value: wrap as a literal of the target type
            return literal(expr, type_=type_)
    else:
        # SQL expression: wrap in an anonymous Label carrying the type
        return Label(None, expr, type_=type_)
def outparam(key, type_=None):
    """Create an 'OUT' parameter for usage in functions (stored procedures),
    for databases which support them.

    The ``outparam`` can be used like a regular function parameter.
    The "output" value will be available from the
    :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
    attribute, which returns a dictionary containing the values.

    """
    param = BindParameter(
        key, None, type_=type_, unique=False, isoutparam=True)
    return param
def and_(*clauses):
    """Join a list of clauses together using the ``AND`` operator.

    The ``&`` operator is also overloaded on all :class:`.ColumnElement`
    subclasses to produce the same result.

    """
    if len(clauses) == 1:
        # a single clause needs no conjunction
        (clause,) = clauses
        return clause
    return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
    """Join a list of clauses together using the ``OR`` operator.

    The ``|`` operator is also overloaded on all :class:`.ColumnElement`
    subclasses to produce the same result.

    """
    if len(clauses) == 1:
        # a single clause needs no disjunction
        (clause,) = clauses
        return clause
    return BooleanClauseList(operator=operators.or_, *clauses)
def not_(clause):
    """Return a negation of the given clause, i.e. ``NOT(clause)``.

    The ``~`` operator is also overloaded on all
    :class:`.ColumnElement` subclasses to produce the same result.

    """
    coerced = _literal_as_binds(clause)
    return operators.inv(coerced)
@inspection._self_inspects
class ClauseElement(Visitable):
    """Base class for elements of a programmatically constructed SQL
    expression.

    """
    __visit_name__ = 'clause'

    # default (empty) annotations mapping; populated copies are produced
    # via _annotate() / Annotated
    _annotations = {}

    # True for constructs which can be executed directly (statements)
    supports_execution = False

    # FROM objects this element contributes to an enclosing statement
    _from_objects = []

    # Engine/Connection binding, if any; consulted by compile()
    bind = None

    # set by _clone() to point at the element this one was cloned from
    _is_clone_of = None

    # class-level flags consulted in lieu of isinstance() checks
    is_selectable = False
    is_clause_element = True

    # NOTE(review): appears to name the element used when matching ORDER
    # BY expressions against labeled columns -- confirm against compiler
    # usage before relying on this.
    _order_by_label_element = None
    def _clone(self):
        """Create a shallow copy of this ClauseElement.

        This method may be used by a generative API.  It's also used as
        part of the "deep" copy afforded by a traversal that combines
        the _copy_internals() method.

        """
        c = self.__class__.__new__(self.__class__)
        c.__dict__ = self.__dict__.copy()
        # reset memoized values so they are not shared with the original
        ClauseElement._cloned_set._reset(c)
        ColumnElement.comparator._reset(c)

        # this is a marker that helps to "equate" clauses to each other
        # when a Select returns its list of FROM clauses.  the cloning
        # process leaves around a lot of remnants of the previous clause
        # typically in the form of column expressions still attached to the
        # old table.
        c._is_clone_of = self

        return c
    @property
    def _constructor(self):
        """return the 'constructor' for this ClauseElement.

        This is for the purposes for creating a new object of
        this type.   Usually, it's just the element's __class__.
        However, the "Annotated" version of the object overrides
        to return the class of its proxied element.

        """
        return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
    def _annotate(self, values):
        """return a copy of this ClauseElement with annotations
        updated by the given dictionary.

        """
        return Annotated(self, values)
    def _with_annotations(self, values):
        """return a copy of this ClauseElement with annotations
        replaced by the given dictionary.

        """
        return Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`.ClauseElement` with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
    def unique_params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.

        Same functionality as ``params()``, except adds `unique=True`
        to affected bind parameters so that multiple statements can be
        used.

        """
        return self._params(True, optionaldict, kwargs)
    def params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.

        Returns a copy of this ClauseElement with :func:`bindparam()`
        elements replaced with values taken from the given dictionary::

          >>> clause = column('x') + bindparam('foo')
          >>> print clause.compile().params
          {'foo':None}
          >>> print clause.params({'foo':7}).compile().params
          {'foo':7}

        Keys which do not match any bound parameter are ignored
        (see :meth:`._params`).

        """
        return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
    def compare(self, other, **kw):
        """Compare this ClauseElement to the given ClauseElement.

        Subclasses should override the default behavior, which is a
        straight identity comparison.

        \**kw are arguments consumed by subclass compare() methods and
        may be used to modify the criteria for comparison.
        (see :class:`.ColumnElement`)

        """
        return self is other
    def _copy_internals(self, clone=_clone, **kw):
        """Reassign internal elements to be clones of themselves.

        Called during a copy-and-traverse operation on newly
        shallow-copied elements to create a deep copy.

        The given clone function should be used, which may be applying
        additional transformations to the element (i.e. replacement
        traversal, cloned traversal, annotations).

        """
        # no internal elements on the base class
        pass
    def get_children(self, **kwargs):
        """Return immediate child elements of this :class:`.ClauseElement`.

        This is used for visit traversal.

        \**kwargs may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).

        """
        # the base element has no children
        return []
    def self_group(self, against=None):
        """Apply a 'grouping' to this :class:`.ClauseElement`.

        This method is overridden by subclasses to return a "grouping"
        construct, i.e. parenthesis.   In particular it's used by "binary"
        expressions to provide a grouping around themselves when placed
        into a larger expression, as well as by :func:`.select`
        constructs when placed into the FROM clause of another
        :func:`.select`.  (Note that subqueries should be normally
        created using the :func:`.Select.alias` method, as many platforms
        require nested SELECT statements to be named).

        As expressions are composed together, the application of
        :meth:`self_group` is automatic - end-user code should never
        need to use this method directly.  Note that SQLAlchemy's
        clause constructs take operator precedence into account -
        so parenthesis might not be needed, for example, in
        an expression like ``x OR (y AND z)`` - AND takes precedence
        over OR.

        The base :meth:`self_group` method of :class:`.ClauseElement`
        just returns self.

        :param against: optional operator against which the grouping
         decision is being made; unused by the base implementation.

        """
        return self
@util.dependencies("sqlalchemy.engine.default")
def compile(self, default, bind=None, dialect=None, **kw):
    """Compile this SQL expression.

    The return value is a :class:`~.Compiled` object.
    Calling ``str()`` or ``unicode()`` on the returned value will yield a
    string representation of the result. The
    :class:`~.Compiled` object also can return a
    dictionary of bind parameter names and values
    using the ``params`` accessor.

    :param bind: An ``Engine`` or ``Connection`` from which a
       ``Compiled`` will be acquired. This argument takes precedence over
       this :class:`.ClauseElement`'s bound engine, if any.

    :param column_keys: Used for INSERT and UPDATE statements, a list of
        column names which should be present in the VALUES clause of the
        compiled statement. If ``None``, all columns from the target table
        object are rendered.

    :param dialect: A ``Dialect`` instance from which a ``Compiled``
       will be acquired. This argument takes precedence over the `bind`
       argument as well as this :class:`.ClauseElement`'s bound engine, if
       any.

    :param inline: Used for INSERT statements, for a dialect which does
       not support inline retrieval of newly generated primary key
       columns, will force the expression used to create the new primary
       key value to be rendered inline within the INSERT statement's
       VALUES clause. This typically refers to Sequence execution but may
       also refer to any server-side default generation function
       associated with a primary key `Column`.
    """
    # ``default`` is injected by the @util.dependencies decorator (the
    # sqlalchemy.engine.default module); it is not a caller argument.
    # Dialect resolution precedence: explicit ``dialect`` argument,
    # then the dialect of an explicit ``bind``, then this element's
    # own bound engine, and finally a generic DefaultDialect.
    if not dialect:
        if bind:
            dialect = bind.dialect
        elif self.bind:
            dialect = self.bind.dialect
            bind = self.bind
        else:
            dialect = default.DefaultDialect()
    return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
    """Return a :class:`.Compiled` object appropriate for this
    ClauseElement, as produced by the given Dialect."""
    # Statements use the dialect's statement compiler; DDL elements
    # override this hook to select a DDL compiler instead.
    compiler_factory = dialect.statement_compiler
    return compiler_factory(dialect, self, **kw)
def __str__(self):
    # Compile against the element's bind (or a default dialect) and
    # stringify.  On Python 3 the compiled SQL is already text; on
    # Python 2 the unicode result is coerced to an ascii-safe byte
    # string using backslash escapes.
    if util.py3k:
        return str(self.compile())
    else:
        return unicode(self.compile()).encode('ascii', 'backslashreplace')
def __and__(self, other):
    # ``a & b`` produces ``and_(a, b)``, i.e. SQL ``AND``.
    return and_(self, other)
def __or__(self, other):
    # ``a | b`` produces ``or_(a, b)``, i.e. SQL ``OR``.
    return or_(self, other)
def __invert__(self):
    # ``~a`` produces the SQL negation (``NOT``) of this clause.
    return self._negate()
def __bool__(self):
    # A SQL expression has no Python truth value; evaluating
    # ``if some_clause:`` is almost always a bug (e.g. meaning to
    # compare against a column), so raise instead of guessing.
    raise TypeError("Boolean value of this clause is not defined")

# Python 2 spelling of ``__bool__``.
__nonzero__ = __bool__
def _negate(self):
    # Use a precomputed negation when the element supplies one
    # (e.g. EXISTS); otherwise wrap this element in a NOT unary
    # expression, grouping it against the inversion operator.
    if hasattr(self, 'negation_clause'):
        return self.negation_clause
    else:
        return UnaryExpression(
            self.self_group(against=operators.inv),
            operator=operators.inv,
            negate=None)
def __repr__(self):
    """Debug representation; includes the element's ``description``
    attribute when one is present."""
    friendly = getattr(self, 'description', None)
    if friendly is not None:
        return '<%s.%s at 0x%x; %s>' % (
            self.__module__, self.__class__.__name__, id(self), friendly)
    return object.__repr__(self)
class ColumnElement(ClauseElement, operators.ColumnOperators):
    """Represent a column-oriented SQL expression suitable for usage in the
    "columns" clause, WHERE clause etc. of a statement.

    While the most familiar kind of :class:`.ColumnElement` is the
    :class:`.Column` object, :class:`.ColumnElement` serves as the basis
    for any unit that may be present in a SQL expression, including
    the expressions themselves, SQL functions, bound parameters,
    literal expressions, keywords such as ``NULL``, etc.
    :class:`.ColumnElement` is the ultimate base class for all such elements.

    A :class:`.ColumnElement` provides the ability to generate new
    :class:`.ColumnElement`
    objects using Python expressions.  This means that Python operators
    such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
    and allow the instantiation of further :class:`.ColumnElement` instances
    which are composed from other, more fundamental :class:`.ColumnElement`
    objects.  For example, two :class:`.ColumnClause` objects can be added
    together with the addition operator ``+`` to produce
    a :class:`.BinaryExpression`.
    Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
    of :class:`.ColumnElement`::

        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b

    :class:`.ColumnElement` supports the ability to be a *proxy* element,
    which indicates that the :class:`.ColumnElement` may be associated with
    a :class:`.Selectable` which was derived from another :class:`.Selectable`.
    An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a
    :class:`~sqlalchemy.schema.Table`.  For the ambitious, an in-depth
    discussion of this concept can be found at
    `Expression Transformations <http://techspot.zzzeek.org/2008/01/23/expression-transformations/>`_.

    """

    __visit_name__ = 'column'

    # Class-level defaults; subclasses (e.g. Column) override as needed.
    primary_key = False
    foreign_keys = []
    _label = None
    _key_label = None
    _alt_names = ()

    @util.memoized_property
    def type(self):
        # Default SQL type when none has been assigned: NullType.
        return type_api.NULLTYPE

    @util.memoized_property
    def comparator(self):
        # Operator-implementation object produced by this element's
        # type; drives the behavior of ==, +, in_(), etc.
        return self.type.comparator_factory(self)

    def __getattr__(self, key):
        # Fall back to the comparator for unknown attributes, so that
        # type-specific operator methods appear on the element itself.
        try:
            return getattr(self.comparator, key)
        except AttributeError:
            raise AttributeError(
                'Neither %r object nor %r object has an attribute %r' % (
                    type(self).__name__,
                    type(self.comparator).__name__,
                    key)
            )

    def operate(self, op, *other, **kwargs):
        # Dispatch an operator invocation to the comparator.
        return op(self.comparator, *other, **kwargs)

    def reverse_operate(self, op, other, **kwargs):
        # Dispatch a reflected operator (e.g. ``literal + col``).
        return op(other, self.comparator, **kwargs)

    def _bind_param(self, operator, obj):
        # Wrap a literal value in an anonymous, unique bind parameter
        # typed relative to this element.
        return BindParameter(None, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type, unique=True)

    @property
    def expression(self):
        """Return a column expression.

        Part of the inspection interface; returns self.

        """
        return self

    @property
    def _select_iterable(self):
        # When placed in a select()'s columns clause, this element
        # stands for itself only.
        return (self, )

    @util.memoized_property
    def base_columns(self):
        # The "root" (non-proxy) columns underlying this element.
        return util.column_set(c for c in self.proxy_set
                               if not hasattr(c, '_proxies'))

    @util.memoized_property
    def proxy_set(self):
        # This element plus, transitively, every column it proxies for.
        s = util.column_set([self])
        if hasattr(self, '_proxies'):
            for c in self._proxies:
                s.update(c.proxy_set)
        return s

    def shares_lineage(self, othercolumn):
        """Return True if the given :class:`.ColumnElement`
        has a common ancestor to this :class:`.ColumnElement`."""

        return bool(self.proxy_set.intersection(othercolumn.proxy_set))

    def _compare_name_for_result(self, other):
        """Return True if the given column element compares to this one
        when targeting within a result row."""

        return hasattr(other, 'name') and hasattr(self, 'name') and \
            other.name == self.name

    def _make_proxy(self, selectable, name=None, name_is_truncatable=False, **kw):
        """Create a new :class:`.ColumnElement` representing this
        :class:`.ColumnElement` as it appears in the select list of a
        descending selectable.

        """
        if name is None:
            name = self.anon_label
            # str(self) compiles the expression; it may fail for
            # elements with no default-dialect compilation.
            try:
                key = str(self)
            except exc.UnsupportedCompilationError:
                key = self.anon_label
        else:
            key = name
        co = ColumnClause(
            _as_truncated(name) if name_is_truncatable else name,
            type_=getattr(self, 'type', None),
            _selectable=selectable
        )
        # Record this element as the proxied ancestor, and carry the
        # clone correspondence through to the new column.
        co._proxies = [self]
        if selectable._is_clone_of is not None:
            co._is_clone_of = \
                selectable._is_clone_of.columns.get(key)
        selectable._columns[key] = co
        return co

    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.

        Special arguments understood:

        :param use_proxies: when True, consider two columns that
          share a common base column as equivalent (i.e. shares_lineage())

        :param equivalents: a dictionary of columns as keys mapped to sets
          of columns. If the given "other" column is present in this
          dictionary, if any of the columns in the corresponding set() pass the
          comparison test, the result is True. This is used to expand the
          comparison to other columns that may be known to be equivalent to
          this one via foreign key or other criterion.

        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)

        # for/else: ``else`` runs only when no candidate matched.
        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif hash(oth) == hash(self):
                return True
        else:
            return False

    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.

        This is a shortcut to the :func:`~.expression.label` function.

        if 'name' is None, an anonymous label name will be generated.

        """
        return Label(name, self, self.type)

    @util.memoized_property
    def anon_label(self):
        """provides a constant 'anonymous label' for this ColumnElement.

        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.

        the compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.

        """
        return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self,
                                                'name', 'anon')))
class BindParameter(ColumnElement):
    """Represent a bound parameter value.

    """

    __visit_name__ = 'bindparam'

    # Marker used by the compiler to distinguish INSERT/UPDATE "crud"
    # binds from ordinary expression binds.
    _is_crud = False

    def __init__(self, key, value=NO_ARG, type_=None,
                 unique=False, required=NO_ARG,
                 quote=None, callable_=None,
                 isoutparam=False,
                 _compared_to_operator=None,
                 _compared_to_type=None):
        """Construct a new :class:`.BindParameter`.

        :param key:
          the key for this bind param.  Will be used in the generated
          SQL statement for dialects that use named parameters.  This
          value may be modified when part of a compilation operation,
          if other :class:`BindParameter` objects exist with the same
          key, or if its length is too long and truncation is
          required.

        :param value:
          Initial value for this bind param.  This value may be
          overridden by the dictionary of parameters sent to statement
          compilation/execution.

          Defaults to ``None``, however if neither ``value`` nor
          ``callable`` are passed explicitly, the ``required`` flag will be
          set to ``True`` which has the effect of requiring a value be present
          when the statement is actually executed.

          .. versionchanged:: 0.8 The ``required`` flag is set to ``True``
             automatically if ``value`` or ``callable`` is not passed.

        :param callable\_:
          A callable function that takes the place of "value".  The function
          will be called at statement execution time to determine the
          ultimate value.   Used for scenarios where the actual bind
          value cannot be determined at the point at which the clause
          construct is created, but embedded bind values are still desirable.

        :param type\_:
          A ``TypeEngine`` object that will be used to pre-process the
          value corresponding to this :class:`BindParameter` at
          execution time.

        :param unique:
          if True, the key name of this BindParamClause will be
          modified if another :class:`BindParameter` of the same name
          already has been located within the containing
          :class:`.ClauseElement`.

        :param required:
          If ``True``, a value is required at execution time.  If not passed,
          is set to ``True`` or ``False`` based on whether or not
          one of ``value`` or ``callable`` were passed.

          .. versionchanged:: 0.8 If the ``required`` flag is not specified,
             it will be set automatically to ``True`` or ``False`` depending
             on whether or not the ``value`` or ``callable`` parameters
             were specified.

        :param quote:
          True if this parameter name requires quoting and is not
          currently known as a SQLAlchemy reserved word; this currently
          only applies to the Oracle backend.

        :param isoutparam:
          if True, the parameter should be treated like a stored procedure
          "OUT" parameter.

          .. seealso::

              :func:`.outparam`

        """
        # Allow a Column to be passed in place of a key; adopt its
        # name and type.
        if isinstance(key, ColumnClause):
            type_ = key.type
            key = key.name
        # NO_ARG sentinel distinguishes "value not passed" from
        # "value explicitly None".
        if required is NO_ARG:
            required = (value is NO_ARG and callable_ is None)
        if value is NO_ARG:
            value = None

        if quote is not None:
            key = quoted_name(key, quote)

        if unique:
            # Embed id(self) so the anonymous key is unique per bind.
            self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
                                                        or 'param'))
        else:
            self.key = key or _anonymous_label('%%(%d param)s'
                                               % id(self))

        # identifying key that won't change across
        # clones, used to identify the bind's logical
        # identity
        self._identifying_key = self.key

        # key that was passed in the first place, used to
        # generate new keys
        self._orig_key = key or 'param'

        self.unique = unique
        self.value = value
        self.callable = callable_
        self.isoutparam = isoutparam
        self.required = required
        if type_ is None:
            if _compared_to_type is not None:
                # Infer the type from the expression this bind is being
                # compared against.
                self.type = \
                    _compared_to_type.coerce_compared_value(
                        _compared_to_operator, value)
            else:
                # Otherwise map the Python type of the value, falling
                # back to NullType.
                self.type = type_api._type_map.get(type(value),
                                                   type_api.NULLTYPE)
        elif isinstance(type_, type):
            # A type class was given; instantiate it.
            self.type = type_()
        else:
            self.type = type_

    @property
    def effective_value(self):
        """Return the value of this bound parameter,
        taking into account if the ``callable`` parameter
        was set.

        The ``callable`` value will be evaluated
        and returned if present, else ``value``.

        """
        if self.callable:
            return self.callable()
        else:
            return self.value

    def _clone(self):
        # A unique bind gets a fresh anonymous key per clone, derived
        # from the originally-passed key.
        c = ClauseElement._clone(self)
        if self.unique:
            c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
                                                     or 'param'))
        return c

    def _convert_to_unique(self):
        # In-place switch to unique-key behavior (used when a statement
        # discovers colliding bind names).
        if not self.unique:
            self.unique = True
            self.key = _anonymous_label('%%(%d %s)s' % (id(self),
                                                        self._orig_key or 'param'))

    def compare(self, other, **kw):
        """Compare this :class:`BindParameter` to the given
        clause."""

        return isinstance(other, BindParameter) \
            and self.type._compare_type_affinity(other.type) \
            and self.value == other.value

    def __getstate__(self):
        """execute a deferred value for serialization purposes."""

        d = self.__dict__.copy()
        v = self.value
        if self.callable:
            # Callables may not be picklable; resolve now and drop it.
            v = self.callable()
            d['callable'] = None
        d['value'] = v
        return d

    def __repr__(self):
        return 'BindParameter(%r, %r, type_=%r)' % (self.key,
                                                    self.value, self.type)
class TypeClause(ClauseElement):
    """Handle a type keyword in a SQL statement.

    Used by the ``Case`` statement.

    """

    __visit_name__ = 'typeclause'

    def __init__(self, type):
        # Wraps a TypeEngine so it can be rendered (e.g. inside CAST).
        self.type = type
class TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment.

    Public constructor is the :func:`text()` function.

    """

    __visit_name__ = 'textclause'

    # Matches ``:name`` bind-parameter markers, skipping ``::`` casts
    # and backslash-escaped colons (\x5c is the backslash).
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = \
        Executable._execution_options.union(
            {'autocommit': PARSE_AUTOCOMMIT})

    @property
    def _select_iterable(self):
        return (self,)

    @property
    def selectable(self):
        return self

    _hide_froms = []

    def __init__(
            self,
            text='',
            bind=None,
            bindparams=None,
            typemap=None,
            autocommit=None):
        """Construct a new :class:`.TextClause` clause.

        E.g.::

            from sqlalchemy import text

            t = text("SELECT * FROM users")
            result = connection.execute(t)

        The advantages :func:`.text` provides over a plain string are
        backend-neutral support for bind parameters, per-statement
        execution options, as well as
        bind parameter and result-column typing behavior, allowing
        SQLAlchemy type constructs to play a role when executing
        a statement that is specified literally.

        Bind parameters are specified by name, using the format ``:name``.
        E.g.::

            t = text("SELECT * FROM users WHERE id=:user_id")
            result = connection.execute(t, user_id=12)

        To invoke SQLAlchemy typing logic for bind parameters, the
        ``bindparams`` list allows specification of :func:`bindparam`
        constructs which specify the type for a given name::

            t = text("SELECT id FROM users WHERE updated_at>:updated",
                     bindparams=[bindparam('updated', DateTime())]
                     )

        Typing during result row processing is also an important concern.
        Result column types
        are specified using the ``typemap`` dictionary, where the keys
        match the names of columns.  These names are taken from what
        the DBAPI returns as ``cursor.description``::

            t = text("SELECT id, name FROM users",
                     typemap={
                         'id':Integer,
                         'name':Unicode
                     }
                     )

        The :func:`text` construct is used internally for most cases when
        a literal string is specified for part of a larger query, such as
        within :func:`select()`, :func:`update()`,
        :func:`insert()` or :func:`delete()`.   In those cases, the same
        bind parameter syntax is applied::

            s = select([users.c.id, users.c.name]).where("id=:user_id")
            result = connection.execute(s, user_id=12)

        Using :func:`text` explicitly usually implies the construction
        of a full, standalone statement.   As such, SQLAlchemy refers
        to it as an :class:`.Executable` object, and it supports
        the :meth:`Executable.execution_options` method.  For example,
        a :func:`text` construct that should be subject to "autocommit"
        can be set explicitly so using the ``autocommit`` option::

            t = text("EXEC my_procedural_thing()").\\
                    execution_options(autocommit=True)

        Note that SQLAlchemy's usual "autocommit" behavior applies to
        :func:`text` constructs - that is, statements which begin
        with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
        or a variety of other phrases specific to certain backends, will
        be eligible for autocommit if no transaction is in progress.

        :param text:
          the text of the SQL statement to be created.  use ``:<param>``
          to specify bind parameters; they will be compiled to their
          engine-specific format.

        :param autocommit:
          Deprecated.  Use .execution_options(autocommit=<True|False>)
          to set the autocommit option.

        :param bind:
          an optional connection or engine to be used for this text query.

        :param bindparams:
          a list of :func:`bindparam()` instances which can be used to define
          the types and/or initial values for the bind parameters within
          the textual statement; the keynames of the bindparams must match
          those within the text of the statement.  The types will be used
          for pre-processing on bind values.

        :param typemap:
          a dictionary mapping the names of columns represented in the
          columns clause of a ``SELECT`` statement  to type objects,
          which will be used to perform post-processing on columns within
          the result set.   This argument applies to any expression
          that returns result sets.

        """
        self._bind = bind
        self.bindparams = {}
        self.typemap = typemap
        if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated.  '
                                 'Use .execution_options(autocommit=Tru'
                                 'e)')
            self._execution_options = \
                self._execution_options.union(
                    {'autocommit': autocommit})
        if typemap is not None:
            # Normalize type classes to instances.
            for key in typemap:
                typemap[key] = type_api.to_instance(typemap[key])

        def repl(m):
            # For each ``:name`` found, register an (untyped) bind
            # parameter and re-emit the marker unchanged.
            self.bindparams[m.group(1)] = BindParameter(m.group(1))
            return ':%s' % m.group(1)

        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)
        if bindparams is not None:
            # Explicit bindparam() objects override the auto-created
            # untyped ones by key.
            for b in bindparams:
                self.bindparams[b.key] = b

    @property
    def type(self):
        # A single-entry typemap implies a single-column expression;
        # otherwise the type is unknown.
        if self.typemap is not None and len(self.typemap) == 1:
            return list(self.typemap)[0]
        else:
            return type_api.NULLTYPE

    @property
    def comparator(self):
        return self.type.comparator_factory(self)

    def self_group(self, against=None):
        # Only parenthesize raw text when used as the target of IN.
        if against is operators.in_op:
            return Grouping(self)
        else:
            return self

    def _copy_internals(self, clone=_clone, **kw):
        self.bindparams = dict((b.key, clone(b, **kw))
                               for b in self.bindparams.values())

    def get_children(self, **kwargs):
        return list(self.bindparams.values())
class Null(ColumnElement):
    """Represent the NULL keyword in a SQL statement.

    """

    __visit_name__ = 'null'

    def __init__(self):
        """Return a :class:`Null` object, which compiles to ``NULL``.

        """
        self.type = type_api.NULLTYPE

    def compare(self, other):
        # All NULL markers are interchangeable.
        return isinstance(other, Null)
class False_(ColumnElement):
    """Represent the ``false`` keyword in a SQL statement.

    """

    __visit_name__ = 'false'

    def __init__(self):
        """Return a :class:`False_` object.

        """
        self.type = type_api.BOOLEANTYPE

    def compare(self, other):
        # All ``false`` markers are interchangeable.
        return isinstance(other, False_)
class True_(ColumnElement):
    """Represent the ``true`` keyword in a SQL statement.

    """

    __visit_name__ = 'true'

    def __init__(self):
        """Return a :class:`True_` object.

        """
        self.type = type_api.BOOLEANTYPE

    def compare(self, other):
        # All ``true`` markers are interchangeable.
        return isinstance(other, True_)
class ClauseList(ClauseElement):
    """Describe a list of clauses, separated by an operator.

    By default, is comma-separated, such as a column listing.

    """
    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        # operator: the separator rendered between clauses.
        # group: whether this list may parenthesize itself.
        # group_contents: whether each member is grouped against the
        # list's operator as it is added.
        self.operator = kwargs.pop('operator', operators.comma_op)
        self.group = kwargs.pop('group', True)
        self.group_contents = kwargs.pop('group_contents', True)
        # None members are silently dropped in both branches.
        if self.group_contents:
            self.clauses = [
                _literal_as_text(clause).self_group(against=self.operator)
                for clause in clauses if clause is not None]
        else:
            self.clauses = [
                _literal_as_text(clause)
                for clause in clauses if clause is not None]

    def __iter__(self):
        return iter(self.clauses)

    def __len__(self):
        return len(self.clauses)

    @property
    def _select_iterable(self):
        return iter(self)

    def append(self, clause):
        # TODO: not sure if i like the 'group_contents' flag.  need to
        # define the difference between a ClauseList of ClauseLists,
        # and a "flattened" ClauseList of ClauseLists.  flatten()
        # method ?
        if self.group_contents:
            self.clauses.append(_literal_as_text(clause).\
                                self_group(against=self.operator))
        else:
            self.clauses.append(_literal_as_text(clause))

    def _copy_internals(self, clone=_clone, **kw):
        self.clauses = [clone(clause, **kw) for clause in self.clauses]

    def get_children(self, **kwargs):
        return self.clauses

    @property
    def _from_objects(self):
        # Union of FROM objects contributed by every member clause.
        return list(itertools.chain(*[c._from_objects for c in self.clauses]))

    def self_group(self, against=None):
        # Parenthesize only when grouping is enabled and our operator
        # binds less tightly than the surrounding one.
        if self.group and operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self

    def compare(self, other, **kw):
        """Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
        including a comparison of all the clause items.

        """
        # A one-element list compares as its single member against a
        # non-list element.
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            return self.clauses[0].compare(other, **kw)
        elif isinstance(other, ClauseList) and \
                len(self.clauses) == len(other.clauses):
            # for/else: the ``else`` runs only if every pair matched,
            # in which case equality reduces to operator equality.
            for i in range(0, len(self.clauses)):
                if not self.clauses[i].compare(other.clauses[i], **kw):
                    return False
            else:
                return self.operator == other.operator
        else:
            return False
class BooleanClauseList(ClauseList, ColumnElement):
    """A :class:`.ClauseList` joined by a boolean operator (AND/OR),
    usable as a column expression."""

    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        # ClauseList.__init__ pops its own kwargs and ignores 'type_',
        # which is read back out of kwargs here.
        super(BooleanClauseList, self).__init__(*clauses, **kwargs)
        self.type = type_api.to_instance(kwargs.get('type_',
                                                    type_api.BOOLEANTYPE))

    @property
    def _select_iterable(self):
        # As a column expression, the whole boolean list is one column.
        return (self, )

    def self_group(self, against=None):
        # An empty boolean list renders as nothing; don't parenthesize.
        if not self.clauses:
            return self
        else:
            return super(BooleanClauseList, self).self_group(against=against)
class Tuple(ClauseList, ColumnElement):
    """Represent a SQL tuple."""

    def __init__(self, *clauses, **kw):
        """Return a :class:`.Tuple`.

        Main usage is to produce a composite IN construct::

            from sqlalchemy import tuple_

            tuple_(table.c.col1, table.c.col2).in_(
                [(1, 2), (5, 12), (10, 19)]
            )

        .. warning::

            The composite IN construct is not supported by all backends,
            and is currently known to work on Postgresql and MySQL,
            but not SQLite.   Unsupported backends will raise
            a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
            an expression is invoked.

        """
        clauses = [_literal_as_binds(c) for c in clauses]
        # Without an explicit type_, derive the type from the members.
        self.type = kw.pop('type_', None)
        if self.type is None:
            self.type = _type_from_args(clauses)
        super(Tuple, self).__init__(*clauses, **kw)

    @property
    def _select_iterable(self):
        return (self, )

    def _bind_param(self, operator, obj):
        # A compared-to literal tuple becomes a Tuple of binds, one per
        # member, grouped (parenthesized) as a unit.
        return Tuple(*[
            BindParameter(None, o, _compared_to_operator=operator,
                          _compared_to_type=self.type, unique=True)
            for o in obj
        ]).self_group()
class Case(ColumnElement):
    """Represent a SQL ``CASE`` construct.

    """
    __visit_name__ = 'case'

    def __init__(self, whens, value=None, else_=None):
        """Produce a :class:`.Case` object.

        :param whens: A sequence of pairs, or alternatively a dict,
          to be translated into "WHEN / THEN" clauses.

        :param value: Optional for simple case statements, produces
          a column expression as in "CASE <expr> WHEN ..."

        :param else\_: Optional as well, for case defaults produces
          the "ELSE" portion of the "CASE" statement.

        The expressions used for THEN and ELSE,
        when specified as strings, will be interpreted
        as bound values. To specify textual SQL expressions
        for these, use the :func:`literal_column`
        construct.

        The expressions used for the WHEN criterion
        may only be literal strings when "value" is
        present, i.e. CASE table.somecol WHEN "x" THEN "y".
        Otherwise, literal strings are not accepted
        in this position, and either the text(<string>)
        or literal(<string>) constructs must be used to
        interpret raw string values.

        Usage examples::

            case([(orderline.c.qty > 100, item.c.specialprice),
                  (orderline.c.qty > 10, item.c.bulkprice)
                  ], else_=item.c.regularprice)

            case(value=emp.c.type, whens={
                'engineer': emp.c.salary * 1.1,
                'manager': emp.c.salary * 3,
            })

        Using :func:`.literal_column()`, to allow for databases that
        do not support bind parameters in the ``then`` clause.  The type
        can be specified which determines the type of the :func:`case()` construct
        overall::

            case([(orderline.c.qty > 100,
                   literal_column("'greaterthan100'", String)),
                  (orderline.c.qty > 10, literal_column("'greaterthan10'",
                                                        String))
                  ], else_=literal_column("'lessthan10'", String))

        """
        # A dict of whens is normalized to an items() iterable; a
        # sequence of pairs raises TypeError here and passes through.
        try:
            whens = util.dictlike_iteritems(whens)
        except TypeError:
            pass

        if value is not None:
            # "Simple" CASE: WHEN criteria may be plain literals.
            whenlist = [
                (_literal_as_binds(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]
        else:
            # "Searched" CASE: WHEN criteria must be SQL expressions.
            whenlist = [
                (_no_literals(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]

        if whenlist:
            # The CASE expression's type is taken from the last THEN.
            type_ = list(whenlist[-1])[-1].type
        else:
            type_ = None

        if value is None:
            self.value = None
        else:
            self.value = _literal_as_binds(value)

        self.type = type_
        self.whens = whenlist
        if else_ is not None:
            self.else_ = _literal_as_binds(else_)
        else:
            self.else_ = None

    def _copy_internals(self, clone=_clone, **kw):
        if self.value is not None:
            self.value = clone(self.value, **kw)
        self.whens = [(clone(x, **kw), clone(y, **kw))
                      for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_, **kw)

    def get_children(self, **kwargs):
        # Generator: yields value (if any), each WHEN/THEN pair, then
        # the ELSE clause (if any).
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_

    @property
    def _from_objects(self):
        return list(itertools.chain(*[x._from_objects for x in
                                      self.get_children()]))
def literal_column(text, type_=None):
    """Return a textual column expression, as would be in the columns
    clause of a ``SELECT`` statement.

    The returned object supports further expressions in the same way as
    any other column object, including comparison, math and string
    operations.  The type\_ parameter is important to determine proper
    expression behavior (such as, '+' means string concatenation or
    numerical addition based on the type).

    :param text: the text of the expression; can be any SQL expression.
      Quoting rules will not be applied. To specify a column-name
      expression which should be subject to quoting rules, use the
      :func:`column` function.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      object which will provide result-set translation and additional
      expression semantics for this column. If left as None the type
      will be NullType.

    """
    # is_literal=True suppresses identifier quoting for the given text.
    column = ColumnClause(text, type_=type_, is_literal=True)
    return column
class Cast(ColumnElement):
    """Represent the SQL ``CAST`` construct."""

    __visit_name__ = 'cast'

    def __init__(self, clause, totype, **kwargs):
        """Return a :class:`.Cast` object.

        Equivalent of SQL ``CAST(clause AS totype)``.

        Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e::

          cast(table.c.unit_price * table.c.qty, Numeric(10,4))

        or::

          cast(table.c.timestamp, DATE)

        :class:`.Cast` is available using :func:`.cast` or alternatively
        ``func.cast`` from the :data:`.func` namespace.

        """
        self.type = type_api.to_instance(totype)
        self.clause = _literal_as_binds(clause, None)
        # TypeClause renders the target type name in the CAST SQL.
        self.typeclause = TypeClause(self.type)

    def _copy_internals(self, clone=_clone, **kw):
        self.clause = clone(self.clause, **kw)
        self.typeclause = clone(self.typeclause, **kw)

    def get_children(self, **kwargs):
        return self.clause, self.typeclause

    @property
    def _from_objects(self):
        return self.clause._from_objects
class Extract(ColumnElement):
    """Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""

    __visit_name__ = 'extract'

    def __init__(self, field, expr, **kwargs):
        """Return a :class:`.Extract` construct.

        This is typically available as :func:`.extract`
        as well as ``func.extract`` from the
        :data:`.func` namespace.

        :param field: the field to extract, e.g. ``'year'``.
        :param expr: the expression to extract the field from.
        """
        # EXTRACT always produces an integer result.
        self.type = type_api.INTEGERTYPE
        self.field = field
        self.expr = _literal_as_binds(expr, None)

    def _copy_internals(self, clone=_clone, **kw):
        self.expr = clone(self.expr, **kw)

    def get_children(self, **kwargs):
        return self.expr,

    @property
    def _from_objects(self):
        return self.expr._from_objects
class UnaryExpression(ColumnElement):
    """Define a 'unary' expression.

    A unary expression has a single column expression
    and an operator.  The operator can be placed on the left
    (where it is called the 'operator') or right (where it is called the
    'modifier') of the column expression.

    """
    __visit_name__ = 'unary'

    def __init__(self, element, operator=None, modifier=None,
                 type_=None, negate=None):
        self.operator = operator
        self.modifier = modifier
        # Group the inner element against whichever side is in use.
        self.element = _literal_as_text(element).\
            self_group(against=self.operator or self.modifier)
        self.type = type_api.to_instance(type_)
        # Optional precomputed inverse operator, used by _negate().
        self.negate = negate

    @classmethod
    def _create_nullsfirst(cls, column):
        """Return a NULLS FIRST ``ORDER BY`` clause element.

        e.g.::

            someselect.order_by(desc(table1.mycol).nullsfirst())

        produces::

            ORDER BY mycol DESC NULLS FIRST

        """
        return UnaryExpression(column, modifier=operators.nullsfirst_op)

    @classmethod
    def _create_nullslast(cls, column):
        """Return a NULLS LAST ``ORDER BY`` clause element.

        e.g.::

            someselect.order_by(desc(table1.mycol).nullslast())

        produces::

            ORDER BY mycol DESC NULLS LAST

        """
        return UnaryExpression(column, modifier=operators.nullslast_op)

    @classmethod
    def _create_desc(cls, column):
        """Return a descending ``ORDER BY`` clause element.

        e.g.::

            someselect.order_by(desc(table1.mycol))

        produces::

            ORDER BY mycol DESC

        """
        return UnaryExpression(column, modifier=operators.desc_op)

    @classmethod
    def _create_asc(cls, column):
        """Return an ascending ``ORDER BY`` clause element.

        e.g.::

            someselect.order_by(asc(table1.mycol))

        produces::

            ORDER BY mycol ASC

        """
        return UnaryExpression(column, modifier=operators.asc_op)

    @classmethod
    def _create_distinct(cls, expr):
        """Return a ``DISTINCT`` clause.

        e.g.::

            distinct(a)

        renders::

            DISTINCT a

        """
        expr = _literal_as_binds(expr)
        return UnaryExpression(expr,
                               operator=operators.distinct_op, type_=expr.type)

    @util.memoized_property
    def _order_by_label_element(self):
        # ASC/DESC wrappers are transparent for ORDER BY label matching.
        if self.modifier in (operators.desc_op, operators.asc_op):
            return self.element._order_by_label_element
        else:
            return None

    @property
    def _from_objects(self):
        return self.element._from_objects

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return self.element,

    def compare(self, other, **kw):
        """Compare this :class:`UnaryExpression` against the given
        :class:`.ClauseElement`."""

        return (
            isinstance(other, UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )

    def _negate(self):
        # Use the precomputed inverse when present, swapping operator
        # and negate so double negation round-trips.
        if self.negate is not None:
            return UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type)
        else:
            return super(UnaryExpression, self)._negate()

    def self_group(self, against=None):
        if self.operator and operators.is_precedent(self.operator,
                                                    against):
            return Grouping(self)
        else:
            return self
class BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``.

    A :class:`.BinaryExpression` is generated automatically
    whenever two column expressions are used in a Python binary expresion::

        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b

    """

    __visit_name__ = 'binary'

    def __init__(self, left, right, operator, type_=None,
                 negate=None, modifiers=None):
        # allow compatibility with libraries that
        # refer to BinaryExpression directly and pass strings
        if isinstance(operator, util.string_types):
            operator = operators.custom_op(operator)
        # Keep the raw operands for the __bool__ identity shortcut.
        self._orig = (left, right)
        self.left = _literal_as_text(left).self_group(against=operator)
        self.right = _literal_as_text(right).self_group(against=operator)
        self.operator = operator
        self.type = type_api.to_instance(type_)
        self.negate = negate

        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers

    def __bool__(self):
        # ``operator`` here is the stdlib module (not the local
        # ``operators``): for == / != comparisons the Python truth
        # value is determined by operand hash identity, supporting
        # e.g. ``col == col`` in boolean contexts.
        if self.operator in (operator.eq, operator.ne):
            return self.operator(hash(self._orig[0]), hash(self._orig[1]))
        else:
            raise TypeError("Boolean value of this clause is not defined")

    # Python 2 spelling of ``__bool__``.
    __nonzero__ = __bool__

    @property
    def is_comparison(self):
        return operators.is_comparison(self.operator)

    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects

    def _copy_internals(self, clone=_clone, **kw):
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)

    def get_children(self, **kwargs):
        return self.left, self.right

    def compare(self, other, **kw):
        """Compare this :class:`BinaryExpression` against the
        given :class:`BinaryExpression`."""

        return (
            isinstance(other, BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    # Commutative operators also match with the
                    # operand order swapped.
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )

    def self_group(self, against=None):
        if operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self

    def _negate(self):
        # A precomputed inverse (e.g. == -> !=) becomes the operator,
        # with operator/negate swapped so double negation round-trips.
        if self.negate is not None:
            return BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=type_api.BOOLEANTYPE,
                modifiers=self.modifiers)
        else:
            return super(BinaryExpression, self)._negate()
class Grouping(ColumnElement):
    """Represent a grouping within a column expression"""

    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element
        # mirror the wrapped element's type; NULLTYPE when it has none
        self.type = getattr(element, 'type', type_api.NULLTYPE)

    @property
    def _label(self):
        return getattr(self.element, '_label', None) or self.anon_label

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return self.element,

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # proxy any other attribute access through to the wrapped element
        return getattr(self.element, attr)

    def __getstate__(self):
        # pickle explicitly: __getattr__ would otherwise recurse during
        # unpickling before 'element' is set
        return {'element': self.element, 'type': self.type}

    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']

    def compare(self, other, **kw):
        # NOTE(review): **kw is accepted but not forwarded to the inner
        # element comparison; looks unintentional -- confirm upstream.
        return isinstance(other, Grouping) and \
            self.element.compare(other.element)
class Over(ColumnElement):
    """Represent an OVER clause.

    This is a special operator against a so-called
    "window" function, as well as any aggregate function,
    which produces results relative to the result set
    itself.  It's supported only by certain database
    backends.

    """
    __visit_name__ = 'over'

    # class-level defaults; overwritten per-instance when given
    order_by = None
    partition_by = None

    def __init__(self, func, partition_by=None, order_by=None):
        """Produce an :class:`.Over` object against a function.

        Used against aggregate or so-called "window" functions,
        for database backends that support window functions.

        E.g.::

            from sqlalchemy import over
            over(func.row_number(), order_by='x')

        Would produce "ROW_NUMBER() OVER(ORDER BY x)".

        :param func: a :class:`.FunctionElement` construct, typically
         generated by :data:`~.expression.func`.
        :param partition_by: a column element or string, or a list
         of such, that will be used as the PARTITION BY clause
         of the OVER construct.
        :param order_by: a column element or string, or a list
         of such, that will be used as the ORDER BY clause
         of the OVER construct.

        This function is also available from the :data:`~.expression.func`
        construct itself via the :meth:`.FunctionElement.over` method.

        .. versionadded:: 0.7

        """
        self.func = func
        if order_by is not None:
            self.order_by = ClauseList(*util.to_list(order_by))
        if partition_by is not None:
            self.partition_by = ClauseList(*util.to_list(partition_by))

    @util.memoized_property
    def type(self):
        # an OVER expression takes on the type of the function it wraps
        return self.func.type

    def get_children(self, **kwargs):
        return [c for c in
                (self.func, self.partition_by, self.order_by)
                if c is not None]

    def _copy_internals(self, clone=_clone, **kw):
        self.func = clone(self.func, **kw)
        if self.partition_by is not None:
            self.partition_by = clone(self.partition_by, **kw)
        if self.order_by is not None:
            self.order_by = clone(self.order_by, **kw)

    @property
    def _from_objects(self):
        # union of FROM objects across function, PARTITION BY and ORDER BY
        return list(itertools.chain(
            *[c._from_objects for c in
                (self.func, self.partition_by, self.order_by)
                if c is not None]
        ))
class Label(ColumnElement):
    """Represents a column label (AS).

    Represent a label, as typically applied to any column-level
    element using the ``AS`` sql keyword.

    """

    __visit_name__ = 'label'

    def __init__(self, name, element, type_=None):
        """Return a :class:`Label` object for the
        given :class:`.ColumnElement`.

        A label changes the name of an element in the columns clause of a
        ``SELECT`` statement, typically via the ``AS`` SQL keyword.

        This functionality is more conveniently available via the
        :meth:`.ColumnElement.label` method on :class:`.ColumnElement`.

        :param name: label name

        :param obj: a :class:`.ColumnElement`.

        """
        # unwrap nested labels so we always label the innermost element
        while isinstance(element, Label):
            element = element.element
        if name:
            self.name = name
        else:
            # generate an anonymous name, resolved at compile time
            self.name = _anonymous_label('%%(%d %s)s' % (id(self),
                                         getattr(element, 'name', 'anon')))
        self.key = self._label = self._key_label = self.name
        self._element = element
        self._type = type_
        self._proxies = [element]

    @util.memoized_property
    def _order_by_label_element(self):
        # a Label *is* the element ORDER BY may refer to by name
        return self

    @util.memoized_property
    def type(self):
        # explicit type wins; otherwise inherit from the labeled element
        return type_api.to_instance(
            self._type or getattr(self._element, 'type', None)
        )

    @util.memoized_property
    def element(self):
        return self._element.self_group(against=operators.as_)

    def self_group(self, against=None):
        # regroup the inner element; only produce a new Label if the
        # grouping actually changed it
        sub_element = self._element.self_group(against=against)
        if sub_element is not self._element:
            return Label(self.name,
                         sub_element,
                         type_=self._type)
        else:
            return self

    @property
    def primary_key(self):
        return self.element.primary_key

    @property
    def foreign_keys(self):
        return self.element.foreign_keys

    def get_children(self, **kwargs):
        return self.element,

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def _make_proxy(self, selectable, name=None, **kw):
        # proxy the *inner* element under this label's name, and record
        # this Label in the proxy chain
        e = self.element._make_proxy(selectable,
                                     name=name if name else self.name)
        e._proxies.append(self)
        if self._type is not None:
            e.type = self._type
        return e
class ColumnClause(Immutable, ColumnElement):
    """Represents a generic column expression from any textual string.

    This includes columns associated with tables, aliases and select
    statements, but also any arbitrary text.  May or may not be bound
    to an underlying :class:`.Selectable`.

    :class:`.ColumnClause` is constructed by itself typically via
    the :func:`~.expression.column` function.  It may be placed directly
    into constructs such as :func:`.select` constructs::

        from sqlalchemy.sql import column, select

        c1, c2 = column("c1"), column("c2")
        s = select([c1, c2]).where(c1==5)

    There is also a variant on :func:`~.expression.column` known
    as :func:`~.expression.literal_column` - the difference is that
    in the latter case, the string value is assumed to be an exact
    expression, rather than a column name, so that no quoting rules
    or similar are applied::

        from sqlalchemy.sql import literal_column, select

        s = select([literal_column("5 + 7")])

    :class:`.ColumnClause` can also be used in a table-like
    fashion by combining the :func:`~.expression.column` function
    with the :func:`~.expression.table` function, to produce
    a "lightweight" form of table metadata::

        from sqlalchemy.sql import table, column

        user = table("user",
                column("id"),
                column("name"),
                column("description"),
        )

    The above construct can be created in an ad-hoc fashion and is
    not associated with any :class:`.schema.MetaData`, unlike it's
    more full fledged :class:`.schema.Table` counterpart.

    """
    __visit_name__ = 'column'

    # DDL/default attributes; always None on a plain ColumnClause
    # (only schema.Column populates these)
    onupdate = default = server_default = server_onupdate = None

    # group of memoized attributes expired together when .table changes
    _memoized_property = util.group_expirable_memoized_property()

    def __init__(self, text, type_=None, is_literal=False, _selectable=None):
        """Construct a :class:`.ColumnClause` object.

        :param text: the text of the element.

        :param type: :class:`.types.TypeEngine` object which can associate
          this :class:`.ColumnClause` with a type.

        :param is_literal: if True, the :class:`.ColumnClause` is assumed to
          be an exact expression that will be delivered to the output with no
          quoting rules applied regardless of case sensitive settings.  the
          :func:`literal_column()` function is usually used to create such a
          :class:`.ColumnClause`.

        :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
          object which will provide result-set translation for this column.

        """
        self.key = self.name = text
        self.table = _selectable
        self.type = type_api.to_instance(type_)
        self.is_literal = is_literal

    def _compare_name_for_result(self, other):
        # literal or table-less columns fall back to name comparison;
        # otherwise compare by shared proxy lineage
        if self.is_literal or \
                self.table is None or \
                not hasattr(other, 'proxy_set') or (
                    isinstance(other, ColumnClause) and other.is_literal
                ):
            return super(ColumnClause, self).\
                _compare_name_for_result(other)
        else:
            return other.proxy_set.intersection(self.proxy_set)

    def _get_table(self):
        return self.__dict__['table']

    def _set_table(self, table):
        # changing .table invalidates the memoized label/key/_from_objects
        self._memoized_property.expire_instance(self)
        self.__dict__['table'] = table
    table = property(_get_table, _set_table)

    @_memoized_property
    def _from_objects(self):
        t = self.table
        if t is not None:
            return [t]
        else:
            return []

    @util.memoized_property
    def description(self):
        if util.py3k:
            return self.name
        else:
            # Python 2: keep the description printable as bytes
            return self.name.encode('ascii', 'backslashreplace')

    @_memoized_property
    def _key_label(self):
        if self.key != self.name:
            return self._gen_label(self.key)
        else:
            return self._label

    @_memoized_property
    def _label(self):
        return self._gen_label(self.name)

    def _gen_label(self, name):
        # produce a "<table>_<column>" label with quoting preferences
        # propagated and collisions against existing columns avoided;
        # literals get no label, table-less columns keep their plain name
        t = self.table

        if self.is_literal:
            return None

        elif t is not None and t.named_with_column:
            if getattr(t, 'schema', None):
                label = t.schema.replace('.', '_') + "_" + \
                    t.name + "_" + name
            else:
                label = t.name + "_" + name

            # propagate name quoting rules for labels.
            if getattr(name, "quote", None) is not None:
                label = quoted_name(label, name.quote)
            elif getattr(t.name, "quote", None) is not None:
                label = quoted_name(label, t.name.quote)

            # ensure the label name doesn't conflict with that
            # of an existing column
            if label in t.c:
                _label = label
                counter = 1
                while _label in t.c:
                    _label = label + "_" + str(counter)
                    counter += 1
                label = _label

            return _as_truncated(label)

        else:
            return name

    def _bind_param(self, operator, obj):
        return BindParameter(self.name, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type,
                             unique=True)

    def _make_proxy(self, selectable, name=None, attach=True,
                    name_is_truncatable=False, **kw):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = self._constructor(
            _as_truncated(name or self.name) if \
                name_is_truncatable else \
                (name or self.name),
            type_=self.type,
            _selectable=selectable,
            is_literal=is_literal
        )
        if name is None:
            c.key = self.key
        c._proxies = [self]
        if selectable._is_clone_of is not None:
            c._is_clone_of = \
                selectable._is_clone_of.columns.get(c.key)

        if attach:
            selectable._columns[c.key] = c
        return c
class _IdentifiedClause(Executable, ClauseElement):
    """Base for executable statements identified by a name,
    i.e. the savepoint-related clauses."""

    __visit_name__ = 'identified'

    # savepoint statements must not trigger autocommit
    _execution_options = \
        Executable._execution_options.union({'autocommit': False})

    def __init__(self, ident):
        # ident: the savepoint name
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    """Represent a SAVEPOINT statement."""
    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    """Represent a ROLLBACK TO SAVEPOINT statement."""
    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    """Represent a RELEASE SAVEPOINT statement."""
    __visit_name__ = 'release_savepoint'
class quoted_name(util.text_type):
    """Represent a SQL identifier combined with quoting preferences.

    :class:`.quoted_name` is a Python unicode/str subclass which
    represents a particular identifier name along with a
    ``quote`` flag.  This ``quote`` flag, when set to
    ``True`` or ``False``, overrides automatic quoting behavior
    for this identifier in order to either unconditionally quote
    or to not quote the name.  If left at its default of ``None``,
    quoting behavior is applied to the identifier on a per-backend basis
    based on an examination of the token itself.

    A :class:`.quoted_name` object with ``quote=True`` is also
    prevented from being modified in the case of a so-called
    "name normalize" option.  Certain database backends, such as
    Oracle, Firebird, and DB2 "normalize" case-insensitive names
    as uppercase.  The SQLAlchemy dialects for these backends
    convert from SQLAlchemy's lower-case-means-insensitive convention
    to the upper-case-means-insensitive conventions of those backends.
    The ``quote=True`` flag here will prevent this conversion from occurring
    to support an identifier that's quoted as all lower case against
    such a backend.

    The :class:`.quoted_name` object is normally created automatically
    when specifying the name for key schema constructs such as
    :class:`.Table`, :class:`.Column`, and others.  The class can also be
    passed explicitly as the name to any function that receives a name which
    can be quoted.  Such as to use the :meth:`.Engine.has_table` method with
    an unconditionally quoted name::

        from sqlalchemy import create_engine
        from sqlalchemy.sql.elements import quoted_name

        engine = create_engine("oracle+cx_oracle://some_dsn")
        engine.has_table(quoted_name("some_table", True))

    The above logic will run the "has table" logic against the Oracle backend,
    passing the name exactly as ``"some_table"`` without converting to
    upper case.

    .. versionadded:: 0.9.0

    """

    #def __new__(cls, value, quote, sprcls=False):
    def __new__(cls, value, quote):
        # None passes through as None so optional names stay optional
        if value is None:
            return None
        # experimental - don't bother with quoted_name
        # if quote flag is None.  doesn't seem to make any dent
        # in performance however
        # elif not sprcls and quote is None:
        #    return value
        elif isinstance(value, cls) and (
            quote is None or value.quote == quote
        ):
            # already a quoted_name with a compatible quote flag; reuse it
            return value
        self = super(quoted_name, cls).__new__(cls, value)
        self.quote = quote
        return self

    def __reduce__(self):
        # pickle as (plain text, quote flag); __new__ reassembles
        return quoted_name, (util.text_type(self), self.quote)

    @util.memoized_instancemethod
    def lower(self):
        # a quote=True name is case-exact; never fold it
        if self.quote:
            return self
        else:
            return util.text_type(self).lower()

    @util.memoized_instancemethod
    def upper(self):
        # a quote=True name is case-exact; never fold it
        if self.quote:
            return self
        else:
            return util.text_type(self).upper()

    def __repr__(self):
        return "'%s'" % self
class _truncated_label(quoted_name):
    """A unicode subclass used to identify symbolic
    names that may require truncation."""

    def __new__(cls, value, quote=None):
        # inherit the quote preference from the incoming value when present
        quote = getattr(value, "quote", quote)
        #return super(_truncated_label, cls).__new__(cls, value, quote, True)
        return super(_truncated_label, cls).__new__(cls, value, quote)

    def __reduce__(self):
        return self.__class__, (util.text_type(self), self.quote)

    def apply_map(self, map_):
        # plain truncated labels have no tokens to substitute
        return self
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
    """A unicode subclass used to identify anonymously
    generated names."""

    def __add__(self, other):
        # concatenation stays an _anonymous_label, preserving quote flag
        return _anonymous_label(
            quoted_name(
                util.text_type.__add__(self, util.text_type(other)),
                self.quote)
        )

    def __radd__(self, other):
        return _anonymous_label(
            quoted_name(
                util.text_type.__add__(util.text_type(other), self),
                self.quote)
        )

    def apply_map(self, map_):
        # resolve the "%(...)s" token against the compile-time map
        if self.quote is not None:
            # preserve quoting only if necessary
            return quoted_name(self % map_, self.quote)
        else:
            # else skip the constructor call
            return self % map_
def _as_truncated(value):
    """Coerce *value* to :class:`._truncated_label`.

    Existing :class:`._truncated_label` and
    :class:`._anonymous_label` objects pass through unchanged.
    """
    if isinstance(value, _truncated_label):
        return value
    return _truncated_label(value)
def _string_or_unprintable(element):
    """Return *element* if it is a string, else its ``str()`` form.

    Falls back to a placeholder when ``str()`` itself raises, so that
    error messages can always be rendered.
    """
    if isinstance(element, util.string_types):
        return element
    else:
        try:
            return str(element)
        # was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; only rendering failures should be masked
        except Exception:
            return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
    """return the intersection of sets a and b, counting
    any overlap between 'cloned' predecessors.

    The returned set is in terms of the entities present within 'a'.

    """
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(
        elem for elem in a
        if overlap.intersection(elem._cloned_set))
def _cloned_difference(a, b):
    """Return the elements of 'a' whose 'cloned' predecessor sets do not
    overlap those of 'b'."""
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(
        elem for elem in a
        if not overlap.intersection(elem._cloned_set))
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _is_column(col):
    """True if ``col`` is an instance of :class:`.ColumnElement`."""
    return isinstance(col, ColumnElement)
def _find_columns(clause):
    """locate Column objects within the given expression."""
    found = util.column_set()
    # visit every node of the expression tree, collecting 'column' nodes
    traverse(clause, {}, {'column': found.add})
    return found
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). it's only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
    """Coerce *element* to a string key: strings pass through,
    ``__clause_element__`` proxies are unwrapped, then ``.key`` is
    used (``None`` when absent)."""
    if isinstance(element, util.string_types):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    return getattr(element, 'key', None)
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_text(element):
    """Coerce *element* to a ClauseElement: strings become
    :class:`.TextClause`, ``None``/booleans become SQL constants,
    ``__clause_element__`` proxies are unwrapped."""
    if isinstance(element, Visitable):
        return element
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, util.string_types):
        return TextClause(util.text_type(element))
    if isinstance(element, (util.NoneType, bool)):
        return _const_expr(element)
    raise exc.ArgumentError(
        "SQL expression object or string expected."
    )
def _no_literals(element):
    """Unwrap ``__clause_element__`` proxies; reject plain literals."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
                            "function to indicate a SQL expression "
                            "literal, or 'literal()' to indicate a "
                            "bound value." % element)
def _is_literal(element):
    """True when *element* is neither a ClauseElement nor a proxy for one."""
    return not (isinstance(element, Visitable) or
                hasattr(element, '__clause_element__'))
def _only_column_elements_or_none(element, name):
    """Like :func:`._only_column_elements`, but lets ``None`` through."""
    if element is None:
        return None
    return _only_column_elements(element, name)
def _only_column_elements(element, name):
    """Unwrap *element* and require a :class:`.ColumnElement`.

    ``name`` identifies the offending argument in the error message.
    """
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if isinstance(element, ColumnElement):
        return element
    raise exc.ArgumentError(
        "Column-based expression object expected for argument "
        "'%s'; got: '%s', type %s" % (name, element, type(element)))
def _literal_as_binds(element, name=None, type_=None):
    """Coerce *element* to a bound parameter; ``None`` becomes
    :class:`.Null`, proxies are unwrapped, ClauseElements pass through."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    if element is None:
        return Null()
    return BindParameter(name, element, type_=type_, unique=True)
def _interpret_as_column_or_from(element):
    """Coerce *element* to a column- or FROM-level construct.

    Resolution order matters here: ClauseElements and
    ``__clause_element__`` proxies pass through first; then
    inspection is consulted for ORM-style objects exposing
    ``.selectable``; anything else is stringified into a literal
    :class:`.ColumnClause`.
    """
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()

    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        # None / booleans become SQL constants; anything else falls
        # through to the literal-column fallback below
        if isinstance(element, (util.NoneType, bool)):
            return _const_expr(element)
    elif hasattr(insp, "selectable"):
        return insp.selectable

    # last resort: treat the stringified object as a literal column
    return ColumnClause(str(element), is_literal=True)
def _const_expr(element):
    """Map ``None``/``False``/``True`` (or already-constructed constant
    elements) to their SQL constant ClauseElements."""
    if isinstance(element, (Null, False_, True_)):
        return element
    if element is None:
        return Null()
    if element is False:
        return False_()
    if element is True:
        return True_()
    raise exc.ArgumentError(
        "Expected None, False, or True"
    )
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
                                   require_embedded=False):
    """Locate the column in *fromclause* corresponding to *column*,
    raising :class:`.InvalidRequestError` (instead of returning ``None``)
    when no correspondence exists."""
    c = fromclause.corresponding_column(column,
                                        require_embedded=require_embedded)
    if c is None:
        raise exc.InvalidRequestError(
            "Given column '%s', attached to table '%s', "
            "failed to locate a corresponding column from table '%s'"
            %
            (column,
             getattr(column, 'table', None),
             fromclause.description)
        )
    return c
class AnnotatedColumnElement(Annotated):
    """Annotated variant of :class:`.ColumnElement`, re-memoizing
    state that may differ from the unannotated parent."""

    def __init__(self, element, values):
        Annotated.__init__(self, element, values)
        # annotations can change comparator behavior; drop the memoized one
        ColumnElement.comparator._reset(self)
        # drop copied name/key entries that resolved to None so the
        # memoized properties below can re-pull them from the parent
        for attr in ('name', 'key'):
            if self.__dict__.get(attr, False) is None:
                self.__dict__.pop(attr)

    def _with_annotations(self, values):
        clone = super(AnnotatedColumnElement, self)._with_annotations(values)
        ColumnElement.comparator._reset(clone)
        return clone

    @util.memoized_property
    def name(self):
        """pull 'name' from parent, if not present"""
        return self._Annotated__element.name

    @util.memoized_property
    def key(self):
        """pull 'key' from parent, if not present"""
        return self._Annotated__element.key

    @util.memoized_property
    def info(self):
        """pull 'info' from the parent element"""
        return self._Annotated__element.info
| mit |
paulmadore/Eric-IDE | 6-6.0.9/eric/Helpviewer/Bookmarks/BookmarksImporters/BookmarksImporter.py | 2 | 2150 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a base class for the bookmarks importers.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import QObject
class BookmarksImporter(QObject):
    """
    Base class defining the interface of the bookmarks importers.
    
    Concrete importers must reimplement setPath(), open() and
    importedBookmarks(); error state is reported via error() and
    errorString().
    """
    def __init__(self, id="", parent=None):
        """
        Constructor
        
        @param id source ID (string)
        @param parent reference to the parent object (QObject)
        """
        super(BookmarksImporter, self).__init__(parent)
        
        self._id = id
        self._path = ""
        self._file = ""
        self._error = False
        self._errorString = ""
    
    def setPath(self, path):
        """
        Public method to set the path of the bookmarks file or directory.
        
        @param path bookmarks file or directory (string)
        @exception NotImplementedError raised to indicate this method must
            be implemented by a subclass
        """
        raise NotImplementedError
    
    def open(self):
        """
        Public method to open the bookmarks file.
        
        It must return a flag indicating success (boolean).
        
        @exception NotImplementedError raised to indicate this method must
            be implemented by a subclass
        """
        raise NotImplementedError
    
    def importedBookmarks(self):
        """
        Public method to get the imported bookmarks.
        
        It must return the imported bookmarks (BookmarkNode).
        
        @exception NotImplementedError raised to indicate this method must
            be implemented by a subclass
        """
        raise NotImplementedError
    
    def error(self):
        """
        Public method to check for an error.
        
        @return flag indicating an error (boolean)
        """
        return self._error
    
    def errorString(self):
        """
        Public method to get the error description.
        
        @return error description (string)
        """
        return self._errorString
| gpl-3.0 |
MechCoder/sympy | sympy/utilities/exceptions.py | 93 | 5777 | """
General SymPy exceptions and warnings.
"""
from __future__ import print_function, division
import warnings
from sympy.utilities.misc import filldedent
class SymPyDeprecationWarning(DeprecationWarning):
    r"""A warning for deprecated features of SymPy.

    This class is expected to be used with the warnings.warn function (note
    that one has to explicitly turn on deprecation warnings):

    >>> import warnings
    >>> from sympy.utilities.exceptions import SymPyDeprecationWarning
    >>> warnings.simplefilter(
    ...     "always", SymPyDeprecationWarning)
    >>> warnings.warn(
    ...     "Don't do this, it's deprecated",
    ...     SymPyDeprecationWarning) #doctest:+SKIP
    __main__:1: SymPyDeprecationWarning: "Don't do this, it's deprecated"

    The recommended way to use this class is, however, is by calling
    the warn method after constructing the message:

    >>> SymPyDeprecationWarning("Don't do this, it's deprecated.").warn() #doctest:+SKIP
    __main__:1: SymPyDeprecationWarning:

    Don't do this, it's deprecated.

    warning (see_above, SymPyDeprecationWarning)

    To provide additional information, create an instance of this
    class in this way:

    >>> SymPyDeprecationWarning(
    ...     feature="Such and such",
    ...     last_supported_version="1.2.3",
    ...     useinstead="this other feature")
    Such and such has been deprecated. It will be last supported in SymPy
    version 1.2.3. Use this other feature instead.

    Note that the text in ``feature`` begins a sentence, so if it begins with
    a plain English word, the first letter of that word should be capitalized.

    Either (or both) of the arguments ``last_supported_version`` and
    ``useinstead`` can be omitted. In this case the corresponding sentence
    will not be shown:

    >>> SymPyDeprecationWarning(feature="Such and such",
    ...     useinstead="this other feature")
    Such and such has been deprecated. Use this other feature instead.

    You can still provide the argument value.  If it is a string, it
    will be appended to the end of the message:

    >>> SymPyDeprecationWarning(
    ...     feature="Such and such",
    ...     useinstead="this other feature",
    ...     value="Contact the developers for further information.")
    Such and such has been deprecated. Use this other feature instead.
    Contact the developers for further information.

    If, however, the argument value does not hold a string, a string
    representation of the object will be appended to the message:

    >>> SymPyDeprecationWarning(
    ...     feature="Such and such",
    ...     useinstead="this other feature",
    ...     value=[1,2,3])
    Such and such has been deprecated. Use this other feature instead.
    ([1, 2, 3])

    To associate an issue with a deprecation, use the ``issue`` flag.

    >>> SymPyDeprecationWarning(
    ...     feature="Old feature",
    ...     useinstead="new feature",
    ...     issue=5241)
    Old feature has been deprecated. Use new feature instead. See
    https://github.com/sympy/sympy/issues/5241 for more info.

    Every formal deprecation should have an associated issue in the GitHub
    issue tracker.  All such issues should have the DeprecationRemoval
    tag.

    Additionally, each formal deprecation should mark the first release for
    which it was deprecated.  Use the ``deprecated_since_version`` flag for
    this.

    >>> SymPyDeprecationWarning(
    ...     feature="Old feature",
    ...     useinstead="new feature",
    ...     deprecated_since_version="0.7.2")
    Old feature has been deprecated since SymPy 0.7.2. Use new feature
    instead.

    Note that it may be necessary to go back through all the deprecations
    before a release to make sure that the version number is correct.  So just
    use what you believe will be the next release number (this usually means
    bumping the minor number by one).

    To mark a function as deprecated, you can use the decorator
    @deprecated.

    See Also
    ========
    sympy.core.decorators.deprecated

    """

    def __init__(self, value=None, feature=None, last_supported_version=None,
                 useinstead=None, issue=None, deprecated_since_version=None):
        # build the message incrementally from whichever pieces were given
        self.fullMessage = ""

        if feature:
            if deprecated_since_version:
                self.fullMessage = "%s has been deprecated since SymPy %s. " % \
                                   (feature, deprecated_since_version)
            else:
                self.fullMessage = "%s has been deprecated. " % feature
        if last_supported_version:
            self.fullMessage += ("It will be last supported in SymPy "
                                 "version %s. ") % last_supported_version
        if useinstead:
            self.fullMessage += "Use %s instead. " % useinstead
        if issue:
            self.fullMessage += ("See "
                                 "https://github.com/sympy/sympy/issues/%d for more "
                                 "info. ") % issue

        if value:
            # non-string values are shown via their repr, parenthesized
            if not isinstance(value, str):
                value = "(%s)" % repr(value)
            value = " " + value
        else:
            value = ""

        self.fullMessage += value

    def __str__(self):
        return '\n%s\n' % filldedent(self.fullMessage)

    def warn(self, stacklevel=2):
        see_above = self.fullMessage
        # the next line is what the user would see after the error is printed
        # if stacklevel was set to 1. If you are writing a wrapper around this,
        # increase the stacklevel accordingly.
        warnings.warn(see_above, SymPyDeprecationWarning, stacklevel=stacklevel)
# Python by default hides DeprecationWarnings, which we do not want.
# "once" shows each distinct deprecation message a single time.
warnings.simplefilter("once", SymPyDeprecationWarning)
| bsd-3-clause |
Isendir/brython | www/src/Lib/test/time_hashlib.py | 167 | 2887 | # It's intended that this script be run by hand. It runs speed tests on
# hashlib functions; it does not test for correctness.
import sys, time
import hashlib
def creatorFunc():
    # Placeholder; rebound below according to the command-line arguments.
    raise RuntimeError("eek, creatorFunc not overridden")
def test_scaled_msg(scale, name):
    """Time repeated digests of a *scale*-byte message."""
    # Use floor division: on Python 3, "/" yields a float and range()
    # below would raise TypeError (the original was Python 2 code).
    iterations = 106201 // scale * 20
    longStr = 'Z' * scale

    localCF = creatorFunc  # bind once; avoids a global lookup per iteration
    start = time.time()
    for f in range(iterations):
        x = localCF(longStr).digest()
    end = time.time()

    print(('%2.2f' % (end - start)), "seconds", iterations, "x",
          len(longStr), "bytes", name)
def test_create():
    """Time 20000 constructions of the selected hash object."""
    start = time.time()
    for _ in range(20000):
        d = creatorFunc()
    end = time.time()
    print(('%2.2f' % (end - start)), "seconds", '[20000 creations]')
def test_zero():
    """Time 20000 digests of the empty message."""
    start = time.time()
    for _ in range(20000):
        x = creatorFunc().digest()
    end = time.time()
    print(('%2.2f' % (end - start)), "seconds", '[20000 "" digests]')
# first argument selects which hash implementation to benchmark
hName = sys.argv[1]

#
# setup our creatorFunc to test the requested hash
#
if hName in ('_md5', '_sha'):
    # legacy builtin modules; imported dynamically by name
    exec('import '+hName)
    exec('creatorFunc = '+hName+'.new')
    print("testing speed of old", hName, "legacy interface")
elif hName == '_hashlib' and len(sys.argv) > 3:
    # direct _hashlib constructor (the "fast" path), e.g. openssl_md5
    import _hashlib
    exec('creatorFunc = _hashlib.%s' % sys.argv[2])
    print("testing speed of _hashlib.%s" % sys.argv[2], getattr(_hashlib, sys.argv[2]))
elif hName == '_hashlib' and len(sys.argv) == 3:
    # _hashlib via the generic new(name) interface
    import _hashlib
    exec('creatorFunc = lambda x=_hashlib.new : x(%r)' % sys.argv[2])
    print("testing speed of _hashlib.new(%r)" % sys.argv[2])
elif hasattr(hashlib, hName) and hasattr(getattr(hashlib, hName), '__call__'):
    # named hashlib constructor, e.g. hashlib.sha256
    creatorFunc = getattr(hashlib, hName)
    print("testing speed of hashlib."+hName, getattr(hashlib, hName))
else:
    # fall back to hashlib.new(name)
    exec("creatorFunc = lambda x=hashlib.new : x(%r)" % hName)
    print("testing speed of hashlib.new(%r)" % hName)

try:
    test_create()
except ValueError:
    # unknown hash name: print usage help, then re-raise
    print()
    print("pass argument(s) naming the hash to run a speed test on:")
    print(" '_md5' and '_sha' test the legacy builtin md5 and sha")
    print(" '_hashlib' 'openssl_hName' 'fast' tests the builtin _hashlib")
    print(" '_hashlib' 'hName' tests builtin _hashlib.new(shaFOO)")
    print(" 'hName' tests the hashlib.hName() implementation if it exists")
    print(" otherwise it uses hashlib.new(hName).")
    print()
    raise

# benchmark across a range of message sizes, largest first
test_zero()
test_scaled_msg(scale=106201, name='[huge data]')
test_scaled_msg(scale=10620, name='[large data]')
test_scaled_msg(scale=1062, name='[medium data]')
test_scaled_msg(scale=424, name='[4*small data]')
test_scaled_msg(scale=336, name='[3*small data]')
test_scaled_msg(scale=212, name='[2*small data]')
test_scaled_msg(scale=106, name='[small data]')
test_scaled_msg(scale=creatorFunc().digest_size, name='[digest_size data]')
test_scaled_msg(scale=10, name='[tiny data]')
grap/OpenUpgrade | addons/base_action_rule/migrations/8.0.1.0/post-migration.py | 14 | 1449 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 HBEE (http://www.hbee.eu)
# @author: Paulius Sladkevičius <paulius@hbee.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
    """Adapt base_action_rule records to the 8.0 schema.

    Rules without a date trigger become plain create/write rules and get
    their date-range fields cleared; rules with a date trigger become
    time-based rules and lose their pre-condition filter.

    :param cr: database cursor
    :param version: installed module version (supplied by the decorator)
    """
    # BUG FIX: in SQL, comparing with "= null" / "!= null" always yields
    # NULL (never true), so the original queries matched zero rows and the
    # migration silently did nothing.  Use IS NULL / IS NOT NULL so the
    # records are actually updated.
    openupgrade.logged_query(
        cr,
        "UPDATE base_action_rule SET kind = 'on_create_or_write', "
        "trg_date_range = null, trg_date_range_type = null "
        "where trg_date_id IS NULL"
    )
    openupgrade.logged_query(
        cr,
        "UPDATE base_action_rule SET kind = 'on_time', filter_pre_id = null "
        "where trg_date_id IS NOT NULL"
    )
| agpl-3.0 |
rnirmal/savanna | savanna/tests/unit/db/models/test_clusters.py | 1 | 1607 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from savanna import context as ctx
from savanna.db import models as m
from savanna.tests.unit import base as models_test_base
class ClusterModelTest(models_test_base.DbTestCase):
def testCreateCluster(self):
session = ctx.current().session
with session.begin():
c = m.Cluster('c-1', 't-1', 'p-1', 'hv-1')
session.add(c)
with session.begin():
res = session.query(m.Cluster).filter_by().first()
self.assertIsValidModelObject(res)
def testCreateClusterFromDict(self):
c = m.Cluster('c-1', 't-1', 'p-1', 'hv-1')
c_dict = c.dict
del c_dict['created']
del c_dict['updated']
del c_dict['id']
del c_dict['node_groups']
del c_dict['status']
del c_dict['status_description']
del c_dict['info']
c_dict.update({
'tenant_id': 't-1'
})
self.assertEqual(self.get_clean_dict(c),
self.get_clean_dict(m.Cluster(**c_dict)))
| apache-2.0 |
taedori81/gentlecoffee | saleor/order/forms.py | 10 | 1249 | from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from .models import Payment
class PaymentMethodsForm(forms.Form):
    """Checkout form: pick one of the configured payment backends."""

    # Radio-button choice over settings.CHECKOUT_PAYMENT_CHOICES; the first
    # configured backend is preselected.
    method = forms.ChoiceField(choices=settings.CHECKOUT_PAYMENT_CHOICES, widget=forms.RadioSelect,
                               initial=settings.CHECKOUT_PAYMENT_CHOICES[0][0])
class PaymentDeleteForm(forms.Form):
    """Rejects one of an order's pending ("waiting") payments."""

    payment_id = forms.IntegerField(widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        # The owning order is injected by the view so that validation can
        # be restricted to that order's payments only.
        self.order = kwargs.pop('order')
        super(PaymentDeleteForm, self).__init__(*args, **kwargs)

    def clean(self):
        cleaned_data = super(PaymentDeleteForm, self).clean()
        candidates = self.order.payments.filter(status='waiting')
        try:
            match = candidates.get(id=cleaned_data.get('payment_id'))
        except Payment.DoesNotExist:
            self._errors['number'] = self.error_class(
                [_('Payment does not exist')])
        else:
            cleaned_data['payment'] = match
        return cleaned_data

    def save(self):
        rejected = self.cleaned_data['payment']
        rejected.status = 'rejected'
        rejected.save()
| bsd-3-clause |
sigma-random/wireshark | tools/dftestlib/tvb.py | 40 | 2451 | # Copyright (c) 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import unittest
from dftestlib import dftest
class testTVB(dftest.DFTest):
    """Display-filter regression tests that slice and match raw packet
    bytes (the tvbuff) from http.pcap."""

    trace_file = "http.pcap"

    def test_eq_1(self):
        # Even though this byte string matches the 'eth' protocol, a
        # protocol cannot take part in an '==' comparison yet, so zero
        # packets are expected to match.
        self.assertDFilterCount(
            "eth == 00:e0:81:00:b0:28:00:09:6b:88:f6:c9:08:00", 0)

    def test_slice_1(self):
        self.assertDFilterCount("ip[0:2] == 45:00", 1)

    def test_slice_2(self):
        self.assertDFilterCount("ip[0:2] == 00:00", 0)

    def test_slice_3(self):
        self.assertDFilterCount("ip[2:2] == 00:c1", 1)

    @unittest.skip("This doesn't work yet in Wireshark")
    def test_slice_4(self):
        self.assertDFilterCount("ip[-5] == 0x86", 0)

    @unittest.skip("This doesn't work yet in Wireshark")
    def test_slice_5(self):
        self.assertDFilterCount("ip[-1] == 0x86", 1)

    def test_contains_1(self):
        self.assertDFilterCount("eth contains 6b", 1)

    def test_contains_2(self):
        self.assertDFilterCount("eth contains 09:6b:88", 1)

    def test_contains_3(self):
        self.assertDFilterCount(
            "eth contains 00:e0:81:00:b0:28:00:09:6b:88:f5:c9:08:00", 1)

    def test_contains_4(self):
        self.assertDFilterCount("eth contains ff:ff:ff", 0)

    def test_contains_5(self):
        self.assertDFilterCount('http contains "HEAD"', 1)
| gpl-2.0 |
MTK6580/walkie-talkie | ALPS.L1.MP6.V2_HEXING6580_WE_L/alps/build/tools/releasetools/img_from_target_files.py | 1 | 4926 | #!/usr/bin/env python
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Given a target-files zipfile, produces an image zipfile suitable for
use with 'fastboot update'.
Usage: img_from_target_files [flags] input_target_files output_image_zip
-z (--bootable_zip)
Include only the bootable images (eg 'boot' and 'recovery') in
the output.
"""
import sys
if sys.hexversion < 0x02070000:
print >> sys.stderr, "Python 2.7 or newer is required."
sys.exit(1)
import errno
import os
import re
import shutil
import subprocess
import tempfile
import zipfile
# missing in Python 2.4 and before
if not hasattr(os, "SEEK_SET"):
os.SEEK_SET = 0
import common
OPTIONS = common.OPTIONS
def CopyInfo(output_zip):
  """Copy the android-info.txt file from the input to the output."""
  info_path = os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt")
  output_zip.write(info_path, "android-info.txt")
def main(argv):
  """Entry point: unpack a target-files zip and write an image zip.

  argv holds the command-line arguments after the program name; two
  positional arguments are required (input target-files zip, output
  image zip).
  """
  # Single-element list so option_handler (a closure) can mutate it; this
  # Python 2 code predates `nonlocal`.
  bootable_only = [False]

  def option_handler(o, a):
    if o in ("-z", "--bootable_zip"):
      bootable_only[0] = True
    else:
      return False
    return True

  args = common.ParseOptions(argv, __doc__,
                             extra_opts="z",
                             extra_long_opts=["bootable_zip"],
                             extra_option_handler=option_handler)

  bootable_only = bootable_only[0]

  if len(args) != 2:
    common.Usage(__doc__)
    sys.exit(1)

  OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
  output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
  CopyInfo(output_zip)

  try:
    done = False
    images_path = os.path.join(OPTIONS.input_tmp, "IMAGES")
    if os.path.exists(images_path):
      # If this is a new target-files, it already contains the images,
      # and all we have to do is copy them to the output zip.
      images = os.listdir(images_path)
      if images:
        for i in images:
          if bootable_only and i not in ("boot.img", "recovery.img"): continue
          if not i.endswith(".img"): continue
          with open(os.path.join(images_path, i), "r") as f:
            common.ZipWriteStr(output_zip, i, f.read())
        done = True

    if not done:
      # We have an old target-files that doesn't already contain the
      # images, so build them.
      # Imported lazily: only needed on this fallback path.
      import add_img_to_target_files

      OPTIONS.info_dict = common.LoadInfoDict(input_zip)

      # If this image was originally labelled with SELinux contexts,
      # make sure we also apply the labels in our new image. During
      # building, the "file_contexts" is in the out/ directory tree,
      # but for repacking from target-files.zip it's in the root
      # directory of the ramdisk.
      if "selinux_fc" in OPTIONS.info_dict:
        OPTIONS.info_dict["selinux_fc"] = os.path.join(
            OPTIONS.input_tmp, "BOOT", "RAMDISK", "file_contexts")

      boot_image = common.GetBootableImage(
          "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
      if boot_image:
        boot_image.AddToZip(output_zip)
      recovery_image = common.GetBootableImage(
          "recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY")
      if recovery_image:
        recovery_image.AddToZip(output_zip)

      def banner(s):
        print "\n\n++++ " + s + " ++++\n\n"

      if not bootable_only:
        banner("AddSystem")
        add_img_to_target_files.AddSystem(output_zip, prefix="")
        # VENDOR/ and CUSTOM/ are optional partitions; getinfo() raising
        # KeyError simply means the device doesn't have one.
        try:
          input_zip.getinfo("VENDOR/")
          banner("AddVendor")
          add_img_to_target_files.AddVendor(output_zip, prefix="")
        except KeyError:
          pass   # no vendor partition for this device
        try:
          input_zip.getinfo("CUSTOM/")
          banner("AddCustom")
          add_img_to_target_files.AddCustom(output_zip, prefix="")
        except KeyError:
          pass   # no custom partition for this device
        banner("AddUserdata")
        add_img_to_target_files.AddUserdata(output_zip, prefix="")
        banner("AddCache")
        add_img_to_target_files.AddCache(output_zip, prefix="")

  finally:
    # Always close the output zip and remove the unpacked tree, even on
    # failure part-way through.
    print "cleaning up..."
    output_zip.close()
    shutil.rmtree(OPTIONS.input_tmp)

  print "done."
print "done."
if __name__ == '__main__':
  try:
    # Close any pipes inherited from the parent so child processes don't
    # hang (see common.CloseInheritedPipes).
    common.CloseInheritedPipes()
    main(sys.argv[1:])
  except common.ExternalError, e:  # Python 2 exception syntax.
    print
    print "  ERROR: %s" % (e,)
    print
    sys.exit(1)
| gpl-3.0 |
weimingtom/python-for-android | tools/googlecode_upload.py | 304 | 8912 | #!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
  """Send *file* to PROJECT.googlecode.com/files via an authenticated POST.

  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account (NOT the Gmail
      account password).
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple (http_status, http_reason, file_url) where file_url is
    the uploaded file's URL on success (HTTP 201) and None otherwise.
  """
  # Only the bare login is used for auth; strip a full Gmail address down.
  gmail_suffix = '@gmail.com'
  if user_name.endswith(gmail_suffix):
    user_name = user_name[:user_name.index(gmail_suffix)]

  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', l.strip()) for l in labels])

  content_type, body = encode_upload_request(form_fields, file)

  host = '%s.googlecode.com' % project_name
  credentials = base64.b64encode('%s:%s' % (user_name, password))
  request_headers = {
      'Authorization': 'Basic %s' % credentials,
      'User-Agent': 'Googlecode.com uploader v0.9.4',
      'Content-Type': content_type,
      }

  connection = httplib.HTTPSConnection(host)
  connection.request('POST', '/files', body, request_headers)
  response = connection.getresponse()
  connection.close()

  # 201 Created carries the new file's URL in the Location header.
  if response.status == 201:
    file_url = response.getheader('Location', None)
  else:
    file_url = None
  return response.status, response.reason, file_url
def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  *fields* is a sequence of (name, value) pairs; the file at *file_path*
  is uploaded under its basename.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  boundary = '----------Googlecode_boundary_reindeer_flotilla'
  crlf = '\r\n'

  parts = []
  # The metadata fields come first.
  for name, value in fields:
    parts.extend(['--' + boundary,
                  'Content-Disposition: form-data; name="%s"' % name,
                  '',
                  value])

  # Then the file payload itself.
  upload_name = os.path.basename(file_path)
  with open(file_path, 'rb') as f:
    payload = f.read()
  parts.extend(['--' + boundary,
                'Content-Disposition: form-data; name="filename"; filename="%s"'
                % upload_name,
                # The upload server determines the mime-type, no need to set it.
                'Content-Type: application/octet-stream',
                '',
                payload])

  # Closing boundary plus a trailing CRLF finalize the form body.
  parts.extend(['--' + boundary + '--', ''])
  return 'multipart/form-data; boundary=%s' % boundary, crlf.join(parts)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    user_name: Your Google account name, or None to prompt interactively.
    password: Your googlecode.com password, or None to prompt interactively.
    tries: How many attempts to make.

  Returns:
    The (status, reason, url) tuple from the last upload() attempt.
  """
  # Prompt for any missing credential, attempt the upload, and retry with
  # fresh credentials when the server rejects them.
  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()

    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Rest for another try.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break

  return status, reason, url
def main():
  """Command-line entry point: parse options, upload one file.

  Returns a process exit status: 0 on success, 1 on upload failure.
  """
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of comma-separated labels to attach '
                    'to the file')

  options, args = parser.parse_args()

  # Summary and project are required; exactly one positional FILE argument.
  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  elif len(args) > 1:
    parser.error('Only one file may be specified.')

  file_path = args[0]

  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None

  # upload_find_auth prompts interactively for any credential not given.
  status, reason, url = upload_find_auth(file_path, options.project,
                                         options.summary, labels,
                                         options.user, options.password)
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
    return 0
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
    return 1
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  sys.exit(main())
| apache-2.0 |
JKarathiya/Lean | Algorithm.Python/FutureOptionShortPutOTMExpiryRegressionAlgorithm.py | 1 | 5416 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from datetime import datetime, timedelta
import clr
from System import *
from System.Reflection import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Orders import *
from QuantConnect.Securities import *
from QuantConnect.Securities.Future import *
from QuantConnect import Market
### <summary>
### This regression algorithm tests Out of The Money (OTM) future option expiry for short puts.
### We expect 2 orders from the algorithm, which are:
###
### * Initial entry, sell ES Put Option (expiring OTM)
### - Profit the option premium, since the option was not assigned.
###
### * Liquidation of ES put OTM contract on the last trade date
###
### Additionally, we test delistings for future options and assert that our
### portfolio holdings reflect the orders the algorithm has submitted.
### </summary>
class FutureOptionShortPutOTMExpiryRegressionAlgorithm(QCAlgorithm):
    """Regression test: a short ES put that expires OTM should keep the
    premium, never be assigned, and leave the portfolio flat at the end."""

    def Initialize(self):
        """Set up the backtest window, the ES future, the 3000-strike put,
        and schedule the opening short sale for the next market open."""
        self.SetStartDate(2020, 1, 5)
        self.SetEndDate(2020, 6, 30)

        # Underlying: ES mini future expiring 2020-06-19 on CME.
        self.es19m20 = self.AddFutureContract(
            Symbol.CreateFuture(
                Futures.Indices.SP500EMini,
                Market.CME,
                datetime(2020, 6, 19)),
            Resolution.Minute).Symbol

        # Select a future option expiring OTM (the highest put strike at or
        # below 3000), and adds it to the algorithm.
        self.esOption = self.AddFutureOptionContract(
            list(
                sorted(
                    [x for x in self.OptionChainProvider.GetOptionContractList(self.es19m20, self.Time) if x.ID.StrikePrice <= 3000.0 and x.ID.OptionRight == OptionRight.Put],
                    key=lambda x: x.ID.StrikePrice,
                    reverse=True
                )
            )[0], Resolution.Minute).Symbol

        # Sanity-check that the chain actually yielded the 3000 put.
        self.expectedContract = Symbol.CreateOption(self.es19m20, Market.CME, OptionStyle.American, OptionRight.Put, 3000.0, datetime(2020, 6, 19))
        if self.esOption != self.expectedContract:
            raise AssertionError(f"Contract {self.expectedContract} was not found in the chain");

        self.Schedule.On(self.DateRules.Tomorrow, self.TimeRules.AfterMarketOpen(self.es19m20, 1), self.ScheduledMarketOrder)

    def ScheduledMarketOrder(self):
        # Open the position: sell one put contract short.
        self.MarketOrder(self.esOption, -1)

    def OnData(self, data: Slice):
        # Assert delistings, so that we can make sure that we receive the delisting warnings at
        # the expected time. These assertions detect bug #4872
        for delisting in data.Delistings.Values:
            if delisting.Type == DelistingType.Warning:
                if delisting.Time != datetime(2020, 6, 19):
                    raise AssertionError(f"Delisting warning issued at unexpected date: {delisting.Time}");
            if delisting.Type == DelistingType.Delisted:
                if delisting.Time != datetime(2020, 6, 20):
                    raise AssertionError(f"Delisting happened at unexpected date: {delisting.Time}");

    def OnOrderEvent(self, orderEvent: OrderEvent):
        """Validate that fills only ever occur on the expected contract."""
        if orderEvent.Status != OrderStatus.Filled:
            # There's lots of noise with OnOrderEvent, but we're only interested in fills.
            return

        if not self.Securities.ContainsKey(orderEvent.Symbol):
            raise AssertionError(f"Order event Symbol not found in Securities collection: {orderEvent.Symbol}")

        security = self.Securities[orderEvent.Symbol]
        if security.Symbol == self.es19m20:
            raise AssertionError(f"Expected no order events for underlying Symbol {security.Symbol}")

        if security.Symbol == self.expectedContract:
            self.AssertFutureOptionContractOrder(orderEvent, security)
        else:
            raise AssertionError(f"Received order event for unknown Symbol: {orderEvent.Symbol}")

        self.Log(f"{orderEvent}");

    def AssertFutureOptionContractOrder(self, orderEvent: OrderEvent, optionContract: Security):
        """Check holdings around each fill and forbid assignment.

        The sell fill opens the short (-1); the buy fill closes it to flat.
        """
        if orderEvent.Direction == OrderDirection.Sell and optionContract.Holdings.Quantity != -1:
            raise AssertionError(f"No holdings were created for option contract {optionContract.Symbol}")
        if orderEvent.Direction == OrderDirection.Buy and optionContract.Holdings.Quantity != 0:
            raise AssertionError("Expected no options holdings after closing position")
        if orderEvent.IsAssignment:
            raise AssertionError(f"Assignment was not expected for {orderEvent.Symbol}")

    def OnEndOfAlgorithm(self):
        # The OTM short put must have expired worthless: no residual holdings.
        if self.Portfolio.Invested:
            raise AssertionError(f"Expected no holdings at end of algorithm, but are invested in: {', '.join([str(i.ID) for i in self.Portfolio.Keys])}") | apache-2.0 |
Vimos/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
    """optparse coercion: keep the literal string "auto", else cast to float."""
    return "auto" if val == "auto" else float(val)
def type_auto_or_int(val):
    """optparse coercion: keep the literal string "auto", else cast to int."""
    return "auto" if val == "auto" else int(val)
def compute_time(t_start, delta):
    """Convert a datetime.timedelta *delta* into fractional seconds.

    *t_start* is unused; it is kept only for call-site compatibility.
    Note: like the original, days in *delta* are ignored.
    """
    microseconds_per_second = 1e6
    return delta.seconds + delta.microseconds / microseconds_per_second
def bench_scikit_transformer(X, transfomer):
    """Time fitting and transforming *X* with a fresh clone of *transfomer*.

    Returns (time_to_fit, time_to_transform) in seconds.
    """
    gc.collect()
    estimator = clone(transfomer)

    # Time fit().
    fit_start = datetime.now()
    estimator.fit(X)
    time_to_fit = compute_time(fit_start, datetime.now() - fit_start)

    # Time transform().
    transform_start = datetime.now()
    estimator.transform(X)
    time_to_transform = compute_time(transform_start,
                                     datetime.now() - transform_start)

    return time_to_fit, time_to_transform
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
                            random_state=None):
    """Build a random sparse matrix with Gaussian-valued entries at
    uniformly random positions.

    Returns a (dense ndarray, CSR matrix) pair holding the same data.
    """
    rng = np.random.RandomState(random_state)
    # Draw in the same order as before (values, rows, cols) so that a given
    # seed reproduces the identical matrix.
    values = rng.randn(n_nonzeros)
    rows = rng.randint(n_samples, size=n_nonzeros)
    cols = rng.randint(n_features, size=n_nonzeros)
    coo = sp.coo_matrix((values, (rows, cols)),
                        shape=(n_samples, n_features))
    return coo.toarray(), coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
    """Print one aligned results row: name, fit time, transform time."""
    name_col = clf_type.ljust(30)
    fit_col = ("%.4fs" % time_fit).center(12)
    transform_col = ("%.4fs" % time_transform).center(12)
    print("%s | %s | %s" % (name_col, fit_col, transform_col))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
manasapte/pants | src/python/pants/backend/jvm/tasks/properties.py | 16 | 3411 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import re
from collections import OrderedDict
import six
class Properties(object):
  """A Python reader for java.util.Properties formatted data.

  Based on:
  http://download.oracle.com/javase/6/docs/api/java/util/Properties.html#load(java.io.Reader)

  Originally copied from:
  https://github.com/twitter/commons/blob/master/src/python/twitter/common/config/properties.py

  :API: public
  """

  @staticmethod
  def load(data):
    """Loads properties from an open stream or the contents of a string.

    :API: public

    :param (string | open stream) data: An open stream or a string.
    :returns: A dict of parsed property data.
    :rtype: dict
    """
    # Accept anything readable (duck-typed via a callable .read) or a
    # plain string; reject everything else explicitly.
    if hasattr(data, 'read') and callable(data.read):
      contents = data.read()
    elif isinstance(data, six.string_types):
      contents = data
    else:
      raise TypeError('Can only process data from a string or a readable object, given: %s' % data)

    return Properties._parse(contents.splitlines())

  # An unescaped '=' or ':' forms an explicit separator
  _EXPLICIT_KV_SEP = re.compile(r'(?<!\\)[=:]')

  @staticmethod
  def _parse(lines):
    # Parses pre-split physical lines into an ordered key -> value mapping.

    def coalesce_lines():
      # Yields logical lines: a physical line ending with a backslash is
      # joined with the line(s) that follow it.
      line_iter = iter(lines)
      try:
        buffer = ''
        while True:
          line = next(line_iter)
          if line.strip().endswith('\\'):
            # Continuation.
            buffer += line.strip()[:-1]
          else:
            if buffer:
              # Continuation join, preserve left hand ws (could be a kv separator)
              buffer += line.rstrip()
            else:
              # Plain old line
              buffer = line.strip()

            # try/finally guarantees the buffer is reset even if the
            # consumer abandons the generator mid-yield.
            try:
              yield buffer
            finally:
              buffer = ''
      except StopIteration:
        pass

    def normalize(atom):
      # Strip surrounding whitespace and unescape \=, \: and \<whitespace>.
      return re.sub(r'\\([:=\s])', r'\1', atom.strip())

    def parse_line(line):
      # Returns a (key, value) pair, or None for blank and comment lines
      # (comments start with '#' or '!').
      if line and not (line.startswith('#') or line.startswith('!')):
        match = Properties._EXPLICIT_KV_SEP.search(line)
        if match:
          return normalize(line[:match.start()]), normalize(line[match.end():])
        else:
          # No explicit separator: the first space splits key from value.
          space_sep = line.find(' ')
          if space_sep == -1:
            return normalize(line), ''
          else:
            return normalize(line[:space_sep]), normalize(line[space_sep:])

    props = OrderedDict()
    for line in coalesce_lines():
      kv_pair = parse_line(line)
      if kv_pair:
        key, value = kv_pair
        props[key] = value
    return props

  @staticmethod
  def dump(props, output):
    """Dumps a dict of properties to the specified open stream or file path.

    :API: public
    """
    def escape(token):
      # Escape separators and whitespace so values round-trip through load().
      return re.sub(r'([=:\s])', r'\\\1', token)

    def write(out):
      for k, v in props.items():
        out.write('%s=%s\n' % (escape(str(k)), escape(str(v))))

    if hasattr(output, 'write') and callable(output.write):
      write(output)
    elif isinstance(output, six.string_types):
      # NOTE(review): 'w+a' is not a standard mode string (Python 3 rejects
      # it); presumably plain 'w' or 'a' was intended -- confirm before
      # changing, as Python 2 fopen() semantics for this mode vary.
      with open(output, 'w+a') as out:
        write(out)
    else:
      raise TypeError('Can only dump data to a path or a writable object, given: %s' % output)
| apache-2.0 |
Johnzero/erp | openerp/addons/clivia_analysis/report/analysis_report.py | 1 | 2399 | # -*- encoding: utf-8 -*-
import tools
from osv import fields, osv
class common_report(osv.osv):
    # Read-only reporting model backed by a raw SQL view: _auto=False means no
    # table is created; init() below (re)installs the view instead.
    _name = "clivia_analysis.production_report"
    _description = "报表视图"
    _auto = False
    _rec_name = 'date'
    _columns = {
        # All columns are readonly: rows come from the view, never from writes.
        'year': fields.char('年份', size=4, readonly=True),
        'month': fields.selection([('01', '一月'), ('02', '二月'), ('03', '三月'), ('04', '四月'),
            ('05', '五月'), ('06', '六月'), ('07', '七月'), ('08', '八月'), ('09', '九月'), ('10', '十月'),
            ('11', '十一月'), ('12', '十二月')], '月份', readonly=True),
        'date': fields.date('上报时间', required=True, readonly=True),
        'product_id': fields.many2one('clivia_analysis.stocked_product', '产品', readonly=True),
        'produced': fields.integer('生产', readonly=True),
        'sent': fields.float('发出', readonly=True),
        'sold': fields.integer('销售', readonly=True),
        'hefei_today_inventory':fields.integer('君子兰结存', readonly=True),
        # NOTE(review): the view below exposes `sanhe_real_inventory`, but this
        # column is declared as `sanhe_last_inventory` — the names do not match,
        # so this field presumably always reads as 0/empty. Confirm which name
        # the report views reference before renaming either side.
        'sanhe_last_inventory':fields.integer('三河实际库存', readonly=True),
    }
    _order = 'date desc'

    def init(self, cr):
        # Called on module install/upgrade: rebuild the SQL view feeding this
        # model. DISTINCT ON (product.id) combined with ORDER BY ... date DESC
        # keeps only the newest reviewed daily-report line per product.
        tools.drop_view_if_exists(cr, 'clivia_analysis_production_report')
        cr.execute("""
            CREATE OR REPLACE VIEW clivia_analysis_production_report AS
            SELECT DISTINCT ON (product.id) product.id, product.id AS product_id,
                mpl.production AS produced,
                mpl.hefei_warning_level,
                mpl.sanhe_warning_level,
                drl.hefei_today_inventory AS hefei_today_inventory,
                drl.sanhe_real_inventory AS sanhe_real_inventory,
                dr.date_created date,
                to_char(dr.date_created::timestamp with time zone, 'YYYY'::text) AS year,
                to_char(dr.date_created::timestamp with time zone, 'MM'::text) AS month,
                drl.sent,
                drl.sold
            FROM clivia_analysis_stocked_product product
            JOIN clivia_analysis_daily_report_line drl ON product.id = drl.product_id
            JOIN clivia_analysis_daily_report dr ON dr.id = drl.report_id
            JOIN clivia_analysis_monthly_plan_line mpl ON mpl.product_id = product.id
            WHERE dr.state::text = 'review'::text
            ORDER BY product.id, dr.date_created DESC;
        """)
| agpl-3.0 |
eahneahn/free | lib/python2.7/site-packages/selenium/webdriver/common/alert.py | 43 | 2320 | #Copyright 2007-2009 WebDriver committers
#Copyright 2007-2009 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""
The Alert implementation.
"""
from selenium.webdriver.remote.command import Command
class Alert(object):
    """Wraps a browser alert/confirm/prompt dialog.

    Provides accept/dismiss handling, text retrieval, and keyboard input
    for JavaScript dialogs raised by the page under test.

    Accepting or dismissing a dialog::

        Alert(driver).accept()
        Alert(driver).dismiss()

    Typing into a prompt::

        name_prompt = Alert(driver)
        name_prompt.send_keys("Willian Shakesphere")
        name_prompt.accept()

    Reading the text of a prompt for verification::

        alert_text = Alert(driver).text
        self.assertEqual("Do you wish to quit?", alert_text)
    """

    def __init__(self, driver):
        """Create a new Alert bound to *driver*.

        :Args:
         - driver: The WebDriver instance which performs user actions.
        """
        self.driver = driver

    @property
    def text(self):
        """The text currently displayed by the alert."""
        response = self.driver.execute(Command.GET_ALERT_TEXT)
        return response["value"]

    def dismiss(self):
        """Dismiss (cancel) the currently displayed alert."""
        self.driver.execute(Command.DISMISS_ALERT)

    def accept(self):
        """Accept (confirm) the currently displayed alert.

        Usage::

            Alert(driver).accept()  # Confirm a alert dialog.
        """
        self.driver.execute(Command.ACCEPT_ALERT)

    def send_keys(self, keysToSend):
        """Type *keysToSend* into the alert's input field.

        :Args:
         - keysToSend: The text to be sent to Alert.
        """
        self.driver.execute(Command.SET_ALERT_VALUE, {'text': keysToSend})
| agpl-3.0 |
saun4app/python_lib_sphinx | tests/test_utils.py | 1 | 1148 | """
python_lib_sphinx tests.
"""
import unittest
from python_lib_sphinx.utils import Constants, orbital_speed, circumference, orbital_period
class TestUtils(unittest.TestCase):
    """Test python_lib_sphinx's utils."""

    def setUp(self):
        pass

    def _check_rounded(self, actual, expected):
        # Shared assertion: values are compared after rounding to 3 decimals,
        # exactly as each test originally did inline.
        self.assertEqual(round(actual, 3), expected)

    def test_orbital_speed(self):
        """
        Calculate the orbital speed of an object.
        """
        self._check_rounded(
            orbital_speed(Constants.Earth, 600000, 70),
            2425.552)

    def test_circumference(self):
        """
        2*pi*r
        """
        self._check_rounded(circumference(600000), 3769911.184)

    def test_orbital_period(self):
        """
        Calculate the orbital period of an object.
        """
        self._check_rounded(
            orbital_period(Constants.Earth, 600000, 70),
            1554.43)
| mit |
neilhan/tensorflow | tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py | 21 | 6743 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def _SparseToDense(sparse_indices, output_size, sparse_values,
                   default_value, validate_indices=True):
  # Thin test helper: forwards to tf.sparse_to_dense so every test below can
  # pass default_value/validate_indices through a single positional signature.
  return tf.sparse_to_dense(sparse_indices, output_size,
                            sparse_values,
                            default_value=default_value,
                            validate_indices=validate_indices)
class SparseToDenseTest(tf.test.TestCase):
  """Tests for tf.sparse_to_dense: dtypes, ranks, defaults, index validation,
  and shape inference."""

  def testInt(self):
    with self.test_session(use_gpu=False):
      tf_ans = _SparseToDense([1, 3], [5], 1, 0).eval()
      np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testFloat(self):
    with self.test_session(use_gpu=False):
      tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0).eval()
      np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
      self.assertAllClose(np_ans, tf_ans)

  def testString(self):
    with self.test_session(use_gpu=False):
      tf_ans = _SparseToDense([1, 3], [5], "a", "b").eval()
      np_ans = np.array(["b", "a", "b", "a", "b"]).astype(np.string_)
      self.assertAllEqual(np_ans, tf_ans)

  def testSetValue(self):
    # Per-index values (a vector) instead of a single scalar value.
    with self.test_session(use_gpu=False):
      tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1).eval()
      np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testSetSingleValue(self):
    with self.test_session(use_gpu=False):
      tf_ans = _SparseToDense([1, 3], [5], 1, -1).eval()
      np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def test2d(self):
    # pylint: disable=bad-whitespace
    with self.test_session(use_gpu=False):
      tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1).eval()
      np_ans = np.array([[-1, -1, -1, -1],
                         [-1, -1, -1,  1],
                         [ 1, -1, -1, -1]]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testZeroDefault(self):
    with self.test_session():
      x = tf.sparse_to_dense(2, [4], 7).eval()
      self.assertAllEqual(x, [0, 0, 7, 0])

  def test3d(self):
    with self.test_session(use_gpu=False):
      tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1).eval()
      np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
      np_ans[1, 3, 0] = 1
      np_ans[2, 0, 1] = 1
      self.assertAllClose(np_ans, tf_ans)

  def testBadShape(self):
    # output_size must be a rank-1 shape vector; rank 2 is rejected at graph
    # construction time.
    with self.test_session():
      with self.assertRaisesWithPredicateMatch(ValueError, "must be rank 1"):
        _SparseToDense([1, 3], [[5], [3]], 1, -1)

  def testBadValue(self):
    with self.test_session():
      dense = _SparseToDense([1, 3], [5], [[5], [3]], -1)
      with self.assertRaisesOpError(
          r"sparse_values has incorrect shape \[2,1\], "
          r"should be \[\] or \[2\]"):
        dense.eval()

  def testBadNumValues(self):
    with self.test_session():
      dense = _SparseToDense([1, 3], [5], [1, 2, 3], -1)
      with self.assertRaisesOpError(
          r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
        dense.eval()

  def testBadDefault(self):
    with self.test_session():
      dense = _SparseToDense([1, 3], [5], [1, 2], [0])
      with self.assertRaisesOpError("default_value should be a scalar"):
        dense.eval()

  def testOutOfBoundsIndicesWithWithoutValidation(self):
    # Out-of-range indices raise with validation on; with validation off the
    # op still fails, but at allocation time.
    with self.test_session():
      dense = _SparseToDense(
          sparse_indices=[[1], [10]], output_size=[5],
          sparse_values=[-1.0, 1.0], default_value=0.0)
      with self.assertRaisesOpError(
          r"indices\[1\] = \[10\] is out of bounds: need 0 <= index < \[5\]"):
        dense.eval()
      # Disable checks, the allocation should still fail.
      with self.assertRaisesOpError("out of bounds"):
        dense_without_validation = _SparseToDense(
            sparse_indices=[[1], [10]], output_size=[5],
            sparse_values=[-1.0, 1.0], default_value=0.0,
            validate_indices=False)
        dense_without_validation.eval()

  def testRepeatingIndicesWithWithoutValidation(self):
    with self.test_session():
      dense = _SparseToDense(
          sparse_indices=[[1], [1]], output_size=[5],
          sparse_values=[-1.0, 1.0], default_value=0.0)
      with self.assertRaisesOpError(r"indices\[1\] = \[1\] is repeated"):
        dense.eval()
      # Disable checks
      dense_without_validation = _SparseToDense(
          sparse_indices=[[1], [1]], output_size=[5],
          sparse_values=[-1.0, 1.0], default_value=0.0, validate_indices=False)
      dense_without_validation.eval()

  def testUnsortedIndicesWithWithoutValidation(self):
    with self.test_session():
      dense = _SparseToDense(
          sparse_indices=[[2], [1]], output_size=[5],
          sparse_values=[-1.0, 1.0], default_value=0.0)
      with self.assertRaisesOpError(r"indices\[1\] = \[1\] is out of order"):
        dense.eval()
      # Disable checks
      dense_without_validation = _SparseToDense(
          sparse_indices=[[2], [1]], output_size=[5],
          sparse_values=[-1.0, 1.0], default_value=0.0, validate_indices=False)
      dense_without_validation.eval()

  def testShapeInferenceKnownShape(self):
    with self.test_session(use_gpu=False):
      indices = tf.placeholder(tf.int64)
      shape = [4, 5, 6]
      output = tf.sparse_to_dense(indices, shape, 1, 0)
      self.assertEqual(output.get_shape(), [4, 5, 6])
      # A placeholder shape of known rank yields unknown dims.
      shape = tf.placeholder(tf.int64, shape=(3,))
      output = tf.sparse_to_dense(indices, shape, 1, 0)
      self.assertEqual(output.get_shape().as_list(), [None, None, None])

  def testShapeInferenceUnknownShape(self):
    with self.test_session(use_gpu=False):
      indices = tf.placeholder(tf.int64)
      shape = tf.placeholder(tf.int64)
      output = tf.sparse_to_dense(indices, shape, 1, 0)
      self.assertEqual(output.get_shape().ndims, None)
if __name__ == "__main__":
  tf.test.main()  # Run all test cases defined above.
| apache-2.0 |
ApplauseAQI/androguard | elsim/elsim/similarity/similarity.py | 35 | 14893 | # This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import zlib, bz2
import math, json, re
def simhash(x):
    """Return the simhash fingerprint of x (lazy third-party import)."""
    # Imported on first use so the optional `simhash` dependency is only
    # required when this function is actually called.
    from simhash import simhash as _simhash
    return _simhash(x)
def entropy(data):
    """Return the Shannon entropy (bits per symbol) of `data`.

    :param data: a (byte) string; may be empty.
    :returns: entropy in bits, 0.0 for an empty input.
    """
    from collections import Counter  # local import, matches simhash() style

    total = len(data)
    if total == 0:
        return 0.0
    result = 0.0
    # One O(n) counting pass instead of the original's 256 separate
    # data.count(chr(x)) scans (256 * O(n)). Results are identical for
    # byte strings; this also generalizes to symbols outside 0..255,
    # which the original silently ignored.
    for count in Counter(data).values():
        p_x = float(count) / total
        result -= p_x * math.log(p_x, 2)
    return result
# Try to set up the ctypes plumbing for the native libsimilarity backend;
# any failure leaves NATIVE_LIB False and the pure-Python backend is used.
try:
    from ctypes import cdll, c_float, c_double, c_int, c_uint, c_void_p, Structure, addressof, cast, c_size_t

    #struct libsimilarity {
    #   void *orig;
    #   unsigned int size_orig;
    #   void *cmp;
    #   unsigned size_cmp;
    #   unsigned int *corig;
    #   unsigned int *ccmp;
    #
    #   float res;
    #};
    class LIBSIMILARITY_T(Structure):
        # ctypes mirror of the C `struct libsimilarity` shown above.
        # NOTE(review): corig/ccmp are declared c_size_t here although the C
        # struct says `unsigned int *` — they hold addressof() values, which
        # presumably relies on pointer/size_t widths matching; confirm on the
        # target platform.
        _fields_ = [("orig", c_void_p),
                    ("size_orig", c_size_t),
                    ("cmp", c_void_p),
                    ("size_cmp", c_size_t),
                    ("corig", c_size_t),
                    ("ccmp", c_size_t),
                    ("res", c_float),
        ]

    def new_zero_native():
        # Zero factory for the native backend: a mutable c_size_t whose
        # address can be handed to the C library.
        return c_size_t( 0 )

    NATIVE_LIB = True
except:
    # Deliberately broad: any import/definition failure disables the native path.
    NATIVE_LIB = False
def new_zero_python():
    """Zero factory for the pure-Python backend: a plain int, no ctypes."""
    zero = 0
    return zero
# Compressor identifiers. NOTE(review): these presumably must match the native
# library's enum values — confirm against libsimilarity before renumbering.
ZLIB_COMPRESS = 0
BZ2_COMPRESS = 1
SMAZ_COMPRESS = 2
LZMA_COMPRESS = 3
XZ_COMPRESS = 4
SNAPPY_COMPRESS = 5
VCBLOCKSORT_COMPRESS = 6

# Name -> id map used when selecting a compressor by name.
H_COMPRESSOR = { "BZ2" : BZ2_COMPRESS,
                 "ZLIB" : ZLIB_COMPRESS,
                 "LZMA" : LZMA_COMPRESS,
                 "XZ" : XZ_COMPRESS,
                 "SNAPPY" : SNAPPY_COMPRESS,
               }

# id -> human-readable name. NOTE(review): SMAZ and VCBLOCKSORT have ids above
# but no entry in either map — presumably intentionally not user-selectable;
# HR_COMPRESSOR[t] will raise KeyError for them (see SIMILARITYPython warning).
HR_COMPRESSOR = {
    BZ2_COMPRESS : "BZ2",
    ZLIB_COMPRESS : "ZLIB",
    LZMA_COMPRESS : "LZMA",
    XZ_COMPRESS : "XZ",
    SNAPPY_COMPRESS : "SNAPPY",
}
class SIMILARITYBase(object):
    """Shared caching layer for the native and pure-Python backends.

    Maintains, per compressor:
      - a cache of compressed sizes keyed by adler32 of the input,
      - a cache of pairwise similarity results keyed by adler32 of the
        concatenated pair,
    plus a single entropy cache.
    """

    def __init__(self, native_lib=False):
        # Active compressor id (one of the *_COMPRESS constants).
        self.ctype = ZLIB_COMPRESS
        # Per-compressor compressed-size cache: {adler32(s): size}.
        self.__caches = {
            ZLIB_COMPRESS : {},
            BZ2_COMPRESS : {},
            SMAZ_COMPRESS : {},
            LZMA_COMPRESS : {},
            XZ_COMPRESS : {},
            SNAPPY_COMPRESS : {},
            VCBLOCKSORT_COMPRESS : {},
        }
        # Per-compressor pairwise result cache: {adler32(s1+s2): (res, ret)}.
        self.__rcaches = {
            ZLIB_COMPRESS : {},
            BZ2_COMPRESS : {},
            SMAZ_COMPRESS : {},
            LZMA_COMPRESS : {},
            XZ_COMPRESS : {},
            SNAPPY_COMPRESS : {},
            VCBLOCKSORT_COMPRESS : {},
        }
        # Entropy cache: {adler32(s): (entropy, 0)}.
        self.__ecaches = {}
        self.level = 9  # compression level passed to the backends
        # Native backend needs ctypes c_size_t zeros; Python backend plain ints.
        if native_lib == True:
            self.new_zero = new_zero_native
        else:
            self.new_zero = new_zero_python

    def set_level(self, level):
        # Compression level for subsequent compress/similarity calls.
        self.level = level

    def get_in_caches(self, s):
        # Cached compressed size of s, or a fresh zero if unknown.
        try:
            return self.__caches[ self.ctype ][ zlib.adler32( s ) ]
        except KeyError:
            return self.new_zero()

    def get_in_rcaches(self, s1, s2):
        # Cached pairwise result; tries both concatenation orders.
        # Returns (-1, -1) on a miss.
        try:
            return self.__rcaches[ self.ctype ][ zlib.adler32( s1 + s2 ) ]
        except KeyError:
            try:
                return self.__rcaches[ self.ctype ][ zlib.adler32( s2 + s1 ) ]
            except KeyError:
                return -1, -1

    def add_in_caches(self, s, v):
        # Record the compressed size of s (first writer wins).
        h = zlib.adler32( s )
        if h not in self.__caches[ self.ctype ]:
            self.__caches[ self.ctype ][ h ] = v

    def add_in_rcaches(self, s, v, r):
        # Record a pairwise result; `s` is the already-concatenated pair.
        h = zlib.adler32( s )
        if h not in self.__rcaches[ self.ctype ]:
            self.__rcaches[ self.ctype ][ h ] = (v, r)

    def clear_caches(self):
        # Drops compressed-size caches only (rcaches/ecaches are kept).
        for i in self.__caches:
            self.__caches[i] = {}

    def add_in_ecaches(self, s, v, r):
        h = zlib.adler32( s )
        if h not in self.__ecaches:
            self.__ecaches[ h ] = (v, r)

    def get_in_ecaches(self, s1):
        # Cached entropy, or (-1, -1) on a miss.
        try:
            return self.__ecaches[ zlib.adler32( s1 ) ]
        except KeyError:
            return -1, -1

    def __nb_caches(self, caches):
        # Total number of entries across all per-compressor caches.
        nb = 0
        for i in caches:
            nb += len(caches[i])
        return nb

    def set_compress_type(self, t):
        self.ctype = t

    def show(self):
        # Debug dump of cache sizes. NOTE: Python 2 print statements.
        print "ECACHES", len(self.__ecaches)
        print "RCACHES", self.__nb_caches( self.__rcaches )
        print "CACHES", self.__nb_caches( self.__caches )
class SIMILARITYNative(SIMILARITYBase):
    """Backend that delegates to the C libsimilarity library via ctypes."""

    def __init__(self, path="./libsimilarity/libsimilarity.so"):
        # True -> base class hands out ctypes c_size_t zeros for the caches.
        super(SIMILARITYNative, self).__init__(True)
        self._u = cdll.LoadLibrary( path )
        # Declare C return types so ctypes converts results correctly.
        self._u.compress.restype = c_uint
        self._u.ncd.restype = c_int
        self._u.ncs.restype = c_int
        self._u.cmid.restype = c_int
        self._u.entropy.restype = c_double
        self._u.levenshtein.restype = c_uint
        self._u.kolmogorov.restype = c_uint
        self._u.bennett.restype = c_double
        self._u.RDTSC.restype = c_double
        # Single reusable argument struct passed by address to the C calls.
        self.__libsim_t = LIBSIMILARITY_T()
        self.set_compress_type( ZLIB_COMPRESS )

    def raz(self):
        # Release the loaded library and the argument struct.
        del self._u
        del self.__libsim_t

    def compress(self, s1):
        # Compressed size of s1 under the current compressor/level.
        res = self._u.compress( self.level, cast( s1, c_void_p ), len( s1 ) )
        return res

    def _sim(self, s1, s2, func):
        # Shared driver for ncd/ncs/cmid: consult the pair cache, otherwise
        # fill the C struct (including cached compressed sizes) and call `func`.
        end, ret = self.get_in_rcaches( s1, s2 )
        if end != -1:
            return end, ret
        self.__libsim_t.orig = cast( s1, c_void_p )
        self.__libsim_t.size_orig = len(s1)
        self.__libsim_t.cmp = cast( s2, c_void_p )
        self.__libsim_t.size_cmp = len(s2)
        corig = self.get_in_caches(s1)
        ccmp = self.get_in_caches(s2)
        # The C side reads/writes the compressed sizes through these addresses.
        self.__libsim_t.corig = addressof( corig )
        self.__libsim_t.ccmp = addressof( ccmp )
        ret = func( self.level, addressof( self.__libsim_t ) )
        self.add_in_caches(s1, corig)
        self.add_in_caches(s2, ccmp)
        self.add_in_rcaches(s1+s2, self.__libsim_t.res, ret)
        return self.__libsim_t.res, ret

    def ncd(self, s1, s2):
        # Normalized Compression Distance.
        return self._sim( s1, s2, self._u.ncd )

    def ncs(self, s1, s2):
        # Normalized Compression Similarity.
        return self._sim( s1, s2, self._u.ncs )

    def cmid(self, s1, s2):
        return self._sim( s1, s2, self._u.cmid )

    def kolmogorov(self, s1):
        # Kolmogorov-complexity estimate (compressed size).
        ret = self._u.kolmogorov( self.level, cast( s1, c_void_p ), len( s1 ) )
        return ret, 0

    def bennett(self, s1):
        ret = self._u.bennett( self.level, cast( s1, c_void_p ), len( s1 ) )
        return ret, 0

    def entropy(self, s1):
        # Cached entropy computed by the C library.
        end, ret = self.get_in_ecaches( s1 )
        if end != -1:
            return end, ret
        res = self._u.entropy( cast( s1, c_void_p ), len( s1 ) )
        self.add_in_ecaches( s1, res, 0 )
        return res, 0

    def RDTSC(self):
        # Raw CPU timestamp counter (used for benchmarking).
        return self._u.RDTSC()

    def levenshtein(self, s1, s2):
        res = self._u.levenshtein( cast( s1, c_void_p ), len( s1 ), cast( s2, c_void_p ), len( s2 ) )
        return res, 0

    def set_compress_type(self, t):
        # Keep the Python-side state and the C library's compressor in sync.
        self.ctype = t
        self._u.set_compress_type(t)
class SIMILARITYPython(SIMILARITYBase):
    """Pure-Python fallback backend (zlib/bz2 only; no native library needed)."""

    def __init__(self):
        super(SIMILARITYPython, self).__init__()

    def set_compress_type(self, t):
        """Select the compressor; unsupported choices fall back to zlib."""
        self.ctype = t
        if self.ctype != ZLIB_COMPRESS and self.ctype != BZ2_COMPRESS:
            # Single-argument print with parens works identically on
            # Python 2 and 3.
            print("warning: compressor %s is not supported (use zlib default compressor)" % HR_COMPRESSOR[ t ])
            self.ctype = ZLIB_COMPRESS

    def compress(self, s1):
        """Return the compressed size of s1 under the current compressor."""
        return len(self._compress(s1))

    def _compress(self, s1):
        # Only zlib/bz2 are reachable: set_compress_type() normalizes
        # everything else to zlib.
        if self.ctype == ZLIB_COMPRESS:
            return zlib.compress( s1, self.level )
        elif self.ctype == BZ2_COMPRESS:
            return bz2.compress( s1, self.level )

    def _sim(self, s1, s2, func):
        # Cached similarity driver: reuse per-string compressed sizes and
        # per-pair results (caches live in SIMILARITYBase).
        end, ret = self.get_in_rcaches( s1, s2 )
        if end != -1:
            return end, ret
        corig = self.get_in_caches(s1)
        ccmp = self.get_in_caches(s2)
        res, corig, ccmp, ret = func( s1, s2, corig, ccmp )
        self.add_in_caches(s1, corig)
        self.add_in_caches(s2, ccmp)
        self.add_in_rcaches(s1+s2, res, ret)
        return res, ret

    def _ncd(self, s1, s2, s1size=0, s2size=0):
        # Normalized Compression Distance: (C(s1+s2) - min) / max, clamped
        # to 1.0. A cached size of 0 means "not computed yet".
        if s1size == 0:
            s1size = self.compress(s1)
        if s2size == 0:
            s2size = self.compress(s2)
        s3size = self.compress(s1+s2)
        smax = max(s1size, s2size)
        smin = min(s1size, s2size)
        res = (abs(s3size - smin)) / float(smax)
        if res > 1.0:
            res = 1.0
        return res, s1size, s2size, 0

    def ncd(self, s1, s2):
        """Return (ncd, 0) for the pair, mirroring the native backend."""
        return self._sim( s1, s2, self._ncd )

    def ncs(self, s1, s2):
        # Fixed: the original delegated to self._u.ncs, but `_u` only exists
        # on SIMILARITYNative, so every call crashed with AttributeError.
        # Fail explicitly instead of crashing on a missing attribute.
        raise NotImplementedError('ncs is not supported by the pure-Python backend')

    def entropy(self, s1):
        """Cached wrapper around the module-level entropy() function."""
        end, ret = self.get_in_ecaches( s1 )
        if end != -1:
            return end, ret
        res = entropy( s1 )
        self.add_in_ecaches( s1, res, 0 )
        return res, 0

    def levenshtein(self, a, b):
        """Return (levenshtein_distance, 0) between a and b.

        Now returns a tuple for consistency with SIMILARITYNative.levenshtein
        (the original returned a bare int, so the SIMILARITY facade's return
        type depended on which backend was active).
        """
        n, m = len(a), len(b)
        if n > m:
            # Make sure n <= m, to use O(min(n,m)) space
            a, b = b, a
            n, m = m, n
        current = range(n+1)
        for i in range(1, m+1):
            previous, current = current, [i]+[0]*n
            for j in range(1, n+1):
                add, delete = previous[j]+1, current[j-1]+1
                change = previous[j-1]
                if a[j-1] != b[i-1]:
                    change = change + 1
                current[j] = min(add, delete, change)
        return current[n], 0
class SIMILARITY(object):
    """Facade that picks the native C backend when available and loadable,
    falling back to the pure-Python backend otherwise."""

    def __init__(self, path="./libsimilarity/libsimilarity.so", native_lib=True):
        """
        :param path: path of the native shared library.
        :param native_lib: set False to force the pure-Python backend.
        """
        # Truthiness instead of the original `== True` comparisons.
        if native_lib and NATIVE_LIB:
            try:
                self.s = SIMILARITYNative( path )
            except Exception:
                # Loading the shared library failed; fall back to pure Python.
                # (Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt.)
                self.s = SIMILARITYPython()
        else:
            self.s = SIMILARITYPython()

    # All methods below simply delegate to the selected backend.

    def raz(self):
        return self.s.raz()

    def set_level(self, level):
        return self.s.set_level(level)

    def compress(self, s1):
        return self.s.compress(s1)

    def ncd(self, s1, s2):
        return self.s.ncd(s1, s2)

    def ncs(self, s1, s2):
        return self.s.ncs(s1, s2)

    def cmid(self, s1, s2):
        return self.s.cmid(s1, s2)

    def kolmogorov(self, s1):
        return self.s.kolmogorov(s1)

    def bennett(self, s1):
        return self.s.bennett(s1)

    def entropy(self, s1):
        return self.s.entropy(s1)

    def RDTSC(self):
        return self.s.RDTSC()

    def levenshtein(self, s1, s2):
        return self.s.levenshtein(s1, s2)

    def set_compress_type(self, t):
        return self.s.set_compress_type(t)

    def show(self):
        self.s.show()
class DBFormat(object):
    """JSON-backed store of element signatures, loaded from / saved to a file.

    NOTE(review): the nested schema appears to be
    D[name][subname][subclass][element] = size, with a special "NAME" regex
    entry per name and a "SIZE" accumulator per subname — inferred from the
    accessors below; confirm against the writers of this format.
    Python 2 only (`print` statements, `long`).
    """

    def __init__(self, filename):
        self.filename = filename
        self.D = {}
        fd = None
        try:
            with open(self.filename, "r+") as fd:
                self.D = json.load( fd )
        except IOError:
            # Missing/unreadable file: start with an empty database.
            print "Impossible to open filename: " + filename
            self.D = {}
        # H: mirror of D with element hashes as sets of longs (fast lookup).
        # N: compiled "NAME" regexes per top-level name.
        self.H = {}
        self.N = {}
        for i in self.D:
            self.H[i] = {}
            for j in self.D[i]:
                if j == "NAME":
                    self.N[ i ] = re.compile( self.D[i][j] )
                    continue
                self.H[i][j] = {}
                for k in self.D[i][j]:
                    if isinstance(self.D[i][j][k], dict):
                        self.H[i][j][k] = set()
                        for e in self.D[i][j][k].keys():
                            # JSON keys are strings; stored values are hashes.
                            self.H[i][j][k].add( long(e) )

    def add_name(self, name, value):
        # Register (or overwrite) the NAME regex pattern for `name`.
        if name not in self.D:
            self.D[ name ] = {}
        self.D[ name ]["NAME"] = value

    def add_element(self, name, sname, sclass, size, elem):
        # Insert elem with its size, creating intermediate dicts as needed
        # and keeping the per-sname SIZE accumulator up to date.
        try:
            if elem not in self.D[ name ][ sname ][ sclass ]:
                self.D[ name ][ sname ][ sclass ][ elem ] = size
                self.D[ name ][ sname ][ "SIZE" ] += size
        except KeyError:
            if name not in self.D:
                self.D[ name ] = {}
                self.D[ name ][ sname ] = {}
                self.D[ name ][ sname ][ "SIZE" ] = 0
                self.D[ name ][ sname ][ sclass ] = {}
            elif sname not in self.D[ name ]:
                self.D[ name ][ sname ] = {}
                self.D[ name ][ sname ][ "SIZE" ] = 0
                self.D[ name ][ sname ][ sclass ] = {}
            elif sclass not in self.D[ name ][ sname ]:
                self.D[ name ][ sname ][ sclass ] = {}
            self.D[ name ][ sname ][ "SIZE" ] += size
            self.D[ name ][ sname ][ sclass ][ elem ] = size

    def is_present(self, elem):
        # Linear scan: returns (True, name) for the first name containing elem.
        for i in self.D:
            if elem in self.D[i]:
                return True, i
        return False, None

    def elems_are_presents(self, elems):
        # For every name/subname/subclass, intersect the stored hash set with
        # `elems` and report [matched_set, total, percent_matched, bytes].
        ret = {}
        info = {}
        for i in self.H:
            ret[i] = {}
            info[i] = {}
            for j in self.H[i]:
                ret[i][j] = {}
                info[i][j] = {}
                for k in self.H[i][j]:
                    val = [self.H[i][j][k].intersection(elems), len(self.H[i][j][k]), 0, 0]
                    size = 0
                    for z in val[0]:
                        size += self.D[i][j][k][str(z)]
                    val[2] = (float(len(val[0]))/(val[1])) * 100
                    val[3] = size
                    # Keep only non-empty matches.
                    if val[3] != 0:
                        ret[i][j][k] = val
                info[i][j][ "SIZE" ] = self.D[i][j]["SIZE"]
        return ret, info

    def classes_are_presents(self, classes):
        # Names whose NAME regex matches at least one of the given classes.
        m = set()
        for j in classes:
            for i in self.N:
                if self.N[i].search(j) != None:
                    m.add( i )
        return m

    def show(self):
        # Debug dump of the database structure (Python 2 print statements).
        for i in self.D:
            print i, ":"
            for j in self.D[i]:
                print "\t", j, len(self.D[i][j])
                for k in self.D[i][j]:
                    print "\t\t", k, len(self.D[i][j][k])

    def save(self):
        # Persist the raw dict (H/N are derived and rebuilt on load).
        with open(self.filename, "w") as fd:
            json.dump(self.D, fd)
| apache-2.0 |
xleng/YCM_WIN_X86 | third_party/ycmd/third_party/jedi/test/static_analysis/attribute_error.py | 11 | 2087 | class Cls():
class_attr = ''
def __init__(self, input):
self.instance_attr = 3
self.input = input
def f(self):
#! 12 attribute-error
return self.not_existing
def undefined_object(self, obj):
"""
Uses an arbitrary object and performs an operation on it, shouldn't
be a problem.
"""
obj.arbitrary_lookup
def defined_lookup(self, obj):
"""
`obj` is defined by a call into this function.
"""
obj.upper
#! 4 attribute-error
obj.arbitrary_lookup
#! 13 name-error
class_attr = a
Cls(1).defined_lookup('')
c = Cls(1)
c.class_attr
Cls.class_attr
#! 4 attribute-error
Cls.class_attr_error
c.instance_attr
#! 2 attribute-error
c.instance_attr_error
c.something = None
#! 12 name-error
something = a
something
# -----------------
# Unused array variables should still raise attribute errors.
# -----------------
# should not raise anything.
for loop_variable in [1, 2]:
#! 4 name-error
x = undefined
loop_variable
#! 28 name-error
for loop_variable in [1, 2, undefined]:
pass
#! 7 attribute-error
[1, ''.undefined_attr]
def return_one(something):
return 1
#! 14 attribute-error
return_one(''.undefined_attribute)
#! 12 name-error
[r for r in undefined]
#! 1 name-error
[undefined for r in [1, 2]]
[r for r in [1, 2]]
# some random error that showed up
class NotCalled():
def match_something(self, param):
seems_to_need_an_assignment = param
return [value.match_something() for value in []]
# -----------------
# decorators
# -----------------
#! 1 name-error
@undefined_decorator
def func():
return 1
# -----------------
# operators
# -----------------
string = '%s %s' % (1, 2)
# Shouldn't raise an error, because `string` is really just a string, not an
# array or something.
string.upper
# -----------------
# imports
# -----------------
# Star imports and the like in modules should not cause attribute errors in
# this module.
import import_tree
import_tree.a
import_tree.b
| gpl-3.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/docutils/parsers/rst/directives/misc.py | 106 | 22888 | # $Id: misc.py 7487 2012-07-22 21:20:28Z milde $
# Authors: David Goodger <goodger@python.org>; Dethe Elza
# Copyright: This module has been placed in the public domain.
"""Miscellaneous directives."""
__docformat__ = 'reStructuredText'
import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils.utils.error_reporting import locale_encoding
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
class Include(Directive):

    """
    Include content read from a separate source file.

    Content may be parsed by the parser, or included as a literal
    block. The encoding of the included file can be specified. Only
    a part of the given file argument may be included by specifying
    start and end line or text to match before and/or after the text
    to be used.
    """

    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'literal': directives.flag,
                   'code': directives.unchanged,
                   'encoding': directives.encoding,
                   'tab-width': int,
                   'start-line': int,
                   'end-line': int,
                   'start-after': directives.unchanged_required,
                   'end-before': directives.unchanged_required,
                   # ignored except for 'literal' or 'code':
                   'number-lines': directives.unchanged, # integer or None
                   'class': directives.class_option,
                   'name': directives.unchanged}

    # Directory holding the standard "<name>" include files shipped with
    # docutils (resolved relative to the parser's states module).
    standard_include_path = os.path.join(os.path.dirname(states.__file__),
                                         'include')

    def run(self):
        """Include a file as part of the content of this reST file."""
        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        # Resolve the path relative to the including document's directory;
        # "<name>" selects a standard include file.
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        source_dir = os.path.dirname(os.path.abspath(source))
        path = directives.path(self.arguments[0])
        if path.startswith('<') and path.endswith('>'):
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))
        path = utils.relative_path(None, path)
        path = nodes.reprunicode(path)
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler=self.state.document.settings.input_encoding_error_handler
        tab_width = self.options.get(
            'tab-width', self.state.document.settings.tab_width)
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
        except UnicodeEncodeError, error:
            raise self.severe(u'Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError, error:
            raise self.severe(u'Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))
        # Optional line-based slicing of the included text.
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError, error:
            raise self.severe(u'Problem with "%s" directive:\n%s' %
                              (self.name, ErrorString(error)))
        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]
        include_lines = statemachine.string2lines(rawtext, tab_width,
                                                  convert_whitespace=True)
        if 'literal' in self.options:
            # Emit the text verbatim as a literal block (optionally numbered).
            # Convert tabs to spaces, if `tab_width` is positive.
            if tab_width >= 0:
                text = rawtext.expandtabs(tab_width)
            else:
                text = rawtext
            literal_block = nodes.literal_block(rawtext, source=path,
                                    classes=self.options.get('class', []))
            literal_block.line = 1
            self.add_name(literal_block)
            if 'number-lines' in self.options:
                try:
                    startline = int(self.options['number-lines'] or 1)
                except ValueError:
                    raise self.error(':number-lines: with non-integer '
                                     'start value')
                endline = startline + len(include_lines)
                if text.endswith('\n'):
                    text = text[:-1]
                tokens = NumberLines([([], text)], startline, endline)
                for classes, value in tokens:
                    if classes:
                        literal_block += nodes.inline(value, value,
                                                      classes=classes)
                    else:
                        literal_block += nodes.Text(value, value)
            else:
                literal_block += nodes.Text(text, text)
            return [literal_block]
        if 'code' in self.options:
            # Delegate syntax-highlighted inclusion to the code directive.
            self.options['source'] = path
            codeblock = CodeBlock(self.name,
                                  [self.options.pop('code')], # arguments
                                  self.options,
                                  include_lines, # content
                                  self.lineno,
                                  self.content_offset,
                                  self.block_text,
                                  self.state,
                                  self.state_machine)
            return codeblock.run()
        # Default: feed the included lines to the reST parser in place.
        self.state_machine.insert_input(include_lines, path)
        return []
class Raw(Directive):
    """
    Pass through content unchanged
    Content is included in output based on type argument
    Content may be included inline (content section of directive) or
    imported from a file or url.
    """
    required_arguments = 1          # output format name(s), e.g. "html latex"
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'file': directives.path,
                   'url': directives.uri,
                   'encoding': directives.encoding}
    has_content = True
    def run(self):
        # Honour runtime security settings: raw output and external
        # file/URL insertion can both be disabled globally.
        if (not self.state.document.settings.raw_enabled
            or (not self.state.document.settings.file_insertion_enabled
                and ('file' in self.options
                     or 'url' in self.options))):
            raise self.warning('"%s" directive disabled.' % self.name)
        # Normalize the format argument: lowercase, single-space separated.
        attributes = {'format': ' '.join(self.arguments[0].lower().split())}
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler=self.state.document.settings.input_encoding_error_handler
        if self.content:
            # Inline content is mutually exclusive with 'file'/'url'.
            if 'file' in self.options or 'url' in self.options:
                raise self.error(
                    '"%s" directive may not both specify an external file '
                    'and have content.' % self.name)
            text = '\n'.join(self.content)
        elif 'file' in self.options:
            if 'url' in self.options:
                raise self.error(
                    'The "file" and "url" options may not be simultaneously '
                    'specified for the "%s" directive.' % self.name)
            # Resolve the path relative to the current source document.
            source_dir = os.path.dirname(
                os.path.abspath(self.state.document.current_source))
            path = os.path.normpath(os.path.join(source_dir,
                                                 self.options['file']))
            path = utils.relative_path(None, path)
            try:
                raw_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
                # TODO: currently, raw input files are recorded as
                # dependencies even if not used for the chosen output format.
                self.state.document.settings.record_dependencies.add(path)
            except IOError, error:
                raise self.severe(u'Problems with "%s" directive path:\n%s.'
                                  % (self.name, ErrorString(error)))
            try:
                text = raw_file.read()
            except UnicodeError, error:
                raise self.severe(u'Problem with "%s" directive:\n%s'
                                  % (self.name, ErrorString(error)))
            attributes['source'] = path
        elif 'url' in self.options:
            source = self.options['url']
            # Do not import urllib2 at the top of the module because
            # it may fail due to broken SSL dependencies, and it takes
            # about 0.15 seconds to load.
            import urllib2
            try:
                raw_text = urllib2.urlopen(source).read()
            except (urllib2.URLError, IOError, OSError), error:
                raise self.severe(u'Problems with "%s" directive URL "%s":\n%s.'
                                  % (self.name, self.options['url'], ErrorString(error)))
            raw_file = io.StringInput(source=raw_text, source_path=source,
                                      encoding=encoding,
                                      error_handler=e_handler)
            try:
                text = raw_file.read()
            except UnicodeError, error:
                raise self.severe(u'Problem with "%s" directive:\n%s'
                                  % (self.name, ErrorString(error)))
            attributes['source'] = source
        else:
            # This will always fail because there is no content.
            self.assert_has_content()
        raw_node = nodes.raw('', text, **attributes)
        (raw_node.source,
         raw_node.line) = self.state_machine.get_source_and_line(self.lineno)
        return [raw_node]
class Replace(Directive):
    # Substitution-definition directive: parses its content and uses the
    # resulting (single) paragraph's children as the replacement text.
    has_content = True
    def run(self):
        """Parse the content as the body of a substitution definition."""
        if not isinstance(self.state, states.SubstitutionDef):
            raise self.error(
                'Invalid context: the "%s" directive can only be used within '
                'a substitution definition.' % self.name)
        self.assert_has_content()
        text = '\n'.join(self.content)
        element = nodes.Element(text)
        self.state.nested_parse(self.content, self.content_offset,
                                element)
        # element might contain [paragraph] + system_message(s)
        node = None
        messages = []
        for elem in element:
            # Keep the first paragraph as the replacement body, collect any
            # system messages; anything else means invalid content.
            if not node and isinstance(elem, nodes.paragraph):
                node = elem
            elif isinstance(elem, nodes.system_message):
                elem['backrefs'] = []
                messages.append(elem)
            else:
                return [
                    self.state_machine.reporter.error(
                        'Error in "%s" directive: may contain a single paragraph '
                        'only.' % (self.name), line=self.lineno) ]
        if node:
            return messages + node.children
        return messages
class Unicode(Directive):
    r"""
    Convert Unicode character codes (numbers) to characters. Codes may be
    decimal numbers, hexadecimal numbers (prefixed by ``0x``, ``x``, ``\x``,
    ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style numeric character
    entities (e.g. ``&#x262E;``). Text following ".." is a comment and is
    ignored. Spaces are ignored, and any other text remains as-is.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'trim': directives.flag,
                   'ltrim': directives.flag,
                   'rtrim': directives.flag}
    # Matches a ".." comment marker at start of line or after whitespace.
    comment_pattern = re.compile(r'( |\n|^)\.\. ')
    def run(self):
        if not isinstance(self.state, states.SubstitutionDef):
            raise self.error(
                'Invalid context: the "%s" directive can only be used within '
                'a substitution definition.' % self.name)
        substitution_definition = self.state_machine.node
        # 'trim' implies both 'ltrim' and 'rtrim'.
        if 'trim' in self.options:
            substitution_definition.attributes['ltrim'] = 1
            substitution_definition.attributes['rtrim'] = 1
        if 'ltrim' in self.options:
            substitution_definition.attributes['ltrim'] = 1
        if 'rtrim' in self.options:
            substitution_definition.attributes['rtrim'] = 1
        # Strip a trailing ".. comment", then process whitespace-separated codes.
        codes = self.comment_pattern.split(self.arguments[0])[0].split()
        element = nodes.Element()
        for code in codes:
            try:
                decoded = directives.unicode_code(code)
            except ValueError, error:
                raise self.error(u'Invalid character code: %s\n%s'
                                 % (code, ErrorString(error)))
            element += nodes.Text(decoded)
        return element.children
class Class(Directive):
    """
    Set a "class" attribute on the directive content or the next element.
    When applied to the next element, a "pending" element is inserted, and a
    transform does the work later.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True
    def run(self):
        # Validate the argument as a list of class names.
        try:
            classes = directives.class_option(self.arguments[0])
        except ValueError:
            raise self.error(
                'Invalid class attribute value for "%s" directive: "%s".'
                % (self.name, self.arguments[0]))
        if not self.content:
            # No content: defer the work to a transform via a pending node
            # attached to the following element.
            pending = nodes.pending(
                misc.ClassAttribute,
                {'class': classes, 'directive': self.name},
                self.block_text)
            self.state_machine.document.note_pending(pending)
            return [pending]
        # With content: parse it and tag every resulting top-level node.
        wrapper = nodes.Element()
        self.state.nested_parse(self.content, self.content_offset, wrapper)
        for child in wrapper:
            child['classes'].extend(classes)
        return list(wrapper.children)
class Role(Directive):
    # Directive that defines and registers a new interpreted-text role,
    # optionally inheriting from a base role: ``.. role:: name(base)``.
    has_content = True
    argument_pattern = re.compile(r'(%s)\s*(\(\s*(%s)\s*\)\s*)?$'
                                  % ((states.Inliner.simplename,) * 2))
    def run(self):
        """Dynamically create and register a custom interpreted text role."""
        if self.content_offset > self.lineno or not self.content:
            raise self.error('"%s" directive requires arguments on the first '
                             'line.' % self.name)
        args = self.content[0]
        match = self.argument_pattern.match(args)
        if not match:
            raise self.error('"%s" directive arguments not valid role names: '
                             '"%s".' % (self.name, args))
        new_role_name = match.group(1)
        base_role_name = match.group(3)
        messages = []
        if base_role_name:
            # Look up the base role this new role inherits from.
            base_role, messages = roles.role(
                base_role_name, self.state_machine.language, self.lineno,
                self.state.reporter)
            if base_role is None:
                error = self.state.reporter.error(
                    'Unknown interpreted text role "%s".' % base_role_name,
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                return messages + [error]
        else:
            base_role = roles.generic_custom_role
        assert not hasattr(base_role, 'arguments'), (
            'Supplemental directive arguments for "%s" directive not '
            'supported (specified by "%r" role).' % (self.name, base_role))
        try:
            # Parse the rest of the content as the role's option block.
            converted_role = convert_directive_function(base_role)
            (arguments, options, content, content_offset) = (
                self.state.parse_directive_block(
                    self.content[1:], self.content_offset, converted_role,
                    option_presets={}))
        except states.MarkupError, detail:
            error = self.state_machine.reporter.error(
                'Error in "%s" directive:\n%s.' % (self.name, detail),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return messages + [error]
        if 'class' not in options:
            # Default the role's class option to the role name itself.
            try:
                options['class'] = directives.class_option(new_role_name)
            except ValueError, detail:
                error = self.state_machine.reporter.error(
                    u'Invalid argument for "%s" directive:\n%s.'
                    % (self.name, SafeString(detail)), nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                return messages + [error]
        role = roles.CustomRole(new_role_name, base_role, options, content)
        roles.register_local_role(new_role_name, role)
        return messages
class DefaultRole(Directive):
    """Set the default interpreted text role."""
    optional_arguments = 1
    final_argument_whitespace = False
    def run(self):
        if not self.arguments:
            # No argument given: restore the "default" default role by
            # dropping any override registered under the empty name.
            roles._roles.pop('', None)
            return []
        role_name = self.arguments[0]
        role, messages = roles.role(role_name, self.state_machine.language,
                                    self.lineno, self.state.reporter)
        if role is not None:
            roles._roles[''] = role
            # @@@ should this be local to the document, not the parser?
            return messages
        err = self.state.reporter.error(
            'Unknown interpreted text role "%s".' % role_name,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return messages + [err]
class Title(Directive):
    """Override the document's metadata title with the directive argument."""
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    def run(self):
        # Record the title directly on the document node; produces no output.
        document = self.state_machine.document
        document['title'] = self.arguments[0]
        return []
class Date(Directive):
    # Substitution directive: expands to the current date/time, formatted
    # with an optional strftime() format string given as content.
    has_content = True
    def run(self):
        if not isinstance(self.state, states.SubstitutionDef):
            raise self.error(
                'Invalid context: the "%s" directive can only be used within '
                'a substitution definition.' % self.name)
        format_str = '\n'.join(self.content) or '%Y-%m-%d'
        # On Python 2, time.strftime() wants a byte string in the locale
        # encoding.
        if sys.version_info< (3, 0):
            try:
                format_str = format_str.encode(locale_encoding or 'utf-8')
            except UnicodeEncodeError:
                raise self.warning(u'Cannot encode date format string '
                    u'with locale encoding "%s".' % locale_encoding)
        text = time.strftime(format_str)
        if sys.version_info< (3, 0):
            # `text` is a byte string that may contain non-ASCII characters:
            try:
                text = text.decode(locale_encoding or 'utf-8')
            except UnicodeDecodeError:
                text = text.decode(locale_encoding or 'utf-8', 'replace')
                raise self.warning(u'Error decoding "%s"'
                    u'with locale encoding "%s".' % (text, locale_encoding))
        return [nodes.Text(text)]
class TestDirective(Directive):
    """This directive is useful only for testing purposes."""
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'option': directives.unchanged_required}
    has_content = True
    def run(self):
        # Emit an "info" system message echoing how the directive was parsed
        # (type, arguments, options and content).
        if self.content:
            text = '\n'.join(self.content)
            info = self.state_machine.reporter.info(
                'Directive processed. Type="%s", arguments=%r, options=%r, '
                'content:' % (self.name, self.arguments, self.options),
                nodes.literal_block(text, text), line=self.lineno)
        else:
            info = self.state_machine.reporter.info(
                'Directive processed. Type="%s", arguments=%r, options=%r, '
                'content: None' % (self.name, self.arguments, self.options),
                line=self.lineno)
        return [info]
# Old-style, functional definition:
#
# def directive_test_function(name, arguments, options, content, lineno,
# content_offset, block_text, state, state_machine):
# """This directive is useful only for testing purposes."""
# if content:
# text = '\n'.join(content)
# info = state_machine.reporter.info(
# 'Directive processed. Type="%s", arguments=%r, options=%r, '
# 'content:' % (name, arguments, options),
# nodes.literal_block(text, text), line=lineno)
# else:
# info = state_machine.reporter.info(
# 'Directive processed. Type="%s", arguments=%r, options=%r, '
# 'content: None' % (name, arguments, options), line=lineno)
# return [info]
#
# directive_test_function.arguments = (0, 1, 1)
# directive_test_function.options = {'option': directives.unchanged_required}
# directive_test_function.content = 1
| gpl-2.0 |
gylian/sickrage | lib/hachoir_parser/audio/aiff.py | 90 | 4043 | """
Audio Interchange File Format (AIFF) parser.
Author: Victor Stinner
Creation: 27 december 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt16, UInt32, Float80, TimestampMac32,
RawBytes, NullBytes,
String, Enum, PascalString32)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import filesizeHandler
from lib.hachoir_core.tools import alignValue
from lib.hachoir_parser.audio.id3 import ID3v2
# Mapping of AIFF-C compression type IDs (four-character codes) to
# human-readable codec names; used as the Enum for the COMM "codec" field.
CODEC_NAME = {
    'ACE2': u"ACE 2-to-1",
    'ACE8': u"ACE 8-to-3",
    'MAC3': u"MAC 3-to-1",
    'MAC6': u"MAC 6-to-1",
    'NONE': u"None",
    'sowt': u"Little-endian, no compression",
}
class Comment(FieldSet):
    # One entry of a COMT (comment) chunk: creation timestamp + text.
    def createFields(self):
        yield TimestampMac32(self, "timestamp")
        yield PascalString32(self, "text")
def parseText(self):
    # NAME/AUTH chunk body: raw text whose length comes from the chunk header.
    yield String(self, "text", self["size"].value)
def parseID3(self):
    # "ID3 " chunk: delegate to the ID3v2 parser (size converted bytes -> bits).
    yield ID3v2(self, "id3v2", size=self["size"].value*8)
def parseComment(self):
    # COMT chunk: a 16-bit count followed by that many Comment entries.
    # NOTE: xrange is Python 2 only.
    yield UInt16(self, "nb_comment")
    for index in xrange(self["nb_comment"].value):
        yield Comment(self, "comment[]")
def parseCommon(self):
    # COMM chunk: channel count, sample count/size, 80-bit float sample
    # rate, and (AIFF-C) the compression codec code.
    yield UInt16(self, "nb_channel")
    yield UInt32(self, "nb_sample")
    yield UInt16(self, "sample_size")
    yield Float80(self, "sample_rate")
    yield Enum(String(self, "codec", 4, strip="\0", charset="ASCII"), CODEC_NAME)
def parseVersion(self):
    # FVER chunk: format version timestamp.
    yield TimestampMac32(self, "timestamp")
def parseSound(self):
    # SSND chunk: offset/block-size header, then raw sample data up to the
    # end of the chunk (remaining size computed in bytes from bit sizes).
    yield UInt32(self, "offset")
    yield UInt32(self, "block_size")
    size = (self.size - self.current_size) // 8
    if size:
        yield RawBytes(self, "data", size)
class Chunk(FieldSet):
    # Maps chunk tag -> (field name, description, payload parser).
    TAG_INFO = {
        'COMM': ('common', "Common chunk", parseCommon),
        'COMT': ('comment', "Comment", parseComment),
        'NAME': ('name', "Name", parseText),
        'AUTH': ('author', "Author", parseText),
        'FVER': ('version', "Version", parseVersion),
        'SSND': ('sound', "Sound data", parseSound),
        'ID3 ': ('id3', "ID3", parseID3),
    }
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Chunk size in bits: 8-byte header + payload padded to an even
        # byte count (IFF chunks are 2-byte aligned).
        self._size = (8 + alignValue(self["size"].value, 2)) * 8
        tag = self["type"].value
        if tag in self.TAG_INFO:
            self._name, self._description, self._parser = self.TAG_INFO[tag]
        else:
            self._parser = None
    def createFields(self):
        # NOTE(review): the "Signature (FORM)" description here looks
        # copy-pasted from AiffFile; the field is actually the chunk tag.
        yield String(self, "type", 4, "Signature (FORM)", charset="ASCII")
        yield filesizeHandler(UInt32(self, "size"))
        size = self["size"].value
        if size:
            if self._parser:
                for field in self._parser(self):
                    yield field
                if size % 2:
                    yield NullBytes(self, "padding", 1)
            else:
                # Unknown chunk type: keep the payload as opaque bytes.
                yield RawBytes(self, "data", size)
class AiffFile(Parser):
    PARSER_TAGS = {
        "id": "aiff",
        "category": "audio",
        "file_ext": ("aif", "aiff", "aifc"),
        "mime": (u"audio/x-aiff",),
        "magic_regex": (("FORM.{4}AIF[CF]", 0),),
        "min_size": 12*8,
        "description": "Audio Interchange File Format (AIFF)"
    }
    endian = BIG_ENDIAN
    def validate(self):
        # An AIFF file is an IFF "FORM" container whose form type is
        # AIFF (uncompressed) or AIFC (compressed).
        if self.stream.readBytes(0, 4) != "FORM":
            return "Invalid signature"
        if self.stream.readBytes(8*8, 4) not in ("AIFF", "AIFC"):
            return "Invalid type"
        return True
    def createFields(self):
        yield String(self, "signature", 4, "Signature (FORM)", charset="ASCII")
        yield filesizeHandler(UInt32(self, "filesize"))
        yield String(self, "type", 4, "Form type (AIFF or AIFC)", charset="ASCII")
        # The remainder of the file is a sequence of tagged chunks.
        while not self.eof:
            yield Chunk(self, "chunk[]")
    def createDescription(self):
        if self["type"].value == "AIFC":
            return "Audio Interchange File Format Compressed (AIFC)"
        else:
            return "Audio Interchange File Format (AIFF)"
    def createContentSize(self):
        # "filesize" is in bytes; parser sizes are expressed in bits.
        return self["filesize"].value * 8
| gpl-3.0 |
michhar/flask-webapp-aml | env/Lib/site-packages/pip/_vendor/requests/utils.py | 177 | 21845 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""
    # Mappings are flattened to their (key, value) item view; any other
    # sequence-like object is passed through untouched.
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort *remaining* length of ``o``.

    Tries, in order: ``len(o)``, an ``o.len`` attribute, the buffered value
    (``getvalue``), and finally ``os.fstat`` on the underlying file
    descriptor; then subtracts the current read position (``tell``).
    """
    length = 0
    position = 0
    if hasattr(o, '__len__'):
        length = len(o)
    elif hasattr(o, 'len'):
        length = o.len
    elif hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        length = len(o.getvalue())
    elif hasattr(o, 'fileno'):
        try:
            fd = o.fileno()
        except io.UnsupportedOperation:
            pass
        else:
            length = os.fstat(fd).st_size
            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )
    if hasattr(o, 'tell'):
        position = o.tell()
    return max(0, length - position)
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc."""
    try:
        from netrc import netrc, NetrcParseError
        netrc_path = None
        # Look for ~/.netrc then ~/_netrc (Windows convention).
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return
            if os.path.exists(loc):
                netrc_path = loc
                break
        # Abort early if there isn't one.
        if netrc_path is None:
            return
        ri = urlparse(url)
        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]
        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # netrc entries are (login, account, password); fall back to
                # the account field when login is empty.
                # Return with login / password
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise
    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, basestring):
        return None
    # Skip pseudo-names such as '<stdin>' used by standard streams.
    if name.startswith('<') or name.endswith('>'):
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::
        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])
    """
    if value is None:
        return None
    # Scalars and strings cannot be interpreted as key/value pairs.
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::
        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.

    :param value: mapping, iterable of 2-tuples, or None.
    :raises ValueError: if *value* is a scalar/string type.
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    # BUG FIX: ``collections.Mapping`` moved to ``collections.abc`` in
    # Python 3.3 and the old alias was removed in Python 3.10, so the
    # previous ``collections.Mapping`` lookup raises AttributeError on
    # modern interpreters. Import compatibly (local import keeps Python 2
    # support without touching module-level imports).
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if isinstance(value, Mapping):
        value = value.items()
    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings. A quoted-string could
    contain a comma. A non-quoted string could have quotes in the
    middle. Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for element in _parse_list_header(value):
        # Quoted-string elements get their quotes (and escapes) removed.
        if element[:1] == element[-1:] == '"':
            element = unquote_header_value(element[1:-1])
        items.append(element)
    return items
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    pairs = {}
    for item in _parse_list_header(value):
        # Bare tokens (no '=') map to None.
        if '=' not in item:
            pairs[item] = None
            continue
        key, _, val = item.partition('=')
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        pairs[key] = val
    return pairs
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    """
    if not value or value[0] != '"' or value[-1] != '"':
        return value
    # this is not the real unquoting, but fixing this so that the
    # RFC is met will result in bugs with internet explorer and
    # probably some other browsers as well. IE for example is
    # uploading files with "C:\foo\bar.txt" as filename
    inner = value[1:-1]
    # Leave UNC filenames (\\host\share) untouched: the escape collapsing
    # below would eat the leading double backslash. See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    """
    # Each cookie exposes .name/.value; a later cookie with the same name
    # overwrites an earlier one, matching plain dict assignment.
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    # Build a throwaway jar from the dict, then merge it into the target.
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)
    # Probe, in order: <meta charset=...>, http-equiv charset pragma,
    # XML prolog encoding declaration.
    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    """
    # NOTE(review): cgi.parse_header is deprecated (PEP 594) and removed in
    # Python 3.13 -- this call needs a replacement on modern interpreters.
    content_type = headers.get('content-type')
    if not content_type:
        return None
    content_type, params = cgi.parse_header(content_type)
    if 'charset' in params:
        # Charset parameters may be quoted, e.g. charset="utf-8".
        return params['charset'].strip("'\"")
    if 'text' in content_type:
        # Historical HTTP default charset for text/* without a declared one.
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator."""
    encoding = r.encoding
    if encoding is None:
        # No encoding known: pass chunks through untouched.
        for chunk in iterator:
            yield chunk
        return
    decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush anything the incremental decoder is still buffering.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    start = 0
    total = len(string)
    while start < total:
        yield string[start:start + slice_length]
        start += slice_length
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:
    1. charset from content-type
    2. fall back and replace all unicode characters
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)
    tried_encodings = []
    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)
    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)
    # Fall back:
    try:
        # If `encoding` is None, str() raises TypeError and the raw
        # content is returned unchanged.
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :raises InvalidURL: on a malformed (non-hex) percent-escape sequence.
    """
    parts = uri.split('%')
    # parts[0] precedes the first '%'; every later part starts with the two
    # hex digits of an escape (when well-formed).
    for i, part in enumerate(parts[1:], 1):
        hex_digits = part[0:2]
        if len(hex_digits) != 2 or not hex_digits.isalnum():
            parts[i] = '%' + part
            continue
        try:
            char = chr(int(hex_digits, 16))
        except ValueError:
            raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_digits)
        if char in UNRESERVED_SET:
            parts[i] = char + part[2:]
        else:
            parts[i] = '%' + part
    return ''.join(parts)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.
    """
    try:
        # Unquote only the unreserved characters, then quote only illegal
        # characters (do not quote reserved, unreserved, or '%').
        return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
    except InvalidURL:
        # We couldn't unquote the given URI, so let's try quoting it, but
        # there may be unquoted '%'s in the URI. We need to make sure they're
        # properly quoted so they do not cause issues elsewhere.
        return quote(uri, safe="!#$&'()*+,/:;=?@[]~")
def address_in_network(ip, net):
    """
    This function allows you to check if on IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
    """
    host = struct.unpack('=L', socket.inet_aton(ip))[0]
    net_addr, prefix = net.split('/')
    # Build the netmask from the prefix length, then compare masked values.
    mask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(prefix))))[0]
    network = struct.unpack('=L', socket.inet_aton(net_addr))[0] & mask
    return (host & mask) == (network & mask)
def dotted_netmask(mask):
    """
    Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0
    """
    # Set the top `mask` bits of a 32-bit value, clear the rest.
    host_bits = 32 - mask
    netmask_value = (0xffffffff << host_bits) & 0xffffffff
    return socket.inet_ntoa(struct.pack('>I', netmask_value))
def is_ipv4_address(string_ip):
    """Return True if *string_ip* parses as an IPv4 address."""
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the cidr format in no_proxy variable"""
    # Require exactly one '/', a numeric prefix in 1..32, and a parseable
    # IPv4 network address.
    if string_network.count('/') != 1:
        return False
    address, _, prefix = string_network.partition('/')
    try:
        bits = int(prefix)
    except ValueError:
        return False
    if not 1 <= bits <= 32:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.
    """
    # Environment variables may be lower- or upper-case.
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc
    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = (
            host for host in no_proxy.replace(' ', '').split(',') if host
        )
        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            # Literal IP host: honour CIDR entries in no_proxy.
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
        else:
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True
    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False
    if bypass:
        return True
    return False
def get_environ_proxies(url):
    """Return a dict of environment proxies."""
    # An empty mapping means "connect directly".
    return {} if should_bypass_proxies(url) else getproxies()
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    parts = urlparse(url)
    # Prefer a scheme+host-specific entry, then fall back to the scheme.
    specific = proxies.get(parts.scheme + '://' + parts.hostname)
    if specific is not None:
        return specific
    return proxies.get(parts.scheme)
def default_user_agent(name="python-requests"):
    """Return a string representing the default user agent."""
    # Format is "<name>/<library version>".
    return '{0}/{1}'.format(name, __version__)
def default_headers():
    """Build the case-insensitive dict of headers sent by default."""
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
    """
    links = []
    strip_chars = " '\""
    # Entries are separated by a comma followed by '<'.
    for chunk in re.split(", *<", value):
        url, _, params = chunk.partition(";")
        link = {"url": url.strip("<> '\"")}
        for param in params.split(";"):
            # A parameter without exactly one '=' terminates parsing of
            # this entry (matches the original split() unpacking).
            try:
                key, val = param.split("=")
            except ValueError:
                break
            link[key.strip(strip_chars)] = val.strip(strip_chars)
        links.append(link)
    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the Unicode encoding of a JSON byte string.

    JSON always starts with two ASCII characters, so detection is as
    easy as counting the nulls and from their location and count
    determine the encoding. Also detect a BOM, if present.

    :param data: raw bytes of a JSON document.
    :return: codec name string, or None if undetectable.
    """
    sample = data[:4]
    # BUG FIX: the original compared against codecs.BOM32_BE, which is an
    # alias for the 2-byte UTF-16 big-endian BOM, so the 4-byte sample could
    # never match and a UTF-32-BE BOM fell through to `return None`. Use the
    # correct 4-byte UTF-32 BOM constants.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    '''Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.'''
    parts = urlparse(url, new_scheme)
    netloc, path = parts.netloc, parts.path
    if not netloc:
        # urlparse is a finicky beast, and sometimes decides that there isn't
        # a netloc present. Assume that it's being over-cautious, and switch
        # netloc and path when that happens.
        netloc, path = path, netloc
    return urlunparse((parts.scheme, netloc, path, parts.params, parts.query,
                       parts.fragment))
def get_auth_from_url(url):
    """Extract the credentials embedded in *url*.

    Returns a ``(username, password)`` tuple, with percent-escapes decoded;
    both elements are empty strings when either part is missing.
    """
    parsed = urlparse(url)
    if parsed.username is None or parsed.password is None:
        return ('', '')
    return (unquote(parsed.username), unquote(parsed.password))
def to_native_string(string, encoding='ascii'):
    """
    Given a string object, regardless of type, returns a representation of that
    string in the native string type, encoding and decoding where necessary.
    This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    # The native type is bytes on Python 2 and unicode on Python 3, so the
    # conversion direction depends on the interpreter.
    return string.encode(encoding) if is_py2 else string.decode(encoding)
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part
    """
    parts = urlparse(url)
    netloc, path = parts.netloc, parts.path
    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc
    # Drop everything up to and including the last '@' (the credentials).
    netloc = netloc.rsplit('@', 1)[-1]
    return urlunparse((parts.scheme, netloc, path, parts.params,
                       parts.query, ''))
| mit |
cyberintruder/wfuzz | framework/plugins/api.py | 11 | 5938 | from framework.plugins.pluginobjects import PluginResult
from framework.plugins.pluginobjects import PluginRequest
from framework.core.myexception import FuzzException
from framework.core.facade import Facade
import os
import urlparse
import urllib2
import json
# Util methods when processing fuzz results
def url_filename(fuzzresult):
    # Last path segment of the result's URL ('' when the path ends in '/').
    path_segments = urlparse.urlsplit(fuzzresult.url).path.split('/')
    return path_segments[-1]
def url_same_domain(url1, url2):
    # True when both URLs share the same two-label registered domain.
    domain_one = url_domain(url1)
    domain_two = url_domain(url2)
    return domain_one == domain_two
def url_domain(url):
    # Keep only the last two labels of the host, e.g. 'www.example.com'
    # becomes 'example.com'.
    host_labels = urlparse.urlparse(url).netloc.split(".")
    return '.'.join(host_labels[-2:])
def url_filename_ext(url):
    # Extension (including the leading dot) of the URL's path component.
    url_path = urlparse.urlparse(url).path
    return os.path.splitext(url_path)[1]
# Util methods for accessing search results
def search_bing(dork, key = None, raw = False):
    """Run *dork* against the Bing Search (Azure datamarket) API.

    dork -- search expression; spliced into the request URL as-is, so the
            caller is expected to pass something URL-safe.
    key  -- Bing API key; when None it is read from the 'bing_apikey'
            setting in the plugins section of wfuzz.ini.
    raw  -- when True return the whole decoded JSON response, otherwise
            only the list of web results.

    Raises FuzzException when no key is configured or the HTTP call fails.
    """
    if key is None:
        key = Facade().sett.get('plugins', 'bing_apikey')
    if not key:
        raise FuzzException(FuzzException.FATAL, "An api Bing key is needed. Please chek wfuzz.ini.")
    # some code taken from http://www.securitybydefault.com/2014/07/search2auditpy-deja-que-bing-haga-el.html?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+SecurityByDefault+%28Security+By+Default%29
    user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)'
    # HTTP basic auth with an empty username and the API key as password;
    # [:-1] strips the trailing newline the py2 base64 codec appends.
    creds = (':%s' % key).encode('base64')[:-1]
    auth = 'Basic %s' % creds
    # temporary solution, wf should have a process performing http requests. even plugins might need this.
    try:
        request = urllib2.Request('https://api.datamarket.azure.com/Data.ashx/Bing/Search/Composite?Sources=%27web%27&Query=%27'+dork+'%27&$format=json')
        request.add_header('Authorization', auth)
        request.add_header('User-Agent', user_agent)
        requestor = urllib2.build_opener()
        result = requestor.open(request)
    except Exception, e:
        # NOTE(review): e.msg only exists on HTTPError-style exceptions;
        # other failures here would raise AttributeError — confirm.
        raise FuzzException(FuzzException.FATAL, "Error when retrieving Bing API results: %s." % e.msg)
    results = json.loads(result.read())
    # (A captured sample response used for offline testing was previously
    # inlined here; see VCS history if it is needed again.)
    if raw:
        return results
    else:
        # Composite responses nest the web hits under d.results[0].Web.
        return results['d']['results'][0]['Web']
class BasePlugin():
def __init__(self):
self.results_queue = None
self.base_fuzz_res = None
def run(self, fuzzresult, control_queue, results_queue):
try:
self.results_queue = results_queue
self.base_fuzz_res = fuzzresult
self.process(fuzzresult)
except Exception, e:
plres = PluginResult()
plres.source = "$$exception$$"
plres.issue = "Exception within plugin %s: %s" % (self.name, str(e))
results_queue.put(plres)
finally:
control_queue.get()
control_queue.task_done()
return
def process(self, fuzzresult):
'''
This is were the plugin processing is done. Any wfuzz plugin must implement this method, do its job with the fuzzresult received and:
- queue_url: if it is a discovery plugin enqueing more HTTP request that at some point will generate more results
- add_result: Add information about the obtained results after the processing with an accurate description
A kbase (get_kbase, has_kbase, add_kbase) is shared between all plugins. this can be used to store and retrieve relevant "collaborative" information.
'''
raise NotImplemented
def add_result(self, issue):
plres = PluginResult()
plres.source = self.name
plres.issue = issue
self.results_queue.put(plres)
def queue_raw_request(self, raw):
self.results_queue.put(raw)
def queue_url(self, url):
self.results_queue.put(PluginRequest.from_fuzzRes(self.base_fuzz_res, url, self.name))
def get_kbase(self, key):
v = self.kbase.get(key)
if not v:
raise FuzzException(FuzzException.FATAL, "Key not in kbase")
return v
def has_kbase(self, key):
return self.kbase.has(key)
def add_kbase(self, key, value):
self.kbase.add(key, value)
# Plugins specializations with common methods useful for their own type
class DiscoveryPlugin(BasePlugin):
    """Base class for discovery plugins that enqueue additional URLs.

    Maintains a blacklist of file extensions that should never be fuzzed.
    """

    def __init__(self):
        # BUG FIX: initialise the BasePlugin state (results_queue,
        # base_fuzz_res) instead of leaving those attributes undefined
        # until run() happens to be called.
        BasePlugin.__init__(self)
        # Default blacklist comes from the ini (comma separated); a
        # 'discovery.bl' kbase entry overrides it ('-' separated there).
        self.black_list = Facade().sett.get('plugins', 'file_bl').split(",")
        if self.has_kbase("discovery.bl"):
            self.black_list = self.get_kbase("discovery.bl")[0].split("-")

    def blacklisted_extension(self, url):
        """True when *url*'s file extension is on the discovery blacklist."""
        return url_filename_ext(url) in self.black_list
| gpl-2.0 |
brijeshkesariya/odoo | addons/website_blog/__init__.py | 373 | 1036 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
import wizard
| agpl-3.0 |
CN-UPB/OpenBarista | components/decaf-masta/decaf_masta/components/database/datacenter.py | 1 | 1976 | ##
# Copyright 2016 DECaF Project Group, University of Paderborn
# This file is part of the decaf orchestration framework
# All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
##
__author__ = 'Kristian Hinnenthal'
__date__ = '$13-okt-2015 14:15:27$'
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from .mastadatabase import Base
from .keystone import Keystone
import json
class Datacenter(Base):
    """ORM model for a datacenter registered with masta.

    A datacenter is identified by a keystone credential set plus a region,
    and owns all resources (flavors, images, networks, instances, ...)
    created within it.
    """
    __tablename__ = 'datacenters'
    datacenter_id = Column(Integer, primary_key=True, autoincrement=True)
    datacenter_name = Column(String(250), nullable=False)
    # Credentials/region used to talk to this datacenter's OpenStack APIs.
    keystone_id = Column(Integer, ForeignKey('keystone_credentials.keystone_id'), nullable=False)
    keystone_region = Column(String(250), nullable=False)
    # Child collections; each gains a 'datacenters' (or 'datacenter')
    # back-reference on the related model.
    flavors = relationship('Flavor', backref='datacenters')
    images = relationship('Image', backref='datacenters')
    monitoring_alarms = relationship('MonitoringAlarm', backref='datacenters')
    management_networks = relationship('ManagementNetwork', backref='datacenters')
    public_networks = relationship('PublicNetwork', backref='datacenters')
    vm_instances = relationship('VMInstance', backref='datacenters')
    internal_edges = relationship('InternalEdge', backref='datacenters')
    public_ports = relationship('PublicPort', backref='datacenters')
    keypairs = relationship('KeyPair', backref='datacenter')

    def to_json(self):
        """Return the dict representation serialized as a JSON string."""
        return json.dumps(self.to_dict())

    def to_dict(self):
        """Return the datacenter's identifying columns wrapped in a dict."""
        return_dict = {
            "datacenter" : {
                "datacenter_id": self.datacenter_id,
                "datacenter_name": self.datacenter_name,
                "keystone_id": self.keystone_id,
                "keystone_region": self.keystone_region
            }
        }
        # BUG FIX: the original line was corrupted with stray text
        # ("return return_dict | mit |"), which is a syntax error.
        return return_dict
backtou/longlab | gr-digital/python/gmsk.py | 16 | 11078 | #
# GMSK modulation and demodulation.
#
#
# Copyright 2005,2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# See gnuradio-examples/python/digital for examples
from gnuradio import gr
import modulation_utils
import digital_swig as digital
from math import pi
import numpy
from pprint import pprint
import inspect
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 2         # samples per baud; must be an integer >= 2
_def_bt = 0.35                      # Gaussian filter bandwidth * symbol time
_def_verbose = False                # print modulator/demodulator parameters
_def_log = False                    # dump intermediate signals to *.dat files
_def_gain_mu = None                 # M&M clock recovery gain mu (None -> 0.175)
_def_mu = 0.5                       # M&M clock recovery fractional delay [0.0, 1.0]
_def_freq_error = 0.0               # bit rate error as a fraction
_def_omega_relative_limit = 0.005   # max relative variation of omega
# FIXME: Figure out how to make GMSK work with pfb_arb_resampler_fff for both
# transmit and receive so we don't require integer samples per symbol.
# /////////////////////////////////////////////////////////////////////////////
# GMSK modulator
# /////////////////////////////////////////////////////////////////////////////
class gmsk_mod(gr.hier_block2):
    """GMSK modulator: unsigned-char byte stream in, complex baseband out.

    Pipeline: bytes -> NRZ symbols -> Gaussian pulse-shaping interpolating
    filter -> FM modulator.
    """

    def __init__(self,
                 samples_per_symbol=_def_samples_per_symbol,
                 bt=_def_bt,
                 verbose=_def_verbose,
                 log=_def_log):
        """
        Hierarchical block for Gaussian Minimum Shift Key (GMSK)
        modulation.

        The input is a byte stream (unsigned char) and the
        output is the complex modulated signal at baseband.

        @param samples_per_symbol: samples per baud >= 2
        @type samples_per_symbol: integer
        @param bt: Gaussian filter bandwidth * symbol time
        @type bt: float
        @param verbose: Print information about modulator?
        @type verbose: bool
        @param log: Print modulation data to files?
        @type log: bool
        """
        gr.hier_block2.__init__(self, "gmsk_mod",
                                gr.io_signature(1, 1, gr.sizeof_char),       # Input signature
                                gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature

        samples_per_symbol = int(samples_per_symbol)
        self._samples_per_symbol = samples_per_symbol
        self._bt = bt
        self._differential = False

        if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:
            raise TypeError, ("samples_per_symbol must be an integer >= 2, is %r" % (samples_per_symbol,))

        ntaps = 4 * samples_per_symbol              # up to 3 bits in filter at once
        sensitivity = (pi / 2) / samples_per_symbol # phase change per bit = pi / 2

        # Turn it into NRZ data.
        self.nrz = gr.bytes_to_syms()

        # Form Gaussian filter
        # Generate Gaussian response (Needs to be convolved with window below).
        self.gaussian_taps = gr.firdes.gaussian(
            1,                  # gain
            samples_per_symbol, # symbol_rate
            bt,                 # bandwidth * symbol time
            ntaps               # number of taps
            )

        self.sqwave = (1,) * samples_per_symbol     # rectangular window
        # Convolving the Gaussian response with the rectangular window gives
        # the pulse-shaping taps for the interpolating filter below.
        self.taps = numpy.convolve(numpy.array(self.gaussian_taps),numpy.array(self.sqwave))
        self.gaussian_filter = gr.interp_fir_filter_fff(samples_per_symbol, self.taps)

        # FM modulation
        self.fmmod = gr.frequency_modulator_fc(sensitivity)

        if verbose:
            self._print_verbage()

        if log:
            self._setup_logging()

        # Connect & Initialize base class
        self.connect(self, self.nrz, self.gaussian_filter, self.fmmod, self)

    def samples_per_symbol(self):
        # Accessor for the configured samples-per-symbol value.
        return self._samples_per_symbol

    def bits_per_symbol(self=None):   # staticmethod that's also callable on an instance
        # GMSK always carries one bit per symbol.
        return 1
    bits_per_symbol = staticmethod(bits_per_symbol)      # make it a static method.

    def _print_verbage(self):
        # Dump the modulator configuration to stdout (py2 print statements).
        print "bits per symbol = %d" % self.bits_per_symbol()
        print "Gaussian filter bt = %.2f" % self._bt

    def _setup_logging(self):
        # Tap each processing stage into a file sink for offline inspection.
        print "Modulation logging turned on."
        self.connect(self.nrz,
                     gr.file_sink(gr.sizeof_float, "nrz.dat"))
        self.connect(self.gaussian_filter,
                     gr.file_sink(gr.sizeof_float, "gaussian_filter.dat"))
        self.connect(self.fmmod,
                     gr.file_sink(gr.sizeof_gr_complex, "fmmod.dat"))

    def add_options(parser):
        """
        Adds GMSK modulation-specific options to the standard parser
        """
        parser.add_option("", "--bt", type="float", default=_def_bt,
                          help="set bandwidth-time product [default=%default] (GMSK)")
    add_options=staticmethod(add_options)

    def extract_kwargs_from_options(options):
        """
        Given command line options, create dictionary suitable for passing to __init__
        """
        return modulation_utils.extract_kwargs_from_options(gmsk_mod.__init__,
                                                            ('self',), options)
    extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
# /////////////////////////////////////////////////////////////////////////////
# GMSK demodulator
# /////////////////////////////////////////////////////////////////////////////
class gmsk_demod(gr.hier_block2):
    """GMSK demodulator: complex baseband in, unpacked bits (LSB) out.

    Pipeline: quadrature FM demodulator -> M&M clock recovery -> binary
    slicer.
    """

    def __init__(self,
                 samples_per_symbol=_def_samples_per_symbol,
                 gain_mu=_def_gain_mu,
                 mu=_def_mu,
                 omega_relative_limit=_def_omega_relative_limit,
                 freq_error=_def_freq_error,
                 verbose=_def_verbose,
                 log=_def_log):
        """
        Hierarchical block for Gaussian Minimum Shift Key (GMSK)
        demodulation.

        The input is the complex modulated signal at baseband.
        The output is a stream of bits packed 1 bit per byte (the LSB)

        @param samples_per_symbol: samples per baud
        @type samples_per_symbol: integer
        @param verbose: Print information about modulator?
        @type verbose: bool
        @param log: Print modualtion data to files?
        @type log: bool

        Clock recovery parameters.  These all have reasonble defaults.

        @param gain_mu: controls rate of mu adjustment
        @type gain_mu: float
        @param mu: fractional delay [0.0, 1.0]
        @type mu: float
        @param omega_relative_limit: sets max variation in omega
        @type omega_relative_limit: float, typically 0.000200 (200 ppm)
        @param freq_error: bit rate error as a fraction
        @param float
        """
        gr.hier_block2.__init__(self, "gmsk_demod",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_char))       # Output signature

        self._samples_per_symbol = samples_per_symbol
        self._gain_mu = gain_mu
        self._mu = mu
        self._omega_relative_limit = omega_relative_limit
        self._freq_error = freq_error
        self._differential = False

        if samples_per_symbol < 2:
            raise TypeError, "samples_per_symbol >= 2, is %f" % samples_per_symbol

        # Nominal symbol period in samples, corrected by the rate error.
        self._omega = samples_per_symbol*(1+self._freq_error)

        if not self._gain_mu:
            self._gain_mu = 0.175

        self._gain_omega = .25 * self._gain_mu * self._gain_mu        # critically damped

        # Demodulate FM
        sensitivity = (pi / 2) / samples_per_symbol
        # The demod gain is the inverse of the modulator's sensitivity.
        self.fmdemod = gr.quadrature_demod_cf(1.0 / sensitivity)

        # the clock recovery block tracks the symbol clock and resamples as needed.
        # the output of the block is a stream of soft symbols (float)
        self.clock_recovery = digital.clock_recovery_mm_ff(self._omega, self._gain_omega,
                                                           self._mu, self._gain_mu,
                                                           self._omega_relative_limit)

        # slice the floats at 0, outputting 1 bit (the LSB of the output byte) per sample
        self.slicer = digital.binary_slicer_fb()

        if verbose:
            self._print_verbage()

        if log:
            self._setup_logging()

        # Connect & Initialize base class
        self.connect(self, self.fmdemod, self.clock_recovery, self.slicer, self)

    def samples_per_symbol(self):
        # Accessor for the configured samples-per-symbol value.
        return self._samples_per_symbol

    def bits_per_symbol(self=None):   # staticmethod that's also callable on an instance
        # GMSK always carries one bit per symbol.
        return 1
    bits_per_symbol = staticmethod(bits_per_symbol)      # make it a static method.

    def _print_verbage(self):
        # Dump the demodulator configuration to stdout (py2 print statements).
        print "bits per symbol = %d" % self.bits_per_symbol()
        print "M&M clock recovery omega = %f" % self._omega
        print "M&M clock recovery gain mu = %f" % self._gain_mu
        print "M&M clock recovery mu = %f" % self._mu
        print "M&M clock recovery omega rel. limit = %f" % self._omega_relative_limit
        print "frequency error = %f" % self._freq_error

    def _setup_logging(self):
        # Tap each processing stage into a file sink for offline inspection.
        print "Demodulation logging turned on."
        self.connect(self.fmdemod,
                     gr.file_sink(gr.sizeof_float, "fmdemod.dat"))
        self.connect(self.clock_recovery,
                     gr.file_sink(gr.sizeof_float, "clock_recovery.dat"))
        self.connect(self.slicer,
                     gr.file_sink(gr.sizeof_char, "slicer.dat"))

    def add_options(parser):
        """
        Adds GMSK demodulation-specific options to the standard parser
        """
        parser.add_option("", "--gain-mu", type="float", default=_def_gain_mu,
                          help="M&M clock recovery gain mu [default=%default] (GMSK/PSK)")
        parser.add_option("", "--mu", type="float", default=_def_mu,
                          help="M&M clock recovery mu [default=%default] (GMSK/PSK)")
        parser.add_option("", "--omega-relative-limit", type="float", default=_def_omega_relative_limit,
                          help="M&M clock recovery omega relative limit [default=%default] (GMSK/PSK)")
        parser.add_option("", "--freq-error", type="float", default=_def_freq_error,
                          help="M&M clock recovery frequency error [default=%default] (GMSK)")
    add_options=staticmethod(add_options)

    def extract_kwargs_from_options(options):
        """
        Given command line options, create dictionary suitable for passing to __init__
        """
        return modulation_utils.extract_kwargs_from_options(gmsk_demod.__init__,
                                                            ('self',), options)
    extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
#
# Register the modulator and demodulator classes under the name 'gmsk' in
# the shared mod/demod registry.
#
modulation_utils.add_type_1_mod('gmsk', gmsk_mod)
modulation_utils.add_type_1_demod('gmsk', gmsk_demod)
| gpl-3.0 |
tjanez/ansible | test/integration/gce_credentials.py | 84 | 1852 | import collections
import os
import sys
import yaml
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support (0.13.3+) required for this module'")
sys.exit(1)
def add_credentials_options(parser):
    """Register the GCE credential options on *parser*.

    Adds --service_account_email, --pem_file and --project_id. Defaults are
    read from ./credentials.yml when that file exists, otherwise None.
    """
    default_service_account_email = None
    default_pem_file = None
    default_project_id = None
    # Load details from credentials.yml
    if os.path.isfile('credentials.yml'):
        # BUG FIX: close the file handle (the original leaked it by passing
        # an open() result straight into yaml.load).
        with open('credentials.yml', 'r') as credentials_file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input and requires a Loader argument in
            # PyYAML >= 6; consider yaml.safe_load here.
            credentials = yaml.load(credentials_file)
        default_service_account_email = credentials[
            'gce_service_account_email']
        default_pem_file = credentials['gce_pem_file']
        default_project_id = credentials['gce_project_id']
    parser.add_option(
        "--service_account_email", action="store",
        dest="service_account_email", default=default_service_account_email,
        help="GCE service account email. Default is loaded from credentials.yml.")
    parser.add_option(
        "--pem_file", action="store", dest="pem_file",
        default=default_pem_file,
        help="GCE client key. Default is loaded from credentials.yml.")
    parser.add_option(
        "--project_id", action="store", dest="project_id",
        default=default_project_id,
        help="Google Cloud project ID. Default is loaded from credentials.yml.")
def check_required(opts, parser):
    """Abort via parser.error() when any mandatory option is unset."""
    required_names = ('service_account_email', 'pem_file', 'project_id')
    missing = [name for name in required_names if getattr(opts, name) is None]
    for name in missing:
        parser.error("Missing required parameter: --%s" % name)
def get_gce_driver(opts):
    """Build and return a libcloud GCE driver from the parsed options."""
    # Connect to GCE
    driver_cls = get_driver(Provider.GCE)
    return driver_cls(opts.service_account_email, opts.pem_file,
                      project=opts.project_id)
| gpl-3.0 |
google/fuzzbench | fuzzbench/test_e2e/test_e2e_run.py | 1 | 3103 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks the result of a test experiment run. Note that this is not a
standalone unit test module, but used as part of our end-to-end integration
test."""
import os
import pytest
import redis
import rq
from common import config_utils, yaml_utils
from experiment.build import docker_images
@pytest.fixture(scope='class')
def experiment_config():
    """Returns the default configuration for end-to-end testing."""
    raw_config = yaml_utils.read(
        'fuzzbench/test_e2e/end-to-end-test-config.yaml')
    return config_utils.validate_and_expand(raw_config)
@pytest.fixture(scope='class')
def redis_connection():
    """Returns the default redis server connection."""
    connection = redis.Redis(host='queue-server')
    return connection
# pylint: disable=no-self-use,redefined-outer-name
@pytest.mark.skipif('E2E_INTEGRATION_TEST' not in os.environ,
                    reason='Not running end-to-end test.')
@pytest.mark.usefixtures('redis_connection', 'experiment_config')
class TestEndToEndRunResults:
    """Checks the result of a test experiment run."""

    def test_jobs_dependency(self, experiment_config, redis_connection):
        """Tests that jobs dependency preserves during working."""
        all_images = docker_images.get_images_to_build(
            experiment_config['fuzzers'], experiment_config['benchmarks'])
        # Fetch the rq job for every image; job ids are the image names.
        jobs = {
            name: rq.job.Job.fetch(name, connection=redis_connection)
            for name in all_images
        }
        for name, image in all_images.items():
            if 'depends_on' in image:
                for dep in image['depends_on']:
                    # Each dependency must have finished before the
                    # dependent job started.
                    assert jobs[dep].ended_at <= jobs[name].started_at

    def test_all_jobs_finished_successfully(self, experiment_config,
                                            redis_connection):
        """Tests all jobs finished successully."""
        all_images = docker_images.get_images_to_build(
            experiment_config['fuzzers'], experiment_config['benchmarks'])
        jobs = rq.job.Job.fetch_many(all_images.keys(),
                                     connection=redis_connection)
        for job in jobs:
            # 'finished' is rq's terminal success state.
            assert job.get_status() == 'finished'

    def test_measurement_jobs_were_started_before_trial_jobs_finished(self):
        """Fake test to be implemented later."""
        assert True

    def test_db_contains_experiment_results(self):
        """Fake test to be implemented later."""
        assert True

    def test_experiment_report_is_generated(self):
        """Fake test to be implemented later."""
        assert True
| apache-2.0 |
mogotest/selenium | setup.py | 4 | 2438 | #!/usr/bin/env python
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from setuptools.command.install import install
setup(
    cmdclass={'install': install},  # re-registers the stock install command unchanged
    name='selenium',
    version="2.0-dev",
    description='Python bindings for WebDriver',
    url='http://code.google.com/p/selenium/',
    # The Python sources live alongside the other language bindings in the
    # selenium tree, so every sub-package is mapped to its real directory.
    package_dir={
        'selenium':'.',
        'selenium.ie': 'jobbie/src/py',
        'selenium.firefox': 'firefox/src/py',
        'selenium.chrome' : 'chrome/src/py',
        'selenium.chrome_tests': 'chrome/test/py',
        'selenium.common': 'common/src/py',
        'selenium.remote': 'remote/client/src/py',
        'selenium.common_tests': 'common/test/py',
        'selenium.common_web': 'common/src/web',
        'selenium.firefox_tests': 'firefox/test/py',
        'selenium.ie_tests': 'jobbie/test/py',
        'selenium.remote_tests': 'remote/client/test/py',
    },
    packages=['selenium',
              'selenium.common',
              'selenium.firefox',
              'selenium.ie',
              'selenium.chrome',
              'selenium.remote',
              'selenium.common_tests',
              'selenium.common_web',
              'selenium.firefox_tests',
              'selenium.ie_tests',
              'selenium.chrome_tests',
              'selenium.remote_tests'],
    include_package_data=True,
    # NOTE(review): 'distribute' is obsolete (merged back into setuptools).
    install_requires=['distribute'],
)
# FIXME: Do manually
# == IE ==
# cp jobbie/prebuilt/Win32/Release/InternetExplorerDriver.dll \
# build/lib.<platform>/webdriver/ie
# == Chrome ==
# cp chrome/src/extension build/lib.<platform>/webdriver/chrome
# On win32
# cp chrome/prebuilt/Win32/Release/npchromedriver.dll build/lib/webdriver/chrome
| apache-2.0 |
airodactyl/qutebrowser | tests/helpers/stubs.py | 1 | 16414 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=invalid-name,abstract-method
"""Fake objects/stubs."""
from unittest import mock
import contextlib
import shutil
import attr
from PyQt5.QtCore import pyqtSignal, QPoint, QProcess, QObject, QUrl
from PyQt5.QtGui import QIcon
from PyQt5.QtNetwork import (QNetworkRequest, QAbstractNetworkCache,
QNetworkCacheMetaData)
from PyQt5.QtWidgets import QCommonStyle, QLineEdit, QWidget, QTabBar
from qutebrowser.browser import browsertab, downloads
from qutebrowser.utils import usertypes
from qutebrowser.mainwindow import mainwindow
class FakeNetworkCache(QAbstractNetworkCache):

    """Fake cache with no data."""

    def cacheSize(self):
        # The fake cache is always empty.
        return 0

    def data(self, _url):
        # No URL ever has cached data.
        return None

    def insert(self, _dev):
        # Inserting is a no-op.
        pass

    def metaData(self, _url):
        # Fresh (empty) metadata for every URL.
        return QNetworkCacheMetaData()

    def prepare(self, _metadata):
        # Refuse to hand out a device, so nothing gets written to the cache.
        return None

    def remove(self, _url):
        # Nothing is ever cached, so removal never succeeds.
        return False

    def updateMetaData(self, _url):
        # Updating metadata is a no-op.
        pass
class FakeKeyEvent:

    """Fake QKeyPressEvent stub."""

    def __init__(self, key, modifiers=0, text=''):
        # Expose the Qt accessor methods as mocks returning fixed values.
        for attr_name, value in [('key', key), ('text', text),
                                 ('modifiers', modifiers)]:
            setattr(self, attr_name, mock.Mock(return_value=value))
class FakeWebFrame:

    """A stub for QWebFrame.

    All the usual QWebFrame accessors are mocks that return the values
    handed to the constructor (scroll defaults to QPoint(0, 0) and zoom
    to 1.0).
    """

    def __init__(self, geometry=None, *, scroll=None, plaintext=None,
                 html=None, parent=None, zoom=1.0):
        scroll_pos = QPoint(0, 0) if scroll is None else scroll
        self.geometry = mock.Mock(return_value=geometry)
        self.scrollPosition = mock.Mock(return_value=scroll_pos)
        self.parentFrame = mock.Mock(return_value=parent)
        self.toPlainText = mock.Mock(return_value=plaintext)
        self.toHtml = mock.Mock(return_value=html)
        self.zoomFactor = mock.Mock(return_value=zoom)
class FakeChildrenFrame:

    """A stub for QWebFrame to test get_child_frames."""

    def __init__(self, children=None):
        # childFrames() hands back exactly the list given (or a new empty one).
        frames = [] if children is None else children
        self.childFrames = mock.Mock(return_value=frames)
class FakeQApplication:

    """Stub to insert as QApplication module.

    UNSET is a sentinel so that ``instance=None`` can be distinguished
    from "no instance given" (which returns the stub itself).
    """

    UNSET = object()

    def __init__(self, style=None, all_widgets=None, active_window=None,
                 instance=UNSET):
        target = self if instance is self.UNSET else instance
        self.instance = mock.Mock(return_value=target)

        self.style = mock.Mock(spec=QCommonStyle)
        self.style().metaObject().className.return_value = style

        self.allWidgets = lambda: all_widgets
        self.activeWindow = lambda: active_window
class FakeNetworkReply:

    """QNetworkReply stub which provides a Content-Disposition header.

    Headers are kept in a plain dict of native strings; the raw-header API
    converts to/from ISO-8859-1 bytes like Qt does.
    """

    KNOWN_HEADERS = {
        QNetworkRequest.ContentTypeHeader: 'Content-Type',
    }

    def __init__(self, headers=None, url=None):
        self.headers = {} if headers is None else headers
        self.url = mock.Mock(return_value=QUrl() if url is None else url)

    def hasRawHeader(self, name):
        """Check whether *name* (ISO-8859-1 bytes) is a present header."""
        return name.decode('iso-8859-1') in self.headers

    def rawHeader(self, name):
        """Return the value for *name* (ISO-8859-1 bytes in and out)."""
        value = self.headers[name.decode('iso-8859-1')]
        return value.encode('iso-8859-1')

    def header(self, known_header):
        """Look up a QNetworkRequest::KnownHeaders member, or None."""
        return self.headers.get(self.KNOWN_HEADERS[known_header])

    def setHeader(self, known_header, value):
        """Set a QNetworkRequest::KnownHeaders member to *value*."""
        self.headers[self.KNOWN_HEADERS[known_header]] = value
def fake_qprocess():
    """Factory for a QProcess mock which has the QProcess enum values."""
    proc = mock.Mock(spec=QProcess)
    enum_names = ('NormalExit', 'CrashExit', 'FailedToStart', 'Crashed',
                  'Timedout', 'WriteError', 'ReadError', 'UnknownError')
    # Copy the real enum values onto the mock so comparisons keep working.
    for enum_name in enum_names:
        setattr(proc, enum_name, getattr(QProcess, enum_name))
    return proc
class FakeWebTabScroller(browsertab.AbstractScroller):

    """Fake AbstractScroller to use in tests."""

    def __init__(self, tab, pos_perc):
        super().__init__(tab)
        self._scroll_perc = pos_perc

    def pos_perc(self):
        """Return the scroll percentage given at construction."""
        return self._scroll_perc
class FakeWebTabHistory(browsertab.AbstractHistory):

    """Fake for Web{Kit,Engine}History."""

    def __init__(self, tab, *, can_go_back, can_go_forward):
        super().__init__(tab)
        self._can_go_back = can_go_back
        self._can_go_forward = can_go_forward

    def _checked(self, value):
        # The test must have configured the flag before it is queried.
        assert value is not None
        return value

    def can_go_back(self):
        return self._checked(self._can_go_back)

    def can_go_forward(self):
        return self._checked(self._can_go_forward)
class FakeWebTabAudio(browsertab.AbstractAudio):

    """Fake AbstractAudio: never muted and never recently audible."""

    def is_muted(self):
        """Stubbed: audio is never muted."""
        return False

    def is_recently_audible(self):
        """Stubbed: audio is never recently audible."""
        return False
class FakeWebTab(browsertab.AbstractTab):

    """Fake AbstractTab to use in tests."""

    # NOTE(review): the QUrl() default is evaluated once and shared across
    # calls; fine for a read-only stub, but worth confirming callers never
    # mutate it.
    def __init__(self, url=QUrl(), title='', tab_id=0, *,
                 scroll_pos_perc=(0, 0),
                 load_status=usertypes.LoadStatus.success,
                 progress=0, can_go_back=None, can_go_forward=None):
        # tab_id is accepted for signature compatibility but unused here.
        super().__init__(win_id=0, mode_manager=None, private=False)
        self._load_status = load_status
        self._title = title
        self._url = url
        self._progress = progress
        self.history = FakeWebTabHistory(self, can_go_back=can_go_back,
                                         can_go_forward=can_go_forward)
        self.scroller = FakeWebTabScroller(self, scroll_pos_perc)
        self.audio = FakeWebTabAudio()
        # Give the base layout a real widget to wrap.
        wrapped = QWidget()
        self._layout.wrap(self, wrapped)

    def url(self, requested=False):
        # Only the final (non-requested) URL is supported by this fake.
        assert not requested
        return self._url

    def title(self):
        return self._title

    def progress(self):
        return self._progress

    def load_status(self):
        return self._load_status

    def shutdown(self):
        # Nothing to clean up in the fake.
        pass

    def icon(self):
        # Always a null icon.
        return QIcon()
class FakeSignal:

    """Fake pyqtSignal stub which does nothing.

    Attributes:
        signal: The name of the signal, formatted like pyqtSignal's
            internal representation.
        _func: The function invoked when the signal itself is called.
    """

    def __init__(self, name='fake', func=None):
        self.signal = '2{name}(int, int)'.format(name=name)
        self._func = func

    def __call__(self):
        if self._func is None:
            raise TypeError("'FakeSignal' object is not callable")
        return self._func()

    def connect(self, slot):
        """Pretend to connect the signal to a slot.

        Currently does nothing, but could be improved to do some sanity
        checking on the slot.
        """

    def disconnect(self, slot=None):
        """Pretend to disconnect the signal from a slot.

        Currently does nothing, but could be improved to do some sanity
        checking on the slot and see if it actually got connected.
        """

    def emit(self, *args):
        """Pretend to emit the signal.

        Currently does nothing, but could be improved to do type checking
        based on a signature given to __init__.
        """
@attr.s
class FakeCmdUtils:

    """Stub for cmdutils which provides a cmd_dict."""

    cmd_dict = attr.ib()  # mapping of command name -> command object
@attr.s(frozen=True)
class FakeCommand:

    """A simple command stub which has a description."""

    name = attr.ib('')  # command name shown in completion
    desc = attr.ib('')  # one-line description
    hide = attr.ib(False)  # whether the command is hidden from completion
    debug = attr.ib(False)  # whether it's a debug-only command
    deprecated = attr.ib(False)  # whether it's marked deprecated
    completion = attr.ib(None)  # completion model, if any
    maxsplit = attr.ib(None)  # maximum argument splits, if limited
    # NOTE(review): default is a callable returning False, mirroring an
    # attribute that is apparently called rather than read — TODO confirm
    # against the real Command class.
    takes_count = attr.ib(lambda: False)
    modes = attr.ib((usertypes.KeyMode.normal, ))  # modes the command is in
class FakeTimer(QObject):

    """Stub for a usertypes.Timer.

    Exposes ``timeout`` as a mock whose connect/disconnect/emit calls are
    forwarded to a real Qt signal, so connected slots actually fire while
    tests can still assert on the mock.
    """

    timeout_signal = pyqtSignal()

    def __init__(self, parent=None, name=None):
        super().__init__(parent)
        self.timeout = mock.Mock(spec=['connect', 'disconnect', 'emit'])
        self.timeout.connect.side_effect = self.timeout_signal.connect
        self.timeout.disconnect.side_effect = self.timeout_signal.disconnect
        self.timeout.emit.side_effect = self._emit
        self._name = name
        self._interval = 0
        self._singleshot = False
        self._started = False

    def __repr__(self):
        return '<{} name={!r}>'.format(type(self).__name__, self._name)

    def _emit(self):
        """Called when the timeout "signal" gets emitted."""
        if self._singleshot:
            # A single-shot timer stops itself after firing once.
            self._started = False
        self.timeout_signal.emit()

    def setInterval(self, interval):
        self._interval = interval

    def interval(self):
        return self._interval

    def setSingleShot(self, singleshot):
        self._singleshot = singleshot

    def isSingleShot(self):
        return self._singleshot

    def start(self, interval=None):
        if interval:
            self._interval = interval
        self._started = True

    def stop(self):
        self._started = False

    def isActive(self):
        return self._started
class InstaTimer(QObject):

    """Stub for a QTimer that fires instantly on start().

    Useful to test a time-based event without inserting an artificial
    delay.
    """

    timeout = pyqtSignal()

    def start(self, interval=None):
        # Fire immediately instead of waiting for the interval.
        self.timeout.emit()

    def setInterval(self, interval):
        pass

    def setSingleShot(self, yes):
        pass

    @staticmethod
    def singleShot(_interval, fun):
        # Call the function right away, ignoring the requested delay.
        fun()
class StatusBarCommandStub(QLineEdit):

    """Stub for the statusbar command prompt."""

    got_cmd = pyqtSignal(str)
    clear_completion_selection = pyqtSignal()
    hide_completion = pyqtSignal()
    update_completion = pyqtSignal()
    show_cmd = pyqtSignal()
    hide_cmd = pyqtSignal()

    def prefix(self):
        """Return the leading character of the current text (: / ?)."""
        text = self.text()
        return text[0]
class UrlMarkManagerStub(QObject):

    """Stub for the quickmark-manager or bookmark-manager object."""

    added = pyqtSignal(str, str)
    removed = pyqtSignal(str)

    def __init__(self, parent=None):
        super().__init__(parent)
        # key -> url mapping; tests populate this directly.
        self.marks = {}

    def delete(self, key):
        """Delete the mark stored under *key* and announce the removal."""
        del self.marks[key]
        self.removed.emit(key)
class BookmarkManagerStub(UrlMarkManagerStub):

    """Stub for the bookmark-manager object."""

    # Identical to the generic UrlMarkManagerStub; exists only to provide
    # the expected type/name.
    pass
class QuickmarkManagerStub(UrlMarkManagerStub):

    """Stub for the quickmark-manager object."""

    def quickmark_del(self, key):
        """Remove the quickmark stored under *key*."""
        self.delete(key)
class HostBlockerStub:

    """Stub for the host-blocker object.

    Starts with no blocked hosts; tests add entries as needed.
    """

    def __init__(self):
        self.blocked_hosts = set()
class SessionManagerStub:

    """Stub for the session-manager object."""

    def __init__(self):
        # List of session names tests can populate directly.
        self.sessions = []

    def list_sessions(self):
        """Return the stored session list."""
        return self.sessions

    def save_autosave(self):
        """No-op stand-in for saving the autosave session."""
class TabbedBrowserStub(QObject):

    """Stub for the tabbed-browser object.

    Records the last opened URL and delegates tab storage to a
    TabWidgetStub.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.widget = TabWidgetStub()
        self.shutting_down = False
        self.opened_url = None

    def on_tab_close_requested(self, idx):
        del self.widget.tabs[idx]

    def widgets(self):
        return self.widget.tabs

    def tabopen(self, url):
        self.opened_url = url

    def openurl(self, url, *, newtab):
        # newtab is accepted for interface parity but ignored by the stub.
        self.opened_url = url
class TabWidgetStub(QObject):

    """Stub for the tab-widget object."""

    new_tab = pyqtSignal(browsertab.AbstractTab, int)

    def __init__(self, parent=None):
        super().__init__(parent)
        # Tabs stored as a plain list; tests append/remove directly.
        self.tabs = []
        self._qtabbar = QTabBar()
        # Canned return value for indexOf(); tests must set it (or set it
        # to RuntimeError to simulate a Qt failure).
        self.index_of = None
        # Canned return value for currentIndex(); tests must set it.
        self.current_index = None

    def count(self):
        return len(self.tabs)

    def widget(self, i):
        return self.tabs[i]

    def page_title(self, i):
        return self.tabs[i].title()

    def tabBar(self):
        return self._qtabbar

    def indexOf(self, _tab):
        # Fail loudly if a test forgot to configure the canned value.
        if self.index_of is None:
            raise ValueError("indexOf got called with index_of None!")
        elif self.index_of is RuntimeError:
            # Simulate Qt raising from indexOf().
            raise RuntimeError
        else:
            return self.index_of

    def currentIndex(self):
        if self.current_index is None:
            raise ValueError("currentIndex got called with current_index "
                             "None!")
        return self.current_index

    def currentWidget(self):
        idx = self.currentIndex()
        if idx == -1:
            return None
        # NOTE(review): returns tabs[idx - 1], not tabs[idx] — looks like an
        # off-by-one; presumably tests set current_index 1-based. TODO
        # confirm against callers before "fixing".
        return self.tabs[idx - 1]
class ApplicationStub(QObject):

    """Stub to insert as the app object in objreg."""

    # Emitted with the new MainWindow when a window is created.
    new_window = pyqtSignal(mainwindow.MainWindow)
class HTTPPostStub(QObject):

    """A stub class for HTTPClient.

    Attributes:
        url: the last url passed to post()
        data: the last data passed to post()
    """

    success = pyqtSignal(str)
    error = pyqtSignal(str)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.url = None
        self.data = None

    def post(self, url, data=None):
        """Record the request instead of actually sending it."""
        self.data = data
        self.url = url
class FakeDownloadItem(QObject):

    """Mock browser.downloads.DownloadItem."""

    finished = pyqtSignal()

    def __init__(self, fileobj, name, parent=None):
        super().__init__(parent)
        self.name = name
        self.fileobj = fileobj
        # Tests flip this to True to simulate a completed download.
        self.successful = False
class FakeDownloadManager:

    """Mock browser.downloads.DownloadManager.

    "Downloads" by copying a file below *tmpdir* into the target's fileobj.
    """

    def __init__(self, tmpdir):
        self._tmpdir = tmpdir
        self.downloads = []

    @contextlib.contextmanager
    def _open_fileobj(self, target):
        """Ensure a DownloadTarget's fileobj attribute is available."""
        if not isinstance(target, downloads.FileDownloadTarget):
            # Non-file targets already carry an open fileobj.
            yield target.fileobj
            return
        target.fileobj = open(target.filename, 'wb')
        try:
            yield target.fileobj
        finally:
            target.fileobj.close()

    def get(self, url, target, **kwargs):
        """Return a FakeDownloadItem instance with a fileobj.

        The content is copied from the file the given url links to.
        """
        with self._open_fileobj(target):
            item = FakeDownloadItem(target.fileobj, name=url.path())
            with (self._tmpdir / url.path()).open('rb') as src:
                shutil.copyfileobj(src, item.fileobj)
        self.downloads.append(item)
        return item
| gpl-3.0 |
rrrene/django | django/contrib/postgres/operations.py | 374 | 1377 | from django.contrib.postgres.signals import register_hstore_handler
from django.db.migrations.operations.base import Operation
class CreateExtension(Operation):
    """Migration operation which creates a named PostgreSQL extension."""

    reversible = True

    def __init__(self, name):
        self.name = name

    def state_forwards(self, app_label, state):
        # Extensions exist only in the database; project state is untouched.
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Extensions are PostgreSQL-specific; silently skip other vendors.
        if schema_editor.connection.vendor != 'postgresql':
            return
        schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % self.name)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        schema_editor.execute("DROP EXTENSION %s" % self.name)

    def describe(self):
        return "Creates extension %s" % self.name
class HStoreExtension(CreateExtension):
    """CreateExtension specialization for the ``hstore`` extension."""

    def __init__(self):
        self.name = 'hstore'

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        super(HStoreExtension, self).database_forwards(app_label, schema_editor,
                                                       from_state, to_state)
        # The hstore adapter can only be registered once the extension
        # exists; register now so a data migration on the same connection
        # sees it.
        register_hstore_handler(schema_editor.connection)
class UnaccentExtension(CreateExtension):
    """CreateExtension specialization for the ``unaccent`` extension."""

    def __init__(self):
        self.name = 'unaccent'
| bsd-3-clause |
googleapis/python-essential-contacts | google/cloud/essential_contacts_v1/services/essential_contacts_service/client.py | 1 | 36542 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.essential_contacts_v1.services.essential_contacts_service import (
pagers,
)
from google.cloud.essential_contacts_v1.types import enums
from google.cloud.essential_contacts_v1.types import service
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import EssentialContactsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import EssentialContactsServiceGrpcTransport
from .transports.grpc_asyncio import EssentialContactsServiceGrpcAsyncIOTransport
class EssentialContactsServiceClientMeta(type):
    """Metaclass for the EssentialContactsService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of transport label -> transport class; insertion order
    # determines the default ("grpc" first).
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[EssentialContactsServiceTransport]]
    _transport_registry["grpc"] = EssentialContactsServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = EssentialContactsServiceGrpcAsyncIOTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[EssentialContactsServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        if label:
            # An explicit transport was requested.
            return cls._transport_registry[label]
        # Fall back to the first registered transport.
        return next(iter(cls._transport_registry.values()))
class EssentialContactsServiceClient(metaclass=EssentialContactsServiceClientMeta):
"""Manages contacts for important Google Cloud notifications."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
# Default (non-mTLS) API hostname, plus its mTLS twin derived via the
# staticmethod above. ``__func__`` is needed because a staticmethod
# descriptor can't be called directly from within the class body.
DEFAULT_ENDPOINT = "essentialcontacts.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
    DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    info.

    Args:
        info (dict): The service account private key info.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        EssentialContactsServiceClient: The constructed client.
    """
    kwargs["credentials"] = service_account.Credentials.from_service_account_info(
        info
    )
    return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        EssentialContactsServiceClient: The constructed client.
    """
    kwargs["credentials"] = service_account.Credentials.from_service_account_file(
        filename
    )
    return cls(*args, **kwargs)

# Backwards-compatible alias for the file-based constructor.
from_service_account_json = from_service_account_file
@property
def transport(self) -> EssentialContactsServiceTransport:
    """Returns the transport used by the client instance.

    Returns:
        EssentialContactsServiceTransport: The transport used by the client
            instance.
    """
    # Set once in __init__ (either the caller-provided instance or one
    # built from the registry).
    return self._transport
@staticmethod
def contact_path(project: str, contact: str,) -> str:
"""Returns a fully-qualified contact string."""
return "projects/{project}/contacts/{contact}".format(
project=project, contact=contact,
)
@staticmethod
def parse_contact_path(path: str) -> Dict[str, str]:
"""Parses a contact path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/contacts/(?P<contact>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
    self,
    *,
    credentials: Optional[ga_credentials.Credentials] = None,
    transport: Union[str, EssentialContactsServiceTransport, None] = None,
    client_options: Optional[client_options_lib.ClientOptions] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiates the essential contacts service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, EssentialContactsServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # Normalize client_options: accept a dict, None, or a ClientOptions.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()

    # Create SSL credentials for mutual TLS if needed.
    # NOTE(review): distutils.util.strtobool is deprecated upstream;
    # kept here to preserve the accepted truthy/falsy spellings.
    use_client_cert = bool(
        util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
    )

    client_cert_source_func = None
    is_mtls = False
    if use_client_cert:
        # Explicit cert source on client_options wins over the ambient
        # default client certificate.
        if client_options.client_cert_source:
            is_mtls = True
            client_cert_source_func = client_options.client_cert_source
        else:
            is_mtls = mtls.has_default_client_cert_source()
            if is_mtls:
                client_cert_source_func = mtls.default_client_cert_source()
            else:
                client_cert_source_func = None

    # Figure out which api endpoint to use.
    if client_options.api_endpoint is not None:
        # An explicit endpoint always takes precedence.
        api_endpoint = client_options.api_endpoint
    else:
        use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_env == "never":
            api_endpoint = self.DEFAULT_ENDPOINT
        elif use_mtls_env == "always":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT
        elif use_mtls_env == "auto":
            # "auto": pick mTLS only when a client certificate is in play.
            if is_mtls:
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            else:
                api_endpoint = self.DEFAULT_ENDPOINT
        else:
            raise MutualTLSChannelError(
                "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                "values: never, auto, always"
            )

    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    if isinstance(transport, EssentialContactsServiceTransport):
        # transport is a EssentialContactsServiceTransport instance.
        # Credentials/scopes must then live on the transport itself.
        if credentials or client_options.credentials_file:
            raise ValueError(
                "When providing a transport instance, "
                "provide its credentials directly."
            )
        if client_options.scopes:
            raise ValueError(
                "When providing a transport instance, provide its scopes "
                "directly."
            )
        self._transport = transport
    else:
        # Build a transport from the registry (label or default).
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials,
            credentials_file=client_options.credentials_file,
            host=api_endpoint,
            scopes=client_options.scopes,
            client_cert_source_for_mtls=client_cert_source_func,
            quota_project_id=client_options.quota_project_id,
            client_info=client_info,
        )
def create_contact(
    self,
    request: service.CreateContactRequest = None,
    *,
    parent: str = None,
    contact: service.Contact = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> service.Contact:
    r"""Adds a new contact for a resource.

    Args:
        request (google.cloud.essential_contacts_v1.types.CreateContactRequest):
            The request object; mutually exclusive with the flattened
            ``parent``/``contact`` arguments.
        parent (str):
            Required. The resource to save this contact for. Format:
            organizations/{organization_id}, folders/{folder_id} or
            projects/{project_id}.
        contact (google.cloud.essential_contacts_v1.types.Contact):
            Required. The contact to create. Must specify an email
            address and language tag.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.essential_contacts_v1.types.Contact:
            A contact that will receive notifications from Google Cloud.

    Raises:
        ValueError: If ``request`` is given together with flattened args.
    """
    # A request object and flattened arguments are mutually exclusive.
    if request is not None and (parent or contact):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dicts/None into a proper request; an existing request object
    # of the right type is used as-is to avoid a copy.
    if not isinstance(request, service.CreateContactRequest):
        request = service.CreateContactRequest(request)
    # Apply flattened arguments onto the request.
    if parent is not None:
        request.parent = parent
    if contact is not None:
        request.contact = contact

    # The wrapped method carries default retry/timeout and error mapping.
    rpc = self._transport._wrapped_methods[self._transport.create_contact]

    # Routing header so the backend can route by the parent resource.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
    )

    return rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
def update_contact(
    self,
    request: service.UpdateContactRequest = None,
    *,
    contact: service.Contact = None,
    update_mask: field_mask_pb2.FieldMask = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> service.Contact:
    r"""Updates a contact.

    Note: A contact's email address cannot be changed.

    Args:
        request (google.cloud.essential_contacts_v1.types.UpdateContactRequest):
            The request object; mutually exclusive with the flattened
            ``contact``/``update_mask`` arguments.
        contact (google.cloud.essential_contacts_v1.types.Contact):
            Required. The contact resource to replace the existing saved
            contact. Note: the email address of the contact cannot be
            modified.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Optional. The update mask applied to the resource. For the
            ``FieldMask`` definition, see
            https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.essential_contacts_v1.types.Contact:
            A contact that will receive notifications from Google Cloud.

    Raises:
        ValueError: If ``request`` is given together with flattened args.
    """
    # A request object and flattened arguments are mutually exclusive.
    if request is not None and (contact or update_mask):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dicts/None into a proper request without copying an existing
    # request object of the right type.
    if not isinstance(request, service.UpdateContactRequest):
        request = service.UpdateContactRequest(request)
    # Apply flattened arguments onto the request.
    if contact is not None:
        request.contact = contact
    if update_mask is not None:
        request.update_mask = update_mask

    # The wrapped method carries default retry/timeout and error mapping.
    rpc = self._transport._wrapped_methods[self._transport.update_contact]

    # Routing header so the backend can route by the contact name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("contact.name", request.contact.name),)
        ),
    )

    return rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
def list_contacts(
    self,
    request: service.ListContactsRequest = None,
    *,
    parent: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListContactsPager:
    r"""Lists the contacts that have been set on a resource.

    Args:
        request (google.cloud.essential_contacts_v1.types.ListContactsRequest):
            The request object; mutually exclusive with the flattened
            ``parent`` argument.
        parent (str):
            Required. The parent resource name. Format:
            organizations/{organization_id}, folders/{folder_id} or
            projects/{project_id}.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.essential_contacts_v1.services.essential_contacts_service.pagers.ListContactsPager:
            Response message for the ListContacts method. Iterating over
            this object will yield results and resolve additional pages
            automatically.

    Raises:
        ValueError: If ``request`` is given together with flattened args.
    """
    # A request object and flattened arguments are mutually exclusive.
    if request is not None and parent:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dicts/None into a proper request without copying an existing
    # request object of the right type.
    if not isinstance(request, service.ListContactsRequest):
        request = service.ListContactsRequest(request)
    if parent is not None:
        request.parent = parent

    # The wrapped method carries default retry/timeout and error mapping.
    rpc = self._transport._wrapped_methods[self._transport.list_contacts]

    # Routing header so the backend can route by the parent resource.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
    )

    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    # Wrap the paged response so callers can iterate across pages.
    return pagers.ListContactsPager(
        method=rpc, request=request, response=response, metadata=metadata,
    )
def get_contact(
    self,
    request: service.GetContactRequest = None,
    *,
    name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> service.Contact:
    r"""Gets a single contact.

    Args:
        request (google.cloud.essential_contacts_v1.types.GetContactRequest):
            The request object; mutually exclusive with the flattened
            ``name`` argument.
        name (str):
            Required. The name of the contact to retrieve. Format:
            organizations/{organization_id}/contacts/{contact_id},
            folders/{folder_id}/contacts/{contact_id} or
            projects/{project_id}/contacts/{contact_id}.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.essential_contacts_v1.types.Contact:
            A contact that will receive notifications from Google Cloud.

    Raises:
        ValueError: If ``request`` is given together with flattened args.
    """
    # A request object and flattened arguments are mutually exclusive.
    if request is not None and name:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dicts/None into a proper request without copying an existing
    # request object of the right type.
    if not isinstance(request, service.GetContactRequest):
        request = service.GetContactRequest(request)
    if name is not None:
        request.name = name

    # The wrapped method carries default retry/timeout and error mapping.
    rpc = self._transport._wrapped_methods[self._transport.get_contact]

    # Routing header so the backend can route by the contact name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )

    return rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
def delete_contact(
    self,
    request: service.DeleteContactRequest = None,
    *,
    name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Deletes a contact.

    Args:
        request (google.cloud.essential_contacts_v1.types.DeleteContactRequest):
            The request object. Request message for the
            DeleteContact method.
        name (str):
            Required. The name of the contact to delete. Format:
            organizations/{organization_id}/contacts/{contact_id},
            folders/{folder_id}/contacts/{contact_id} or
            projects/{project_id}/contacts/{contact_id}
            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Raises:
        ValueError: If both ``request`` and the flattened ``name``
            argument are supplied — they are mutually exclusive.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([name])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Minor optimization to avoid making a copy if the user passes
    # in a service.DeleteContactRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, service.DeleteContactRequest):
        request = service.DeleteContactRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.delete_contact]

    # Certain fields should be provided within the metadata header;
    # add these here.  The routing header lets the backend route the
    # request based on the resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )

    # Send the request.  DeleteContact returns Empty, so there is
    # nothing to hand back to the caller.
    rpc(
        request, retry=retry, timeout=timeout, metadata=metadata,
    )
def compute_contacts(
    self,
    request: service.ComputeContactsRequest = None,
    *,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ComputeContactsPager:
    r"""Lists all contacts for the resource that are
    subscribed to the specified notification categories,
    including contacts inherited from any parent resources.

    Args:
        request (google.cloud.essential_contacts_v1.types.ComputeContactsRequest):
            The request object. Request message for the
            ComputeContacts method.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.essential_contacts_v1.services.essential_contacts_service.pagers.ComputeContactsPager:
            Response message for the
            ComputeContacts method.
            Iterating over this object will yield
            results and resolve additional pages
            automatically.
    """
    # Create or coerce a protobuf request object.
    # Minor optimization to avoid making a copy if the user passes
    # in a service.ComputeContactsRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, service.ComputeContactsRequest):
        request = service.ComputeContactsRequest(request)

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.compute_contacts]

    # Certain fields should be provided within the metadata header;
    # add these here.  The parent resource name drives request routing.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
    )

    # Send the request.
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

    # This method is paged; wrap the response in a pager, which provides
    # an `__iter__` convenience method.
    response = pagers.ComputeContactsPager(
        method=rpc, request=request, response=response, metadata=metadata,
    )

    # Done; return the response.
    return response
def send_test_message(
    self,
    request: service.SendTestMessageRequest = None,
    *,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Allows a contact admin to send a test message to
    contact to verify that it has been configured correctly.

    Args:
        request (google.cloud.essential_contacts_v1.types.SendTestMessageRequest):
            The request object. Request message for the
            SendTestMessage method.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    """
    # Create or coerce a protobuf request object.
    # Minor optimization to avoid making a copy if the user passes
    # in a service.SendTestMessageRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, service.SendTestMessageRequest):
        request = service.SendTestMessageRequest(request)

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.send_test_message]

    # Certain fields should be provided within the metadata header;
    # add these here.  Routing is keyed on the target resource.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
    )

    # Send the request.  The RPC returns Empty; nothing to return.
    rpc(
        request, retry=retry, timeout=timeout, metadata=metadata,
    )
try:
    # Report the installed package version in client telemetry headers.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-essential-contacts",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata is unavailable (e.g. running from a source checkout);
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = ("EssentialContactsServiceClient",)
| apache-2.0 |
t1m0thy/django-extensions | tests/test_uuid_field.py | 24 | 2355 | import re
import uuid
import six
from django.test import TestCase
from django_extensions.db.fields import PostgreSQLUUIDField
from .testapp.models import (
UUIDTestAgregateModel, UUIDTestManyToManyModel, UUIDTestModel_field,
UUIDTestModel_pk,
)
class UUIDFieldTest(TestCase):
    """Exercise the custom UUIDField as a plain field, a primary key, in an
    inherited ("agregate") model, and on a many-to-many model."""

    def test_UUID_field_create(self):
        j = UUIDTestModel_field.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
        self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))

    def test_UUID_field_pk_create(self):
        # When the UUID field is the primary key, pk mirrors its value.
        j = UUIDTestModel_pk.objects.create(uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
        self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))
        self.assertEqual(j.pk, six.u('550e8400-e29b-41d4-a716-446655440000'))

    def test_UUID_field_pk_agregate_create(self):
        # pk is only checked for shape here (36-char canonical UUID string) —
        # presumably it is auto-generated by the parent model; verify against
        # the testapp model definitions.
        j = UUIDTestAgregateModel.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440001'))
        self.assertEqual(j.a, 6)
        self.assertIsInstance(j.pk, six.string_types)
        self.assertEqual(len(j.pk), 36)

    def test_UUID_field_manytomany_create(self):
        j = UUIDTestManyToManyModel.objects.create(uuid_field=six.u('550e8400-e29b-41d4-a716-446655440010'))
        self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440010'))
        self.assertEqual(j.pk, six.u('550e8400-e29b-41d4-a716-446655440010'))
class PostgreSQLUUIDFieldTest(TestCase):
    """Check that PostgreSQLUUIDField casts arbitrary UUID representations to
    the canonical form PostgreSQL expects."""

    def test_uuid_casting(self):
        # As explained by the postgres documentation
        # http://www.postgresql.org/docs/9.1/static/datatype-uuid.html
        # an uuid needs to be a sequence of lower-case hexadecimal digits, in
        # several groups separated by hyphens, specifically a group of 8 digits
        # followed by three groups of 4 digits followed by a group of 12 digits.
        # Raw strings are required here: '\d' in a plain string is an invalid
        # escape sequence (a warning today, an error in future Python).
        matcher = re.compile(r'^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}'
                             r'-[\da-f]{12}$')
        field = PostgreSQLUUIDField()
        # Feed every representation uuid.UUID exposes: str, URN, hex, int
        # and raw bytes — all must normalize to the canonical form.
        for value in (str(uuid.uuid4()), uuid.uuid4().urn, uuid.uuid4().hex,
                      uuid.uuid4().int, uuid.uuid4().bytes):
            prepared_value = field.get_db_prep_value(value, None)
            self.assertTrue(matcher.match(prepared_value) is not None,
                            prepared_value)
| mit |
jcchin/MagnePlane | src/hyperloop/Python/ticket_cost.py | 4 | 8796 | from __future__ import print_function
import numpy as np
from openmdao.api import IndepVarComp, Component, Group, Problem, ExecComp
import matplotlib.pylab as plt
class TicketCost(Component):
    '''
    Notes
    -------
    This Component takes into account various cost figures from the system model and combines them to estimate tickt cost per passenger.

    Params
    -------
    land_cost : float
        Cost of materials over land per unit length. Default value is 2.437e6 USD/km
    water_cost : float
        Cost of materials underwater per unit length. Default value is 389.346941e3 USD/km
    pod_cost : float
        Cost per individual pod. Default value is 1.0e6 USD.
    capital_cost : float
        Estimate of overhead capital cost. Default value is 1.0e10 USD.
    energy_cost : float
        Cost of electricity. Default value is .13 USD/kWh
    ib : float
        Bond interest rate. Default value is .04
    bm : float
        Bond maturity. Default value is 20.0 years.
    operating_time : float
        operating time per day. Default value is 16.0*3600 s
    JtokWh : float
        Convert J to kWh. Default value is J/kWh
    m_pod : float
        Pod mass. Default value is 3100 kg
    n_passengers : float
        Number of passengers. Default value is 28.0
    pod_period : float
        Time in between pod departures. Default value is 120.0 s
    avg_speed : float
        average pod speed. Default value is 286.86 m/s
    track_length : float
        length of the track. Default value is 600e3 m
    land_length : float
        length of track traveled over land. Default value is 600e3 m
    water_length : float
        length of track traveled underwater. Default value is 0.0 m
    pod_power : float
        Power consumption of the pod. Default value is 1.5e6 W
    prop_power : float
        power of an individual propulsion section. Default value is 350e3 W
    vac_power : float
        Power of the vacuum pumps. Default value is 71.049e6 W
    steady_vac_power : float
        Steady state run power of vacuum pumps. Default value is 950.0e3 W
    vf : float
        Pod top speed. Default value is 286.86 m/s
    g : float
        Gravity. Default value is 9.81 m/s/s
    Cd : float
        Pod drag coefficient. Default value is .2
    S : float
        Pod planform area. Default value is 40.42 m**2
    p_tunnel : float
        Tunnel pressure. Default value is 850.0 Pa
    T_tunnel : float
        Tunnel temperature. Default value is 320 K
    R : float
        Ideal gas constant. Default value is 287 J/kg/K
    eta : float
        Efficiency of propulsion system
    D_mag : float
        Magnetic drag. Default value is (9.81*3100.0)/200.0 N
    thrust_time : float
        Time spent during a propulsive section. Default value is 1.5 s
    prop_period : float
        distance between pripulsion sections. Defualt value is 25.0e3 km
    num_thrust : float
        Number of booster sections along track. Default value is 10.0

    Returns
    -------
    num_pods : float
        Number of pods needed to sustain the departure schedule.
    ticket_cost : float
        cost of individual ticket. Default value is 0.0 USD
    prop_energy_cost : float
        cost of energy used by propulsion section per year. Default value is 0.0 USD
    tube_energy_cost : float
        cost of energy used by the tube (propulsion + vacuum) per year. Default value is 0.0 USD
    total_energy_cost : float
        cost of all energy consumption per year. Default value is 0.0 USD
    '''

    def __init__(self):
        super(TicketCost, self).__init__()

        self.add_param('land_cost', val = 2.437e6, desc = 'Cost of materials over land per unit length', units = 'USD/km')
        self.add_param('water_cost', val = 389.346941e3, desc = 'Cost of materials underwater per unit length', units = 'USD/km')
        self.add_param('pod_cost', val = 1.0e6, desc = 'Cost of individual pod', units = 'USD')
        self.add_param('capital_cost', val = 1.0e10, desc = 'Estimate of overhead capital cost', units = 'USD')
        self.add_param('energy_cost', val = .13, desc = 'Cost of electricity', units = 'USD/kW/h')
        self.add_param('ib', val = .04, desc = 'Bond interest rate', units = 'unitless')
        self.add_param('bm', val = 20.0, desc = 'Bond maturity', units = 'yr')
        self.add_param('operating_time', val = 16.0*3600, desc = 'Operating time per day', units = 's')
        self.add_param('JtokWh', val = 2.7778e-7, desc = 'Convert Joules to kWh', units = '(kw*h)/J')
        self.add_param('m_pod', val = 3100.0, desc = 'Pod Mass', units = 'kg')
        self.add_param('n_passengers', val = 28.0, desc = 'number of passengers', units = 'unitless')
        self.add_param('pod_period', val = 120.0, desc = 'Time in between departures', units = 's')
        self.add_param('avg_speed', val = 286.86, desc = 'Average Pod Speed', units = 'm/s')
        self.add_param('track_length', val = 600.0e3, desc = 'Track Length', units = 'm')
        self.add_param('land_length', val = 600e3, desc = 'Length traveled over land', units = 'm')
        self.add_param('water_length', val = 0.0e3, desc = 'Length traveled underwater', units = 'm')
        self.add_param('pod_power', val = 1.5e6, desc = 'Power required by pod motor', units = 'W')
        self.add_param('prop_power', val = 350.0e3, desc = 'Power of single propulsive section', units = 'W')
        self.add_param('vac_power', val = 71.049e6, desc = 'Power of vacuums', units = 'W')
        self.add_param('steady_vac_power', val = 950.0e3, desc = 'Steady State run power of vacuum pumps', units = 'W')
        self.add_param('vf', val = 286.86, desc = 'Pod top speed', units = 'm/s')
        self.add_param('g', val = 9.81, desc = 'Gravity', units = 'm/s/s')
        self.add_param('Cd', val = .2, desc = 'Pod drag coefficient', units = 'unitless')
        self.add_param('S', val = 40.42, desc = 'Pod planform area', units = 'm**2')
        self.add_param('p_tunnel', val = 850.0, desc = 'Tunnel Pressure', units = 'Pa')
        self.add_param('T_tunnel', val = 320.0, desc = 'Tunnel Temperature', units = 'K')
        self.add_param('R', val = 287.0, desc = 'Ideal gas constant', units = 'J/kg/K')
        self.add_param('eta', val = .8, desc = 'Propulsive efficiency', units = 'unitless')
        self.add_param('D_mag', val = (9.81*3100.0)/200.0, desc = 'Magnetic Drag', units = 'N')
        self.add_param('thrust_time', val = 1.5, desc = 'Time that pod is over propulsive section', units = 's')
        self.add_param('prop_period', val = 25.0e3, desc = 'distance between propulsive sections', units = 'm')
        self.add_param('num_thrust', val = 10.0, desc = 'Number of booster sections along track', units = 'unitless')

        self.add_output('num_pods', val = 0.0, desc = 'Number of Pods', units = 'unitless')
        self.add_output('ticket_cost', val = 0.0, desc = 'Ticket cost', units = 'USD')
        self.add_output('prop_energy_cost', val = 0.0, desc = 'Cost of propulsion energy', units = 'USD')
        self.add_output('tube_energy_cost', val = 0.0, desc = 'Cost of tube energy', units = 'USD')
        self.add_output('total_energy_cost', val = 0.0, desc = 'Cost of energy consumpition per year', units = 'USD')

    def solve_nonlinear(self, p, u, r):
        land_cost = p['land_cost']
        water_cost = p['water_cost']
        pod_cost = p['pod_cost']
        capital_cost = p['capital_cost']
        energy_cost = p['energy_cost']
        ib = p['ib']
        bm = p['bm']
        operating_time = p['operating_time']
        JtokWh = p['JtokWh']
        m_pod = p['m_pod']
        n_passengers = p['n_passengers']
        pod_period = p['pod_period']
        avg_speed = p['avg_speed']
        track_length = p['track_length']
        land_length = p['land_length']
        water_length = p['water_length']
        # NOTE(review): pod and steady-state vacuum power are negated here —
        # presumably the upstream model reports them as negative draw; confirm
        # against the components that supply these values.
        pod_power = -1.0*p['pod_power']
        prop_power = p['prop_power']
        vac_power = p['vac_power']
        steady_vac_power = -1.0*p['steady_vac_power']
        vf = p['vf']
        g = p['g']
        Cd = p['Cd']
        S = p['S']
        p_tunnel = p['p_tunnel']
        T_tunnel = p['T_tunnel']
        R = p['R']
        eta = p['eta']
        D_mag = p['D_mag']
        thrust_time = p['thrust_time']
        prop_period = p['prop_period']
        num_thrust = p['num_thrust']

        # Track cost per km, weighted by the land/water split of the route.
        length_cost = ((water_length/track_length)*water_cost) + ((land_length/track_length)*land_cost)
        pod_frequency = 1.0/pod_period

        # Pods in service = (one-way trip time) * departure rate, rounded up.
        num_pods = np.ceil((track_length/avg_speed)*pod_frequency)
        flights_per_pod = (operating_time*pod_frequency)/num_pods

        # Daily pod energy (kWh); the .9 factor is inherited from the original
        # model (presumably a duty-cycle allowance — confirm with model docs).
        energy_per_flight = pod_power*(track_length/avg_speed)*.9
        pod_energy = energy_per_flight*flights_per_pod*num_pods*JtokWh
        vac_energy = steady_vac_power*operating_time*JtokWh

        # Launch energy: work against weight + magnetic drag + aero drag over
        # the acceleration distance, divided by propulsive efficiency.
        rho = p_tunnel/(R*T_tunnel)
        start_distance = (vf**2)/(2*g)
        start_energy = ((m_pod*g+D_mag)*start_distance + (.5*Cd*rho*g*S*(start_distance**2)))/eta

        prop_energy = (num_thrust*thrust_time*prop_power + start_energy)*flights_per_pod*num_pods*JtokWh
        tube_energy = prop_energy + vac_energy

        u['num_pods'] = num_pods
        u['prop_energy_cost'] = prop_energy*energy_cost*365
        u['tube_energy_cost'] = tube_energy*energy_cost*365
        u['total_energy_cost'] = (pod_energy+tube_energy)*energy_cost*365
        # Amortize track, pods, capital (with interest) and yearly energy over
        # every seat flown during the bond maturity period.
        u['ticket_cost'] = (length_cost*(track_length/1000.0) + pod_cost*num_pods + capital_cost*(1.0+ib) + \
            energy_cost*(tube_energy + pod_energy)*365.0)/(n_passengers*pod_frequency*bm*365.0*24.0*3600.0)
if __name__ == '__main__':
    # Example driver: wire TicketCost into a minimal OpenMDAO problem and
    # print the ticket cost for the default inputs.
    top = Problem()
    root = top.root = Group()

    params = (('n_passengers', 28.0),
              ('track_length', 600.0e3, {'units' : 'm'}))

    root.add('p', TicketCost())
    root.add('des_vars', IndepVarComp(params), promotes = ['n_passengers'])
    root.connect('n_passengers', 'p.n_passengers')
    root.connect('des_vars.track_length', 'p.track_length')

    top.setup()
    top.run()

    print(top['p.ticket_cost'])

    # Optional sweep of ticket cost vs. yearly passenger volume (kept for
    # reference; requires matplotlib).
    # n_passengers = np.linspace(10,100,num = 90)
    # ticket_cost = np.zeros((1, len(n_passengers)))
    # for i in range(len(n_passengers)):
    # 	top['n_passengers'] = n_passengers[i]
    # 	top.run()
    # 	ticket_cost[0, i] = top['p.ticket_cost']
    # plt.plot(n_passengers*(175200.0/(1.0e6)), ticket_cost[0,:])
    # plt.show()
| apache-2.0 |
rahulkurup/zookeeper | src/contrib/monitoring/test.py | 114 | 8195 | #! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import socket
import sys
from StringIO import StringIO
from check_zookeeper import ZooKeeperServer, NagiosHandler, CactiHandler, GangliaHandler
ZK_MNTR_OUTPUT = """zk_version\t3.4.0--1, built on 06/19/2010 15:07 GMT
zk_avg_latency\t1
zk_max_latency\t132
zk_min_latency\t0
zk_packets_received\t640
zk_packets_sent\t639
zk_outstanding_requests\t0
zk_server_state\tfollower
zk_znode_count\t4
zk_watch_count\t0
zk_ephemerals_count\t0
zk_approximate_data_size\t27
zk_open_file_descriptor_count\t22
zk_max_file_descriptor_count\t1024
"""
ZK_MNTR_OUTPUT_WITH_BROKEN_LINES = """zk_version\t3.4.0
zk_avg_latency\t23
broken-line
"""
ZK_STAT_OUTPUT = """Zookeeper version: 3.3.0-943314, built on 05/11/2010 22:20 GMT
Clients:
/0:0:0:0:0:0:0:1:34564[0](queued=0,recved=1,sent=0)
Latency min/avg/max: 0/40/121
Received: 11
Sent: 10
Outstanding: 0
Zxid: 0x700000003
Mode: follower
Node count: 4
"""
class SocketMock(object):
    """Stand-in for a TCP socket: records outbound traffic and replays the
    canned ``mntr`` output on reads."""

    def __init__(self):
        # Every payload passed to send(), in call order.
        self.sent = []

    def settimeout(self, timeout):
        # Remember the requested timeout; nothing ever actually times out.
        self.timeout = timeout

    def connect(self, address):
        # Record the target endpoint instead of opening a real connection.
        self.address = address

    def send(self, data):
        """Record *data* and report it as fully transmitted."""
        self.sent += [data]
        return len(data)

    def recv(self, size):
        """Serve up to *size* characters of the canned ``mntr`` response."""
        return ZK_MNTR_OUTPUT[:size]

    def close(self):
        """Nothing to release for a fake socket."""
class ZK33xSocketMock(SocketMock):
    """Socket mock emulating a ZooKeeper 3.3.x server: only the `stat`
    four-letter command yields output; anything else returns nothing."""

    def __init__(self):
        SocketMock.__init__(self)
        self.got_stat_cmd = False

    def recv(self, size):
        # Reply with canned `stat` output only after `stat` was sent;
        # an empty reply simulates 3.3.x's lack of the `mntr` command.
        if 'stat' in self.sent:
            return ZK_STAT_OUTPUT[:size]
        else:
            return ''
class UnableToConnectSocketMock(SocketMock):
    """Socket mock that simulates a refused TCP connection."""

    def connect(self, _):
        raise socket.error('[Errno 111] Connection refused')
def create_server_mock(socket_class):
    """Return a ZooKeeperServer instance whose socket factory is replaced by
    *socket_class*, so no real network traffic occurs."""
    class ZooKeeperServerMock(ZooKeeperServer):
        def _create_socket(self):
            return socket_class()
    return ZooKeeperServerMock()
class TestCheckZookeeper(unittest.TestCase):
    """Unit tests for ZooKeeperServer's `mntr`/`stat` output parsing and its
    socket handling (via the socket mocks above)."""

    def setUp(self):
        self.zk = ZooKeeperServer()

    def test_parse_valid_line(self):
        key, value = self.zk._parse_line('something\t5')
        self.assertEqual(key, 'something')
        self.assertEqual(value, 5)

    def test_parse_line_raises_exception_on_invalid_output(self):
        # Anything that is not exactly "key<TAB>value" must be rejected.
        invalid_lines = ['something', '', 'a\tb\tc', '\t1']
        for line in invalid_lines:
            self.assertRaises(ValueError, self.zk._parse_line, line)

    def test_parser_on_valid_output(self):
        data = self.zk._parse(ZK_MNTR_OUTPUT)
        self.assertEqual(len(data), 14)
        self.assertEqual(data['zk_znode_count'], 4)

    def test_parse_should_ignore_invalid_lines(self):
        data = self.zk._parse(ZK_MNTR_OUTPUT_WITH_BROKEN_LINES)
        self.assertEqual(len(data), 2)

    def test_parse_stat_valid_output(self):
        data = self.zk._parse_stat(ZK_STAT_OUTPUT)
        result = {
            'zk_version' : '3.3.0-943314, built on 05/11/2010 22:20 GMT',
            'zk_min_latency' : 0,
            'zk_avg_latency' : 40,
            'zk_max_latency' : 121,
            'zk_packets_received': 11,
            'zk_packets_sent': 10,
            'zk_server_state': 'follower',
            'zk_znode_count': 4
        }
        for k, v in result.iteritems():
            self.assertEqual(v, data[k])

    def test_recv_valid_output(self):
        zk = create_server_mock(SocketMock)
        data = zk.get_stats()
        self.assertEqual(len(data), 14)
        self.assertEqual(data['zk_znode_count'], 4)

    def test_socket_unable_to_connect(self):
        zk = create_server_mock(UnableToConnectSocketMock)
        self.assertRaises(socket.error, zk.get_stats)

    def test_use_stat_cmd_if_mntr_is_not_available(self):
        # 3.3.x servers return nothing for `mntr`; the client must fall
        # back to `stat`.
        zk = create_server_mock(ZK33xSocketMock)
        data = zk.get_stats()
        self.assertEqual(data['zk_version'], '3.3.0-943314, built on 05/11/2010 22:20 GMT')
class HandlerTestCase(unittest.TestCase):
    """Base class for handler tests: redirects stdout into an in-memory
    buffer so each test can inspect what its handler printed via output()."""

    def setUp(self):
        # Stash the real stdout exactly once (the first test to run wins)
        # so tearDown can restore it, then swap in a fresh buffer.
        # A hasattr check replaces the original bare `except:`, which would
        # have silently swallowed any error, not just the missing attribute.
        if not hasattr(sys, '_stdout'):
            sys._stdout = sys.stdout
        sys.stdout = StringIO()

    def tearDown(self):
        # Restore the stdout saved by setUp.
        sys.stdout = sys._stdout

    def output(self):
        """Return everything written to the captured stdout so far."""
        sys.stdout.seek(0)
        return sys.stdout.read()
class TestNagiosHandler(HandlerTestCase):
    """Checks NagiosHandler exit codes (0=OK, 1=WARNING, 2=CRITICAL) and its
    perfdata output format."""

    def _analyze(self, w, c, k, stats):
        # Build a minimal options object and run the handler on one host.
        class Opts(object):
            warning = w
            critical = c
            key = k

        return NagiosHandler().analyze(Opts(), {'localhost:2181':stats})

    def test_ok_status(self):
        r = self._analyze(10, 20, 'a', {'a': 5})
        self.assertEqual(r, 0)
        self.assertEqual(self.output(), 'Ok "a"!|localhost:2181=5;10;20\n')

        # Inverted thresholds (warning > critical) are also accepted.
        r = self._analyze(20, 10, 'a', {'a': 30})
        self.assertEqual(r, 0)

    def test_warning_status(self):
        r = self._analyze(10, 20, 'a', {'a': 15})
        self.assertEqual(r, 1)
        self.assertEqual(self.output(),
            'Warning "a" localhost:2181!|localhost:2181=15;10;20\n')

        r = self._analyze(20, 10, 'a', {'a': 15})
        self.assertEqual(r, 1)

    def test_critical_status(self):
        r = self._analyze(10, 20, 'a', {'a': 30})
        self.assertEqual(r, 2)
        self.assertEqual(self.output(),
            'Critical "a" localhost:2181!|localhost:2181=30;10;20\n')

        r = self._analyze(20, 10, 'a', {'a': 5})
        self.assertEqual(r, 2)

    def test_check_a_specific_key_on_all_hosts(self):
        # The worst status across all hosts wins (s3 is critical here).
        class Opts(object):
            warning = 10
            critical = 20
            key = 'latency'

        r = NagiosHandler().analyze(Opts(), {
            's1:2181': {'latency': 5},
            's2:2181': {'latency': 15},
            's3:2181': {'latency': 35},
        })
        self.assertEqual(r, 2)
        self.assertEqual(self.output(),
            'Critical "latency" s3:2181!|s1:2181=5;10;20 '\
            's3:2181=35;10;20 s2:2181=15;10;20\n')
class TestCactiHandler(HandlerTestCase):
    """Checks CactiHandler's output: per-host values, or a single value when
    only the leader is requested."""

    class Opts(object):
        key = 'a'
        leader = False

        def __init__(self, leader=False):
            self.leader = leader

    def test_output_values_for_all_hosts(self):
        r = CactiHandler().analyze(TestCactiHandler.Opts(), {
            's1:2181':{'a':1},
            's2:2181':{'a':2, 'b':3}
        })
        self.assertEqual(r, None)
        # Host:port colons are converted to underscores for Cacti.
        self.assertEqual(self.output(), 's1_2181:1 s2_2181:2')

    def test_output_single_value_for_leader(self):
        r = CactiHandler().analyze(TestCactiHandler.Opts(leader=True), {
            's1:2181': {'a':1, 'zk_server_state': 'leader'},
            's2:2181': {'a':2}
        })
        self.assertEqual(r, 0)
        self.assertEqual(self.output(), '1\n')
class TestGangliaHandler(unittest.TestCase):
    """Checks that GangliaHandler builds the expected gmetric command line."""

    class TestableGangliaHandler(GangliaHandler):
        # Overrides call() so no external gmetric process is spawned;
        # command lines are recorded for inspection instead.
        def __init__(self):
            GangliaHandler.__init__(self)
            self.cli_calls = []

        def call(self, cli):
            self.cli_calls.append(' '.join(cli))

    def test_send_single_metric(self):
        class Opts(object):
            @property
            def gmetric(self): return '/usr/bin/gmetric'
        opts = Opts()

        h = TestGangliaHandler.TestableGangliaHandler()
        h.analyze(opts, {'localhost:2181':{'latency':10}})
        cmd = "%s -n latency -v 10 -t uint32" % opts.gmetric
        assert cmd in h.cli_calls
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ax003d/openerp | openerp/addons/event_moodle/__init__.py | 54 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_moodle
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
atheed/servo | tests/wpt/web-platform-tests/old-tests/webdriver/windows/window_manipulation.py | 142 | 1556 | # -*- mode: python; fill-column: 100; comment-column: 100; -*-
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from selenium.common import exceptions
class WindowingTest(base_test.WebDriverBaseTest):
    """WebDriver window-manipulation conformance tests: maximize, resize,
    and reposition the browser window."""

    def test_maximize(self):
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        self.driver.maximize_window()

    def test_window_size_manipulation(self):
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        self.driver.set_window_size(400, 400)
        window_size = self.driver.get_window_size()
        self.assertTrue("width" in window_size)
        self.assertTrue("height" in window_size)
        self.assertEquals({"width": 400, "height":400}, window_size)

        """
        todo: make that work
        see: https://w3c.github.io/webdriver/webdriver-spec.html#setwindowsize
        result = self.driver.set_window_size(100, 100)
        self.assertTrue("status" in result)
        self.assertEquals(result["status"], 500)
        """

    def test_window_position_manipulation(self):
        #self.driver.get(self.webserver.where_is("windows/res/win1.html"))
        self.driver.set_window_position(400, 400)
        window_position = self.driver.get_window_position()
        self.assertTrue("x" in window_position)
        self.assertTrue("y" in window_position)
        self.assertEquals({"x": 400, "y": 400}, window_position)
if __name__ == "__main__":
unittest.main()
| mpl-2.0 |
heaven001/android_kernel_sony_msm8974 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Main window: a scrollable canvas on which the scheduler tracer draws
    one row of rectangles per entity, with zoom and keyboard navigation.

    Time is handled in microseconds internally and converted to pixels via
    us_to_px()/px_to_us() at the current zoom level.
    """

    # Vertical layout constants, in pixels.
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        # Let the tracer call back into this window (update_summary etc.).
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Summary text widget; created lazily by update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        # Microseconds -> pixels at the current zoom.  NOTE(review): this is
        # Python 2 code, so `val / (10 ** 3)` truncates for int inputs.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Pixels -> microseconds (inverse of us_to_px).
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin, converted from scroll units to pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Timestamp (us, relative to ts_start) of the left visible edge.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one rectangle in row *nr* spanning [start, end] (absolute us),
        optionally topped with an event-marking strip in *top_color*."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to repaint the [start, end] window (absolute us).
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the currently visible time window.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel to a row index; -1 if it falls in the spacing
        # between rows or outside the drawn area.
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1
        return rect

    def update_summary(self, txt):
        # Replace the summary text widget below the canvas.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Translate a click into (row, timestamp) and forward to the tracer.
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute the virtual width and keep timestamp *x* at the left edge.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # +/- zoom; arrow keys scroll one unit in the given direction.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
SoftwareHeritage/swh-web-ui | swh/web/admin/adminurls.py | 1 | 1231 | # Copyright (C) 2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from swh.web.common.urlsindex import UrlsIndex
class AdminUrls(UrlsIndex):
    """
    Class to manage swh-web admin urls.
    """
    # Namespace under which all admin url patterns are registered.
    scope = 'admin'
class admin_route(object):  # noqa: N801
    """
    Decorator to ease the registration of a swh-web admin endpoint

    Args:
        url_patterns: list of url patterns used by Django to identify the admin routes
        view_name: the name of the Django view associated to the routes used to
            reverse the url
    """  # noqa

    def __init__(self, *url_patterns, view_name=None):
        super().__init__()
        # Anchor every pattern so Django matches the full path component.
        self.url_patterns = ['^' + pattern + '$' for pattern in url_patterns]
        self.view_name = view_name

    def __call__(self, f):
        # register the route and its view in the browse endpoints index
        for pattern in self.url_patterns:
            AdminUrls.add_url_pattern(pattern, f, self.view_name)
        return f
| agpl-3.0 |
jonyroda97/redbot-amigosprovaveis | lib/pip/_vendor/urllib3/packages/backports/makefile.py | 339 | 1461 | # -*- coding: utf-8 -*-
"""
backports.makefile
~~~~~~~~~~~~~~~~~~
Backports the Python 3 ``socket.makefile`` method for use with anything that
wants to create a "fake" socket object.
"""
import io
from socket import SocketIO
def backport_makefile(self, mode="r", buffering=None, encoding=None,
                      errors=None, newline=None):
    """
    Backport of ``socket.makefile`` from Python 3.5.
    """
    if not set(mode) <= set(["r", "w", "b"]):
        raise ValueError(
            "invalid mode %r (only r, w, b allowed)" % (mode,)
        )
    writing = "w" in mode
    # A mode with neither 'r' nor 'w' defaults to reading.
    reading = "r" in mode or not writing
    assert reading or writing
    binary = "b" in mode
    # Build the raw-mode string ("r", "w" or "rw") for SocketIO.
    rawmode = ("r" if reading else "") + ("w" if writing else "")
    raw = SocketIO(self, rawmode)
    self._makefile_refs += 1
    # Normalize buffering: None or negative means the default buffer size.
    if buffering is None or buffering < 0:
        buffering = io.DEFAULT_BUFFER_SIZE
    if buffering == 0:
        if not binary:
            raise ValueError("unbuffered streams must be binary")
        return raw
    if reading and writing:
        buffered = io.BufferedRWPair(raw, raw, buffering)
    elif reading:
        buffered = io.BufferedReader(raw, buffering)
    else:
        assert writing
        buffered = io.BufferedWriter(raw, buffering)
    if binary:
        return buffered
    # Text mode: wrap the buffered stream in a decoding layer.
    text = io.TextIOWrapper(buffered, encoding, errors, newline)
    text.mode = mode
    return text
| gpl-3.0 |
lmcro/webserver | admin/CTK/CTK/List.py | 5 | 3058 | # -*- coding: utf-8 -*-
#
# CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2010-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from Widget import Widget
from Container import Container
from util import props_to_str
# Skeleton markup shared by ListEntry.Render() and List.Render().
ENTRY_HTML = '<%(tag)s id="%(id)s" %(props)s>%(content)s</%(tag)s>'
class ListEntry (Container):
    """A single list entry ('li' by default) holding arbitrary widgets."""

    def __init__(self, _props={}, tag='li'):
        Container.__init__(self)
        self.tag = tag
        self.props = _props.copy()

    def Render(self):
        render = Container.Render(self)
        # An explicit 'id' property overrides the auto-assigned widget id.
        self.id = self.props.get('id', self.id)
        substitutions = {
            'id': self.id,
            'tag': self.tag,
            'props': props_to_str(self.props),
            'content': render.html,
        }
        render.html = ENTRY_HTML % substitutions
        return render
class List (Container):
    """
    Widget for lists of elements. The list can grow dynamically, and
    accept any kind of CTK widget as listed element. Arguments are
    optional.

    Arguments:
        _props: dictionary with properties for the HTML element,
            such as {'name': 'foo', 'id': 'bar', 'class': 'baz'}
        tag: tag to use for the element, either 'ul' for unordered
            lists, or 'ol' for ordered lists. By default, 'ul' is
            used.

    Examples:
        lst = CTK.List()
        lst.Add (CTK.RawHTML('One')
        lst.Add (CTK.Image({'src': '/foo/bar/baz.png'})
    """

    def __init__(self, _props={}, tag='ul'):
        Container.__init__(self)
        self.tag = tag
        self.props = _props.copy()

    def Add(self, widget, props={}):
        """Append one widget (or a list of widgets) as a new list entry."""
        assert isinstance(widget, Widget) or widget is None or type(widget) is list
        entry = ListEntry(props.copy())
        if widget:
            children = widget if type(widget) == list else [widget]
            for child in children:
                entry += child
        Container.__iadd__(self, entry)

    def __iadd__(self, widget):
        self.Add(widget)
        return self

    def Render(self):
        render = Container.Render(self)
        substitutions = {
            'id': self.id,
            'tag': self.tag,
            'props': props_to_str(self.props),
            'content': render.html,
        }
        render.html = ENTRY_HTML % substitutions
        return render
| gpl-2.0 |
Dev-Cloud-Platform/Dev-Cloud | dev_cloud/cc1/src/wi/views/user/user.py | 1 | 7236 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.wi.views.user.user
@author Piotr Wójcik
@date 31.01.2014
"""
from django.contrib import messages
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_protect
from wi.commontags.templatetags.templatetags import filesizeformatmb
from wi.forms.user import CMAuthenticationForm, HelpForm, PasswordChangeForm, \
AccountDataEdit
from wi.utils import get_dict_from_list, messages_ajax
from wi.utils.decorators import django_view, user_permission
from wi.utils.exceptions import RestErrorException
from wi.utils.messages_ajax import ajax_request
from wi.utils.messages_codes import get_message
from wi.utils.states import message_levels_reversed
from wi.utils.views import prep_data
@django_view
@user_permission
def change_cm(request, cm_id, success_url='mai_main'):
    """
    View changing used CM.
    """
    # Store the selected Cluster Manager on the cached session user.
    session_user = request.session['user']
    session_user.cm_id = int(cm_id)
    request.session.modified = True
    messages.success(request, _('Cluster Manager changed.'))
    # Go back where the user came from, or to the fallback url.
    return redirect(request.META['HTTP_REFERER'] or success_url)
@django_view
@ajax_request
@user_permission
def get_messages(request):
    """
    Ajax view fetching user messages.
    """
    if request.method == 'POST':
        response = prep_data('user/message/get_list/', request.session)
        # Translate message codes into human-readable text and map the
        # numeric level back to its symbolic name for the client side.
        for item in response:
            item['text'] = get_message(item['code'], item['params'])
            item['level'] = message_levels_reversed[item['level']]
        return messages_ajax.success(response)
@django_view
@ajax_request
@user_permission
def acc_ajax_get_user_data(request):
    """
    Ajax view. Returns user account data.
    """
    if request.method == 'GET':
        rest_data = prep_data({'user': 'user/user/get_my_data/',
                               'cms': 'guest/cluster/list_names/'
                               }, request.session)
        user_data = rest_data['user']
        # Replace the numeric default cluster id with its human-readable
        # name, looked up in the CM name list fetched above.
        users_cm = get_dict_from_list(rest_data['cms'], user_data['default_cluster_id'], key='cluster_id')
        if users_cm is None:
            raise Exception('User\'s default_cluster_id=%d is not a valid CM id.' % user_data['default_cluster_id'])
        user_data['default_cluster_id'] = users_cm['name']
        return messages_ajax.success(user_data)
@django_view
@ajax_request
@user_permission
@csrf_protect
def acc_ajax_account_data_edit(request, template_name='generic/form.html', form_class=AccountDataEdit):
    """
    Ajax view for user account data editing.
    """
    rest_data = prep_data({'cms': 'guest/cluster/list_names/'}, request.session)
    if request.method == 'POST':
        form = form_class(data=request.POST, rest_data=rest_data)
        if form.is_valid():
            prep_data({'user': ('user/user/edit/', form.cleaned_data)}, request.session)
            # Keep the cached session user in sync with the saved data.
            request.session['user'].email = form.cleaned_data['email']
            request.session['user'].default_cluster_id = form.cleaned_data['default_cluster_id']
            request.session.modified = True
            return messages_ajax.success(_('Account data edited.'))
        # NOTE: an invalid POST falls through to re-render the bound form.
    else:
        form = form_class(data={'email': request.session['user'].email,
                                'default_cluster_id': request.session['user'].default_cluster_id}, rest_data=rest_data)
    return messages_ajax.success(render_to_string(template_name, {'form': form,
                                                                  'text': '',
                                                                  'confirmation': _('Save')},
                                                  context_instance=RequestContext(request)),
                                 status=1)
@django_view
@ajax_request
@user_permission
def acc_ajax_get_user_quotas(request):
    """
    Ajax view for fetching users' quotas.
    """
    if request.method == 'GET':
        quota = prep_data('user/user/check_quota/', request.session)
        # Format the size fields in megabytes for display.
        for field in ('memory', 'used_memory', 'storage', 'used_storage'):
            quota[field] = filesizeformatmb(quota[field])
        return messages_ajax.success(quota)
@django_view
@csrf_protect
@user_permission
def acc_password_change(request, template_name='account/password_change_form.html',
                        password_change_form=PasswordChangeForm):
    """
    View for password changing (for logged users).
    """
    if request.method == "POST":
        form = password_change_form(user=request.session['user'], data=request.POST)
        if form.is_valid():
            new_password = form.cleaned_data['new_password1']
            try:
                prep_data(('user/user/set_password/', {'new_password': new_password}), request.session)
            except RestErrorException as ex:
                # Surface the backend error, but still proceed below —
                # NOTE(review): the session password is updated even on a
                # REST failure; confirm this is intentional.
                messages.error(request, ex.value)
            # Keep the cached session user consistent with the new password.
            request.session['user'].set_password(new_password)
            request.session.modified = True
            return redirect('acc_password_change_done')
    else:
        form = password_change_form(user=request.session['user'])
    return render_to_response(template_name, {'form': form}, context_instance=RequestContext(request))
@django_view
@user_permission
def hlp_form(request, form_class=HelpForm, template_name='help/form.html'):
    """
    View handling help form.
    """
    if request.method == 'POST':
        form = form_class(data=request.POST)
        if form.is_valid():
            topic, issue, email = form.cleaned_data['topic'], form.cleaned_data['issue'], form.cleaned_data['email']
            # Prefer the logged-in user's identity; fall back to the name
            # typed into the form.
            name = str(request.session.get('user', form.cleaned_data['firstlast']))
            topic += _(' from user:') + name + ', email: ' + email
            dictionary = {'issue': issue,
                          'topic': topic}
            try:
                prep_data(('user/user/send_issue/', dictionary), request.session)
            except Exception:
                return redirect('hlp_issue_error')
            return redirect('hlp_issue_sent')
    else:
        form = form_class()
    # Tell the template whether the mailer is available at all.
    rest_data = prep_data('guest/user/is_mailer_active/', request.session)
    return render_to_response(template_name, dict({'form': form}.items() + rest_data.items()),
                              context_instance=RequestContext(request))
| apache-2.0 |
Maseratigsg/kohencoin | test/functional/mempool_spendcoinbase.py | 35 | 2286 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
    """Create one-input, one-output, no-fee coinbase-spending transactions
    and check mempool maturity rules."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 1
        self.setup_clean_chain = False
        # -checkmempool makes the node sanity-check the mempool after each
        # operation, so acceptance bugs surface immediately.
        self.extra_args = [["-checkmempool"]]

    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)
        node0_address = self.nodes[0].getnewaddress()

        # Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
        b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]

        spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])

        # coinbase at height 102 should be too immature to spend
        assert_raises_jsonrpc(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])

        # mempool should have just spend_101:
        assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])

        # mine a block, spend_101 should get confirmed
        self.nodes[0].generate(1)
        assert_equal(set(self.nodes[0].getrawmempool()), set())

        # ... and now height 102 can be spent:
        spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
        assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
# Entry point when the functional test is run directly.
if __name__ == '__main__':
    MempoolSpendCoinbaseTest().main()
| mit |
devs1991/test_edx_docmode | common/djangoapps/embargo/views.py | 148 | 2463 | """Views served by the embargo app. """
from django.http import Http404
from django.views.generic.base import View
from django.conf import settings
from edxmako.shortcuts import render_to_response
from embargo import messages
class CourseAccessMessageView(View):
    """Show a message explaining that the user was blocked from a course. """

    # How the user tried to access the restricted content; each access
    # point selects a different message catalog in `_message`.
    ENROLLMENT_ACCESS_POINT = 'enrollment'
    COURSEWARE_ACCESS_POINT = 'courseware'

    def get(self, request, access_point=None, message_key=None):
        """Show a message explaining that the user was blocked.

        Arguments:
            request (HttpRequest)

        Keyword Arguments:
            access_point (str): Either 'enrollment' or 'courseware',
                indicating how the user is trying to access the restricted
                content.

            message_key (str): An identifier for which message to show.
                See `embargo.messages` for more information.

        Returns:
            HttpResponse

        Raises:
            Http404: If no message is configured for the specified message key.

        """
        blocked_message = self._message(access_point, message_key)

        if blocked_message is None:
            raise Http404

        return render_to_response(blocked_message.template, {})

    def _message(self, access_point, message_key):
        """Retrieve message information.

        Arguments:
            access_point (str): Either 'enrollment' or 'courseware'
            message_key (str): The identifier for which message to show.

        Returns:
            embargo.messages.BlockedMessage or None

        """
        message_dict = dict()

        # Backwards compatibility with themes created for
        # earlier implementations of the embargo app.
        # NOTE: the custom-theme override takes priority over the
        # access-point catalogs below.
        if settings.FEATURES.get('USE_CUSTOM_THEME') and message_key in messages.CUSTOM_THEME_OVERRIDES:
            message_dict = messages.CUSTOM_THEME_OVERRIDES

        # The access point determines which set of messages to use.
        # This allows us to show different messages to students who
        # are enrolling in a course than we show to students
        # who are enrolled and accessing courseware.
        elif access_point == self.ENROLLMENT_ACCESS_POINT:
            message_dict = messages.ENROLL_MESSAGES
        elif access_point == self.COURSEWARE_ACCESS_POINT:
            message_dict = messages.COURSEWARE_MESSAGES

        # Unknown access points or message keys yield None (-> 404).
        return message_dict.get(message_key)
| agpl-3.0 |
nkmk/python-snippets | notebook/numpy_sin_cos_tan.py | 1 | 2677 | import numpy as np
# Demonstrates NumPy trigonometry: degree/radian conversion and the
# sin/cos/tan families with their inverses. The comment after each print
# shows the expected output (observed with NumPy 1.19.0).
print(np.__version__)
# 1.19.0

print(np.pi)
# 3.141592653589793

# --- degrees -> radians: scalars, arrays and plain lists ---
print(np.radians(180))
# 3.141592653589793

print(type(np.radians(180)))
# <class 'numpy.float64'>

a = np.array([0, 90, 180])
print(type(a))
# <class 'numpy.ndarray'>

print(np.radians(a))
# [0.         1.57079633 3.14159265]

print(type(np.radians(a)))
# <class 'numpy.ndarray'>

l = [0, 90, 180]
print(type(l))
# <class 'list'>

print(np.radians(l))
# [0.         1.57079633 3.14159265]

print(type(np.radians(l)))
# <class 'numpy.ndarray'>

print(np.radians(a))
# [0.         1.57079633 3.14159265]

print(np.round(np.radians(a)))
# [0. 2. 3.]

print(np.round(np.radians(a), 2))
# [0.   1.57 3.14]

# --- sin and print precision ---
print(np.sin(np.radians(30)))
# 0.49999999999999994

print(np.round(np.sin(np.radians(30)), 1))
# 0.5

print(np.sin(np.radians([0, 30, 90])))
# [0.  0.5 1. ]

print(np.sin(np.radians([0, 30, 90]))[1])
# 0.49999999999999994

np.set_printoptions(precision=20)
print(np.sin(np.radians([0, 30, 90])))
# [0.                  0.49999999999999994 1.                 ]

np.set_printoptions(precision=8)  # reset to default

# --- radians/deg2rad and degrees/rad2deg are aliases of each other ---
print(np.radians([0, 90, 180]))
# [0.         1.57079633 3.14159265]

print(np.deg2rad([0, 90, 180]))
# [0.         1.57079633 3.14159265]

print(np.degrees([0, np.pi / 2, np.pi]))
# [  0.  90. 180.]

print(np.rad2deg([0, np.pi / 2, np.pi]))
# [  0.  90. 180.]

# --- inverse functions ---
print(np.sin(np.radians([0, 30, 90])))
# [0.  0.5 1. ]

print(np.degrees(np.arcsin([0, 0.5, 1])))
# [ 0. 30. 90.]

print(np.cos(np.radians([0, 60, 90])))
# [1.000000e+00 5.000000e-01 6.123234e-17]

print(np.round(np.cos(np.radians([0, 60, 90])), 1))
# [1.  0.5 0. ]

print(np.degrees(np.arccos([0, 0.5, 1])))
# [90. 60.  0.]

print(np.tan(np.radians([0, 45, 90])))
# [0.00000000e+00 1.00000000e+00 1.63312394e+16]

print(np.degrees(np.arctan([0, 1, np.inf])))
# [ 0. 45. 90.]

print(np.degrees(np.arctan([-np.inf, -1, 0, 1, np.inf])))
# [-90. -45.   0.  45.  90.]

# --- arctan2: quadrant-aware two-argument arctangent ---
print(np.degrees(np.arctan2(-1, 1)))
# -45.0

print(np.degrees(np.arctan2(1, -1)))
# 135.0

print(np.degrees(np.arctan2([0, 1, 1, 1, 0],
                            [0, 1, 0, -1, -1])))
# [  0.  45.  90. 135. 180.]

print(np.degrees(np.arctan2([0, -1, -1, -1, 0],
                            [0, 1, 0, -1, -1])))
# [   0.  -45.  -90. -135.  180.]

# --- signed-zero behaviour ---
print(np.degrees(np.arctan2(0, -1)))
# 180.0

print(np.degrees(np.arctan2(-0.0, -1.0)))
# -180.0

print(np.degrees(np.arctan2(-0, -1)))
# 180.0

print(np.degrees(np.arctan2([0.0, -0.0, 0.0, -0.0],
                            [0.0, 0.0, -0.0, -0.0])))
# [   0.   -0.  180. -180.]

print(np.sin(-0.0))
# -0.0

print(np.arcsin(-0.0))
# -0.0

print(np.tan(-0.0))
# -0.0

print(np.arctan(-0.0))
# -0.0
| mit |
egraba/vbox_openbsd | VirtualBox-5.0.0/src/libs/xpcom18a4/python/gen_python_deps.py | 11 | 3486 | #!/usr/bin/python
"""
Copyright (C) 2009-2013 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
import os,sys
# Python versions and install prefixes probed for a usable CPython dev setup.
versions = ["2.3", "2.4", "2.5", "2.6", "2.7",]
prefixes = ["/usr", "/usr/local", "/opt", "/opt/local"]
# Maps version string -> [include_dir, lib, lib64] for each detected install.
known = {}
def checkPair(p, v, dllpre, dllsuff, bitness_magic):
    """Probe prefix `p` for a Python `v` dev install.

    Returns [include_dir, lib, lib64] on success, None when Python.h is
    missing. `bitness_magic`: 0 = no 64-bit lib, 1 = Solaris-style lib/64,
    2 = Linux amd64 (multiarch-style path, with lib64 fallback).
    """
    include_dir = os.path.join(p, "include", "python" + v)
    if not os.path.isfile(os.path.join(include_dir, "Python.h")):
        return None

    libname = dllpre + "python" + v + dllsuff
    # Prefer the Debian-style multiarch location, then the plain lib dir.
    lib = os.path.join(p, "lib/i386-linux-gnu", libname)
    if not os.path.isfile(lib):
        lib = os.path.join(p, "lib", libname)

    if bitness_magic == 1:
        lib64 = os.path.join(p, "lib", "64", libname)
    elif bitness_magic == 2:
        lib64 = os.path.join(p, "lib/x86_64-linux-gnu", libname)
        if not os.path.isfile(lib64):
            lib64 = os.path.join(p, "lib64", libname)
        if not os.path.isfile(lib64):
            lib64 = lib
    else:
        lib64 = None

    return [include_dir, lib, lib64]
def print_vars(vers, known, sep, bitness_magic):
    # Emit makefile variable assignments for one Python install.
    # NOTE: Python 2 print statements; `sep` terminates each line so the
    # output can be split again after capture via $(shell ...) in kmk.
    print "VBOX_PYTHON%s_INC=%s%s" %(vers, known[0], sep)
    if bitness_magic > 0:
        # 64-bit build: main lib is the 64-bit one, 32-bit kept as _X86.
        print "VBOX_PYTHON%s_LIB=%s%s" %(vers, known[2], sep)
        print "VBOX_PYTHON%s_LIB_X86=%s%s" %(vers, known[1], sep)
    else:
        print "VBOX_PYTHON%s_LIB=%s%s" %(vers, known[1], sep)
def main(argv):
    # argv: [script, target_os, arch, multi] — all optional, see below.
    global prefixes
    global versions
    dllpre = "lib"
    dllsuff = ".so"
    bitness_magic = 0
    if len(argv) > 1:
        target = argv[1]
    else:
        target = sys.platform
    if len(argv) > 2:
        arch = argv[2]
    else:
        arch = "unknown"
    if len(argv) > 3:
        multi = int(argv[3])
    else:
        multi = 1
    # multi == 0: only probe the running interpreter's own version in /usr.
    if multi == 0:
        prefixes = ["/usr"]
        versions = [str(sys.version_info[0])+'.'+str(sys.version_info[1])]
    if target == 'darwin':
        ## @todo Pick up the locations from VBOX_PATH_MACOSX_SDK_10_*.
        prefixes = ['/Developer/SDKs/MacOSX10.4u.sdk/usr',
                    '/Developer/SDKs/MacOSX10.5.sdk/usr',
                    '/Developer/SDKs/MacOSX10.6.sdk/usr',
                    '/Developer/SDKs/MacOSX10.7.sdk/usr']
        dllsuff = '.dylib'
    if target == 'solaris' and arch == 'amd64':
        bitness_magic = 1
    if target == 'linux' and arch == 'amd64':
        bitness_magic = 2
    # First prefix that yields a usable install wins for each version.
    for v in versions:
        for p in prefixes:
            c = checkPair(p, v, dllpre, dllsuff, bitness_magic)
            if c is not None:
                known[v] = c
                break
    keys = known.keys()
    # we want default to be the lowest versioned Python
    # (Python 2: keys() returns a plain list here, so in-place sort works.)
    keys.sort()
    d = None
    # We need separator other than newline, to sneak through $(shell)
    sep = "|"
    for k in keys:
        if d is None:
            d = k
        vers = k.replace('.', '')
        print_vars(vers, known[k], sep, bitness_magic)
    if d is not None:
        print_vars("DEF", known[d], sep, bitness_magic)
# Script entry point: prints the detected Python variables for kmk.
if __name__ == '__main__':
    main(sys.argv)
| mit |
wagnerand/olympia | src/olympia/addons/tests/test_tasks.py | 2 | 21631 | import mock
import os
import pytest
import tempfile
from datetime import datetime
from django.conf import settings
from django.core import mail
from django.test.utils import override_settings
from freezegun import freeze_time
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons import cron
from olympia.addons.models import Addon, AddonCategory, MigratedLWT
from olympia.addons.tasks import (
add_static_theme_from_lwt, create_persona_preview_images,
migrate_legacy_dictionary_to_webextension, migrate_lwts_to_static_themes,
migrate_webextensions_to_git_storage,
recreate_theme_previews, save_persona_image)
from olympia.amo.storage_utils import copy_stored_file
from olympia.amo.tests import (
addon_factory, collection_factory, TestCase, user_factory, version_factory)
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.utils import image_size
from olympia.applications.models import AppVersion
from olympia.constants import licenses
from olympia.constants.categories import CATEGORIES
from olympia.files.models import FileUpload
from olympia.files.utils import id_to_path
from olympia.ratings.models import Rating
from olympia.stats.models import ThemeUpdateCount, UpdateCount
from olympia.tags.models import Tag
from olympia.users.models import UserProfile
from olympia.versions.models import License, VersionPreview
from olympia.lib.git import AddonGitRepository
class TestPersonaImageFunctions(TestCase):
    """Tests for the persona image helpers create_persona_preview_images()
    and save_persona_image()."""

    @mock.patch('olympia.addons.tasks.pngcrush_image')
    def test_create_persona_preview_image(self, pngcrush_image_mock):
        addon = addon_factory()
        # Backdate `modified` so we can verify set_modified_on refreshed it.
        addon.modified = self.days_ago(41)
        # Given an image, a 680x100 and a 32x32 thumbnails need to be generated
        # and processed with pngcrush.
        expected_dst1 = tempfile.NamedTemporaryFile(
            mode='wb', suffix=".png", delete=False, dir=settings.TMP_PATH)
        expected_dst2 = tempfile.NamedTemporaryFile(
            mode='wb', suffix=".png", delete=False, dir=settings.TMP_PATH)
        create_persona_preview_images(
            src=get_image_path('persona-header.jpg'),
            full_dst=[expected_dst1.name, expected_dst2.name],
            set_modified_on=addon.serializable_reference(),
        )
        # pngcrush_image should have been called twice, once for each
        # destination thumbnail.
        assert pngcrush_image_mock.call_count == 2
        assert pngcrush_image_mock.call_args_list[0][0][0] == (
            expected_dst1.name)
        assert pngcrush_image_mock.call_args_list[1][0][0] == (
            expected_dst2.name)

        assert image_size(expected_dst1.name) == (680, 100)
        assert image_size(expected_dst2.name) == (32, 32)

        addon.reload()
        self.assertCloseToNow(addon.modified)

    @mock.patch('olympia.addons.tasks.pngcrush_image')
    def test_save_persona_image(self, pngcrush_image_mock):
        # save_persona_image() simply saves an image as a png to the
        # destination file. The image should be processed with pngcrush.
        expected_dst = tempfile.NamedTemporaryFile(
            mode='wb', suffix=".png", delete=False, dir=settings.TMP_PATH)
        save_persona_image(
            get_image_path('persona-header.jpg'),
            expected_dst.name
        )
        # pngcrush_image should have been called once.
        assert pngcrush_image_mock.call_count == 1
        assert pngcrush_image_mock.call_args_list[0][0][0] == expected_dst.name

    @mock.patch('olympia.addons.tasks.pngcrush_image')
    def test_save_persona_image_not_an_image(self, pngcrush_image_mock):
        # If the source is not an image, save_persona_image() should just
        # return early without writing the destination or calling pngcrush.
        expected_dst = tempfile.NamedTemporaryFile(
            mode='wb', suffix=".png", delete=False, dir=settings.TMP_PATH)
        save_persona_image(
            get_image_path('non-image.png'),
            expected_dst.name
        )
        # pngcrush_image should not have been called.
        assert pngcrush_image_mock.call_count == 0
        # the destination file should not have been written to.
        assert os.stat(expected_dst.name).st_size == 0
@pytest.mark.django_db
@mock.patch('olympia.addons.tasks.add_static_theme_from_lwt')
def test_migrate_lwts_to_static_themes(add_static_theme_from_lwt_mock):
    """The migration task deletes migrated LWTs, records MigratedLWT rows,
    and skips entries where the conversion returns falsy or raises."""
    # Include two LWT that won't get migrated sandwiched between some good LWTs
    persona_a = addon_factory(type=amo.ADDON_PERSONA, slug='theme_a')
    persona_none = addon_factory(type=amo.ADDON_PERSONA, slug='theme_none')
    persona_b = addon_factory(type=amo.ADDON_PERSONA, slug='theme_b')
    persona_raise = addon_factory(type=amo.ADDON_PERSONA, slug='theme_raise')
    persona_c = addon_factory(type=amo.ADDON_PERSONA, slug='theme_c')

    addon_a = addon_factory(type=amo.ADDON_STATICTHEME)
    addon_b = addon_factory(type=amo.ADDON_STATICTHEME)
    addon_c = addon_factory(type=amo.ADDON_STATICTHEME)
    add_static_theme_from_lwt_mock.side_effect = [
        addon_a, False, addon_b, Exception('foo'), addon_c]

    # call the migration task, as the command would:
    migrate_lwts_to_static_themes(
        [persona_a.id, persona_none.id, persona_b.id, persona_raise.id,
         persona_c.id])

    assert MigratedLWT.objects.all().count() == 3
    assert Addon.objects.filter(type=amo.ADDON_PERSONA).count() == 2

    # Each successful migration follows the same pattern.
    migrated = (
        (persona_a, addon_a, 'theme_a'),
        (persona_b, addon_b, 'theme_b'),
        (persona_c, addon_c, 'theme_c'),
    )
    for persona, static_addon, expected_slug in migrated:
        persona.reload()
        static_addon.reload()
        assert persona.status == amo.STATUS_DELETED
        assert MigratedLWT.objects.get(
            lightweight_theme=persona).static_theme == static_addon
        assert static_addon.slug == expected_slug

    assert len(mail.outbox) == 0
@override_settings(ENABLE_ADDON_SIGNING=True)
class TestAddStaticThemeFromLwt(TestCase):
    """Tests for add_static_theme_from_lwt(): metadata, ratings, counts
    and collection membership must carry over from the LWT."""

    # Fixed timestamps used to verify created/modified/last_updated are
    # preserved on the migrated static theme.
    create_date = datetime(2000, 1, 1, 1, 1, 1)
    modify_date = datetime(2008, 8, 8, 8, 8, 8)
    update_date = datetime(2009, 9, 9, 9, 9, 9)

    def setUp(self):
        super(TestAddStaticThemeFromLwt, self).setUp()
        # Avoid real signing and real xpi building during the test.
        self.call_signing_mock = self.patch(
            'olympia.lib.crypto.signing.call_signing')
        self.build_mock = self.patch(
            'olympia.addons.tasks.build_static_theme_xpi_from_lwt')
        self.build_mock.side_effect = self._mock_xpi_side_effect
        self.call_signing_mock.return_value = 'abcdefg1234'
        AppVersion.objects.get_or_create(
            application=amo.FIREFOX.id, version='53.0')
        AppVersion.objects.get_or_create(
            application=amo.FIREFOX.id, version='*')
        user_factory(id=settings.TASK_USER_ID, email='taskuser@mozilla.com')

    def _mock_xpi_side_effect(self, lwt, upload_path):
        # Stand-in for build_static_theme_xpi_from_lwt(): copy a fixture
        # xpi into place instead of building one.
        xpi_path = os.path.join(
            settings.ROOT,
            'src/olympia/devhub/tests/addons/mozilla_static_theme.zip')
        copy_stored_file(xpi_path, upload_path)
        assert not os.path.isdir(upload_path)
        return mock.DEFAULT

    def _check_result(self, static_theme, authors, tags, categories, license_,
                      ratings, collection):
        # metadata is correct
        assert list(static_theme.authors.all()) == authors
        assert list(static_theme.tags.all()) == tags
        assert len(categories) == 1
        lwt_cat = categories[0]
        # The single LWT category must be mirrored for both applications.
        static_theme_cats = [
            (cat.name, cat.application) for cat in static_theme.all_categories]
        assert static_theme_cats == [
            (lwt_cat.name, amo.FIREFOX.id), (lwt_cat.name, amo.ANDROID.id)]
        assert static_theme.current_version.license.builtin == license_
        # status is good
        assert static_theme.status == amo.STATUS_PUBLIC
        current_file = static_theme.current_version.files.get()
        assert current_file.status == amo.STATUS_PUBLIC
        # Ratings were migrated
        assert list(Rating.unfiltered.filter(addon=static_theme)) == ratings
        log_entries = ActivityLog.objects.filter(
            action=amo.LOG.ADD_RATING.id, addonlog__addon=static_theme)
        assert log_entries.count() == len(ratings)
        for rating, log_entry in zip(ratings, log_entries):
            arguments = log_entry.arguments
            assert rating in arguments
            assert static_theme in arguments
        # The collection has the new theme
        if collection:
            assert static_theme in list(collection.addons.all())
            assert collection.addons.filter(
                type=amo.ADDON_PERSONA).count() == 0
            assert collection.addons.filter(
                type=amo.ADDON_STATICTHEME).count() == 1
            assert collection.addons.count() == 2
        # UpdateCounts were copied.
        assert UpdateCount.objects.filter(
            addon_id=static_theme.id).count() == 2
        # xpi was signed
        self.call_signing_mock.assert_called_with(current_file)
        assert current_file.cert_serial_num == 'abcdefg1234'
        assert static_theme.created == self.create_date
        assert static_theme.modified == self.modify_date
        cron.addon_last_updated()  # Make sure the last_updated change stuck.
        assert static_theme.reload().last_updated == self.update_date

    def test_add_static_theme_from_lwt(self):
        author = user_factory()
        persona = addon_factory(
            type=amo.ADDON_PERSONA, users=[author], name='Firefox Theme')
        persona.update(
            created=self.create_date, modified=self.modify_date,
            last_updated=self.update_date)
        persona.persona.license = licenses.LICENSE_CC_BY_ND.id
        Tag.objects.create(tag_text='themey').save_tag(persona)
        License.objects.create(builtin=licenses.LICENSE_CC_BY_ND.builtin)
        rating_user = user_factory()
        rating = Rating.objects.create(
            addon=persona, version=persona.current_version, user=rating_user,
            rating=2, body=u'fooooo', user_responsible=rating_user)
        ThemeUpdateCount.objects.create(
            addon_id=persona.id, date=datetime(2018, 1, 1), count=123)
        ThemeUpdateCount.objects.create(
            addon_id=persona.id, date=datetime(2018, 2, 1), count=456)
        # Create a count for an addon that shouldn't be migrated too.
        ThemeUpdateCount.objects.create(
            addon_id=addon_factory().id, date=datetime(2018, 2, 1), count=45)
        # And add it to a collection
        collection = collection_factory()
        collection.add_addon(persona)
        collection.add_addon(addon_factory())

        static_theme = add_static_theme_from_lwt(persona)

        self._check_result(
            static_theme, [author], list(persona.tags.all()),
            persona.all_categories, licenses.LICENSE_CC_BY_ND.builtin,
            [rating], collection)

    def test_add_static_theme_broken_lwt(self):
        """What if no author or license or category?"""
        persona = addon_factory(type=amo.ADDON_PERSONA)
        persona.update(
            created=self.create_date, modified=self.modify_date,
            last_updated=self.update_date)
        assert list(persona.authors.all()) == []  # no author
        persona.persona.license = None  # no license
        AddonCategory.objects.filter(addon=persona).delete()
        assert persona.all_categories == []  # no category
        License.objects.create(builtin=licenses.LICENSE_COPYRIGHT_AR.builtin)
        rating_user = user_factory()
        rating = Rating.objects.create(
            addon=persona, version=persona.current_version, user=rating_user,
            rating=2, body=u'fooooo', user_responsible=rating_user)
        rating.delete()  # delete the rating - should still be migrated.
        # Add 2 more Ratings for different addons that shouldn't be copied.
        Rating.objects.create(
            addon=addon_factory(), user=rating_user,
            rating=3, body=u'tgd', user_responsible=rating_user)
        Rating.objects.create(
            addon=addon_factory(), user=rating_user,
            rating=4, body=u'tgffd', user_responsible=rating_user)
        ThemeUpdateCount.objects.create(
            addon_id=persona.id, date=datetime(2018, 1, 1), count=123)
        ThemeUpdateCount.objects.create(
            addon_id=persona.id, date=datetime(2018, 2, 1), count=456)
        # Create a count for an addon that shouldn't be migrated too.
        ThemeUpdateCount.objects.create(
            addon_id=addon_factory().id, date=datetime(2018, 2, 1), count=45)

        static_theme = add_static_theme_from_lwt(persona)

        # Fallbacks: task-user author, "other" category, AR copyright license.
        default_author = UserProfile.objects.get(
            email=settings.MIGRATED_LWT_DEFAULT_OWNER_EMAIL)
        desktop_default_category = (
            CATEGORIES[amo.FIREFOX.id][amo.ADDON_STATICTHEME]['other'])
        android_default_category = (
            CATEGORIES[amo.ANDROID.id][amo.ADDON_STATICTHEME]['other'])
        self._check_result(
            static_theme, [default_author], [], [desktop_default_category],
            licenses.LICENSE_COPYRIGHT_AR.builtin, [rating], None)
        # Double check its the exact category we want.
        assert static_theme.all_categories == [
            desktop_default_category, android_default_category]
@override_settings(ENABLE_ADDON_SIGNING=True)
class TestMigrateLegacyDictionaryToWebextension(TestCase):
    """Tests for the migrate_legacy_dictionary_to_webextension task."""

    def setUp(self):
        # The migration runs on behalf of the task user, which must exist.
        self.user = user_factory(
            id=settings.TASK_USER_ID, username='taskuser',
            email='taskuser@mozilla.com')
        # Freeze time so the legacy add-on's original dates are predictable.
        with freeze_time('2017-07-27 07:00'):
            self.addon = addon_factory(
                type=amo.ADDON_DICT,
                guid='@my-dict',  # Same id used in dict-webext.xpi.
                version_kw={'version': '6.3'})

        AppVersion.objects.get_or_create(
            application=amo.FIREFOX.id, version='61.0')
        AppVersion.objects.get_or_create(
            application=amo.FIREFOX.id, version='*')

        # Stub out signing so no real signing server is contacted.
        self.call_signing_mock = self.patch(
            'olympia.lib.crypto.signing.call_signing')
        self.call_signing_mock.return_value = 'abcdefg1234'

        # Replace the real xpi build with copying a pre-built fixture.
        self.build_mock = self.patch(
            'olympia.addons.tasks.build_webext_dictionary_from_legacy')
        self.build_mock.side_effect = self._mock_xpi_side_effect

    def _mock_xpi_side_effect(self, addon, destination):
        # Side effect for the build mock: copy a fixture webextension
        # dictionary to *destination* instead of building one.
        xpi_path = os.path.join(
            settings.ROOT, 'src/olympia/files/fixtures/files/dict-webext.xpi')
        copy_stored_file(xpi_path, destination)
        assert not os.path.isdir(destination)
        # mock.DEFAULT keeps the mock's configured return value in place.
        return mock.DEFAULT

    def test_basic(self):
        assert not FileUpload.objects.exists()
        assert not ActivityLog.objects.filter(
            action=amo.LOG.ADD_VERSION.id).exists()
        old_version = self.addon.current_version
        self.build_mock.return_value = 'fake-locale'

        # Freeze time again so the migration date can be asserted below.
        with freeze_time('2018-08-28 08:00'):
            self.migration_date = datetime.now()
            migrate_legacy_dictionary_to_webextension(self.addon)

        self.build_mock.assert_called_once_with(self.addon, mock.ANY)
        assert FileUpload.objects.exists()
        self.addon.reload()
        assert self.addon.target_locale == 'fake-locale'
        assert self.addon.current_version != old_version
        activity_log = ActivityLog.objects.filter(
            action=amo.LOG.ADD_VERSION.id).get()
        assert activity_log.arguments == [
            self.addon.current_version, self.addon
        ]
        assert self.addon.last_updated == self.migration_date

        current_file = self.addon.current_version.all_files[0]
        assert current_file.datestatuschanged == self.migration_date
        assert current_file.status == amo.STATUS_PUBLIC

        # The new file must have been signed with the mocked signer.
        self.call_signing_mock.assert_called_with(current_file)
        assert current_file.cert_serial_num == 'abcdefg1234'
class TestMigrateWebextensionsToGitStorage(TestCase):
    """Tests for the migrate_webextensions_to_git_storage task."""

    def test_basic(self):
        addon = addon_factory(file_kw={'filename': 'webextension_no_id.xpi'})

        migrate_webextensions_to_git_storage([addon.pk])

        # A git repository is created on disk under the add-on's id path.
        repo = AddonGitRepository(addon.pk)
        assert repo.git_repository_path == os.path.join(
            settings.GIT_FILE_STORAGE_PATH, id_to_path(addon.id), 'package')
        assert os.listdir(repo.git_repository_path) == ['.git']

    @mock.patch('olympia.versions.tasks.extract_version_to_git')
    def test_no_files(self, extract_mock):
        addon = addon_factory()
        addon.current_version.files.all().delete()

        migrate_webextensions_to_git_storage([addon.pk])

        # Nothing to extract when the version has no files.
        extract_mock.assert_not_called()

    @mock.patch('olympia.versions.tasks.extract_version_to_git')
    def test_skip_already_migrated_versions(self, extract_mock):
        addon = addon_factory(file_kw={'filename': 'webextension_no_id.xpi'})
        version_to_migrate = addon.current_version
        already_migrated_version = version_factory(
            addon=addon, file_kw={'filename': 'webextension_no_id.xpi'})
        already_migrated_version.update(git_hash='already migrated...')

        migrate_webextensions_to_git_storage([addon.pk])

        # Only once instead of twice: the version with a git_hash is skipped.
        extract_mock.assert_called_once_with(version_to_migrate.pk)

    @mock.patch('olympia.versions.tasks.extract_version_to_git')
    def test_migrate_versions_from_old_to_new(self, extract_mock):
        addon = addon_factory(file_kw={'filename': 'webextension_no_id.xpi'})
        oldest_version = addon.current_version
        oldest_version.update(created=self.days_ago(6))
        older_version = version_factory(
            created=self.days_ago(5),
            addon=addon, file_kw={'filename': 'webextension_no_id.xpi'})
        most_recent = version_factory(
            created=self.days_ago(2),
            addon=addon, file_kw={'filename': 'webextension_no_id.xpi'})

        migrate_webextensions_to_git_storage([addon.pk])

        # All three versions are extracted, in creation order (oldest first).
        assert extract_mock.call_count == 3
        assert extract_mock.call_args_list[0][0][0] == oldest_version.pk
        assert extract_mock.call_args_list[1][0][0] == older_version.pk
        assert extract_mock.call_args_list[2][0][0] == most_recent.pk
@pytest.mark.django_db
def test_recreate_theme_previews():
    """recreate_theme_previews() rebuilds the full set of three previews."""
    theme_zip = os.path.join(
        settings.ROOT,
        'src/olympia/devhub/tests/addons/mozilla_static_theme.zip')

    # One theme with no previews at all, one with a single (stale) preview.
    bare_theme = addon_factory(type=amo.ADDON_STATICTHEME)
    copy_stored_file(
        theme_zip,
        bare_theme.current_version.all_files[0].file_path)
    previewed_theme = addon_factory(type=amo.ADDON_STATICTHEME)
    copy_stored_file(
        theme_zip,
        previewed_theme.current_version.all_files[0].file_path)
    VersionPreview.objects.create(
        version=previewed_theme.current_version,
        sizes={'image': [123, 456], 'thumbnail': [34, 45]})
    assert bare_theme.current_previews.count() == 0
    assert previewed_theme.current_previews.count() == 1

    recreate_theme_previews([bare_theme.id, previewed_theme.id])

    assert bare_theme.reload().current_previews.count() == 3
    assert previewed_theme.reload().current_previews.count() == 3
    stored_sizes = bare_theme.current_previews.values_list(
        'sizes', flat=True)
    # One preview per render size: header, list and single.
    expected_sizes = [
        {'image': list(amo.THEME_PREVIEW_SIZES[kind]['full']),
         'thumbnail': list(amo.THEME_PREVIEW_SIZES[kind]['thumbnail'])}
        for kind in ('header', 'list', 'single')]
    assert list(stored_sizes) == expected_sizes
@pytest.mark.django_db
@mock.patch('olympia.addons.tasks.parse_addon')
def test_create_missing_theme_previews(parse_addon_mock):
    """only_missing=True regenerates previews only when some are absent/broken."""
    parse_addon_mock.return_value = {}
    theme = addon_factory(type=amo.ADDON_STATICTHEME)
    # Give the theme a complete set of three previews; keep a handle on one.
    first_preview, _, _ = [
        VersionPreview.objects.create(
            version=theme.current_version,
            sizes={'image': [123, 456], 'thumbnail': [34, 45]})
        for _ in range(3)]

    # addon has 3 complete previews already so skip when only_missing=True
    with mock.patch(
            'olympia.addons.tasks.generate_static_theme_preview') as gen_mock:
        recreate_theme_previews([theme.id], only_missing=True)
        assert gen_mock.call_count == 0
        recreate_theme_previews([theme.id], only_missing=False)
        assert gen_mock.call_count == 1

    # Break one of the previews: it should now be regenerated.
    first_preview.update(sizes={})
    with mock.patch(
            'olympia.addons.tasks.generate_static_theme_preview') as gen_mock:
        recreate_theme_previews([theme.id], only_missing=True)
        assert gen_mock.call_count == 1

    # Delete it so the add-on only has 2 previews: regenerated as well.
    first_preview.delete()
    with mock.patch(
            'olympia.addons.tasks.generate_static_theme_preview') as gen_mock:
        recreate_theme_previews([theme.id], only_missing=True)
        assert gen_mock.call_count == 1
| bsd-3-clause |
Fl0rianFischer/sme_odoo | addons/event/models/event_config_settings.py | 44 | 1952 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class event_config_settings(osv.TransientModel):
    """Configuration wizard for the Events application (legacy osv API).

    Each ``module_*`` selection toggles installation of an optional module;
    the other selections toggle behaviour flags or implied security groups.
    """
    _name='event.config.settings'
    _inherit='res.config.settings'

    _columns = {
        # Install event_sale to enable paid tickets.
        'module_event_sale': fields.selection([
            (0, "All events are free"),
            (1, 'Allow selling tickets')
            ], "Tickets",
            help='Install the event_sale module'),
        # Install website_event_track for per-event mini websites.
        'module_website_event_track': fields.selection([
            (0, "No mini website per event"),
            (1, 'Allow tracks, agenda and dedicated menus/website per event')
            ], "Tracks and Agenda",
            help='Install the module website_event_track'),
        # Install website_event_questions for extra subscription questions.
        'module_website_event_questions': fields.selection([
            (0, "No extra questions on subscriptions"),
            (1, 'Allow adding extra questions on subscriptions')
            ], "Subscription Survey",
            help='Install the website_event_questions module'),
        # NOTE: 1 means auto-confirm, 0 means manual confirmation.
        'auto_confirmation': fields.selection([
            (1, 'No validation step on subscription'),
            (0, "Manually confirm every subscription")
            ], "Auto Confirmation",
            help='Unselect this option to manually manage draft event and draft subscription'),
        # Grants the implied group when email scheduling is enabled.
        'group_email_scheduling': fields.selection([
            (0, "No automated emails"),
            (1, 'Schedule emails to attendees and subscribers')
            ], "Email Scheduling",
            help='You will be able to configure emails, and to schedule them to be automatically sent to the attendees on subscription and/or attendance',
            implied_group='event.group_email_scheduling'),
    }

    def set_default_auto_confirmation(self, cr, uid, ids, context=None):
        """Persist the wizard's auto_confirmation value as the global default."""
        config_value = self.browse(cr, uid, ids, context=context).auto_confirmation
        self.pool.get('ir.values').set_default(cr, uid, 'event.config.settings', 'auto_confirmation', config_value)
| gpl-3.0 |
mahim97/zulip | zerver/webhooks/github_webhook/tests.py | 5 | 21928 | from typing import Dict, Optional, Text
import ujson
from mock import MagicMock, patch
from zerver.lib.test_classes import WebhookTestCase
from zerver.lib.webhooks.git import COMMITS_LIMIT
from zerver.models import Message
class GithubWebhookTest(WebhookTestCase):
    """End-to-end tests for the GitHub webhook integration.

    Each test posts a recorded GitHub event payload (fixture) to the webhook
    endpoint and verifies the stream message (or its absence) that Zulip
    generates, dispatching on the X-GitHub-Event header.
    """

    STREAM_NAME = 'github'
    URL_TEMPLATE = "/api/v1/external/github?stream={stream}&api_key={api_key}"
    FIXTURE_DIR_NAME = 'github_webhook'

    # Expected message subjects, grouped by the kind of event under test.
    EXPECTED_SUBJECT_REPO_EVENTS = u"public-repo"
    EXPECTED_SUBJECT_ISSUE_EVENTS = u"public-repo / Issue #2 Spelling error in the README file"
    EXPECTED_SUBJECT_PR_EVENTS = u"public-repo / PR #1 Update the README with new information"
    EXPECTED_SUBJECT_DEPLOYMENT_EVENTS = u"public-repo / Deployment on production"
    EXPECTED_SUBJECT_ORGANIZATION_EVENTS = u"baxterandthehackers organization"
    EXPECTED_SUBJECT_BRANCH_EVENTS = u"public-repo / changes"
    EXPECTED_SUBJECT_WIKI_EVENTS = u"public-repo / Wiki Pages"

    def test_ping_event(self) -> None:
        expected_message = u"GitHub webhook has been successfully configured by TomaszKolek"
        self.send_and_test_stream_message('ping', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='ping')

    def test_ping_organization_event(self) -> None:
        expected_message = u"GitHub webhook has been successfully configured by eeshangarg"
        self.send_and_test_stream_message('ping_organization', 'zulip-test-org', expected_message, HTTP_X_GITHUB_EVENT='ping')

    def test_push_delete_branch(self) -> None:
        expected_message = u"eeshangarg [deleted](https://github.com/eeshangarg/public-repo/compare/2e8cf535fb38...000000000000) the branch feature."
        self.send_and_test_stream_message('push_delete_branch', u"public-repo / feature", expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_local_branch_without_commits(self) -> None:
        expected_message = u"eeshangarg [pushed](https://github.com/eeshangarg/public-repo/compare/feature) the branch feature."
        self.send_and_test_stream_message('push_local_branch_without_commits', u"public-repo / feature", expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_1_commit(self) -> None:
        expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 1 commit to branch changes.\n\n* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))"
        self.send_and_test_stream_message('push_1_commit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_1_commit_without_username(self) -> None:
        expected_message = u"eeshangarg [pushed](https://github.com/eeshangarg/public-repo/compare/0383613da871...2e8cf535fb38) 1 commit to branch changes. Commits by John Snow (1).\n\n* Update the README ([2e8cf53](https://github.com/eeshangarg/public-repo/commit/2e8cf535fb38a3dab2476cdf856efda904ad4c94))"
        self.send_and_test_stream_message('push_1_commit_without_username', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_1_commit_filtered_by_branches(self) -> None:
        # Branch filter includes 'changes', so the message is still sent.
        self.url = self.build_webhook_url('master,changes')
        expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 1 commit to branch changes.\n\n* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))"
        self.send_and_test_stream_message('push_1_commit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_multiple_comitters(self) -> None:
        commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
        expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 6 commits to branch changes. Commits by Tomasz (3), Ben (2) and baxterthehacker (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 5)
        self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_multiple_comitters_with_others(self) -> None:
        commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
        expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 10 commits to branch changes. Commits by Tomasz (4), Ben (3), James (2) and others (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 9)
        self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_multiple_comitters_filtered_by_branches(self) -> None:
        self.url = self.build_webhook_url('master,changes')
        commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
        expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 6 commits to branch changes. Commits by Tomasz (3), Ben (2) and baxterthehacker (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 5)
        self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_multiple_comitters_with_others_filtered_by_branches(self) -> None:
        self.url = self.build_webhook_url('master,changes')
        commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
        expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 10 commits to branch changes. Commits by Tomasz (4), Ben (3), James (2) and others (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 9)
        self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_50_commits(self) -> None:
        # Only COMMITS_LIMIT commits are listed; the rest are summarized.
        commit_info = "* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n"
        expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\n\n{}[and 30 more commit(s)]".format(
            commit_info * COMMITS_LIMIT
        )
        self.send_and_test_stream_message('push_50_commits', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_push_50_commits_filtered_by_branches(self) -> None:
        self.url = self.build_webhook_url(branches='master,changes')
        commit_info = "* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n"
        expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\n\n{}[and 30 more commit(s)]".format(
            commit_info * COMMITS_LIMIT
        )
        self.send_and_test_stream_message('push_50_commits', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_commit_comment_msg(self) -> None:
        expected_message = u"baxterthehacker [commented](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b#commitcomment-11056394) on [9049f12](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b)\n~~~ quote\nThis is a really good change! :+1:\n~~~"
        self.send_and_test_stream_message('commit_comment', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='commit_comment')

    def test_create_msg(self) -> None:
        expected_message = u"baxterthehacker created tag 0.0.1"
        self.send_and_test_stream_message('create', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='create')

    def test_delete_msg(self) -> None:
        expected_message = u"baxterthehacker deleted tag simple-tag"
        self.send_and_test_stream_message('delete', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='delete')

    def test_deployment_msg(self) -> None:
        expected_message = u"baxterthehacker created new deployment"
        self.send_and_test_stream_message('deployment', self.EXPECTED_SUBJECT_DEPLOYMENT_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='deployment')

    def test_deployment_status_msg(self) -> None:
        expected_message = u"Deployment changed status to success"
        self.send_and_test_stream_message('deployment_status', self.EXPECTED_SUBJECT_DEPLOYMENT_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='deployment_status')

    def test_fork_msg(self) -> None:
        expected_message = u"baxterandthehackers forked [public-repo](https://github.com/baxterandthehackers/public-repo)"
        self.send_and_test_stream_message('fork', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='fork')

    def test_issue_comment_msg(self) -> None:
        expected_message = u"baxterthehacker [commented](https://github.com/baxterthehacker/public-repo/issues/2#issuecomment-99262140) on [Issue #2](https://github.com/baxterthehacker/public-repo/issues/2)\n\n~~~ quote\nYou are totally right! I'll get this fixed right away.\n~~~"
        self.send_and_test_stream_message('issue_comment', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='issue_comment')

    def test_issue_msg(self) -> None:
        expected_message = u"baxterthehacker opened [Issue #2](https://github.com/baxterthehacker/public-repo/issues/2)\n\n~~~ quote\nIt looks like you accidently spelled 'commit' with two 't's.\n~~~"
        self.send_and_test_stream_message('issue', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='issues')

    def test_membership_msg(self) -> None:
        expected_message = u"baxterthehacker added [kdaigle](https://github.com/kdaigle) to Contractors team"
        self.send_and_test_stream_message('membership', self.EXPECTED_SUBJECT_ORGANIZATION_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='membership')

    def test_member_msg(self) -> None:
        expected_message = u"baxterthehacker added [octocat](https://github.com/octocat) to [public-repo](https://github.com/baxterthehacker/public-repo)"
        self.send_and_test_stream_message('member', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='member')

    def test_pull_request_opened_msg(self) -> None:
        expected_message = u"baxterthehacker opened [PR](https://github.com/baxterthehacker/public-repo/pull/1)\nfrom `changes` to `master`\n\n~~~ quote\nThis is a pretty simple change that we need to pull into master.\n~~~"
        self.send_and_test_stream_message('opened_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')

    def test_pull_request_synchronized_msg(self) -> None:
        expected_message = u"baxterthehacker updated [PR](https://github.com/baxterthehacker/public-repo/pull/1)\nfrom `changes` to `master`"
        self.send_and_test_stream_message('synchronized_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')

    def test_pull_request_closed_msg(self) -> None:
        expected_message = u"baxterthehacker closed without merge [PR](https://github.com/baxterthehacker/public-repo/pull/1)"
        self.send_and_test_stream_message('closed_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')

    def test_pull_request_merged_msg(self) -> None:
        expected_message = u"baxterthehacker merged [PR](https://github.com/baxterthehacker/public-repo/pull/1)"
        self.send_and_test_stream_message('merged_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')

    def test_public_msg(self) -> None:
        expected_message = u"baxterthehacker made [the repository](https://github.com/baxterthehacker/public-repo) public"
        self.send_and_test_stream_message('public', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='public')

    def test_wiki_pages_msg(self) -> None:
        expected_message = u"jasonrudolph:\n* created [Home](https://github.com/baxterthehacker/public-repo/wiki/Home)\n* created [Home](https://github.com/baxterthehacker/public-repo/wiki/Home)"
        self.send_and_test_stream_message('wiki_pages', self.EXPECTED_SUBJECT_WIKI_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='gollum')

    def test_watch_msg(self) -> None:
        expected_message = u"baxterthehacker starred [the repository](https://github.com/baxterthehacker/public-repo)"
        self.send_and_test_stream_message('watch_repository', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='watch')

    def test_repository_msg(self) -> None:
        expected_message = u"baxterthehacker created [the repository](https://github.com/baxterandthehackers/public-repo)"
        self.send_and_test_stream_message('repository', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='repository')

    def test_team_add_msg(self) -> None:
        expected_message = u"[The repository](https://github.com/baxterandthehackers/public-repo) was added to team github"
        self.send_and_test_stream_message('team_add', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='team_add')

    def test_release_msg(self) -> None:
        expected_message = u"baxterthehacker published [the release](https://github.com/baxterthehacker/public-repo/releases/tag/0.0.1)"
        self.send_and_test_stream_message('release', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='release')

    def test_page_build_msg(self) -> None:
        expected_message = u"Github Pages build, trigerred by baxterthehacker, is built"
        self.send_and_test_stream_message('page_build', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='page_build')

    def test_status_msg(self) -> None:
        expected_message = u"[9049f12](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b) changed its status to success"
        self.send_and_test_stream_message('status', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='status')

    def test_pull_request_review_msg(self) -> None:
        expected_message = u"baxterthehacker submitted [PR Review](https://github.com/baxterthehacker/public-repo/pull/1#pullrequestreview-2626884)"
        self.send_and_test_stream_message('pull_request_review', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request_review')

    def test_pull_request_review_comment_msg(self) -> None:
        expected_message = u"baxterthehacker created [PR Review Comment](https://github.com/baxterthehacker/public-repo/pull/1#discussion_r29724692)\n\n~~~ quote\nMaybe you should use more emojji on this line.\n~~~"
        self.send_and_test_stream_message('pull_request_review_comment', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request_review_comment')

    def test_push_tag_msg(self) -> None:
        expected_message = u"baxterthehacker pushed tag abc"
        self.send_and_test_stream_message('push_tag', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')

    def test_pull_request_edited_msg(self) -> None:
        expected_message = u"baxterthehacker edited [PR](https://github.com/baxterthehacker/public-repo/pull/1)\nfrom `changes` to `master`"
        self.send_and_test_stream_message('edited_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message,
                                          HTTP_X_GITHUB_EVENT='pull_request')

    def test_pull_request_assigned_msg(self) -> None:
        expected_message = u"baxterthehacker assigned [PR](https://github.com/baxterthehacker/public-repo/pull/1) to baxterthehacker"
        self.send_and_test_stream_message('assigned_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message,
                                          HTTP_X_GITHUB_EVENT='pull_request')

    def test_pull_request_unassigned_msg(self) -> None:
        expected_message = u"eeshangarg unassigned [PR](https://github.com/zulip-test-org/helloworld/pull/1)"
        self.send_and_test_stream_message(
            'unassigned_pull_request',
            'helloworld / PR #1 Mention that Zulip rocks!',
            expected_message,
            HTTP_X_GITHUB_EVENT='pull_request'
        )

    # The events below are deliberately ignored by the webhook view; the
    # tests assert that no stream message is sent for them.

    @patch('zerver.webhooks.github_webhook.view.check_send_stream_message')
    def test_pull_request_labeled_ignore(
            self, check_send_stream_message_mock: MagicMock) -> None:
        payload = self.get_body('labeled_pull_request')
        result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='pull_request', content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.github_webhook.view.check_send_stream_message')
    def test_pull_request_unlabeled_ignore(
            self, check_send_stream_message_mock: MagicMock) -> None:
        payload = self.get_body('unlabeled_pull_request')
        result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='pull_request', content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.github_webhook.view.check_send_stream_message')
    def test_pull_request_request_review_ignore(
            self, check_send_stream_message_mock: MagicMock) -> None:
        payload = self.get_body('request_review_pull_request')
        result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='pull_request', content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.github_webhook.view.check_send_stream_message')
    def test_pull_request_request_review_remove_ignore(
            self, check_send_stream_message_mock: MagicMock) -> None:
        payload = self.get_body('request_review_removed_pull_request')
        result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='pull_request', content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.github_webhook.view.check_send_stream_message')
    def test_push_1_commit_filtered_by_branches_ignore(
            self, check_send_stream_message_mock: MagicMock) -> None:
        # Branch filter excludes 'changes', so the push is dropped.
        self.url = self.build_webhook_url(branches='master,development')
        payload = self.get_body('push_1_commit')
        result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='push', content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.github_webhook.view.check_send_stream_message')
    def test_push_50_commits_filtered_by_branches_ignore(
            self, check_send_stream_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='master,development')
        payload = self.get_body('push_50_commits')
        result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='push', content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.github_webhook.view.check_send_stream_message')
    def test_push_multiple_comitters_filtered_by_branches_ignore(
            self, check_send_stream_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='master,development')
        payload = self.get_body('push_multiple_committers')
        result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='push', content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.github_webhook.view.check_send_stream_message')
    def test_push_multiple_comitters_with_others_filtered_by_branches_ignore(
            self, check_send_stream_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='master,development')
        payload = self.get_body('push_multiple_committers_with_others')
        result = self.client_post(self.url, payload, HTTP_X_GITHUB_EVENT='push', content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)
| apache-2.0 |
melmorabity/streamlink | src/streamlink/plugins/clubbingtv.py | 4 | 2653 | import logging
import re
from streamlink.plugin import Plugin, PluginArgument, PluginArguments
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class ClubbingTV(Plugin):
    """Streamlink plugin for clubbingtv.com (requires a registered account)."""

    _login_url = "https://www.clubbingtv.com/user/login"

    _url_re = re.compile(r"https://(www\.)?clubbingtv\.com/")
    _live_re = re.compile(
        r'playerInstance\.setup\({\s*"file"\s*:\s*"(?P<stream_url>.+?)"',
        re.DOTALL,
    )
    _vod_re = re.compile(r'<iframe src="(?P<stream_url>.+?)"')

    arguments = PluginArguments(
        PluginArgument(
            "username",
            required=True,
            requires=["password"],
            help="The username used to register with Clubbing TV.",
        ),
        PluginArgument(
            "password",
            required=True,
            sensitive=True,
            help="A Clubbing TV account password to use with --clubbingtv-username.",
        ),
    )

    @classmethod
    def can_handle_url(cls, url):
        # Any URL on the clubbingtv.com domain is handled by this plugin.
        return cls._url_re.match(url) is not None

    def login(self):
        """Authenticate against the site; return True on success."""
        credentials = {
            "val[login]": self.get_option("username"),
            "val[password]": self.get_option("password"),
        }
        res = self.session.http.post(self._login_url, data=credentials)

        if "Invalid Email/User Name" in res.text:
            log.error(
                "Failed to login to Clubbing TV, incorrect email/password combination"
            )
            return False

        log.info("Successfully logged in")
        return True

    def _get_live_streams(self, content):
        """Yield HLS variant streams scraped from a live page."""
        found = self._live_re.search(content)
        if found is None:
            return
        hls_url = found.group("stream_url")
        yield from HLSStream.parse_variant_playlist(
            self.session, hls_url
        ).items()

    def _get_vod_streams(self, content):
        """Resolve the embedded external player for a VOD page."""
        found = self._vod_re.search(content)
        if found is None:
            return
        external_url = found.group("stream_url")
        log.info(
            "Fetching external stream from URL {0}".format(external_url)
        )
        return self.session.streams(external_url)

    def _get_streams(self):
        if not self.login():
            return

        self.session.http.headers.update({"Referer": self.url})
        page = self.session.http.get(self.url)

        if "clubbingtv.com/live" in self.url:
            log.debug("Live stream detected")
            return self._get_live_streams(page.text)

        log.debug("VOD stream detected")
        return self._get_vod_streams(page.text)


__plugin__ = ClubbingTV
| bsd-2-clause |
michaelhowden/eden | modules/templates/default/layouts.py | 19 | 9240 | # -*- coding: utf-8 -*-
from gluon import *
from gluon.storage import Storage
from s3 import *
#from s3theme import NAV, SECTION
# Below is an example which you can base your own template's layout.py on
# - there are also other examples in the other templates folders
# - you will need to restart web2py after making changes to this file
# =============================================================================
#class S3MainMenuLayout(S3NavigationItem):
# """
# Application Main Menu Layout
#
# The layout() function takes an S3NavigationItem instance as input
# and renders it as an HTML helper class instance. If the item can
# or shall not be rendered on the page, this method must return None.
#
# S3NavigationItem instances provide a number of attributes and methods
#     to support context-sensitive rendering (see modules/s3/s3navigation.py).
#
# Note that individual items can override the layout method by explicitly
# setting the layout-property in the item's constructor.
# """
#
# @staticmethod
# def layout(item):
# """ Custom Layout Method """
#
# # Manage flags: hide any disabled/unauthorized items
# if not item.authorized:
# item.enabled = False
# item.visible = False
# elif item.enabled is None or item.enabled:
# item.enabled = True
# item.visible = True
#
# if item.enabled and item.visible:
#
# items = item.render_components()
# if item.parent is not None:
#
# classes = []
#
# if item.parent.parent is None:
# # Item at the top-level?
# toplevel = True
# if item.opts.right:
# classes.append("menu-right")
# else:
# toplevel = False
#
# if item.components:
# classes.append("has-dropdown not-click")
# _class = " ".join(classes)
# # Menu item with Dropdown
# if item.get_first(enabled=True):
# _href = item.url()
# return LI(A(item.label,
# _href=_href,
# _id=item.attr._id
# ),
# UL(items,
# _class="dropdown"
# ),
# _class=_class,
# )
# else:
# # Menu item without Drop-Down
# if toplevel:
# item_url = item.url()
# if item_url == URL(c="default", f="index"):
# classes.append("menu-home")
# if item.selected:
# classes.append("active")
# _class = " ".join(classes)
# return LI(A(item.label,
# _href=item_url,
# _id=item.attr._id,
# ),
# _class=_class,
# )
# else:
# # Submenu item
# if isinstance(item.label, dict):
# if "name" in item.label:
# label = item.label["name"]
# else:
# return None
# else:
# label = item.label
# link = A(label, _href=item.url(), _id=item.attr._id)
# return LI(link)
# else:
# # Main menu
#
# right = []
# left = []
# for item in items:
# if "menu-right" in item["_class"]:
# item.remove_class("menu-right")
# right.append(item)
# else:
# left.append(item)
# right.reverse()
# return NAV(
# UL(LI(A(" ",
# _href=URL(c="default", f="index"),
# _class="S3menulogo"
# ),
# _class="name"
# ),
# LI(A(SPAN(current.T("Menu"))),
# _class="toggle-topbar menu-icon",
# ),
# _class="title-area"),
# SECTION(UL(right,
# _class="right"),
# UL(left,
# _class="left"),
# _class="top-bar-section"),
# _class = "top-bar",
# data = {"topbar": " "},
# )
#
# else:
# return None
#
# # ---------------------------------------------------------------------
# @staticmethod
# def checkbox_item(item):
# """ Render special active items """
#
# name = item.label
# link = item.url()
# _id = name["id"]
# if "name" in name:
# _name = name["name"]
# else:
# _name = ""
# if "value" in name:
# _value = name["value"]
# else:
# _value = False
# if "request_type" in name:
# _request_type = name["request_type"]
# else:
# _request_type = "ajax"
# if link:
# if _request_type == "ajax":
# _onchange='''var val=$('#%s:checked').length;$.getS3('%s'+'?val='+val,null,false,null,false,false)''' % \
# (_id, link)
# else:
# # Just load the page. Use this if the changed menu
# # item should alter the contents of the page, and
# # it's simpler just to load it.
# _onchange="location.href='%s'" % link
# else:
# _onchange=None
# return LI(A(INPUT(_type="checkbox",
# _id=_id,
# _onchange=_onchange,
# value=_value,
# ),
# "%s" % _name,
# _nowrap="nowrap",
# ),
# _class="menu-toggle",
# )
#
# =============================================================================
#class S3OptionsMenuLayout(S3NavigationItem):
# """ Controller Options Menu Layout """
#
# @staticmethod
# def layout(item):
# """ Custom Layout Method """
#
# # Manage flags: hide any disabled/unauthorized items
# if not item.authorized:
# enabled = False
# visible = False
# elif item.enabled is None or item.enabled:
# enabled = True
# visible = True
#
# if enabled and visible:
# if item.parent is not None:
# if item.enabled and item.authorized:
#
# if item.components:
# # Submenu
# _class = ""
# if item.parent.parent is None and item.selected:
# _class = "active"
#
# section = [LI(A(item.label,
# _href=item.url(),
# _id=item.attr._id,
# ),
# _class="heading %s" % _class,
# ),
# ]
#
# items = item.render_components()
# if items:
# section.append(UL(items))
# return section
#
# else:
# # Submenu item
# if item.parent.parent is None:
# _class = "heading"
# else:
# _class = ""
#
# return LI(A(item.label,
# _href=item.url(),
# _id=item.attr._id,
# ),
# _class=_class,
# )
# else:
# # Main menu
# items = item.render_components()
# return DIV(NAV(UL(items, _id="main-sub-menu", _class="side-nav")), _class="sidebar")
#
# else:
# return None
#
# =============================================================================
#class S3MenuSeparatorLayout(S3NavigationItem):
# """ Simple menu separator """
#
# @staticmethod
# def layout(item):
# """ Custom Layout Method """
#
# if item.parent is not None:
# return LI(_class="divider hide-for-small")
# else:
# return None
#
# END =========================================================================
| mit |
beni55/edx-platform | common/djangoapps/third_party_auth/settings.py | 19 | 6670 | """Settings for the third-party auth module.
Defers configuration of settings so we can inspect the provider registry and
create settings placeholders for only those values actually needed by a given
deployment. Required by Django; consequently, this file must not invoke the
Django armature.
The flow for settings registration is:
The base settings file contains a boolean, ENABLE_THIRD_PARTY_AUTH, indicating
whether this module is enabled. Ancillary settings files (aws.py, dev.py) put
options in THIRD_PARTY_SETTINGS. startup.py probes the ENABLE_THIRD_PARTY_AUTH.
If true, it:
a) loads this module.
b) calls apply_settings(), passing in settings.THIRD_PARTY_AUTH.
THIRD_PARTY_AUTH is a dict of the form
'THIRD_PARTY_AUTH': {
'<PROVIDER_NAME>': {
'<PROVIDER_SETTING_NAME>': '<PROVIDER_SETTING_VALUE>',
[...]
},
[...]
}
If you are using a dev settings file, your settings dict starts at the
level of <PROVIDER_NAME> and is a map of provider name string to
settings dict. If you are using an auth.json file, it should contain a
THIRD_PARTY_AUTH entry as above.
c) apply_settings() builds a list of <PROVIDER_NAMES>. These are the
enabled third party auth providers for the deployment. These are enabled
in provider.Registry, the canonical list of enabled providers.
d) then, it sets global, provider-independent settings.
e) then, it sets provider-specific settings. For each enabled provider, we
read its SETTINGS member. These are merged onto the Django settings
object. In most cases these are stubs and the real values are set from
THIRD_PARTY_AUTH. All values that are set from this dict must first be
initialized from SETTINGS. This allows us to validate the dict and
ensure that the values match expected configuration options on the
provider.
f) finally, the (key, value) pairs from the dict file are merged onto the
django settings object.
"""
from . import provider
_FIELDS_STORED_IN_SESSION = ['auth_entry', 'next', 'enroll_course_id', 'email_opt_in']
_MIDDLEWARE_CLASSES = (
'third_party_auth.middleware.ExceptionMiddleware',
)
_SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/dashboard'
def _merge_auth_info(django_settings, auth_info):
"""Merge auth_info dict onto django_settings module."""
enabled_provider_names = []
to_merge = []
for provider_name, provider_dict in auth_info.items():
enabled_provider_names.append(provider_name)
# Merge iff all settings have been intialized.
for key in provider_dict:
if key not in dir(django_settings):
raise ValueError('Auth setting %s not initialized' % key)
to_merge.append(provider_dict)
for passed_validation in to_merge:
for key, value in passed_validation.iteritems():
setattr(django_settings, key, value)
def _set_global_settings(django_settings):
    """Set provider-independent settings.

    Mutates ``django_settings`` in place: registers python-social-auth
    with Django and installs the customized auth pipeline plus the
    redirect/error URLs.
    """
    # Whitelisted URL query parameters retained in the pipeline session.
    # Params not in this whitelist will be silently dropped.
    django_settings.FIELDS_STORED_IN_SESSION = _FIELDS_STORED_IN_SESSION

    # Register and configure python-social-auth with Django.
    django_settings.INSTALLED_APPS += (
        'social.apps.django_app.default',
        'third_party_auth',
    )

    # Inject exception middleware to make redirects fire.
    django_settings.MIDDLEWARE_CLASSES += _MIDDLEWARE_CLASSES

    # Where to send the user if there's an error during social authentication
    # and we cannot send them to a more specific URL
    # (see middleware.ExceptionMiddleware).
    django_settings.SOCIAL_AUTH_LOGIN_ERROR_URL = '/'

    # Where to send the user once social authentication is successful.
    django_settings.SOCIAL_AUTH_LOGIN_REDIRECT_URL = _SOCIAL_AUTH_LOGIN_REDIRECT_URL

    # Inject our customized auth pipeline. All auth backends must work with
    # this pipeline.
    django_settings.SOCIAL_AUTH_PIPELINE = (
        'third_party_auth.pipeline.parse_query_params',
        'social.pipeline.social_auth.social_details',
        'social.pipeline.social_auth.social_uid',
        'social.pipeline.social_auth.auth_allowed',
        'social.pipeline.social_auth.social_user',
        'third_party_auth.pipeline.associate_by_email_if_login_api',
        'social.pipeline.user.get_username',
        'third_party_auth.pipeline.ensure_user_information',
        'social.pipeline.user.create_user',
        'social.pipeline.social_auth.associate_user',
        'social.pipeline.social_auth.load_extra_data',
        'social.pipeline.user.user_details',
        'third_party_auth.pipeline.set_logged_in_cookie',
        'third_party_auth.pipeline.login_analytics',
        'third_party_auth.pipeline.change_enrollment',
    )

    # We let the user specify their email address during signup.
    django_settings.SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']

    # Disable exceptions by default for prod so you get redirect behavior
    # instead of a Django error page. During development you may want to
    # enable this when you want to get stack traces rather than redirections.
    django_settings.SOCIAL_AUTH_RAISE_EXCEPTIONS = False

    django_settings.SOCIAL_AUTH_UUID_LENGTH = 4

    # Context processors required under Django.
    django_settings.TEMPLATE_CONTEXT_PROCESSORS += (
        'social.apps.django_app.context_processors.backends',
        'social.apps.django_app.context_processors.login_redirect',
    )
def _set_provider_settings(django_settings, enabled_providers, auth_info):
    """Sets provider-specific settings on the Django settings module."""
    # Our backends must be prepended so they get called first.
    backends = tuple(p.get_authentication_backend() for p in enabled_providers)
    django_settings.AUTHENTICATION_BACKENDS = (
        backends + django_settings.AUTHENTICATION_BACKENDS
    )

    # Merge settings from provider classes, and configure all placeholders.
    for enabled_provider in enabled_providers:
        enabled_provider.merge_onto(django_settings)

    # Merge settings from <deployment>.auth.json, overwriting placeholders.
    _merge_auth_info(django_settings, auth_info)
def apply_settings(auth_info, django_settings):
    """Applies settings from the auth_info dict to the settings module."""
    # Enable the configured providers in the canonical registry first.
    provider.Registry.configure_once(auth_info.keys())
    enabled = provider.Registry.enabled()
    # Provider-independent settings first, then per-provider overrides.
    _set_global_settings(django_settings)
    _set_provider_settings(django_settings, enabled, auth_info)
| agpl-3.0 |
jdmcbr/geopandas | benchmarks/sindex.py | 2 | 3488 | from shapely.geometry import Point
from geopandas import read_file, datasets, GeoSeries
# Derive list of valid query predicates based on underlying index backend;
# we have to create a non-empty instance of the index to get these
index = GeoSeries([Point(0, 0)]).sindex
predicates = sorted(p for p in index.valid_query_predicates if p is not None)
geom_types = ("mixed", "points", "polygons")
def generate_test_df():
    """Build the benchmark fixtures from the bundled naturalearth datasets.

    Returns a dict with "mixed", "points" and "polygons" GeoDataFrames
    (all reprojected to EPSG:3395, invalid geometries dropped), each with
    its spatial index already built so index construction is not timed.
    """
    world = read_file(datasets.get_path("naturalearth_lowres"))
    capitals = read_file(datasets.get_path("naturalearth_cities"))
    countries = world.to_crs("epsg:3395")[["geometry"]]
    capitals = capitals.to_crs("epsg:3395")[["geometry"]]
    mixed = capitals.append(countries)  # get a mix of geometries
    points = capitals
    polygons = countries
    # filter out invalid geometries
    data = {
        "mixed": mixed[mixed.is_valid],
        "points": points[points.is_valid],
        "polygons": polygons[polygons.is_valid],
    }
    # ensure index is pre-generated
    for data_type in data.keys():
        data[data_type].sindex.query(data[data_type].geometry.values.data[0])
    return data
class BenchIntersection:
    """ASV benchmark: rectangle ``intersection`` queries against the tree."""

    param_names = ["input_geom_type", "tree_geom_type"]
    params = [geom_types, geom_types]

    def setup(self, *args):
        self.data = generate_test_df()
        # Cache bounds up front so that bound creation is not timed.
        self.bounds = {}
        for name, frame in self.data.items():
            self.bounds[name] = [geom.bounds for geom in frame.geometry]

    def time_intersects(self, input_geom_type, tree_geom_type):
        tree = self.data[tree_geom_type].sindex
        for rect in self.bounds[input_geom_type]:
            tree.intersection(rect)
class BenchIndexCreation:
    # ASV benchmark: cost of (re)building the spatial index from scratch.
    param_names = ["tree_geom_type"]
    params = [
        geom_types,
    ]

    def setup(self, *args):
        self.data = generate_test_df()

    def time_index_creation(self, tree_geom_type):
        """Time creation of spatial index.

        Note: requires running a single query to ensure that
        lazy-building indexes are actually built.
        """
        # Note: the GeoDataFrame._sindex_generated attribute will
        # be removed by GH#1444 but is kept here (in the benchmarks)
        # so that we can compare pre GH#1444 to post GH#1444 if needed
        self.data[tree_geom_type]._sindex_generated = None
        self.data[tree_geom_type].geometry.values._sindex = None
        tree = self.data[tree_geom_type].sindex
        # also do a single query to ensure the index is actually
        # generated and used
        tree.query(
            self.data[tree_geom_type].geometry.values.data[0]
        )
class BenchQuery:
    """ASV benchmark: predicate queries, both bulk and one geometry at a time."""

    param_names = ["predicate", "input_geom_type", "tree_geom_type"]
    params = [predicates, geom_types, geom_types]

    def setup(self, *args):
        self.data = generate_test_df()

    def time_query_bulk(self, predicate, input_geom_type, tree_geom_type):
        tree = self.data[tree_geom_type].sindex
        geoms = self.data[input_geom_type].geometry.values.data
        tree.query_bulk(geoms, predicate=predicate)

    def time_query(self, predicate, input_geom_type, tree_geom_type):
        tree = self.data[tree_geom_type].sindex
        for geom in self.data[input_geom_type].geometry.values.data:
            tree.query(geom, predicate=predicate)
| bsd-3-clause |
TheTypoMaster/my-vim-set-mac | .vim/bundle/YouCompleteMe/third_party/ycmd/third_party/jedi/jedi/evaluate/analysis.py | 33 | 10812 | """
Module for statical analysis.
"""
from jedi import debug
from jedi.parser import tree
from jedi.evaluate.compiled import CompiledObject
# Registry of analysis issue types:
#   name -> (numeric code, exception class, default message)
# A default message of None means the call site supplies its own
# (see Error.__init__).
CODES = {
    'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
    'name-error': (2, NameError, 'Potential NameError.'),
    'import-error': (3, ImportError, 'Potential ImportError.'),
    'type-error-generator': (4, TypeError, "TypeError: 'generator' object is not subscriptable."),
    'type-error-too-many-arguments': (5, TypeError, None),
    'type-error-too-few-arguments': (6, TypeError, None),
    'type-error-keyword-argument': (7, TypeError, None),
    'type-error-multiple-values': (8, TypeError, None),
    'type-error-star-star': (9, TypeError, None),
    'type-error-star': (10, TypeError, None),
    'type-error-operation': (11, TypeError, None),
}
class Error(object):
    """A static-analysis issue found at a position in a module."""

    def __init__(self, name, module_path, start_pos, message=None):
        self.path = module_path
        self._start_pos = start_pos
        self.name = name
        if message is None:
            # Fall back to the default message registered for this code.
            message = CODES[self.name][2]
        self.message = message

    @property
    def line(self):
        """Line component of the issue's position."""
        return self._start_pos[0]

    @property
    def column(self):
        """Column component of the issue's position."""
        return self._start_pos[1]

    @property
    def code(self):
        """Short code: first letter of the class name + numeric id."""
        prefix = self.__class__.__name__[0]
        return prefix + str(CODES[self.name][0])

    def __unicode__(self):
        return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
                                    self.code, self.message)

    def __str__(self):
        return self.__unicode__()

    def __eq__(self, other):
        return (self.path == other.path
                and self.name == other.name
                and self._start_pos == other._start_pos)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Must stay consistent with __eq__ (same three fields).
        return hash((self.path, self._start_pos, self.name))

    def __repr__(self):
        return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
                                      self.name, self.path,
                                      self._start_pos[0], self._start_pos[1])
class Warning(Error):
    # Same data as Error; only the severity differs, and the "W" code
    # prefix is derived from the class name (see Error.code).
    pass
def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None):
    """Record an analysis issue of kind *name* located at *jedi_obj*.

    The issue is suppressed when the object sits inside a try/except (or
    a ``hasattr`` guard) that would catch the corresponding exception
    (see _check_for_exception_catch).
    """
    from jedi.evaluate.iterable import MergedNodes
    # Unwrap single-element MergedNodes to get at the real node.
    while isinstance(jedi_obj, MergedNodes):
        if len(jedi_obj) != 1:
            # TODO is this kosher?
            return
        jedi_obj = list(jedi_obj)[0]

    exception = CODES[name][1]
    if _check_for_exception_catch(evaluator, jedi_obj, exception, payload):
        return

    module_path = jedi_obj.get_parent_until().path
    instance = typ(name, module_path, jedi_obj.start_pos, message)
    debug.warning(str(instance))
    evaluator.analysis.append(instance)
def _check_for_setattr(instance):
"""
Check if there's any setattr method inside an instance. If so, return True.
"""
module = instance.get_parent_until()
try:
stmts = module.used_names['setattr']
except KeyError:
return False
return any(instance.start_pos < stmt.start_pos < instance.end_pos
for stmt in stmts)
def add_attribute_error(evaluator, scope, name):
    """Register an attribute-error for *name* looked up on *scope*.

    The issue is downgraded to a Warning when the lookup might still
    succeed at runtime: the instance defines __getattr__ or
    __getattribute__, or a setattr call appears inside it.
    """
    message = ('AttributeError: %s has no attribute %s.' % (scope, name))
    from jedi.evaluate.representation import Instance
    # Check for __getattr__/__getattribute__ existence and issue a warning
    # instead of an error, if that happens.
    if isinstance(scope, Instance):
        typ = Warning
        try:
            scope.get_subscope_by_name('__getattr__')
        except KeyError:
            try:
                scope.get_subscope_by_name('__getattribute__')
            except KeyError:
                if not _check_for_setattr(scope):
                    typ = Error
    else:
        typ = Error

    payload = scope, name
    add(evaluator, 'attribute-error', name, message, typ, payload)
def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None):
    """
    Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
    doesn't count as an error (if equal to `exception`).

    Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
    it.

    Returns True if the exception was caught.
    """
    def check_match(cls, exception):
        # True when `cls` is a compiled exception class that would
        # catch `exception` (issubclass); TypeError from issubclass on
        # non-class objects means no match.
        try:
            return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj)
        except TypeError:
            return False

    def check_try_for_except(obj, exception):
        # Only nodes in try
        iterator = iter(obj.children)
        for branch_type in iterator:
            colon = next(iterator)
            suite = next(iterator)
            if branch_type == 'try' \
                    and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos):
                return False

        for node in obj.except_clauses():
            if node is None:
                return True  # An exception block that catches everything.
            else:
                except_classes = evaluator.eval_element(node)
                for cls in except_classes:
                    from jedi.evaluate import iterable
                    if isinstance(cls, iterable.Array) and cls.type == 'tuple':
                        # multiple exceptions
                        for c in cls.values():
                            if check_match(c, exception):
                                return True
                    else:
                        if check_match(cls, exception):
                            return True

    def check_hasattr(node, suite):
        # Recognize `if hasattr(obj, 'name'):` guards; any structural
        # mismatch trips an assert and means "not a hasattr guard".
        try:
            assert suite.start_pos <= jedi_obj.start_pos < suite.end_pos
            assert node.type == 'power'
            base = node.children[0]
            assert base.type == 'name' and base.value == 'hasattr'
            trailer = node.children[1]
            assert trailer.type == 'trailer'
            arglist = trailer.children[1]
            assert arglist.type == 'arglist'
            from jedi.evaluate.param import Arguments
            args = list(Arguments(evaluator, arglist).unpack())
            # Arguments should be very simple
            assert len(args) == 2

            # Check name
            key, values = args[1]
            assert len(values) == 1
            names = evaluator.eval_element(values[0])
            assert len(names) == 1 and isinstance(names[0], CompiledObject)
            assert names[0].obj == str(payload[1])

            # Check objects
            key, values = args[0]
            assert len(values) == 1
            objects = evaluator.eval_element(values[0])
            return payload[0] in objects
        except AssertionError:
            return False

    # Walk up the parent chain until the enclosing function/class.
    obj = jedi_obj
    while obj is not None and not obj.isinstance(tree.Function, tree.Class):
        if obj.isinstance(tree.Flow):
            # try/except catch check
            if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception):
                return True
            # hasattr check
            if exception == AttributeError and obj.isinstance(tree.IfStmt, tree.WhileStmt):
                if check_hasattr(obj.children[1], obj.children[3]):
                    return True
        obj = obj.parent
    return False
def get_module_statements(module):
    """
    Returns the statements used in a module. All these statements should be
    evaluated to check for potential exceptions.

    Returns a triple ``(nodes, import_names, decorated_funcs)``.
    """
    def check_children(node):
        # Collect call arguments found anywhere below `node` (trailers
        # of the form "(...)").
        try:
            children = node.children
        except AttributeError:
            return []
        else:
            nodes = []
            for child in children:
                nodes += check_children(child)
                if child.type == 'trailer':
                    c = child.children
                    if c[0] == '(' and c[1] != ')':
                        if c[1].type != 'arglist':
                            if c[1].type == 'argument':
                                nodes.append(c[1].children[-1])
                            else:
                                nodes.append(c[1])
                        else:
                            for argument in c[1].children:
                                if argument.type == 'argument':
                                    nodes.append(argument.children[-1])
                                elif argument.type != 'operator':
                                    nodes.append(argument)
            return nodes

    def add_nodes(nodes):
        new = set()
        for node in nodes:
            if isinstance(node, tree.Flow):
                children = node.children
                if node.type == 'for_stmt':
                    children = children[2:]  # Don't want to include the names.
                # Pick the suite/simple_stmt.
                new |= add_nodes(children)
            elif node.type in ('simple_stmt', 'suite'):
                new |= add_nodes(node.children)
            elif node.type in ('return_stmt', 'yield_expr'):
                try:
                    new.add(node.children[1])
                except IndexError:
                    pass
            elif node.type not in ('whitespace', 'operator', 'keyword',
                                   'parameters', 'decorated', 'except_clause') \
                    and not isinstance(node, (tree.ClassOrFunc, tree.Import)):
                new.add(node)
                try:
                    children = node.children
                except AttributeError:
                    pass
                else:
                    # NOTE(review): this loop calls check_children(node)
                    # once per child and re-adds `node` itself; it looks
                    # like `next_node` may have been intended here —
                    # confirm against upstream before changing.
                    for next_node in children:
                        new.update(check_children(node))
                        if next_node.type != 'keyword' and node.type != 'expr_stmt':
                            new.add(node)
        return new

    nodes = set()
    import_names = set()
    decorated_funcs = []
    for scope in module.walk():
        # Names defined by imports (including nested import paths).
        for imp in set(scope.imports):
            import_names |= set(imp.get_defined_names())
            if imp.is_nested():
                import_names |= set(path[-1] for path in imp.paths())

        children = scope.children
        if isinstance(scope, tree.ClassOrFunc):
            children = children[2:]  # We don't want to include the class name.
        nodes |= add_nodes(children)

        # Flow-specific extras: for-loop iterables and except clauses.
        for flow in scope.flows:
            if flow.type == 'for_stmt':
                nodes.add(flow.children[3])
            elif flow.type == 'try_stmt':
                nodes.update(e for e in flow.except_clauses() if e is not None)

        try:
            decorators = scope.get_decorators()
        except AttributeError:
            pass
        else:
            if decorators:
                decorated_funcs.append(scope)
    return nodes, import_names, decorated_funcs
| gpl-2.0 |
SebDieBln/QGIS | python/ext-libs/jinja2/ext.py | 34 | 25077 | # -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow to add custom tags similar to the way django custom
tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import next, with_metaclass, string_types, iteritems
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
    """Metaclass that gives each extension class a unique identifier:
    its dotted import path (``module.ClassName``), stored as the
    ``identifier`` class attribute.
    """

    def __new__(cls, name, bases, d):
        new_cls = type.__new__(cls, name, bases, d)
        new_cls.identifier = '%s.%s' % (new_cls.__module__, new_cls.__name__)
        return new_cls
class Extension(with_metaclass(ExtensionRegistry, object)):
    """Extensions can be used to add extra functionality to the Jinja template
    system at the parser level.  Custom extensions are bound to an environment
    but may not store environment specific data on `self`.  The reason for
    this is that an extension can be bound to another environment (for
    overlays) by creating a copy and reassigning the `environment` attribute.

    As extensions are created by the environment they cannot accept any
    arguments for configuration.  One may want to work around that by using
    a factory function, but that is not possible as extensions are identified
    by their import name.  The correct way to configure the extension is
    storing the configuration values on the environment.  Because this way the
    environment ends up acting as central configuration storage the
    attributes may clash which is why extensions have to ensure that the names
    they choose for configuration are not too generic.  ``prefix`` for example
    is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
    name as includes the name of the extension (fragment cache).
    """

    #: if this extension parses this is the list of tags it's listening to.
    tags = set()

    #: the priority of that extension.  This is especially useful for
    #: extensions that preprocess values.  A lower value means higher
    #: priority.
    #:
    #: .. versionadded:: 2.4
    priority = 100

    def __init__(self, environment):
        self.environment = environment

    def bind(self, environment):
        """Create a copy of this extension bound to another environment."""
        # Shallow copy; extensions must not keep env-specific state on self.
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.environment = environment
        return rv

    def preprocess(self, source, name, filename=None):
        """This method is called before the actual lexing and can be used to
        preprocess the source.  The `filename` is optional.  The return value
        must be the preprocessed source.
        """
        return source

    def filter_stream(self, stream):
        """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
        to filter tokens returned.  This method has to return an iterable of
        :class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
        :class:`~jinja2.lexer.TokenStream`.

        In the `ext` folder of the Jinja2 source distribution there is a file
        called `inlinegettext.py` which implements a filter that utilizes this
        method.
        """
        return stream

    def parse(self, parser):
        """If any of the :attr:`tags` matched this method is called with the
        parser as first argument.  The token the parser stream is pointing at
        is the name token that matched.  This method has to return one or a
        list of multiple nodes.
        """
        raise NotImplementedError()

    def attr(self, name, lineno=None):
        """Return an attribute node for the current extension.  This is useful
        to pass constants on extensions to generated template code.

        ::

            self.attr('_my_attribute', lineno=lineno)
        """
        return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)

    def call_method(self, name, args=None, kwargs=None, dyn_args=None,
                    dyn_kwargs=None, lineno=None):
        """Call a method of the extension.  This is a shortcut for
        :meth:`attr` + :class:`jinja2.nodes.Call`.
        """
        if args is None:
            args = []
        if kwargs is None:
            kwargs = []
        return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
                          dyn_args, dyn_kwargs, lineno=lineno)
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
    # Resolve "gettext" from the template context at call time so the
    # alias always uses the currently installed translation callable.
    gettext = __context.resolve('gettext')
    return __context.call(gettext, *args, **kwargs)
def _make_new_gettext(func):
    """Wrap *func* into a newstyle gettext callable that performs the
    ``%``-interpolation (and autoescaping) itself.
    """
    @contextfunction
    def gettext(__context, __string, **variables):
        translated = __context.call(func, __string)
        if __context.eval_ctx.autoescape:
            # Markup() makes the interpolation below escape the values.
            translated = Markup(translated)
        return translated % variables
    return gettext
def _make_new_ngettext(func):
    """Wrap *func* into a newstyle ngettext callable; ``num`` is made
    available for interpolation automatically.
    """
    @contextfunction
    def ngettext(__context, __singular, __plural, __num, **variables):
        variables.setdefault('num', __num)
        translated = __context.call(func, __singular, __plural, __num)
        if __context.eval_ctx.autoescape:
            translated = Markup(translated)
        return translated % variables
    return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
    """Register the ``_`` gettext alias and the install/uninstall/extract
    helpers on the environment; newstyle gettext defaults to off.
    """
    Extension.__init__(self, environment)
    environment.globals['_'] = _gettext_alias
    environment.extend(
        install_gettext_translations=self._install,
        install_null_translations=self._install_null,
        install_gettext_callables=self._install_callables,
        uninstall_gettext_translations=self._uninstall,
        extract_translations=self._extract,
        newstyle_gettext=False
    )
def _install(self, translations, newstyle=None):
    """Install a gettext ``translations`` object (e.g. from babel)."""
    def _pick(unicode_name, fallback_name):
        # Prefer the unicode variant (Python 2 translation objects);
        # fall back to the plain one when absent or None.
        fn = getattr(translations, unicode_name, None)
        if fn is None:
            fn = getattr(translations, fallback_name)
        return fn

    gettext = _pick('ugettext', 'gettext')
    ngettext = _pick('ungettext', 'ngettext')
    self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
    """Install no-op translations: gettext is the identity, ngettext
    just picks singular or plural by ``n``.
    """
    self._install_callables(
        lambda s: s,
        lambda s, p, n: s if n == 1 else p,
        newstyle
    )
def _install_callables(self, gettext, ngettext, newstyle=None):
    """Expose *gettext*/*ngettext* as template globals, optionally
    switching the environment to newstyle gettext first.
    """
    env = self.environment
    if newstyle is not None:
        env.newstyle_gettext = newstyle
    if env.newstyle_gettext:
        # Newstyle callables handle interpolation/escaping themselves.
        gettext = _make_new_gettext(gettext)
        ngettext = _make_new_ngettext(ngettext)
    env.globals.update(gettext=gettext, ngettext=ngettext)
def _uninstall(self, translations):
    """Remove the installed translation callables again."""
    for key in ('gettext', 'ngettext'):
        self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
    """Extract translatable strings; *source* may be template source
    text (parsed first) or an already-parsed AST.
    """
    if isinstance(source, string_types):
        source = self.environment.parse(source)
    return extract_from_ast(source, gettext_functions)
def parse(self, parser):
    """Parse a ``{% trans %}`` tag into gettext/ngettext call nodes.

    Handles the optional variable assignments in the tag header, the
    singular body and an optional ``{% pluralize %}`` section.
    """
    lineno = next(parser.stream).lineno
    num_called_num = False

    # find all the variables referenced.  Additionally a variable can be
    # defined in the body of the trans block too, but this is checked at
    # a later state.
    plural_expr = None
    plural_expr_assignment = None
    variables = {}
    while parser.stream.current.type != 'block_end':
        if variables:
            parser.stream.expect('comma')

        # skip colon for python compatibility
        if parser.stream.skip_if('colon'):
            break

        name = parser.stream.expect('name')
        if name.value in variables:
            parser.fail('translatable variable %r defined twice.' %
                        name.value, name.lineno,
                        exc=TemplateAssertionError)

        # expressions
        if parser.stream.current.type == 'assign':
            next(parser.stream)
            variables[name.value] = var = parser.parse_expression()
        else:
            variables[name.value] = var = nodes.Name(name.value, 'load')

        if plural_expr is None:
            if isinstance(var, nodes.Call):
                # Calls must only run once; stash the result in _trans.
                plural_expr = nodes.Name('_trans', 'load')
                variables[name.value] = plural_expr
                plural_expr_assignment = nodes.Assign(
                    nodes.Name('_trans', 'store'), var)
            else:
                plural_expr = var
            num_called_num = name.value == 'num'

    parser.stream.expect('block_end')

    plural = plural_names = None
    have_plural = False
    referenced = set()

    # now parse until endtrans or pluralize
    singular_names, singular = self._parse_block(parser, True)
    if singular_names:
        referenced.update(singular_names)
        if plural_expr is None:
            plural_expr = nodes.Name(singular_names[0], 'load')
            num_called_num = singular_names[0] == 'num'

    # if we have a pluralize block, we parse that too
    if parser.stream.current.test('name:pluralize'):
        have_plural = True
        next(parser.stream)
        if parser.stream.current.type != 'block_end':
            name = parser.stream.expect('name')
            if name.value not in variables:
                parser.fail('unknown variable %r for pluralization' %
                            name.value, name.lineno,
                            exc=TemplateAssertionError)
            plural_expr = variables[name.value]
            num_called_num = name.value == 'num'
        parser.stream.expect('block_end')
        plural_names, plural = self._parse_block(parser, False)
        next(parser.stream)
        referenced.update(plural_names)
    else:
        next(parser.stream)

    # register free names as simple name expressions
    for var in referenced:
        if var not in variables:
            variables[var] = nodes.Name(var, 'load')

    if not have_plural:
        plural_expr = None
    elif plural_expr is None:
        parser.fail('pluralize without variables', lineno)

    node = self._make_node(singular, plural, variables, plural_expr,
                           bool(referenced),
                           num_called_num and have_plural)
    node.set_lineno(lineno)
    if plural_expr_assignment is not None:
        return [plural_expr_assignment, node]
    else:
        return node
    def _parse_block(self, parser, allow_pluralize):
        """Parse the body of a ``{% trans %}`` block until ``endtrans`` (or
        ``pluralize`` when *allow_pluralize* is true).

        Returns a ``(referenced, string)`` tuple where *referenced* is the
        list of variable names used in the block and *string* is the body
        rendered as an old-style ``%``-format string.
        """
        referenced = []
        buf = []
        while 1:
            if parser.stream.current.type == 'data':
                # Literal template data.  Percent signs are escaped because
                # the collected text is later used as a %-format string.
                buf.append(parser.stream.current.value.replace('%', '%%'))
                next(parser.stream)
            elif parser.stream.current.type == 'variable_begin':
                # ``{{ name }}`` becomes a named placeholder ``%(name)s``.
                next(parser.stream)
                name = parser.stream.expect('name').value
                referenced.append(name)
                buf.append('%%(%s)s' % name)
                parser.stream.expect('variable_end')
            elif parser.stream.current.type == 'block_begin':
                next(parser.stream)
                if parser.stream.current.test('name:endtrans'):
                    break
                elif parser.stream.current.test('name:pluralize'):
                    if allow_pluralize:
                        break
                    # a ``pluralize`` inside the plural branch itself
                    parser.fail('a translatable section can have only one '
                                'pluralize section')
                # any other block tag is forbidden inside ``trans``
                parser.fail('control structures in translatable sections are '
                            'not allowed')
            elif parser.stream.eos:
                parser.fail('unclosed translation block')
            else:
                assert False, 'internal parser error'
        return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
class ExprStmtExtension(Extension):
    """Provides a ``{% do %}`` tag: evaluates an expression for its side
    effects and discards the result (a print-less ``{{ ... }}``).
    """
    tags = set(['do'])

    def parse(self, parser):
        # consume the ``do`` token and keep its line number for the node
        lineno = next(parser.stream).lineno
        return nodes.ExprStmt(parser.parse_tuple(), lineno=lineno)
class LoopControlExtension(Extension):
    """Implements ``{% break %}`` and ``{% continue %}`` inside loops."""
    tags = set(['break', 'continue'])

    def parse(self, parser):
        token = next(parser.stream)
        # the tag name itself selects which control-flow node to emit
        node_cls = nodes.Break if token.value == 'break' else nodes.Continue
        return node_cls(lineno=token.lineno)
class WithExtension(Extension):
    """Backports a django-style ``{% with %}`` block that introduces a new
    scope with one or more comma separated assignments.
    """
    tags = set(['with'])

    def parse(self, parser):
        node = nodes.Scope(lineno=next(parser.stream).lineno)
        bindings = []
        while parser.stream.current.type != 'block_end':
            lineno = parser.stream.current.lineno
            if bindings:
                # every assignment after the first is comma separated
                parser.stream.expect('comma')
            target = parser.parse_assign_target()
            parser.stream.expect('assign')
            value = parser.parse_expression()
            bindings.append(nodes.Assign(target, value, lineno=lineno))
        body = parser.parse_statements(('name:endwith',), drop_needle=True)
        node.body = bindings + list(body)
        return node
class AutoEscapeExtension(Extension):
    """Allows toggling the autoescape behavior for a template scope."""
    tags = set(['autoescape'])

    def parse(self, parser):
        lineno = next(parser.stream).lineno
        modifier = nodes.ScopedEvalContextModifier(lineno=lineno)
        # the tag takes a single expression that becomes the new setting
        modifier.options = [nodes.Keyword('autoescape',
                                          parser.parse_expression())]
        modifier.body = parser.parse_statements(('name:endautoescape',),
                                                drop_needle=True)
        return nodes.Scope([modifier])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
                     babel_style=True):
    """Extract localizable strings from the given template node.

    By default matches are returned babel style: non string parameters as
    well as keyword arguments show up as `None`, which lets Babel work out
    the intent of gettext functions that take keyword arguments for
    placeholder expansion.  With ``babel_style=False`` only strings are
    returned and parameters are always stored in tuples; as a consequence
    invalid gettext calls (calls without a single string parameter or
    string parameters after non-string parameters) are skipped.

    >>> from jinja2 import Environment
    >>> env = Environment()
    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
    >>> list(extract_from_ast(node))
    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
    >>> list(extract_from_ast(node, babel_style=False))
    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]

    For every string found this function yields a ``(lineno, function,
    message)`` tuple, where ``lineno`` is the line the string was found on,
    ``function`` is the name of the gettext function used and ``message``
    is the string itself (or a tuple of strings for functions with multiple
    string arguments).

    This operates on the AST and therefore cannot extract any comments;
    for comment support use the babel extraction interface or extract
    comments yourself.
    """
    for call in node.find_all(nodes.Call):
        callee = call.node
        # only direct calls to one of the known gettext names count
        if not isinstance(callee, nodes.Name) or \
           callee.name not in gettext_functions:
            continue
        strings = []
        for arg in call.args:
            if isinstance(arg, nodes.Const) and \
               isinstance(arg.value, string_types):
                strings.append(arg.value)
            else:
                strings.append(None)
        # keyword and dynamic arguments can never be literal strings
        strings.extend([None] * len(call.kwargs))
        if call.dyn_args is not None:
            strings.append(None)
        if call.dyn_kwargs is not None:
            strings.append(None)
        if not babel_style:
            strings = tuple(x for x in strings if x is not None)
            if not strings:
                continue
        elif len(strings) == 1:
            strings = strings[0]
        else:
            strings = tuple(strings)
        yield call.lineno, callee.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.
    .. versionchanged:: 2.3
       Basic support for translation comments was added.  If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
       keywords.  For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.
    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.
    .. versionchanged:: 2.7
       A `silent` option can now be provided.  If set to `False` template
       syntax errors are propagated instead of being ignored.
    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
    """
    # Resolve the extensions to enable on the throw-away environment below;
    # the i18n extension itself must always be active for extraction to work.
    extensions = set()
    for extension in options.get('extensions', '').split(','):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)
    def getbool(options, key, default=False):
        # babel passes all option values as strings, so parse booleans leniently
        return options.get(key, str(default)).lower() in \
            ('1', 'on', 'yes', 'true')
    silent = getbool(options, 'silent', True)
    # NOTE: the Environment arguments below are positional -- their order
    # must match the Environment constructor signature exactly.
    environment = Environment(
        options.get('block_start_string', BLOCK_START_STRING),
        options.get('block_end_string', BLOCK_END_STRING),
        options.get('variable_start_string', VARIABLE_START_STRING),
        options.get('variable_end_string', VARIABLE_END_STRING),
        options.get('comment_start_string', COMMENT_START_STRING),
        options.get('comment_end_string', COMMENT_END_STRING),
        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
        options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
        getbool(options, 'trim_blocks', TRIM_BLOCKS),
        getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
        NEWLINE_SEQUENCE,
        getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
        frozenset(extensions),
        cache_size=0,
        auto_reload=False
    )
    if getbool(options, 'newstyle_gettext'):
        environment.newstyle_gettext = True
    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
    try:
        node = environment.parse(source)
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError as e:
        if not silent:
            raise
        # skip templates with syntax errors
        return
    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names so the extensions can be referenced as short
#: dotted paths, e.g. ``extensions=['jinja2.ext.i18n']``
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
| gpl-2.0 |
Kiiv/CouchPotatoServer | libs/git/files.py | 122 | 1831 | # Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ModifiedFile(object):
    """Value object describing a single file touched by a git change.

    Instances compare equal when they refer to the same filename and are
    hashable, so they can be collected in sets and used as dict keys.
    """
    def __init__(self, filename):
        super(ModifiedFile, self).__init__()
        self.filename = filename

    def __repr__(self):
        return self.filename

    def __eq__(self, other):
        return isinstance(other, ModifiedFile) and other.filename == self.filename

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly to keep the two operators consistent.
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ without __hash__ makes instances unhashable on
        # Python 3 (and inconsistent on Python 2); hash by the same key
        # that equality uses.
        return hash(self.filename)
| gpl-3.0 |
GehenHe/Recognize-Face-on-Android | tensorflow/python/summary/writer/writer_cache.py | 49 | 1780 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A cache for FileWriters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import ops
from tensorflow.python.summary.writer.writer import FileWriter
class FileWriterCache(object):
  """Process-wide cache of `FileWriter` objects, one per log directory."""

  # Maps logdir (str) -> FileWriter.
  _cache = {}

  # Guards all access to _cache.
  _lock = threading.RLock()

  @staticmethod
  def clear():
    """Drop every cached writer. Currently only used for unit tests."""
    with FileWriterCache._lock:
      FileWriterCache._cache = {}

  @staticmethod
  def get(logdir):
    """Return (creating it on first use) the `FileWriter` for `logdir`.

    Args:
      logdir: str, name of the directory.

    Returns:
      A `FileWriter`.
    """
    with FileWriterCache._lock:
      writer = FileWriterCache._cache.get(logdir)
      if writer is None:
        writer = FileWriter(logdir, graph=ops.get_default_graph())
        FileWriterCache._cache[logdir] = writer
      return writer
| apache-2.0 |
abhisg/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
    """Bayesian ridge regression
    Fit a Bayesian ridge model and optimize the regularization parameters
    lambda (precision of the weights) and alpha (precision of the noise).
    Read more in the :ref:`User Guide <bayesian_regression>`.
    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300.
    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.
    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6
    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter.
        Default is 1.e-6.
    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.
    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter.
        Default is 1.e-6
    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.
    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)
    alpha_ : float
        estimated precision of the noise.
    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.
    scores_ : float
        if computed, value of the objective function (to be maximized)
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.BayesianRidge()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, tol=0.001, verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])
    Notes
    -----
    See examples/linear_model/plot_bayesian_ridge.py for an example.
    """
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 fit_intercept=True, normalize=False, copy_X=True,
                 verbose=False):
        # Hyper-parameters are stored verbatim; all estimation happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the model
        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        n_samples, n_features = X.shape
        ### Initialization of the values of the parameters
        # alpha_ is the noise precision, lambda_ the (scalar) weight precision.
        alpha_ = 1. / np.var(y)
        lambda_ = 1.
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        self.scores_ = list()
        coef_old_ = None
        # The SVD is computed once; each iteration reuses U, S, Vh so the
        # per-iteration cost does not include a matrix factorization.
        XT_y = np.dot(X.T, y)
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2
        ### Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            # The branch picks whichever formulation involves the smaller
            # matrix (n_features x n_features vs n_samples x n_samples).
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
            ### Update alpha and lambda
            # gamma_ is the effective number of well-determined parameters.
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = (np.sum((alpha_ * eigen_vals_)
                      / (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1)
                       / (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
                      / (rmse_ + 2 * alpha_2))
            ### Compute the objective function
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_)
                            + n_samples * log(alpha_)
                            - alpha_ * rmse_
                            - (lambda_ * np.sum(coef_ ** 2))
                            - logdet_sigma_
                            - n_samples * log(2 * np.pi))
                self.scores_.append(s)
            ### Check for convergence
            # Convergence is measured on the change in coef_, not the score.
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)
        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_ = coef_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
    """Bayesian ARD regression.
    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be in Gaussian distributions.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedures (Evidence Maximization)
    Read more in the :ref:`User Guide <bayesian_regression>`.
    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300
    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.
    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6.
    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter. Default is 1.e-6.
    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.
    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter. Default is 1.e-6.
    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False.
    threshold_lambda : float, optional
        threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True.
        If True, X will be copied; else, it may be overwritten.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.
    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)
    alpha_ : float
        estimated precision of the noise.
    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.
    sigma_ : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights
    scores_ : float
        if computed, value of the objective function (to be maximized)
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.ARDRegression()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
            verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])
    Notes
    --------
    See examples/linear_model/plot_ard.py for an example.
    """
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        # Hyper-parameters are stored verbatim; all estimation happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.
        Iterative procedure to maximize the evidence
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        ### Launch the convergence loop
        # keep_lambda marks the features that have not been pruned yet.
        keep_lambda = np.ones(n_features, dtype=bool)
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose
        ### Initialization of the values of the parameters
        # alpha_ is the noise precision; lambda_ holds one precision per weight.
        alpha_ = 1. / np.var(y)
        lambda_ = np.ones(n_features)
        self.scores_ = list()
        coef_old_ = None
        ### Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma (using Woodbury matrix identity)
            # Only the currently kept features participate in the update.
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                           np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                           X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda]
                            * np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
                              * X[:, keep_lambda].T, sigma_)
            # Add 1/lambda on the diagonal in place.
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))
            ### Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
                                    / ((coef_[keep_lambda]) ** 2
                                       + 2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
                      / (rmse_ + 2. * alpha_2))
            ### Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0
            ### Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
                            + np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)
            ### Check for convergence
            # Convergence is measured on the change in coef_, not the score.
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)
        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
| bsd-3-clause |
pigeonflight/strider-plone | docker/appengine/lib/django-1.3/tests/regressiontests/conditional_processing/models.py | 47 | 6873 | # -*- coding:utf-8 -*-
from datetime import datetime
from django.test import TestCase
from django.utils import unittest
from django.utils.http import parse_etags, quote_etag, parse_http_date
FULL_RESPONSE = 'Test conditional get response'
# Timestamp the view under test reports, plus its RFC 1123 rendering.
LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47)
LAST_MODIFIED_STR = 'Sun, 21 Oct 2007 23:21:47 GMT'
# A date newer than LAST_MODIFIED (the client's cached copy is up to date).
LAST_MODIFIED_NEWER_STR = 'Mon, 18 Oct 2010 16:56:23 GMT'
# Day 32 does not exist -- exercises the invalid-date handling path.
LAST_MODIFIED_INVALID_STR = 'Mon, 32 Oct 2010 16:56:23 GMT'
# A date older than LAST_MODIFIED (the client's cached copy is stale).
EXPIRED_LAST_MODIFIED_STR = 'Sat, 20 Oct 2007 23:21:47 GMT'
# Current and stale entity tags served by the test views.
ETAG = 'b4246ffc4f62314ca13147c9d4f76974'
EXPIRED_ETAG = '7fae4cd4b0f81e7d2914700043aa8ed6'
class ConditionalGet(TestCase):
    """Integration tests for conditional GET handling
    (``If-Modified-Since`` / ``If-None-Match`` / ``If-Match``).
    Request headers are injected by mutating ``self.client.defaults`` so
    they are attached to every subsequent request from the same client.
    """
    def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
        # A 200 with the full body and (optionally) the validator headers.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, FULL_RESPONSE)
        if check_last_modified:
            self.assertEqual(response['Last-Modified'], LAST_MODIFIED_STR)
        if check_etag:
            self.assertEqual(response['ETag'], '"%s"' % ETAG)
    def assertNotModified(self, response):
        # 304 Not Modified responses must carry an empty body.
        self.assertEqual(response.status_code, 304)
        self.assertEqual(response.content, '')
    def testWithoutConditions(self):
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
    def testIfModifiedSince(self):
        # Exact match and a newer client date both yield 304; an invalid
        # or older date falls back to the full response.
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
    def testIfNoneMatch(self):
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
        # Several etags in If-None-Match is a bit exotic but why not?
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s", "%s"' % (ETAG, EXPIRED_ETAG)
        response = self.client.get('/condition/')
        self.assertNotModified(response)
    def testIfMatch(self):
        # If-Match guards writes: a stale etag must produce 412.
        self.client.defaults['HTTP_IF_MATCH'] = '"%s"' % ETAG
        response = self.client.put('/condition/etag/', {'data': ''})
        self.assertEqual(response.status_code, 200)
        self.client.defaults['HTTP_IF_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.put('/condition/etag/', {'data': ''})
        self.assertEqual(response.status_code, 412)
    def testBothHeaders(self):
        # When both validators are sent, either one failing forces a
        # full response.
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
    # The testSingleCondition* methods hit views that set only one of the
    # two validator headers; the other header must then be absent from the
    # response (hence check_last_modified/check_etag=False).
    def testSingleCondition1(self):
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified/')
        self.assertNotModified(response)
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)
    def testSingleCondition2(self):
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/etag/')
        self.assertNotModified(response)
        response = self.client.get('/condition/last_modified/')
        self.assertFullResponse(response, check_etag=False)
    def testSingleCondition3(self):
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified/')
        self.assertFullResponse(response, check_etag=False)
    def testSingleCondition4(self):
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)
    def testSingleCondition5(self):
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified2/')
        self.assertNotModified(response)
        response = self.client.get('/condition/etag2/')
        self.assertFullResponse(response, check_last_modified=False)
    def testSingleCondition6(self):
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/etag2/')
        self.assertNotModified(response)
        response = self.client.get('/condition/last_modified2/')
        self.assertFullResponse(response, check_etag=False)
    def testInvalidETag(self):
        # A lone escaped quote is not a valid etag; the condition must be
        # ignored rather than crash.
        self.client.defaults['HTTP_IF_NONE_MATCH'] = r'"\"'
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)
class ETagProcessing(unittest.TestCase):
    """Unit tests for the etag helpers in ``django.utils.http``."""

    def testParsing(self):
        parsed = parse_etags(r'"", "etag", "e\"t\"ag", "e\\tag", W/"weak"')
        expected = ['', 'etag', 'e"t"ag', r'e\tag', 'weak']
        self.assertEqual(parsed, expected)

    def testQuoting(self):
        self.assertEqual(quote_etag(r'e\t"ag'), r'"e\\t\"ag"')
class HttpDateProcessing(unittest.TestCase):
    """Tests ``parse_http_date`` against the three date formats HTTP
    allows: RFC 1123, RFC 850 and C asctime.
    """
    # NOTE: the datetime components below were previously written with
    # leading zeros (e.g. ``06``).  That only works on Python 2 by the
    # accident of octal-literal rules (06 == 6) and is a SyntaxError on
    # Python 3, so plain decimal literals are used instead -- the values
    # are unchanged.
    def testParsingRfc1123(self):
        parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
        self.assertEqual(datetime.utcfromtimestamp(parsed),
                         datetime(1994, 11, 6, 8, 49, 37))
    def testParsingRfc850(self):
        parsed = parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
        self.assertEqual(datetime.utcfromtimestamp(parsed),
                         datetime(1994, 11, 6, 8, 49, 37))
    def testParsingAsctime(self):
        parsed = parse_http_date('Sun Nov 6 08:49:37 1994')
        self.assertEqual(datetime.utcfromtimestamp(parsed),
                         datetime(1994, 11, 6, 8, 49, 37))
| mit |
atztogo/spglib | python/test/test_collinear_spin.py | 1 | 1335 | import unittest
import numpy as np
from spglib import get_symmetry
class TestGetSymmetry(unittest.TestCase):
    # Symmetry detection with collinear magnetic moments (spglib).

    def setUp(self):
        # Cubic cell with two atoms; the magmoms list (element 3 of the
        # cell tuple) is mutated per test to model different orderings.
        lattice = [[4, 0, 0], [0, 4, 0], [0, 0, 4]]
        positions = [[0, 0, 0], [0.5, 0.5, 0.5]]
        numbers = [1, 1]
        magmoms = [0, 0]
        self._cell = (lattice, positions, numbers, magmoms)

    def tearDown(self):
        pass

    def test_get_symmetry_ferro(self):
        # Equal moments: 96 symmetry operations, both atoms equivalent.
        self._cell[3][0] = 1
        self._cell[3][1] = 1
        sym = get_symmetry(self._cell)
        self.assertEqual(96, len(sym['rotations']))
        np.testing.assert_equal(sym['equivalent_atoms'], [0, 0])

    def test_get_symmetry_anti_ferro(self):
        # Opposite moments of equal magnitude: still 96 operations, atoms
        # remain equivalent.
        self._cell[3][0] = 1
        self._cell[3][1] = -1
        sym = get_symmetry(self._cell)
        self.assertEqual(96, len(sym['rotations']))
        np.testing.assert_equal(sym['equivalent_atoms'], [0, 0])

    def test_get_symmetry_broken_magmoms(self):
        # Unequal magnitudes break the symmetry: operation count halves
        # and the atoms become inequivalent.
        self._cell[3][0] = 1
        self._cell[3][1] = 2
        sym = get_symmetry(self._cell)
        self.assertEqual(48, len(sym['rotations']))
        np.testing.assert_equal(sym['equivalent_atoms'], [0, 1])
if __name__ == '__main__':
    # Run the suite explicitly (verbosity=2) rather than via unittest.main().
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestGetSymmetry))
    # unittest.main()
| bsd-3-clause |
aalien/subtitle2spu | parsesrt.py | 1 | 1661 | # Copyright (C) 2008 Antti Laine <antti.a.laine@tut.fi>
#
# This file is part of subtitle2spu.
#
# subtitle2spu is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# subtitle2spu is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with subtitle2spu. If not, see <http://www.gnu.org/licenses/>.
import sys
# States of the SRT parsing state machine (see parse below).
READNUMBER = 1  # expecting the subtitle sequence number
READTIME = 2    # expecting the "start --> end" timing line
READTEXT = 3    # collecting subtitle text lines until a blank line
def parse( file, writer ):
state = READNUMBER
linecount = 0
lines = ""
for buf in file:
if not buf:
continue
if state == READNUMBER:
number = buf.split()[0]
state = READTIME
continue
if state == READTIME:
starttime = buf.split()[0]
endtime = buf.split()[2]
state = READTEXT
continue
if state == READTEXT:
if buf[0] not in ("\n", "\r"):
linecount += 1
lines += buf
else:
print "Writing subtitle %s" %(number)
if not writer.write( number, starttime, endtime, lines ):
return False
state = READNUMBER
linecount = 0
lines = ""
return True
| mit |
soscpd/bee | root/tests/zguide/examples/Python/mdcliapi.py | 1 | 3030 | """Majordomo Protocol Client API, Python version.
Implements the MDP/Worker spec at http:#rfc.zeromq.org/spec:7.
Author: Min RK <benjaminrk@gmail.com>
Based on Java example by Arkadiusz Orzechowski
"""
import logging
import zmq
import MDP
from zhelpers import dump
class MajorDomoClient(object):
    """Majordomo Protocol Client API, Python version.

    Implements the MDP/Worker spec at http:#rfc.zeromq.org/spec:7.
    """
    broker = None
    ctx = None          # ZMQ context (created in __init__)
    client = None       # REQ socket to the broker
    poller = None
    timeout = 2500      # poll timeout per attempt, in ms
    retries = 3         # send attempts before giving up
    verbose = False

    def __init__(self, broker, verbose=False):
        self.broker = broker
        self.verbose = verbose
        self.ctx = zmq.Context()
        self.poller = zmq.Poller()
        logging.basicConfig(format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                            level=logging.INFO)
        self.reconnect_to_broker()

    def reconnect_to_broker(self):
        """Connect or reconnect to broker"""
        if self.client:
            self.poller.unregister(self.client)
            self.client.close()
        self.client = self.ctx.socket(zmq.REQ)
        self.client.linger = 0
        self.client.connect(self.broker)
        self.poller.register(self.client, zmq.POLLIN)
        if self.verbose:
            logging.info("I: connecting to broker at %s...", self.broker)

    def send(self, service, request):
        """Send request to broker and get reply by hook or crook.

        Takes ownership of request message and destroys it when sent.
        Returns the reply message or None if there was no reply.
        """
        if not isinstance(request, list):
            request = [request]
        # Prepend the MDP client header and the target service name.
        request = [MDP.C_CLIENT, service] + request
        if self.verbose:
            logging.warn("I: send request to '%s' service: ", service)
            dump(request)
        reply = None

        retries = self.retries
        while retries > 0:
            self.client.send_multipart(request)
            try:
                items = self.poller.poll(self.timeout)
            except KeyboardInterrupt:
                break  # interrupted

            if items:
                msg = self.client.recv_multipart()
                if self.verbose:
                    logging.info("I: received reply:")
                    dump(msg)

                # Don't try to handle errors, just assert noisily
                assert len(msg) >= 3

                header = msg.pop(0)
                assert MDP.C_CLIENT == header

                reply_service = msg.pop(0)
                assert service == reply_service

                reply = msg
                break
            else:
                if retries:
                    logging.warn("W: no reply, reconnecting...")
                    self.reconnect_to_broker()
                else:
                    logging.warn("W: permanent error, abandoning")
                    break
                retries -= 1

        return reply

    def destroy(self):
        # Bug fix: the ZMQ context is stored as ``self.ctx`` (see
        # __init__); ``self.context`` raised AttributeError here.
        self.ctx.destroy()
| mit |
Cerberus98/quark | quark/tests/functional/mysql/test_ipam.py | 3 | 6645 | import contextlib
import datetime
import mock
import netaddr
from neutron.common import exceptions
from neutron.common import rpc
from oslo_config import cfg
from oslo_utils import timeutils
from quark.db import api as db_api
import quark.ipam
from quark.tests.functional.mysql.base import MySqlBaseFunctionalTest
class QuarkIpamBaseFunctionalTest(MySqlBaseFunctionalTest):
    # Shared fixture: stubs out oslo messaging so RPC initialization in the
    # tested code does not need a running message bus.
    def setUp(self):
        super(QuarkIpamBaseFunctionalTest, self).setUp()
        patcher = mock.patch("neutron.common.rpc.oslo_messaging")
        patcher.start()
        self.addCleanup(patcher.stop)
        rpc.init(mock.MagicMock())
class QuarkIPAddressReallocate(QuarkIpamBaseFunctionalTest):
    """IP allocation behavior when a deallocated address is available."""

    @contextlib.contextmanager
    def _stubs(self, network, subnet, address, lock=False):
        # Build a network/subnet/deallocated-address fixture in one DB
        # transaction, optionally putting a lock holder on the address.
        self.ipam = quark.ipam.QuarkIpamANY()
        with self.context.session.begin():
            # next_auto_assign_ip is removed before subnet_create and
            # written afterwards (see NOTE below).
            next_ip = subnet.pop("next_auto_assign_ip", 0)
            net_mod = db_api.network_create(self.context, **network)
            subnet["network"] = net_mod
            sub_mod = db_api.subnet_create(self.context, **subnet)
            address["network_id"] = net_mod["id"]
            address["subnet_id"] = sub_mod["id"]
            ip = db_api.ip_address_create(self.context, **address)
            # Re-apply the remaining address fields (minus "address") as an
            # update so flags like _deallocated take effect.
            address.pop("address")
            ip = db_api.ip_address_update(self.context, ip, **address)
            # NOTE(asadoughi): update after cidr constructor has been invoked
            db_api.subnet_update(self.context,
                                 sub_mod,
                                 next_auto_assign_ip=next_ip)
        if lock:
            db_api.lock_holder_create(self.context, ip,
                                      name="testlock", type="ip_address")
        yield net_mod

    def test_allocate_finds_ip_reallocates(self):
        # A deallocated address older than ipam_reuse_after must be reused.
        network = dict(name="public", tenant_id="fake")
        ipnet = netaddr.IPNetwork("0.0.0.0/24")
        next_ip = ipnet.ipv6().first + 10
        subnet = dict(cidr="0.0.0.0/24", next_auto_assign_ip=next_ip,
                      ip_policy=None, tenant_id="fake", do_not_use=False)

        addr = netaddr.IPAddress("0.0.0.2")
        # Deallocated just past the reuse window, so it is eligible.
        after_reuse_after = cfg.CONF.QUARK.ipam_reuse_after + 1
        reusable_after = datetime.timedelta(seconds=after_reuse_after)
        deallocated_at = timeutils.utcnow() - reusable_after
        ip_address = dict(address=addr, version=4, _deallocated=True,
                          deallocated_at=deallocated_at)

        with self._stubs(network, subnet, ip_address) as net:
            ipaddress = []
            self.ipam.allocate_ip_address(self.context, ipaddress,
                                          net["id"], 0, 0)
            self.assertIsNotNone(ipaddress[0]['id'])
            # Addresses are stored in IPv6-mapped integer form.
            expected = netaddr.IPAddress("0.0.0.2").ipv6().value
            self.assertEqual(ipaddress[0]['address'], expected)
            self.assertEqual(ipaddress[0]['version'], 4)
            self.assertEqual(ipaddress[0]['used_by_tenant_id'], "fake")

    def test_allocate_finds_ip_in_do_not_use_subnet_raises(self):
        # A reusable IP inside a do_not_use subnet must not be handed out.
        network = dict(name="public", tenant_id="fake")
        ipnet = netaddr.IPNetwork("0.0.0.0/24")
        next_ip = ipnet.ipv6().first + 3
        subnet = dict(cidr="0.0.0.0/24", next_auto_assign_ip=next_ip,
                      ip_policy=None, tenant_id="fake", do_not_use=True)

        addr = netaddr.IPAddress("0.0.0.2")
        after_reuse_after = cfg.CONF.QUARK.ipam_reuse_after + 1
        reusable_after = datetime.timedelta(seconds=after_reuse_after)
        deallocated_at = timeutils.utcnow() - reusable_after
        ip_address = dict(address=addr, version=4, _deallocated=True,
                          deallocated_at=deallocated_at)

        with self._stubs(network, subnet, ip_address) as net:
            with self.assertRaises(exceptions.IpAddressGenerationFailure):
                self.ipam.allocate_ip_address(self.context, [], net["id"],
                                              0, 0)

    def test_allocate_finds_ip_locked_allocates_next_ip(self):
        # A locked reusable IP is skipped; the subnet's next auto-assign
        # address is allocated instead.
        network = dict(name="public", tenant_id="fake")
        ipnet = netaddr.IPNetwork("0.0.0.0/24")
        next_ip = ipnet.ipv6().first + 10
        subnet = dict(cidr="0.0.0.0/24", next_auto_assign_ip=next_ip,
                      ip_policy=None, tenant_id="fake", do_not_use=False)

        addr = netaddr.IPAddress("0.0.0.2")
        after_reuse_after = cfg.CONF.QUARK.ipam_reuse_after + 1
        reusable_after = datetime.timedelta(seconds=after_reuse_after)
        deallocated_at = timeutils.utcnow() - reusable_after
        ip_address = dict(address=addr, version=4, _deallocated=True,
                          deallocated_at=deallocated_at)

        with self._stubs(network, subnet, ip_address, lock=True) as net:
            ipaddress = []
            self.ipam.allocate_ip_address(self.context, ipaddress,
                                          net["id"], 0, 0)
            self.assertIsNotNone(ipaddress[0]['id'])
            self.assertEqual(ipaddress[0]['address'], next_ip)
            self.assertEqual(ipaddress[0]['version'], 4)
            self.assertEqual(ipaddress[0]['used_by_tenant_id'], "fake")
class MacAddressReallocate(QuarkIpamBaseFunctionalTest):
    """MAC address reallocation from ranges holding deallocated entries."""

    @contextlib.contextmanager
    def _stubs(self, do_not_use):
        """Create a MAC range containing one long-deallocated address."""
        self.ipam = quark.ipam.QuarkIpamANY()
        mar = db_api.mac_address_range_create(
            self.context,
            cidr="00:00:00:00:00:00/40",
            first_address=0, last_address=255,
            next_auto_assign_mac=6,
            do_not_use=do_not_use)
        mac = db_api.mac_address_create(
            self.context,
            address=1,
            mac_address_range=mar)
        # Deallocated at the epoch, so it is always past any reuse window.
        db_api.mac_address_update(
            self.context, mac,
            deallocated=True,
            deallocated_at=datetime.datetime(1970, 1, 1))
        self.context.session.flush()
        yield mar

    def test_reallocate_mac(self):
        # The deallocated MAC (address 1) must be reused.
        with self._stubs(do_not_use=False):
            realloc_mac = self.ipam.allocate_mac_address(self.context, 0, 0, 0)
            self.assertEqual(realloc_mac["address"], 1)

    def test_delete_mac_with_mac_range_do_not_use(self):
        # Idiom fix (PEP 8 E731): a named local helper instead of a lambda
        # assignment.
        def find_macs(mar):
            return db_api.mac_address_find(
                self.context,
                mac_address_range_id=mar["id"],
                scope=db_api.ALL)

        with self._stubs(do_not_use=True) as mar:
            self.assertEqual(len(find_macs(mar)), 1)
            # Allocation from a do_not_use range must fail and purge the
            # stale MAC entry.
            with self.assertRaises(exceptions.MacAddressGenerationFailure):
                self.ipam.allocate_mac_address(self.context, 0, 0, 0)
            self.assertEqual(len(find_macs(mar)), 0)
| apache-2.0 |
epssy/hue | desktop/core/ext-py/python-openid-2.2.5/openid/test/test_openidyadis.py | 87 | 4828 | import unittest
from openid.consumer.discover import \
OpenIDServiceEndpoint, OPENID_1_1_TYPE, OPENID_1_0_TYPE
from openid.yadis.services import applyFilter
XRDS_BOILERPLATE = '''\
<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS xmlns:xrds="xri://$xrds"
xmlns="xri://$xrd*($v*2.0)"
xmlns:openid="http://openid.net/xmlns/1.0">
<XRD>
%s\
</XRD>
</xrds:XRDS>
'''
def mkXRDS(services):
    """Wrap a <Service> XML fragment in the standard XRDS boilerplate."""
    return XRDS_BOILERPLATE % (services,)
def mkService(uris=None, type_uris=None, local_id=None, dent=' '):
    """Build a <Service> XML fragment for an XRDS document.

    Args:
        uris: list of URI strings, or (uri, priority) tuples for entries
            that carry a priority attribute.
        type_uris: list of Type URI strings.
        local_id: value for an <openid:Delegate> element, if any.
        dent: indentation prefix for the <Service> element.

    Returns:
        The XML fragment as a string.
    """
    chunks = [dent, '<Service>\n']
    dent2 = dent + ' '
    if type_uris:
        for type_uri in type_uris:
            chunks.extend([dent2 + '<Type>', type_uri, '</Type>\n'])

    if uris:
        for uri in uris:
            # Idiom fix: isinstance instead of "type(uri) is tuple"; this
            # also accepts tuple subclasses.
            if isinstance(uri, tuple):
                uri, prio = uri
            else:
                prio = None

            chunks.extend([dent2, '<URI'])
            if prio is not None:
                chunks.extend([' priority="', str(prio), '"'])
            chunks.extend(['>', uri, '</URI>\n'])

    if local_id:
        chunks.extend(
            [dent2, '<openid:Delegate>', local_id, '</openid:Delegate>\n'])

    chunks.extend([dent, '</Service>\n'])

    return ''.join(chunks)
# Different sets of server URLs for use in the URI tag
server_url_options = [
[], # This case should not generate an endpoint object
['http://server.url/'],
['https://server.url/'],
['https://server.url/', 'http://server.url/'],
['https://server.url/',
'http://server.url/',
'http://example.server.url/'],
]
# Used for generating test data
def subsets(l):
    """Generate all sublists of a list, including the empty sublist.

    (Doc fix: the original docstring claimed "non-empty", but the empty
    sublist is always the first element of the result, and the module-level
    test data relies on filtering it out explicitly.)
    """
    subsets_list = [[]]
    for x in l:
        # Each element doubles the set: every existing sublist, with and
        # without x prepended.
        subsets_list += [[x] + t for t in subsets_list]
    return subsets_list
# A couple of example extension type URIs. These are not at all
# official, but are just here for testing.
ext_types = [
'http://janrain.com/extension/blah',
'http://openid.net/sreg/1.0',
]
# All valid combinations of Type tags that should produce an OpenID endpoint
type_uri_options = [
exts + ts
# All non-empty sublists of the valid OpenID type URIs
for ts in subsets([OPENID_1_0_TYPE, OPENID_1_1_TYPE])
if ts
# All combinations of extension types (including empty extenstion list)
for exts in subsets(ext_types)
]
# Range of valid Delegate tag values for generating test data
local_id_options = [
None,
'http://vanity.domain/',
'https://somewhere/yadis/',
]
# All combinations of valid URIs, Type URIs and Delegate tags
data = [
(uris, type_uris, local_id)
for uris in server_url_options
for type_uris in type_uri_options
for local_id in local_id_options
]
class OpenIDYadisTest(unittest.TestCase):
    """Parameterized success case: one (uris, type_uris, local_id) combo."""

    def __init__(self, uris, type_uris, local_id):
        unittest.TestCase.__init__(self)
        self.uris = uris
        self.type_uris = type_uris
        self.local_id = local_id

    def shortDescription(self):
        # XXX:
        return 'Successful OpenID Yadis parsing case'

    def setUp(self):
        self.yadis_url = 'http://unit.test/'

        # Create an XRDS document to parse
        services = mkService(uris=self.uris,
                             type_uris=self.type_uris,
                             local_id=self.local_id)
        self.xrds = mkXRDS(services)

    def runTest(self):
        # Parse into endpoint objects that we will check
        endpoints = applyFilter(
            self.yadis_url, self.xrds, OpenIDServiceEndpoint)

        # make sure there are the same number of endpoints as
        # URIs. This assumes that the type_uris contains at least one
        # OpenID type.
        self.failUnlessEqual(len(self.uris), len(endpoints))

        # So that we can check equality on the endpoint types
        type_uris = list(self.type_uris)
        type_uris.sort()

        seen_uris = []
        for endpoint in endpoints:
            seen_uris.append(endpoint.server_url)

            # All endpoints will have same yadis_url
            self.failUnlessEqual(self.yadis_url, endpoint.claimed_id)

            # and local_id
            self.failUnlessEqual(self.local_id, endpoint.local_id)

            # and types
            actual_types = list(endpoint.type_uris)
            actual_types.sort()
            self.failUnlessEqual(actual_types, type_uris)

        # So that they will compare equal, because we don't care what
        # order they are in
        seen_uris.sort()
        uris = list(self.uris)
        uris.sort()

        # Make sure we saw all URIs, and saw each one once
        self.failUnlessEqual(uris, seen_uris)
def pyUnitTests():
    """Build a TestSuite with one OpenIDYadisTest per parameter combination."""
    return unittest.TestSuite([OpenIDYadisTest(*args) for args in data])
| apache-2.0 |
Kast0rTr0y/ansible | lib/ansible/modules/system/debconf.py | 25 | 5870 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure .deb packages.
(c) 2014, Brian Coca <briancoca+ansible@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query
existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied.
- C(seen) was added in 2.2.
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text, seen]
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
- debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
# set to generate locales:
- debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
# Accept oracle license
- debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: true
vtype: select
# Specifying package you can register/return the list of questions and current values
- debconf:
name: tzdata
'''
def get_selections(module, pkg):
    """Return the current debconf selections for ``pkg`` as a dict.

    Args:
        module: AnsibleModule instance (command execution / failure).
        pkg: name of the package to query with debconf-show.

    Returns:
        dict mapping question name to its current value.
    """
    cmd = [module.get_bin_path('debconf-show', True), pkg]
    # Robustness fix: pass the argv list directly instead of ' '.join(cmd),
    # so a package name containing whitespace or shell metacharacters is
    # handled as a single argument.
    rc, out, err = module.run_command(cmd)

    if rc != 0:
        module.fail_json(msg=err)

    selections = {}

    for line in out.splitlines():
        (key, value) = line.split(':', 1)
        # debconf-show marks already-"seen" questions with a leading '*'.
        selections[key.strip('*').strip()] = value.strip()

    return selections
def set_selection(module, pkg, question, vtype, value, unseen):
    """Preseed a single debconf answer via debconf-set-selections."""
    cmd = [module.get_bin_path('debconf-set-selections', True)]
    if unseen:
        cmd.append('-u')

    # debconf expects lowercase booleans on stdin.
    if vtype == 'boolean':
        if value == 'True':
            value = 'true'
        elif value == 'False':
            value = 'false'

    data = ' '.join([pkg, question, vtype, value])
    return module.run_command(cmd, data=data)
def main():
    """Entry point: query current selections and optionally preseed one."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, aliases=['pkg'], type='str'),
            question = dict(required=False, aliases=['setting', 'selection'], type='str'),
            vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text', 'seen']),
            value = dict(required=False, type='str', aliases=['answer']),
            unseen = dict(required=False, type='bool'),
        ),
        required_together = ( ['question','vtype', 'value'],),
        supports_check_mode=True,
    )

    #TODO: enable passing array of options and/or debconf file from get-selections dump
    pkg = module.params["name"]
    question = module.params["question"]
    vtype = module.params["vtype"]
    value = module.params["value"]
    unseen = module.params["unseen"]

    prev = get_selections(module, pkg)

    changed = False
    msg = ""

    if question is not None:
        if vtype is None or value is None:
            module.fail_json(msg="when supplying a question you must supply a valid vtype and value")

        # Idiom fix: "question not in prev" instead of "not question in prev".
        if question not in prev or prev[question] != value:
            changed = True

        if changed:
            if not module.check_mode:
                rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
                if rc:
                    module.fail_json(msg=e)

            curr = { question: value }
            # Narrow "previous" down to the question at hand for reporting.
            if question in prev:
                prev = {question: prev[question]}
            else:
                prev[question] = ''
            if module._diff:
                after = prev.copy()
                after.update(curr)
                diff_dict = {'before': prev, 'after': after}
            else:
                diff_dict = {}

            module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)

    # Query-only path (or nothing changed): report all current selections.
    module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
CuonDeveloper/cuon | cuon_client/cuon/bin/cuon/Databases/SingleCuon.py | 3 | 2083 | # -*- coding: utf-8 -*-
##Copyright (C) [2005] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import sys
import os
#sys.path.append(os.environ['CUON_PATH'])
from cuon.Databases.SingleData import SingleData
import logging
import threading
class SingleCuon(SingleData):
    # Table wrapper for the "cuon_clients" table (see SingleData base).
    # NOTE(review): this is Python 2 code (print statements, backtick
    # repr, the file() builtin); kept byte-identical.
    def __init__(self, allTables):
        SingleData.__init__(self)
        # tables.dbd and address
        self.sNameOfTable = "cuon_clients"
        self.xmlTableDef = 0
        self.loadTable(allTables)
        # self.saveTable()
        #self.athread = threading.Thread(target = self.loadTable())
        #self.athread.start()
        self.listHeader['names'] = ['name', 'zip', 'city', 'Street', 'ID']
        self.listHeader['size'] = [25,10,25,25,10]
        self.out( "number of Columns ")
        self.out( len(self.table.Columns))
        #
        self.statusfields = ['lastname', 'city']

    def saveNewVersion(self, id):
        # Load record ``id`` as the zope user and write its decoded
        # clientdata blob to ../newclient.
        self.dicUser['Name'] = 'zope'
        self.sqlDicUser['Name'] = 'zope'
        self.load(id)
        result = self.getFirstRecord()
        print len(result)
        #rint `result`
        data = result['clientdata']
        print 'len data = ', `len(data)`
        cuonpath = '..'
        f = file(cuonpath + '/newclient','wb')
        data = self.doDecode(data)
        f.write(data)
        f.close()
| gpl-3.0 |
ENjOyAbLE1991/scrapy | scrapy/commands/shell.py | 107 | 2194 | """
Scrapy Shell
See documentation in docs/topics/shell.rst
"""
from threading import Thread
from scrapy.commands import ScrapyCommand
from scrapy.shell import Shell
from scrapy.http import Request
from scrapy.utils.spider import spidercls_for_request, DefaultSpider
class Command(ScrapyCommand):
    """The ``scrapy shell`` command: an interactive scraping console."""

    requires_project = False
    # Keep the engine alive between fetches and silence periodic log stats.
    default_settings = {'KEEP_ALIVE': True, 'LOGSTATS_INTERVAL': 0}

    def syntax(self):
        return "[url|file]"

    def short_desc(self):
        return "Interactive scraping console"

    def long_desc(self):
        return "Interactive console for scraping the given url"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-c", dest="code",
            help="evaluate the code in the shell, print the result and exit")
        parser.add_option("--spider", dest="spider",
            help="use this spider")

    def update_vars(self, vars):
        """You can use this function to update the Scrapy objects that will be
        available in the shell
        """
        pass

    def run(self, args, opts):
        # Spider selection: explicit --spider wins; otherwise try to match
        # the URL against project spiders, falling back to DefaultSpider.
        url = args[0] if args else None
        spider_loader = self.crawler_process.spider_loader

        spidercls = DefaultSpider
        if opts.spider:
            spidercls = spider_loader.load(opts.spider)
        elif url:
            spidercls = spidercls_for_request(spider_loader, Request(url),
                                              spidercls, log_multiple=True)

        # The crawler is created this way since the Shell manually handles the
        # crawling engine, so the set up in the crawl method won't work
        crawler = self.crawler_process._create_crawler(spidercls)
        # The Shell class needs a persistent engine in the crawler
        crawler.engine = crawler._create_engine()
        crawler.engine.start()

        self._start_crawler_thread()

        shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)
        shell.start(url=url)

    def _start_crawler_thread(self):
        # Run the crawler in a daemon thread so the interactive shell can
        # own the main thread.
        t = Thread(target=self.crawler_process.start,
                   kwargs={'stop_after_crawl': False})
        t.daemon = True
        t.start()
| bsd-3-clause |
kamcpp/tensorflow | tensorflow/python/debug/debug_data.py | 6 | 33994 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data structures and helpers for TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import tensor_util
def load_tensor_from_event_file(event_file_path):
  """Load a tensor value from a dump (Event) file.

  The file is expected to hold a serialized Event proto whose first summary
  value carries the tensor.

  Args:
    event_file_path: Path to the event file.

  Returns:
    The tensor value as a numpy ndarray, or None if the tensor was
    uninitialized (no content in the proto).
  """
  event = event_pb2.Event()
  with open(event_file_path, "rb") as event_file:
    event.ParseFromString(event_file.read())

  tensor_proto = event.summary.value[0].tensor
  if tensor_proto.tensor_content or tensor_proto.string_val:
    # Initialized tensor.
    return tensor_util.MakeNdarray(tensor_proto)
  # Uninitialized tensor.
  return None
def parse_node_or_tensor_name(name):
  """Split a node or tensor name into (node_name, output_slot).

  Args:
    name: An input node name (e.g., "node_a") or tensor name (e.g.,
      "node_a:0"), as a str.

  Returns:
    1) The node name, as a str; for tensor names, the trailing colon and
       slot index are stripped.
    2) The integer output slot for tensor names, otherwise None.
  """
  node_name, sep, slot_str = name.rpartition(":")
  if sep and slot_str:
    # Tensor name: everything after the final colon is the slot index.
    return node_name, int(slot_str)
  # Plain node name (or a name ending in ":", which is returned verbatim).
  return name, None
def _get_tensor_name(node_name, output_slot):
"""Get tensor name given node name and output slot index.
Args:
node_name: Name of the node that outputs the tensor, as a string.
output_slot: Output slot index of the tensor, as an integer.
Returns:
Name of the tensor, as a string.
"""
return "%s:%d" % (node_name, output_slot)
def _get_tensor_watch_key(node_name, output_slot, debug_op):
  """Return the watch key "<tensor_name>:<debug_op>" for a debug watch.

  Args:
    node_name: Name of the node producing the watched tensor, as a string.
    output_slot: Output slot index of the tensor, as an integer.
    debug_op: Name of the debug op watching the tensor, as a string.

  Returns:
    The watch key string.
  """
  tensor_name = _get_tensor_name(node_name, output_slot)
  return "%s:%s" % (tensor_name, debug_op)
def _is_copy_node(node_name):
"""Determine whether a node name is that of a debug Copy node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug Copy
node.
"""
return node_name.startswith("__copy_")
def _is_debug_node(node_name):
"""Determine whether a node name is that of a debug node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug node.
"""
return node_name.startswith("__dbg_")
def _parse_debug_node_name(node_name):
"""Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
4. Name of the debug op, as a str, e.g, "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name.
"""
prefix = "__dbg_"
name = node_name
if not name.startswith(prefix):
raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)
name = name[len(prefix):]
if name.count("_") < 2:
raise ValueError("Invalid debug node name: '%s'" % node_name)
debug_op = name[name.rindex("_") + 1:]
name = name[:name.rindex("_")]
debug_op_index = int(name[name.rindex("_") + 1:])
name = name[:name.rindex("_")]
if name.count(":") != 1:
raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
watched_node_name = name[:name.index(":")]
watched_output_slot = int(name[name.index(":") + 1:])
return watched_node_name, watched_output_slot, debug_op_index, debug_op
def has_inf_or_nan(datum, tensor):
  """Predicate: does ``tensor`` contain any nan or inf values?

  This predicate is common enough to merit definition in this module. Its
  signature follows the requirement of DebugDumpDir's find() method.

  Args:
    datum: (DebugTensorDatum) Datum metadata (unused by this predicate).
    tensor: (numpy.ndarray or None) Value of the tensor. None represents
      an uninitialized tensor.

  Returns:
    (bool) True if and only if tensor holds at least one nan or inf value.
  """
  del datum  # Unused in this predicate.

  if tensor is None:
    # An uninitialized tensor has no values to be bad.
    return False
  return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
class DebugTensorDatum(object):
  """Metadata for a single tensor dumped by tfdbg.

  Holds the node name, output slot, debug op and timestamp parsed from a
  dump file's path. The (space-expensive) tensor value itself is not
  stored; it is loaded on demand from ``file_path`` by ``get_tensor``.
  """

  def __init__(self, dump_root, debug_dump_rel_path):
    """Parse dump metadata from a path relative to the dump root.

    Args:
      dump_root: Debug dump root directory.
      debug_dump_rel_path: Path to a debug dump file, relative to the
        dump root, e.g. "ns_1/node_a_0_DebugIdentity_1234456789" for a
        root of "/tmp/tfdbg_1".
    """
    base = os.path.basename(debug_dump_rel_path)

    # File name layout: <node>_<output_slot>_<debug_op>_<timestamp>,
    # where <node> itself may contain underscores.
    # TODO(cais): Add hostname and pid to support dumps from distributed
    # sessions.
    parts = base.split("_")
    self._timestamp = int(parts[-1])
    self._debug_op = parts[-2]
    self._output_slot = int(parts[-3])

    namespace = os.path.dirname(debug_dump_rel_path)
    node_base_name = "_".join(parts[:-3])
    if namespace and namespace != ".":
      self._node_name = namespace + "/" + node_base_name
    else:
      self._node_name = node_base_name

    self._file_path = os.path.join(dump_root, debug_dump_rel_path)

  def __str__(self):
    return "{DebugTensorDatum: %s:%d @ %s @ %d}" % (self.node_name,
                                                    self.output_slot,
                                                    self.debug_op,
                                                    self.timestamp)

  def __repr__(self):
    return self.__str__()

  def get_tensor(self):
    """Load and return the tensor value from the dump (Event) file."""
    return load_tensor_from_event_file(self.file_path)

  @property
  def timestamp(self):
    return self._timestamp

  @property
  def debug_op(self):
    return self._debug_op

  @property
  def node_name(self):
    return self._node_name

  @property
  def output_slot(self):
    return self._output_slot

  @property
  def tensor_name(self):
    return _get_tensor_name(self.node_name, self.output_slot)

  @property
  def watch_key(self):
    """Watch key in the form <tensor_name>:<debug_op>."""
    return _get_tensor_watch_key(self.node_name, self.output_slot,
                                 self.debug_op)

  @property
  def file_path(self):
    return self._file_path
class DebugDumpDir(object):
  """Data set from a debug dump directory on filesystem.

  An instance of DebugDumpDir contains all DebugTensorDatum in a tfdbg dump
  root directory. This is an immutable object, of which all constitute tensor
  dump files and partition_graphs are loaded during the __init__ call.
  """

  def __init__(self, dump_root, partition_graphs=None, validate=True):
    """DebugDumpDir constructor.

    Args:
      dump_root: Path to the dump root directory.
      partition_graphs: A repeated field of GraphDefs representing the
          partition graphs executed by the TensorFlow runtime.
      validate: Whether the dump files are to be validated against the
          partition graphs.

    Raises:
      IOError: If dump_root does not exist as a directory.
      ValueError: If the dump_root directory contains file path patterns
          that do not conform to the canonical dump file naming pattern.
    """

    if not os.path.isdir(dump_root):
      raise IOError("Dump root directory %s does not exist" % dump_root)

    self._dump_root = dump_root
    self._dump_tensor_data = []

    for root, _, files in os.walk(self._dump_root):
      for f in files:
        # Canonical dump file names carry at least three "_"-separated
        # suffix fields: output slot, debug op and timestamp.
        if f.count("_") < 3:
          raise ValueError(
              "Dump file path does not conform to the naming pattern: %s" % f)

        debug_dump_rel_path = os.path.join(
            os.path.relpath(root, self._dump_root), f)
        self._dump_tensor_data.append(
            DebugTensorDatum(self._dump_root, debug_dump_rel_path))

    # Sort the data by ascending timestamp.
    # This sorting order reflects the order in which the TensorFlow
    # executor processed the nodes of the graph. It is (one of many
    # possible) topological sort of the nodes. This is useful for
    # displaying tensors in the debugger frontend as well as for the use
    # case in which the user wants to find a "culprit tensor", i.e., the
    # first tensor in the graph that exhibits certain problematic
    # properties, i.e., all zero values, or bad numerical values such as
    # nan and inf.
    self._dump_tensor_data = sorted(
        self._dump_tensor_data, key=lambda x: x.timestamp)

    # Time stamp of the first tensor dump (None if there are no dumps).
    if self._dump_tensor_data:
      self._t0 = self._dump_tensor_data[0].timestamp
    else:
      self._t0 = None

    # Create a map from watch key (tensor name + debug op) to
    # DebugTensorDatum item.
    # Also make a map from watch key to relative timestamp.
    # "relative" means (absolute timestamp - t0).
    self._watch_key_to_datum = {}
    self._watch_key_to_rel_time = {}
    for datum in self._dump_tensor_data:
      if datum.watch_key not in self._watch_key_to_datum:
        self._watch_key_to_datum[datum.watch_key] = [datum]
        self._watch_key_to_rel_time[datum.watch_key] = [
            datum.timestamp - self._t0
        ]
      else:
        self._watch_key_to_datum[datum.watch_key].append(datum)
        self._watch_key_to_rel_time[datum.watch_key].append(datum.timestamp -
                                                            self._t0)

    # Initialize partition graph-related information; populated by
    # _load_partition_graphs().
    self._partition_graphs = None
    self._node_attributes = None
    self._node_inputs = None
    self._node_ctrl_inputs = None
    self._node_recipients = None
    self._node_ctrl_recipients = None
    self._devices = None
    self._node_devices = None
    self._node_op_types = None
    self._debug_watches = None

    # Check the dump data against partition executor graphs.
    if partition_graphs:
      self._load_partition_graphs(partition_graphs)

    if (partition_graphs is not None) and validate:
      self._validate_dump_with_graphs()

  @property
  def dumped_tensor_data(self):
    """All DebugTensorDatum loaded from the dump root, in ascending time order."""
    return self._dump_tensor_data

  @property
  def t0(self):
    """Absolute timestamp of the first dumped tensor.

    Returns:
      Absolute timestamp of the first dumped tensor, or None if no tensor
      was dumped.
    """
    return self._t0

  @property
  def size(self):
    """Total number of dumped tensors in the dump root directory.

    Returns:
      Total number of dumped tensors in the dump root directory.
    """
    return len(self._dump_tensor_data)

  def _load_partition_graphs(self, partition_graphs):
    """Load and process partition graphs.

    Load the graphs; parse the input and control input structure; obtain the
    device and op type of each node; remove the Copy and debug ops inserted
    by the debugger. The gathered information can be used to validate the
    tensor dumps.

    Args:
      partition_graphs: Partition graphs executed by the TensorFlow runtime,
        represented as repeated fields of GraphDef.

    Raises:
      ValueError: If duplicate node names are encountered.
    """

    self._partition_graphs = partition_graphs

    # A map from node name to node attributes.
    self._node_attributes = {}

    # A map from node name to the node's non-control inputs, for non-debug &
    # non-copy nodes only.
    self._node_inputs = {}

    # A map from node name to the node's control inputs.
    self._node_ctrl_inputs = {}

    # A map from node name to non-control recipients of the node's output(s).
    self._node_recipients = {}

    # A map from node name to control recipients of the node.
    self._node_ctrl_recipients = {}

    # A map from node name to debug watches.
    # The key is the watched node name.
    # The value is a dictionary.
    # Of this dictionary, the key is the watched_output_slot.
    # The value is a list of debug ops watching this output slot.
    self._debug_watches = {}

    # The list of devices and a map from node name to device.
    self._devices = []
    self._node_devices = {}

    # A map from node name to node op type.
    self._node_op_types = {}

    # A list of _Send ops that send Copy node outputs across devices.
    copy_send_nodes = []

    for pg in self._partition_graphs:
      for node in pg.node:
        if _is_debug_node(node.name):
          # This is a debug node. Parse the node name and retrieve the
          # information about debug watches on tensors. But do not include
          # the node in the graph.
          (watched_node_name, watched_output_slot, _,
           debug_op) = _parse_debug_node_name(node.name)

          if watched_node_name not in self._debug_watches:
            self._debug_watches[
                watched_node_name] = {watched_output_slot: [debug_op]}
          elif watched_output_slot not in self._debug_watches[
              watched_node_name]:
            self._debug_watches[watched_node_name][
                watched_output_slot] = [debug_op]
          else:
            # BUGFIX: the new debug op must be appended to the list keyed by
            # the watched output slot; the old code indexed the inner dict
            # with watched_node_name, raising KeyError when a slot already
            # had a watch.
            self._debug_watches[watched_node_name][
                watched_output_slot].append(debug_op)

          continue

        if node.name in self._node_inputs:
          raise ValueError("Duplicate node name: '%s'" % node.name)

        # Collect node attributes.
        self._node_attributes[node.name] = node.attr

        # Keep track of devices.
        if node.device not in self._devices and node.device:
          self._devices.append(node.device)

        self._node_inputs[node.name] = []
        self._node_ctrl_inputs[node.name] = []
        self._node_recipients[node.name] = []
        self._node_ctrl_recipients[node.name] = []

        self._node_devices[node.name] = node.device
        self._node_op_types[node.name] = node.op

        for inp in node.input:
          if _is_copy_node(inp) and node.op == "_Send":
            copy_send_nodes.append(node.name)

          if inp.startswith("^"):
            # A "^"-prefixed input is a control input.
            cinp = inp[1:]
            self._node_ctrl_inputs[node.name].append(cinp)
          else:
            self._node_inputs[node.name].append(inp)

    # Prune the Copy ops and associated _Send ops inserted by the debugger out
    # from the non-control inputs and output recipients map. Replace the inputs
    # and recipients with original ones.
    copy_nodes = []
    for node in self._node_inputs:
      if node in copy_send_nodes:
        continue

      if _is_copy_node(node):
        copy_nodes.append(node)

      inputs = self._node_inputs[node]

      for i in range(len(inputs)):
        inp = inputs[i]
        if _is_copy_node(inp):
          # Find the input to the Copy node, which should be the original
          # input to the node.
          orig_inp = self._node_inputs[inp][0]
          inputs[i] = orig_inp

    # Remove the Copy ops inserted by the debugger from the maps.
    for copy_node in copy_nodes:
      del self._node_inputs[copy_node]
      del self._node_ctrl_inputs[copy_node]
      del self._node_recipients[copy_node]
      del self._node_ctrl_recipients[copy_node]

    # Remove the _Send ops associated with the Copy ops.
    for copy_send_node in copy_send_nodes:
      del self._node_inputs[copy_send_node]
      del self._node_ctrl_inputs[copy_send_node]
      del self._node_recipients[copy_send_node]
      del self._node_ctrl_recipients[copy_send_node]

    # Prune the edges from debug ops from the control edge map.
    for node in self._node_ctrl_inputs:
      ctrl_inputs = self._node_ctrl_inputs[node]
      debug_op_inputs = []
      for ctrl_inp in ctrl_inputs:
        if _is_debug_node(ctrl_inp):
          debug_op_inputs.append(ctrl_inp)
      for debug_op_inp in debug_op_inputs:
        ctrl_inputs.remove(debug_op_inp)

    # Create the recipients maps.
    for node in self._node_inputs:
      inputs = self._node_inputs[node]
      for inp in inputs:
        # A tensor name: replace it with the node name.
        if inp.count(":") == 1:
          inp = inp.split(":")[0]
        self._node_recipients[inp].append(node)

    for node in self._node_ctrl_inputs:
      ctrl_inputs = self._node_ctrl_inputs[node]
      for ctrl_inp in ctrl_inputs:
        if ctrl_inp in copy_send_nodes:
          # Skip _Send ops associated with Copy nodes.
          continue
        self._node_ctrl_recipients[ctrl_inp].append(node)

  def _validate_dump_with_graphs(self):
    """Validate the dumped tensor data against the partition graphs.

    Raises:
      RuntimeError: If the partition graphs have not been loaded yet.
      ValueError: If dumps contain node names not found in partition graph.
        Or if the temporal order of the dump's timestamps violate the
        input relations on the partition graphs.
    """

    if not self._partition_graphs:
      raise RuntimeError("No partition graphs loaded.")

    # Verify that the node names in the dump data are all present in the
    # partition graphs.
    for datum in self._dump_tensor_data:
      if datum.node_name not in self._node_inputs:
        raise ValueError("Node name '%s' is not found in partition graphs." %
                         datum.node_name)

    pending_inputs = {}
    for node in self._node_inputs:
      pending_inputs[node] = []

      # TODO(cais): tfdbg currently does not watch control edges. Add control
      # edges to pending_inputs when it does.
      inputs = self._node_inputs[node]
      for inp in inputs:
        if inp.count(":") == 1:
          inp = inp.split(":")[0]

        # Keep track of only the watched nodes, as the debugger allows clients
        # to watch a subset of the nodes.
        if inp in self._debug_watches:
          pending_inputs[node].append(inp)

    for datum in self._dump_tensor_data:
      node = datum.node_name
      if pending_inputs[node]:
        raise ValueError("Causality violated in timing relations of debug "
                         "dumps: %s (%d): "
                         "these input(s) are not satisfied: %s" %
                         (node, datum.timestamp, repr(pending_inputs[node])))

      # Get the recipients of the node's output.
      recipients = self._node_recipients[node]

      for recipient in recipients:
        recipient_pending_inputs = pending_inputs[recipient]
        if node in recipient_pending_inputs:
          if self.node_op_type(recipient) == "Merge":
            # If this is a Merge op, we automatically clear the list because
            # a Merge node only requires one of its two inputs.
            del recipient_pending_inputs[:]
          else:
            del recipient_pending_inputs[recipient_pending_inputs.index(node)]

  def partition_graphs(self):
    """Get the partition graphs.

    Returns:
      Partition graphs as repeated fields of GraphDef.

    Raises:
      RuntimeError: If no partition graphs have been loaded.
    """
    if self._partition_graphs is None:
      raise RuntimeError("No partition graphs have been loaded.")

    return self._partition_graphs

  def nodes(self):
    """Get a list of all nodes from the partition graphs.

    Returns:
      All nodes' names, as a list of str.

    Raises:
      RuntimeError: If no partition graphs have been loaded.
    """
    if self._partition_graphs is None:
      raise RuntimeError("No partition graphs have been loaded.")

    return [node_name for node_name in self._node_inputs]

  def node_attributes(self, node_name):
    """Get attributes of a node.

    Args:
      node_name: Name of the node in question.

    Returns:
      Attributes of the node.

    Raises:
      RuntimeError: If no partition graphs have been loaded.
      ValueError: If no node named node_name exists.
    """
    if self._partition_graphs is None:
      raise RuntimeError("No partition graphs have been loaded.")

    if node_name in self._node_attributes:
      return self._node_attributes[node_name]
    else:
      raise ValueError("No node named \"%s\" exists." % node_name)

  def node_inputs(self, node_name, is_control=False):
    """Get the inputs of given node according to partition graphs.

    Args:
      node_name: Name of the node.
      is_control: Whether control inputs, rather than non-control inputs, are
        to be returned.

    Returns:
      All (non-)control inputs to the node, as a list of node names.

    Raises:
      RuntimeError: If node inputs and control inputs have not been loaded
        from partition graphs yet.
      ValueError: If the node does not exist in partition graphs.
    """
    if self._node_inputs is None or self._node_ctrl_inputs is None:
      raise RuntimeError(
          "Node inputs are not loaded from partition graphs yet.")

    if node_name not in self._node_inputs:
      raise ValueError("Node '%s' does not exist in partition graphs." %
                       node_name)

    if is_control:
      return self._node_ctrl_inputs[node_name]
    else:
      return self._node_inputs[node_name]

  def transitive_inputs(self, node_name, include_control=True):
    """Get the transitive inputs of given node according to partition graphs.

    Args:
      node_name: Name of the node.
      include_control: Include control inputs (True by default).

    Returns:
      All transitive inputs to the node, as a list of node names.

    Raises:
      RuntimeError: If node inputs and control inputs have not been loaded
        from partition graphs yet.
      ValueError: If the node does not exist in partition graphs.
    """
    if not self._node_inputs or not self._node_ctrl_inputs:
      raise RuntimeError(
          "Node inputs are not loaded from partition graphs yet.")

    if node_name not in self._node_inputs:
      raise ValueError("Node '%s' does not exist in partition graphs." %
                       node_name)

    inputs = []

    # Keep track of visited nodes to avoid infinite loops during input
    # tracing.
    visited_nodes = []

    def trace_inputs(node):
      """Inner function for recursive tracing of node inputs.

      The transitive input names are appended to the outer-scope list
      "inputs".

      Args:
        node: Name of the node, as a str.
      """
      if node.count(":") == 1:
        # This check is necessary for cases in which an input is not from the
        # 0-th output slot, e.g., from a Switch op.
        node = node[:node.rindex(":")]

      # Stop the tracing at a Merge op, as it is generally impossible to infer
      # outside the runtime which input to the Merge op is alive.
      if self._node_op_types[node] == "Merge":
        return

      if node in visited_nodes:
        # Avoid infinite loops.
        return
      visited_nodes.append(node)

      for inp in self._node_inputs[node]:
        if inp == node_name:
          continue
        inputs.append(inp)
        trace_inputs(inp)  # Recursive call.

      if include_control:
        for ctrl_inp in self._node_ctrl_inputs[node]:
          if ctrl_inp == node_name:
            continue
          inputs.append(ctrl_inp)
          trace_inputs(ctrl_inp)  # Recursive call.

    trace_inputs(node_name)

    return inputs

  def node_recipients(self, node_name, is_control=False):
    """Get recipient of the given node's output according to partition graphs.

    Args:
      node_name: Name of the node.
      is_control: Whether control outputs, rather than non-control outputs,
        are to be returned.

    Returns:
      All (non-)control recipients of the node's output(s), as a list of
      node names.

    Raises:
      RuntimeError: If node recipients have not been loaded
        from partition graphs yet.
      ValueError: If the node does not exist in partition graphs.
    """
    if self._node_recipients is None or self._node_ctrl_recipients is None:
      raise RuntimeError(
          "Node recipients are not loaded from partition graphs yet.")

    if node_name not in self._node_recipients:
      raise ValueError("Node '%s' does not exist in partition graphs." %
                       node_name)

    if is_control:
      return self._node_ctrl_recipients[node_name]
    else:
      return self._node_recipients[node_name]

  def devices(self):
    """Get the list of devices.

    Returns:
      Names of the devices encountered in the partition graphs, as a list
      of str.

    Raises:
      RuntimeError: If devices have not been loaded from partition graphs
        yet.
    """
    if self._devices is None:
      raise RuntimeError("Devices are not loaded from partition graphs yet.")

    return self._devices

  def node_exists(self, node_name):
    """Test if a node exists in the partition graphs.

    Args:
      node_name: Name of the node to be checked, as a str.

    Returns:
      A boolean indicating whether the node exists.

    Raises:
      RuntimeError: If no partition graphs have been loaded yet.
    """
    if self._node_inputs is None:
      raise RuntimeError(
          "Nodes have not been loaded from partition graphs yet.")

    return node_name in self._node_inputs

  def node_device(self, node_name):
    """Get the device of a node.

    Args:
      node_name: Name of the node.

    Returns:
      Name of the device on which the node is placed, as a str.

    Raises:
      RuntimeError: If node devices have not been loaded
        from partition graphs yet.
      ValueError: If the node does not exist in partition graphs.
    """
    if self._node_devices is None:
      raise RuntimeError(
          "Node devices are not loaded from partition graphs yet.")

    if node_name not in self._node_devices:
      raise ValueError("Node '%s' does not exist in partition graphs." %
                       node_name)

    return self._node_devices[node_name]

  def node_op_type(self, node_name):
    """Get the op type of given node.

    Args:
      node_name: Name of the node.

    Returns:
      Type of the node's op, as a str.

    Raises:
      RuntimeError: If node op types have not been loaded
        from partition graphs yet.
      ValueError: If the node does not exist in partition graphs.
    """
    if self._node_op_types is None:
      raise RuntimeError(
          "Node op types are not loaded from partition graphs yet.")

    if node_name not in self._node_op_types:
      raise ValueError("Node '%s' does not exist in partition graphs." %
                       node_name)

    return self._node_op_types[node_name]

  def debug_watch_keys(self, node_name):
    """Get all tensor watch keys of given node according to partition graphs.

    Args:
      node_name: Name of the node.

    Returns:
      All debug tensor watch keys, as a list of strings. Returns an empty list
      if the node name does not correspond to any debug watch keys.

    Raises:
      RuntimeError: If debug watch information has not been loaded from
        partition graphs yet.
    """
    if node_name not in self._debug_watches:
      return []

    watch_keys = []
    for watched_slot in self._debug_watches[node_name]:
      debug_ops = self._debug_watches[node_name][watched_slot]
      for debug_op in debug_ops:
        watch_keys.append(
            _get_tensor_watch_key(node_name, watched_slot, debug_op))

    return watch_keys

  def watch_key_to_data(self, debug_watch_key):
    """Get all DebugTensorDatum instances corresponding to a debug watch key.

    Args:
      debug_watch_key: A debug watch key, as a str.

    Returns:
      A list of DebugTensorDatum instances that correspond to the debug watch
      key. If the watch key does not exist, returns an empty list (this
      method does not raise).
    """
    return self._watch_key_to_datum.get(debug_watch_key, [])

  def find(self, predicate, first_n=0):
    """Find dumped tensor data by a certain predicate.

    Args:
      predicate: A callable that takes two input arguments:
        predicate(debug_tensor_datum, tensor),
        where "debug_tensor_datum" is an instance of DebugTensorDatum, which
        carries "metadata", such as the name of the node, the tensor's slot
        index on the node, timestamp, debug op name, etc; and "tensor" is
        the dumped tensor value as a numpy array.
      first_n: Return only the first n dumped tensor data (in time order) for
        which the predicate is True. To return all such data, let first_n be
        <= 0.

    Returns:
      A list of all DebugTensorDatum objects in this DebugDumpDir object for
      which predicate returns True, sorted in ascending order of the
      timestamp.
    """
    matched_data = []
    for datum in self._dump_tensor_data:
      if predicate(datum, datum.get_tensor()):
        matched_data.append(datum)

        if first_n > 0 and len(matched_data) >= first_n:
          break

    return matched_data

  def get_tensor_file_paths(self, node_name, output_slot, debug_op):
    """Get the file paths from a debug-dumped tensor.

    Args:
      node_name: Name of the node that the tensor is produced by.
      output_slot: Output slot index of tensor.
      debug_op: Name of the debug op.

    Returns:
      List of file path(s) loaded. This is a list because each debugged tensor
      may be dumped multiple times.

    Raises:
      ValueError: If the tensor does not exist in the debug dump data.
    """
    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
    if watch_key not in self._watch_key_to_datum:
      raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
                       watch_key)

    return [datum.file_path for datum in self._watch_key_to_datum[watch_key]]

  def get_tensors(self, node_name, output_slot, debug_op):
    """Get the tensor value from for a debug-dumped tensor.

    The tensor may be dumped multiple times in the dump root directory, so a
    list of tensors (numpy arrays) is returned.

    Args:
      node_name: Name of the node that the tensor is produced by.
      output_slot: Output slot index of tensor.
      debug_op: Name of the debug op.

    Returns:
      List of tensor(s) loaded from the tensor dump file(s).

    Raises:
      ValueError: If the tensor does not exist in the debug dump data.
    """
    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
    if watch_key not in self._watch_key_to_datum:
      raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
                       watch_key)

    return [datum.get_tensor() for datum in self._watch_key_to_datum[watch_key]]

  def get_rel_timestamps(self, node_name, output_slot, debug_op):
    """Get the relative timestamp from for a debug-dumped tensor.

    Relative timestamp means (absolute timestamp - t0), t0 being the absolute
    timestamp of the first dumped tensor in the dump root. The tensor may be
    dumped multiple times in the dump root directory, so a list of relative
    timestamps is returned.

    Args:
      node_name: Name of the node that the tensor is produced by.
      output_slot: Output slot index of tensor.
      debug_op: Name of the debug op.

    Returns:
      List of relative timestamps.

    Raises:
      ValueError: If the tensor does not exist in the debug dump data.
    """
    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
    if watch_key not in self._watch_key_to_datum:
      raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
                       watch_key)

    return self._watch_key_to_rel_time[watch_key]
| apache-2.0 |
scholer/cadnano2.5 | cadnano/strand/modscmd.py | 2 | 1922 | from cadnano.proxies.cnproxy import UndoCommand
from cadnano.cntypes import (
DocT,
StrandT
)
class AddModsCommand(UndoCommand):
    """Undoable command that attaches a modification to a strand at an index.

    ``redo`` registers the mod instance on the strand's part and emits the
    added signal; ``undo`` reverses both steps.
    """

    def __init__(self, document: DocT, strand: StrandT, idx: int, mod_id: str):
        super().__init__()
        self._strand = strand
        self._id_num = strand.idNum()
        self._idx = idx
        self._mod_id = mod_id
        self.document = document
    # end def

    def redo(self):
        """Apply: register the mod instance and notify listeners."""
        target = self._strand
        target.part().addModStrandInstance(target, self._idx, self._mod_id)
        target.strandModsAddedSignal.emit(target, self.document,
                                          self._mod_id, self._idx)
    # end def

    def undo(self):
        """Revert: remove the mod instance and notify listeners."""
        target = self._strand
        target.part().removeModStrandInstance(target, self._idx, self._mod_id)
        target.strandModsRemovedSignal.emit(target, self.document,
                                            self._mod_id, self._idx)
    # end def
# end class
class RemoveModsCommand(UndoCommand):
    """Undoable command that detaches a modification from a strand at an index.

    Mirror image of :class:`AddModsCommand`: ``redo`` removes the mod
    instance and ``undo`` restores it.
    """

    def __init__(self, document: DocT, strand: StrandT, idx: int, mod_id: str):
        # Type hints added for consistency with AddModsCommand above.
        super(RemoveModsCommand, self).__init__()
        self._strand = strand
        self._id_num = strand.idNum()
        self._idx = idx
        self._mod_id = mod_id
        self.document = document
    # end def

    def redo(self):
        """Remove the mod instance from the part and notify listeners."""
        # NOTE(review): a stray ``strand.isStaple()`` call whose result was
        # discarded has been removed here; by its name it is a pure query.
        strand = self._strand
        mid = self._mod_id
        part = strand.part()
        idx = self._idx
        part.removeModStrandInstance(strand, idx, mid)
        strand.strandModsRemovedSignal.emit(strand, self.document, mid, idx)
    # end def

    def undo(self):
        """Restore the mod instance on the part and notify listeners."""
        strand = self._strand
        mid = self._mod_id
        part = strand.part()
        idx = self._idx
        part.addModStrandInstance(strand, idx, mid)
        strand.strandModsAddedSignal.emit(strand, self.document, mid, idx)
    # end def
# end class
| mit |
deandunbar/bitwave | hackathon_version/venv/lib/python2.7/site-packages/django/utils/dateformat.py | 115 | 10703 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
from __future__ import unicode_literals
import re
import time
import calendar
import datetime
from django.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.utils import six
from django.utils.timezone import get_default_timezone, is_aware, is_naive
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
    """Base formatter: expands PHP date()-style format strings by
    dispatching each format character to a same-named method."""

    def format(self, formatstr):
        """Expand formatstr; backslash escapes a literal format char."""
        # re_formatchars.split alternates literal text (even indices) with
        # single format characters (odd indices).
        output = []
        for index, token in enumerate(re_formatchars.split(force_text(formatstr))):
            if index % 2:
                output.append(force_text(getattr(self, token)()))
            elif token:
                output.append(re_escaped.sub(r'\1', token))
        return ''.join(output)
class TimeFormat(Formatter):
    """Formatter for the time-related format characters of PHP date() style.

    Accepts datetime.time and datetime.datetime instances; timezone-dependent
    characters (e, O, T, Z, I) only produce output for datetime objects.
    """
    def __init__(self, obj):
        # obj: the datetime.time or datetime.datetime instance to format.
        self.data = obj
        self.timezone = None
        # We only support timezone when formatting datetime objects,
        # not date objects (timezone information not appropriate),
        # or time objects (against established django policy).
        if isinstance(obj, datetime.datetime):
            if is_naive(obj):
                self.timezone = get_default_timezone()
            else:
                self.timezone = obj.tzinfo
    def a(self):
        "'a.m.' or 'p.m.'"
        if self.data.hour > 11:
            return _('p.m.')
        return _('a.m.')
    def A(self):
        "'AM' or 'PM'"
        if self.data.hour > 11:
            return _('PM')
        return _('AM')
    def B(self):
        "Swatch Internet time"
        raise NotImplementedError('may be implemented in a future release')
    def e(self):
        """
        Timezone name.
        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""
        try:
            if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
                # Have to use tzinfo.tzname and not datetime.tzname
                # because datetime.tzname does not expect Unicode
                return self.data.tzinfo.tzname(self.data) or ""
        except NotImplementedError:
            pass
        return ""
    def f(self):
        """
        Time, in 12-hour hours and minutes, with minutes left off if they're
        zero.
        Examples: '1', '1:30', '2:05', '2'
        Proprietary extension.
        """
        if self.data.minute == 0:
            return self.g()
        return '%s:%s' % (self.g(), self.i())
    def g(self):
        "Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
        if self.data.hour == 0:
            return 12
        if self.data.hour > 12:
            return self.data.hour - 12
        return self.data.hour
    def G(self):
        "Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
        return self.data.hour
    def h(self):
        "Hour, 12-hour format; i.e. '01' to '12'"
        return '%02d' % self.g()
    def H(self):
        "Hour, 24-hour format; i.e. '00' to '23'"
        return '%02d' % self.G()
    def i(self):
        "Minutes; i.e. '00' to '59'"
        return '%02d' % self.data.minute
    def O(self):
        """
        Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""
        # Z() returns the UTC offset in seconds; format it as +/-HHMM.
        seconds = self.Z()
        sign = '-' if seconds < 0 else '+'
        seconds = abs(seconds)
        return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
    def P(self):
        """
        Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
        if they're zero and the strings 'midnight' and 'noon' if appropriate.
        Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
        Proprietary extension.
        """
        if self.data.minute == 0 and self.data.hour == 0:
            return _('midnight')
        if self.data.minute == 0 and self.data.hour == 12:
            return _('noon')
        return '%s %s' % (self.f(), self.a())
    def s(self):
        "Seconds; i.e. '00' to '59'"
        return '%02d' % self.data.second
    def T(self):
        """
        Time zone of this machine; e.g. 'EST' or 'MDT'.
        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""
        name = self.timezone.tzname(self.data) if self.timezone else None
        if name is None:
            # Fall back to the numeric offset when the tzinfo has no name.
            name = self.format('O')
        return six.text_type(name)
    def u(self):
        "Microseconds; i.e. '000000' to '999999'"
        return '%06d' % self.data.microsecond
    def Z(self):
        """
        Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
        timezones west of UTC is always negative, and for those east of UTC is
        always positive.
        If timezone information is not available, this method returns
        an empty string.
        """
        # NOTE: returns the empty string (not 0) for naive time objects, but
        # an int otherwise.
        if not self.timezone:
            return ""
        offset = self.timezone.utcoffset(self.data)
        # `offset` is a datetime.timedelta. For negative values (to the west of
        # UTC) only days can be negative (days=-1) and seconds are always
        # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
        # Positive offsets have days=0
        return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
    """Formatter for the date-related format characters of PHP date() style."""

    # Cumulative day count at the start of each month (index 1-12) for a
    # non-leap year; used by z() to compute the day of the year.
    year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]

    def b(self):
        "Month, textual, 3 letters, lowercase; e.g. 'jan'"
        return MONTHS_3[self.data.month]

    def c(self):
        """
        ISO 8601 Format
        Example : '2008-01-02T10:30:00.000123'
        """
        return self.data.isoformat()

    def d(self):
        "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
        return '%02d' % self.data.day

    def D(self):
        "Day of the week, textual, 3 letters; e.g. 'Fri'"
        return WEEKDAYS_ABBR[self.data.weekday()]

    def E(self):
        "Alternative month names as required by some locales. Proprietary extension."
        return MONTHS_ALT[self.data.month]

    def F(self):
        "Month, textual, long; e.g. 'January'"
        return MONTHS[self.data.month]

    def I(self):
        "'1' if Daylight Savings Time, '0' otherwise."
        if self.timezone and self.timezone.dst(self.data):
            return '1'
        else:
            return '0'

    def j(self):
        "Day of the month without leading zeros; i.e. '1' to '31'"
        return self.data.day

    def l(self):
        "Day of the week, textual, long; e.g. 'Friday'"
        return WEEKDAYS[self.data.weekday()]

    def L(self):
        "Boolean for whether it is a leap year; i.e. True or False"
        return calendar.isleap(self.data.year)

    def m(self):
        "Month; i.e. '01' to '12'"
        return '%02d' % self.data.month

    def M(self):
        "Month, textual, 3 letters; e.g. 'Jan'"
        return MONTHS_3[self.data.month].title()

    def n(self):
        "Month without leading zeros; i.e. '1' to '12'"
        return self.data.month

    def N(self):
        "Month abbreviation in Associated Press style. Proprietary extension."
        return MONTHS_AP[self.data.month]

    def o(self):
        "ISO 8601 year number matching the ISO week number (W)"
        return self.data.isocalendar()[0]

    def r(self):
        "RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
        return self.format('D, j M Y H:i:s O')

    def S(self):
        "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
        if self.data.day in (11, 12, 13):  # Special case
            return 'th'
        last = self.data.day % 10
        if last == 1:
            return 'st'
        if last == 2:
            return 'nd'
        if last == 3:
            return 'rd'
        return 'th'

    def t(self):
        "Number of days in the given month; i.e. '28' to '31'"
        return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]

    def U(self):
        "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
        if isinstance(self.data, datetime.datetime) and is_aware(self.data):
            return int(calendar.timegm(self.data.utctimetuple()))
        else:
            return int(time.mktime(self.data.timetuple()))

    def w(self):
        "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
        return (self.data.weekday() + 1) % 7

    def W(self):
        "ISO-8601 week number of year, weeks starting on Monday"
        # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
        week_number = None
        jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
        weekday = self.data.weekday() + 1
        day_of_year = self.z()
        if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
            if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):
                week_number = 53
            else:
                week_number = 52
        else:
            if calendar.isleap(self.data.year):
                i = 366
            else:
                i = 365
            if (i - day_of_year) < (4 - weekday):
                week_number = 1
            else:
                j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
                week_number = j // 7
                if jan1_weekday > 4:
                    week_number -= 1
        return week_number

    def y(self):
        "Year, 2 digits with leading zeros; e.g. '99'"
        # BUGFIX: previously implemented as six.text_type(year)[2:], which
        # returned wrong values for years outside 1000-9999 (e.g. '9' for
        # year 999). Taking year % 100 with zero padding is correct for all
        # years and identical for four-digit years.
        return '%02d' % (self.data.year % 100)

    def Y(self):
        "Year, 4 digits; e.g. '1999'"
        return self.data.year

    def z(self):
        "Day of the year; i.e. '1' to '366'"
        # (Docstring corrected: the computation below is 1-based — Jan 1
        # yields 1, Dec 31 of a leap year yields 366.)
        doy = self.year_days[self.data.month] + self.data.day
        if self.L() and self.data.month > 2:
            doy += 1
        return doy
def format(value, format_string):
    "Convenience function: format value with a DateFormat instance."
    return DateFormat(value).format(format_string)
def time_format(value, format_string):
    "Convenience function: format value with a TimeFormat instance."
    return TimeFormat(value).format(format_string)
| mit |
preghenella/AliPhysics | PWGJE/EMCALJetTasks/Tracks/analysis/base/ComparisonData.py | 41 | 4738 | #**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import GraphicsObject,SinglePanelPlot
from ROOT import TFile
class ComparisonObject(object):
    """
    Base entry type for an object stored inside a comparison data set.

    Subclasses are expected to override GetLegendTitle and GetObjectName.
    """

    def __init__(self, data, style):
        self.__data = data
        self.__style = style

    def GetData(self):
        """Access the underlying data object."""
        return self.__data

    def GetGraphicsObject(self):
        """Bundle the data with its plot style into a GraphicsObject."""
        return GraphicsObject(self.__data, self.__style)

    def GetRootPrimitive(self):
        """Rename the underlying object and return it for writing to file."""
        primitive = self.__data
        primitive.SetName(self.GetObjectName())
        return primitive

    def Draw(self, pad, addToLegend = True):
        """Draw this entry on the given pad, optionally adding it to the legend."""
        graphics = self.GetGraphicsObject()
        pad.DrawGraphicsObject(graphics, addToLegend, self.GetLegendTitle())

    def GetLegendTitle(self):
        """
        Legend label of this entry; to be implemented in inheriting classes.
        """
        return ""

    def GetObjectName(self):
        """
        Object name used for ROOT output; to be implemented in inheriting classes.
        """
        return ""
class ComparisonData(object):
    """
    Collection of comparison entries that are drawn and written together.
    """

    def __init__(self):
        """Start with an empty list of entries."""
        self.__entries = []

    def GetEntries(self):
        """Access the list of stored entries."""
        return self.__entries

    def AddEntry(self, entry):
        """Append a new entry to the collection."""
        self.__entries.append(entry)

    def DrawObjects(self, pad, addToLegend = True):
        """Draw every stored entry on the given pad."""
        for item in self.__entries:
            item.Draw(pad, addToLegend)

    def GetListOfRootObjects(self):
        """
        Get a list of root-primitive representations of all entries.
        """
        return [item.GetRootPrimitive() for item in self.__entries]
class ComparisonPlot(SinglePanelPlot):
    """
    General comparison plot type: one framed pad with all comparison
    objects drawn into it, plus an optional legend.
    """

    def __init__(self):
        """Set up an empty plot without frame, data container or legend."""
        SinglePanelPlot.__init__(self)
        self.__frame = None
        # Data container; to be specified in inheriting classes.
        self._comparisonContainer = None
        self.__legendAttributes = None
        self.__padattributes = {"logx": False, "logy": False, "gridx": False, "gridy": False}

    def SetFrame(self, frame):
        """Define the frame the comparison objects are drawn into."""
        self.__frame = frame

    def SetLegendAttributes(self, xmin, ymin, xmax, ymax):
        """Enable the legend and set its corner coordinates."""
        self.__legendAttributes = {"xmin": xmin, "xmax": xmax, "ymin": ymin, "ymax": ymax}

    def SetPadAttributes(self, logx, logy, gridx, gridy):
        """Configure log-scale and grid settings of the pad."""
        self.__padattributes["logx"] = logx
        self.__padattributes["logy"] = logy
        self.__padattributes["gridx"] = gridx
        self.__padattributes["gridy"] = gridy

    def _Create(self, canvasname, canvastitle):
        """
        Make the plot: open the canvas, apply pad settings, draw the frame,
        all comparison objects and, when configured, the legend.
        """
        self._OpenCanvas(canvasname, canvastitle)
        pad = self._GetFramedPad()
        if self.__padattributes["logx"]:
            pad.GetPad().SetLogx()
        if self.__padattributes["logy"]:
            pad.GetPad().SetLogy()
        # NOTE(review): gridx/gridy are stored but not applied here — confirm
        # whether the framed pad handles them elsewhere.
        pad.DrawFrame(self.__frame)
        doLegend = self.__legendAttributes is not None
        self._comparisonContainer.DrawObjects(pad, doLegend)
        if doLegend:
            pad.CreateLegend(self.__legendAttributes["xmin"], self.__legendAttributes["ymin"],
                             self.__legendAttributes["xmax"], self.__legendAttributes["ymax"])

    def WriteData(self, rootfilename):
        """
        Write out the comparison objects to a new ROOT file.
        """
        outputfile = TFile(rootfilename, "RECREATE")
        for rootprim in self._comparisonContainer.GetListOfRootObjects():
            rootprim.Write()
        outputfile.Close()
sanghinitin/golismero | thirdparty_libs/django/core/files/base.py | 147 | 4653 | from __future__ import unicode_literals
import os
from io import BytesIO, StringIO, UnsupportedOperation
from django.utils.encoding import smart_text
from django.core.files.utils import FileProxyMixin
from django.utils import six
from django.utils.encoding import force_bytes, python_2_unicode_compatible
@python_2_unicode_compatible
class File(FileProxyMixin):
    """
    Wrap a file-like object with a common API: lazily computed size,
    chunked reading, line-by-line iteration and context-manager support.
    """

    # Default chunk size used by chunks()/multiple_chunks(): 64 KiB.
    DEFAULT_CHUNK_SIZE = 64 * 2**10

    def __init__(self, file, name=None):
        self.file = file
        if name is None:
            # Fall back to the underlying file object's name, if it has one.
            name = getattr(file, 'name', None)
        self.name = name
        if hasattr(file, 'mode'):
            self.mode = file.mode

    def __str__(self):
        return smart_text(self.name or '')

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self or "None")

    def __bool__(self):
        return bool(self.name)

    def __nonzero__(self):      # Python 2 compatibility
        return type(self).__bool__(self)

    def __len__(self):
        return self.size

    def _get_size(self):
        # Determine the size once and cache it in self._size. Tries, in
        # order: an explicit `size` attribute, the named file on disk,
        # and finally seeking to the end of the stream.
        if not hasattr(self, '_size'):
            if hasattr(self.file, 'size'):
                self._size = self.file.size
            elif hasattr(self.file, 'name') and os.path.exists(self.file.name):
                self._size = os.path.getsize(self.file.name)
            elif hasattr(self.file, 'tell') and hasattr(self.file, 'seek'):
                pos = self.file.tell()
                self.file.seek(0, os.SEEK_END)
                self._size = self.file.tell()
                # Restore the original stream position.
                self.file.seek(pos)
            else:
                raise AttributeError("Unable to determine the file's size.")
        return self._size

    def _set_size(self, size):
        self._size = size

    size = property(_get_size, _set_size)

    def _get_closed(self):
        return not self.file or self.file.closed
    closed = property(_get_closed)

    def chunks(self, chunk_size=None):
        """
        Read the file and yield chucks of ``chunk_size`` bytes (defaults to
        ``UploadedFile.DEFAULT_CHUNK_SIZE``).
        """
        if not chunk_size:
            chunk_size = self.DEFAULT_CHUNK_SIZE
        try:
            self.seek(0)
        except (AttributeError, UnsupportedOperation):
            # Not all file-like objects are seekable; in that case read
            # from the current position.
            pass
        while True:
            data = self.read(chunk_size)
            if not data:
                break
            yield data

    def multiple_chunks(self, chunk_size=None):
        """
        Returns ``True`` if you can expect multiple chunks.

        NB: If a particular file representation is in memory, subclasses should
        always return ``False`` -- there's no good reason to read from memory in
        chunks.
        """
        if not chunk_size:
            chunk_size = self.DEFAULT_CHUNK_SIZE
        return self.size > chunk_size

    def __iter__(self):
        # Iterate over this file-like object by newlines
        buffer_ = None
        for chunk in self.chunks():
            chunk_buffer = BytesIO(chunk)
            for line in chunk_buffer:
                if buffer_:
                    # A line was split across chunk boundaries; prepend
                    # the carried-over fragment.
                    line = buffer_ + line
                    buffer_ = None
                # If this is the end of a line, yield; otherwise wait for
                # the next round.
                # BUGFIX: use a one-element slice so this also works for
                # bytes on Python 3, where line[-1] is an int and would
                # never compare equal to a one-character string; include
                # both text and bytes newlines for Python 2 compatibility.
                if line[-1:] in ('\n', '\r', b'\n', b'\r'):
                    yield line
                else:
                    buffer_ = line

        if buffer_ is not None:
            # Yield the final line even if it lacks a trailing newline.
            yield buffer_

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    def open(self, mode=None):
        # Rewind if still open; otherwise reopen by name from disk.
        if not self.closed:
            self.seek(0)
        elif self.name and os.path.exists(self.name):
            self.file = open(self.name, mode or self.mode)
        else:
            raise ValueError("The file cannot be reopened.")

    def close(self):
        self.file.close()
@python_2_unicode_compatible
class ContentFile(File):
    """
    A File-like object that takes just raw content, rather than an actual file.
    """

    def __init__(self, content, name=None):
        # Pick an in-memory stream matching the content type: text stays
        # text on Python 3, everything else is handled as bytes.
        if not six.PY3:
            content = force_bytes(content)
            stream_class = BytesIO
        elif isinstance(content, six.text_type):
            stream_class = StringIO
        else:
            stream_class = BytesIO
        super(ContentFile, self).__init__(stream_class(content), name=name)
        self.size = len(content)

    def __str__(self):
        return 'Raw content'

    def __bool__(self):
        # Always truthy, even without a name: the content itself exists.
        return True

    def __nonzero__(self):      # Python 2 compatibility
        return type(self).__bool__(self)

    def open(self, mode=None):
        # Content lives in memory; "opening" just rewinds the stream.
        self.seek(0)

    def close(self):
        # Nothing to release; keep the in-memory stream usable.
        pass
| gpl-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/query_troubleshooting_parameters_py3.py | 9 | 1239 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class QueryTroubleshootingParameters(Model):
    """Parameters that define the resource to query the troubleshooting result.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The target resource ID to query the
     troubleshooting result.
    :type target_resource_id: str
    """

    # msrest validation rules: target_resource_id must be supplied.
    _validation = {
        'target_resource_id': {'required': True},
    }

    # Maps the Python attribute onto its wire-format (JSON) key and type.
    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
    }

    def __init__(self, *, target_resource_id: str, **kwargs) -> None:
        super(QueryTroubleshootingParameters, self).__init__(**kwargs)
        self.target_resource_id = target_resource_id
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.