repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
fusion809/fusion809.github.io-old | vendor/bundle/ruby/2.2.0/gems/pygments.rb-0.6.3/vendor/pygments-main/pygments/formatters/terminal.py | 50 | 5401 | # -*- coding: utf-8 -*-
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
# Color names are those understood by pygments.console.ansiformat;
# "_name_" / "*name*" / "**" carry that helper's emphasis markup
# (NOTE(review): exact markup semantics live in pygments.console — confirm).
TERMINAL_COLORS = {
    # token -> (color on light background, color on dark background)
    Token:              ('',            ''),

    Whitespace:         ('lightgray',   'darkgray'),
    Comment:            ('lightgray',   'darkgray'),
    Comment.Preproc:    ('teal',        'turquoise'),
    Keyword:            ('darkblue',    'blue'),
    Keyword.Type:       ('teal',        'turquoise'),
    Operator.Word:      ('purple',      'fuchsia'),
    Name.Builtin:       ('teal',        'turquoise'),
    Name.Function:      ('darkgreen',   'green'),
    Name.Namespace:     ('_teal_',      '_turquoise_'),
    Name.Class:         ('_darkgreen_', '_green_'),
    Name.Exception:     ('teal',        'turquoise'),
    Name.Decorator:     ('darkgray',    'lightgray'),
    Name.Variable:      ('darkred',     'red'),
    Name.Constant:      ('darkred',     'red'),
    Name.Attribute:     ('teal',        'turquoise'),
    Name.Tag:           ('blue',        'blue'),
    String:             ('brown',       'brown'),
    Number:             ('darkblue',    'blue'),

    Generic.Deleted:    ('red',         'red'),
    Generic.Inserted:   ('darkgreen',   'green'),
    Generic.Heading:    ('**',          '**'),
    Generic.Subheading: ('*purple*',    '*fuchsia*'),
    Generic.Error:      ('red',         'red'),

    Error:              ('_red_',       '_red_'),
}
class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # darkbg doubles as an index into the (lightbg, darkbg) tuples of
        # the colorscheme: False selects element 0, True selects element 1.
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def format(self, tokensource, outfile):
        # hack: if the output is a terminal and has an encoding set,
        # use that to avoid unicode encode problems (Python 2 only)
        if not self.encoding and hasattr(outfile, "encoding") and \
           hasattr(outfile, "isatty") and outfile.isatty() and \
           sys.version_info < (3,):
            self.encoding = outfile.encoding
        return Formatter.format(self, tokensource, outfile)

    def _write_lineno(self, outfile):
        # Emit a newline followed by the next zero-padded line number.
        self._lineno += 1
        outfile.write("\n%04d: " % self._lineno)

    def _format_unencoded_with_lineno(self, tokensource, outfile):
        """Like format_unencoded, but prefix each output line with its number."""
        self._write_lineno(outfile)

        for ttype, value in tokensource:
            if value.endswith("\n"):
                self._write_lineno(outfile)
                value = value[:-1]
            # Walk up the token hierarchy until a mapped parent is found;
            # Token itself always maps, so the loop terminates.
            color = self.colorscheme.get(ttype)
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                color = color[self.darkbg]
                spl = value.split('\n')
                for line in spl[:-1]:
                    self._write_lineno(outfile)
                    if line:
                        # BUGFIX: write the whole line.  The previous code
                        # wrote ``line[:-1]`` here, silently dropping the
                        # last character of every intermediate line;
                        # format_unencoded writes the full ``line`` for the
                        # identical case.
                        outfile.write(ansiformat(color, line))
                if spl[-1]:
                    outfile.write(ansiformat(color, spl[-1]))
            else:
                outfile.write(value)

        outfile.write("\n")

    def format_unencoded(self, tokensource, outfile):
        if self.linenos:
            self._format_unencoded_with_lineno(tokensource, outfile)
            return

        for ttype, value in tokensource:
            color = self.colorscheme.get(ttype)
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                color = color[self.darkbg]
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write(ansiformat(color, line))
                    # newlines are written outside the color sequence so
                    # that paging the output works correctly
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write(ansiformat(color, spl[-1]))
            else:
                outfile.write(value)
| gpl-3.0 |
puyilio/Application-for-PyBoard | 3d.py | 1 | 3205 | # -*- coding: utf-8 -*-
"""
Demonstrate use of GLLinePlotItem to draw cross-sections of a surface.
"""
## Add path to library (just for examples; you do not need this)
#import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
import scipy.ndimage as ndi
from time import *
# Shared state: rotation bookkeeping for the three axes.
xyz1 = int(0)
xyz2 = int(0)
xyz3 = int(0)
# counterN tracks whether the surface is currently rotated about axis N
counter1 = 0
counter2 = 0
counter3 = 0
phase = 0

# Qt application and OpenGL view
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 40
w.show()
w.setWindowTitle('pyqtgraph example: GLLinePlotItem')

# grid planes on the x, y and z walls of the scene
gx = gl.GLGridItem()
gx.rotate(90, 0, 1, 0)
gx.translate(-10, 0, 0)
w.addItem(gx)
gy = gl.GLGridItem()
gy.rotate(90, 1, 0, 0)
gy.translate(0, -10, 0)
w.addItem(gy)
gz = gl.GLGridItem()
gz.translate(0, 0, -10)
w.addItem(gz)

## Manually specified colors
#z = ndi.gaussian_filter(np.random.normal(size=(100,100)), (1,1))
# flat 21x21 surface; x/y are 100-point sample grids
# NOTE(review): z is 21x21 while x/y/colors assume 100 samples — presumably
# only z is consumed by GLSurfacePlotItem below; confirm the mismatch.
z = np.ones((21, 21))
x = np.linspace(-10, 10, 100)
y = np.linspace(-10, 10, 100)
counter1 = int(0)
print counter1
print 'x', x
print 'y', y
print 'z', z

# radial cosine colour map (RGBA), flattened for GLSurfacePlotItem
colors = np.ones((100,100,4), dtype=float)
colors[...,0] = np.clip(np.cos(((x.reshape(100,1) ** 2) + (y.reshape(1,100) ** 2)) ** 0.5), 0, 1)
colors[...,1] = colors[...,0]
p3 = gl.GLSurfacePlotItem(z=z, colors=colors.reshape(100*100,4), shader='shaded', smooth=False)
#p3.scale(-1 , 1, 1)
p3.translate(-10, -10, 0)
w.addItem(p3)
def update():
    """Timer callback: read the newest sensor samples from the ``valuesN.npy``
    dump files and rotate the surface plot when tilt thresholds are crossed.

    The counters ensure each 90-degree rotation is applied only once until
    the reading returns to the neutral zone.
    """
    global p3, z, counter1, counter2, counter3
    print 'update' '\n'
    # NOTE(review): a blocking sleep inside a Qt timer callback stalls the GUI
    sleep(0.4)
    values1 = np.load('values1.npy')
    values2 = np.load('values2.npy')
    values3 = np.load('values3.npy')
    values4 = np.load('values4.npy')
    # each file holds a history of readings; keep only the newest sample
    values1 = values1[-1:]
    values2 = values2[-1:]
    values3 = values3[-1:]
    values4 = values4[-1:]
    values1 = values1[0]
    values2 = values2[0]
    values3 = values3[0]
    values4 = values4[0]
    print int(values1)
    print int(values2)
    print int(values3)
    #1 -- first axis: rotate when tilted, rotate back near neutral
    # (``&`` is a bitwise AND; equivalent to ``and`` for these bool operands)
    if (int(values1) > 85) & (counter1 == 0):
        counter1 += 1
        print 'ROTATEEE'
        p3.rotate(90,0,10,0)    # po 45 u levo centar y = 10
    if ((abs(int(values1)) < 15)) & (counter1 == 1):
        print 'ovde 0'
        p3.rotate(-90,0,10,0)   # po 45 u desno centar y = 10
        counter1 -= 1
    if (int(values1) < -73) & (counter1 == 0):
        counter1 += 1
        p3.rotate(-90,0,10,0)   # po 45 u desno centar y = 10
    #2 -- second axis, same scheme around the x axis
    if (int(values2) > 75) & (counter2 == 0):
        counter2 += 1
        print 'ROTATEEE'
        p3.rotate(90,10,0,0)    # po 45 u levo centar y = 10
    if ((abs(int(values2)) < 15)) & (counter2 == 1):
        print 'ovde 0'
        p3.rotate(-90,10,0,0)   # po 45 u levo centar y = 10
        counter2 -= 1
    if (int(values2) < -75) & (counter2 == 0):
        counter2 += 1
        p3.rotate(-90,10,0,0)   # po 45 u levo centar y = 10
# drive update() from a Qt timer (30 ms interval; update() itself sleeps 0.4 s,
# so the effective refresh rate is governed by the sleep, not the timer)
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(30)
print 'startovao'

## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| gpl-3.0 |
fifengine/fifengine | engine/python/fife/extensions/pychan/widgets/spacer.py | 1 | 3729 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import absolute_import
from fife import fifechan
from .widget import Widget
from fife.extensions.pychan.attrs import PointAttr
class Spacer(Widget):
    """
    A spacer represents expandable or fixed 'whitespace' in the GUI.

    In a XML file you can get this by adding a <Spacer /> inside a VBox or
    HBox element (Windows implicitly are VBox elements).
    """

    # Spacers grow horizontally but not vertically by default, and carry
    # no margins or padding of their own.
    DEFAULT_HEXPAND = True
    DEFAULT_VEXPAND = False
    DEFAULT_MARGINS = 0, 0
    DEFAULT_PADDING = 0

    def __init__(self,
                 parent = None,
                 name = None,
                 size = None,
                 min_size = None,
                 max_size = None,
                 fixed_size = None,
                 margins = None,
                 padding = None,
                 helptext = None,
                 position = None,
                 style = None,
                 hexpand = None,
                 vexpand = None,
                 font = None,
                 base_color = None,
                 background_color = None,
                 foreground_color = None,
                 selection_color = None,
                 border_color = None,
                 outline_color = None,
                 border_size = None,
                 outline_size = None,
                 position_technique = None,
                 is_focusable = None,
                 comment = None):
        """Create a spacer widget.

        All parameters are forwarded unchanged to ``Widget.__init__``; see
        the Widget base class for their meaning.
        """
        # The underlying fifechan widget must exist before Widget.__init__
        # runs, because the base class configures ``real_widget`` from the
        # parameters it receives.
        self.real_widget = fifechan.Spacer()
        super(Spacer,self).__init__(parent=parent,
                                    name=name,
                                    size=size,
                                    min_size=min_size,
                                    max_size=max_size,
                                    fixed_size=fixed_size,
                                    margins=margins,
                                    padding=padding,
                                    helptext=helptext,
                                    position=position,
                                    style=style,
                                    hexpand=hexpand,
                                    vexpand=vexpand,
                                    font=font,
                                    base_color=base_color,
                                    background_color=background_color,
                                    foreground_color=foreground_color,
                                    selection_color=selection_color,
                                    border_color=border_color,
                                    outline_color=outline_color,
                                    border_size=border_size,
                                    outline_size=outline_size,
                                    position_technique=position_technique,
                                    is_focusable=is_focusable,
                                    comment=comment)

        # hexpand/vexpand may legitimately be omitted (None); fall back to
        # the spacer-specific class defaults in that case.
        if hexpand is not None:
            self.hexpand = hexpand
        else:
            self.hexpand = self.DEFAULT_HEXPAND
        if vexpand is not None:
            self.vexpand = vexpand
        else:
            self.vexpand = self.DEFAULT_VEXPAND

    def clone(self, prefix):
        """Return a copy of this spacer whose name carries *prefix*.

        The parent is intentionally not cloned (passed as ``None``); the
        remaining constructor arguments are copied positionally in the same
        order as ``__init__`` declares them.
        """
        spacerClone = Spacer(None,
                             self._createNameWithPrefix(prefix),
                             self.size,
                             self.min_size,
                             self.max_size,
                             self.fixed_size,
                             self.margins,
                             self.padding,
                             self.helptext,
                             self.position,
                             self.style,
                             self.hexpand,
                             self.vexpand,
                             self.font,
                             self.base_color,
                             self.background_color,
                             self.foreground_color,
                             self.selection_color,
                             self.border_color,
                             self.outline_color,
                             self.border_size,
                             self.outline_size,
                             self.position_technique,
                             self.is_focusable,
                             self.comment)
        return spacerClone
| lgpl-2.1 |
adlius/osf.io | tests/test_subjects.py | 12 | 8026 | # -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from nose.tools import * # noqa: F403 (PEP8 asserts)
from osf.exceptions import ValidationValueError
from tests.base import OsfTestCase
from osf_tests.factories import SubjectFactory, PreprintFactory, PreprintProviderFactory
from osf.models.validators import validate_subject_hierarchy
class TestSubjectTreeValidation(OsfTestCase):
    """Tests for Subject.hierarchy properties and validate_subject_hierarchy.

    setUp builds three-, two- and one-level subject trees plus an unrelated
    "outside" tree used to construct invalid hierarchies.
    """

    def setUp(self):
        super(TestSubjectTreeValidation, self).setUp()
        # roots of four independent trees
        self.root_subject = SubjectFactory()
        self.one_level_root = SubjectFactory()
        self.two_level_root = SubjectFactory()
        self.outside_root = SubjectFactory()

        self.root_subject.save()
        self.outside_root.save()
        self.two_level_root.save()
        self.one_level_root.save()

        # second level
        self.parent_subj_0 = SubjectFactory(parent=self.root_subject)
        self.parent_subj_1 = SubjectFactory(parent=self.root_subject)
        self.two_level_parent = SubjectFactory(parent=self.two_level_root)
        self.outside_parent = SubjectFactory(parent=self.outside_root)

        self.parent_subj_0.save()
        self.parent_subj_1.save()
        self.outside_parent.save()
        self.two_level_parent.save()

        # third level (leaves)
        self.child_subj_00 = SubjectFactory(parent=self.parent_subj_0)
        self.child_subj_01 = SubjectFactory(parent=self.parent_subj_0)
        self.child_subj_10 = SubjectFactory(parent=self.parent_subj_1)
        self.child_subj_11 = SubjectFactory(parent=self.parent_subj_1)
        self.outside_child = SubjectFactory(parent=self.outside_parent)

        self.child_subj_00.save()
        self.child_subj_01.save()
        self.child_subj_10.save()
        self.child_subj_11.save()
        self.outside_child.save()

        # valid inputs: complete or truncated root-to-leaf id chains
        self.valid_full_hierarchy = [self.root_subject._id, self.parent_subj_0._id, self.child_subj_00._id]
        self.valid_two_level_hierarchy = [self.two_level_root._id, self.two_level_parent._id]
        self.valid_one_level_hierarchy = [self.one_level_root._id]
        self.valid_partial_hierarchy = [self.root_subject._id, self.parent_subj_1._id]
        self.valid_root = [self.root_subject._id]

        # invalid inputs: missing levels, mixed trees, or unknown ids
        self.no_root = [self.parent_subj_0._id, self.child_subj_00._id]
        self.no_parent = [self.root_subject._id, self.child_subj_00._id]
        self.invalid_child_leaf = [self.root_subject._id, self.parent_subj_0._id, self.child_subj_10._id]
        self.invalid_parent_leaf = [self.root_subject._id, self.outside_parent._id, self.child_subj_00._id]
        self.invalid_root_leaf = [self.outside_root._id, self.parent_subj_0._id, self.child_subj_00._id]
        self.invalid_ids = ['notarealsubjectid', 'thisisalsoafakeid']

    def test_hierarchy_property(self):
        # renamed from the misspelled ``test_hiarachy_property``
        assert_equal(self.child_subj_00.hierarchy, [self.root_subject._id, self.parent_subj_0._id, self.child_subj_00._id])
        assert_equal(self.two_level_parent.hierarchy, [self.two_level_root._id, self.two_level_parent._id])
        assert_equal(self.one_level_root.hierarchy, [self.one_level_root._id])
        assert_equal(self.parent_subj_1.hierarchy, [self.root_subject._id, self.parent_subj_1._id])
        assert_equal(self.root_subject.hierarchy, [self.root_subject._id])

    def test_object_hierarchy_property(self):
        assert_equal(self.child_subj_00.object_hierarchy, [self.root_subject, self.parent_subj_0, self.child_subj_00])
        assert_equal(self.two_level_parent.object_hierarchy, [self.two_level_root, self.two_level_parent])
        assert_equal(self.one_level_root.object_hierarchy, [self.one_level_root])
        assert_equal(self.parent_subj_1.object_hierarchy, [self.root_subject, self.parent_subj_1])
        assert_equal(self.root_subject.object_hierarchy, [self.root_subject])

    # validate_subject_hierarchy returns None on success and raises
    # ValidationValueError otherwise.

    def test_validation_full_hierarchy(self):
        assert_equal(validate_subject_hierarchy(self.valid_full_hierarchy), None)

    def test_validation_two_level_hierarchy(self):
        assert_equal(validate_subject_hierarchy(self.valid_two_level_hierarchy), None)

    def test_validation_one_level_hierarchy(self):
        assert_equal(validate_subject_hierarchy(self.valid_one_level_hierarchy), None)

    def test_validation_partial_hierarchy(self):
        assert_equal(validate_subject_hierarchy(self.valid_partial_hierarchy), None)

    def test_validation_root_only(self):
        assert_equal(validate_subject_hierarchy(self.valid_root), None)

    def test_invalidation_no_root(self):
        with assert_raises(ValidationValueError) as e:
            validate_subject_hierarchy(self.no_root)
        assert_in('Unable to find root', e.exception.message)

    def test_invalidation_no_parent(self):
        with assert_raises(ValidationValueError) as e:
            validate_subject_hierarchy(self.no_parent)
        assert_in('Invalid subject hierarchy', e.exception.message)

    def test_invalidation_invalid_child_leaf(self):
        with assert_raises(ValidationValueError) as e:
            validate_subject_hierarchy(self.invalid_child_leaf)
        assert_in('Invalid subject hierarchy', e.exception.message)

    def test_invalidation_invalid_parent_leaf(self):
        with assert_raises(ValidationValueError) as e:
            validate_subject_hierarchy(self.invalid_parent_leaf)
        assert_in('Invalid subject hierarchy', e.exception.message)

    def test_invalidation_invalid_root_leaf(self):
        with assert_raises(ValidationValueError) as e:
            validate_subject_hierarchy(self.invalid_root_leaf)
        assert_in('Invalid subject hierarchy', e.exception.message)

    def test_invalidation_invalid_ids(self):
        with assert_raises(ValidationValueError) as e:
            validate_subject_hierarchy(self.invalid_ids)
        assert_in('could not be found', e.exception.message)
class TestSubjectEditValidation(OsfTestCase):
    """Validation rules around editing or deleting subjects that are in use."""

    def setUp(self):
        super(TestSubjectEditValidation, self).setUp()
        self.subject = SubjectFactory()

    def test_edit_unused_subject(self):
        # a subject no preprint references may be renamed freely
        self.subject.text = 'asdfg'
        self.subject.save()

    def test_edit_used_subject(self):
        # Attach the subject to a preprint; only the factory's side effect
        # matters, so the created instance is deliberately not bound
        # (the previous unused ``preprint = ...`` binding was dead, F841).
        PreprintFactory(subjects=[[self.subject._id]])
        self.subject.text = 'asdfg'
        with assert_raises(ValidationError):
            self.subject.save()

    def test_delete_unused_subject(self):
        self.subject.delete()

    def test_delete_used_subject(self):
        PreprintFactory(subjects=[[self.subject._id]])
        with assert_raises(ValidationError):
            self.subject.delete()

    def test_max_highlighted_count(self):
        # fill the provider's highlighted quota; one more must fail
        for _ in range(10):
            SubjectFactory(provider=self.subject.provider, highlighted=True)
        with assert_raises(ValidationError):
            self.subject.highlighted = True
            self.subject.save()
class TestSubjectProperties(OsfTestCase):
    """Tests for Subject.bepress_text and Subject.path."""

    def setUp(self):
        super(TestSubjectProperties, self).setUp()
        # 'osf' provider hosts the canonical BePress taxonomy; 'asdf' maps
        # its own subjects onto BePress ones via ``bepress_subject``.
        self.osf_provider = PreprintProviderFactory(_id='osf', share_title='bepress')
        self.asdf_provider = PreprintProviderFactory(_id='asdf')
        self.bepress_subj = SubjectFactory(text='BePress Text', provider=self.osf_provider)
        self.other_subj = SubjectFactory(text='Other Text', bepress_subject=self.bepress_subj, provider=self.asdf_provider)

    def test_bepress_text(self):
        # both the mapped subject and the BePress subject itself resolve to
        # the BePress taxonomy text
        assert self.other_subj.bepress_text == 'BePress Text'
        assert self.bepress_subj.bepress_text == 'BePress Text'

    def test_path(self):
        # path is "<provider id>|<root text>|...|<leaf text>"
        self.bepress_child = SubjectFactory(text='BePress Child', provider=self.osf_provider, parent=self.bepress_subj)
        self.other_child = SubjectFactory(text='Other Child', bepress_subject=self.bepress_subj, provider=self.asdf_provider, parent=self.other_subj)
        assert self.bepress_subj.path == 'bepress|BePress Text'
        assert self.bepress_child.path == 'bepress|BePress Text|BePress Child'
        assert self.other_subj.path == 'asdf|Other Text'
        assert self.other_child.path == 'asdf|Other Text|Other Child'
| apache-2.0 |
rhatdan/selinux | policycoreutils/sepolicy/sepolicy/templates/var_cache.py | 5 | 4152 | # Copyright (C) 2007-2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# policygentool is a tool for the initial generation of SELinux policy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
#
########################### cache Template File #############################
########################### Type Enforcement File #############################
# Type-enforcement declarations: introduce the application's cache type and
# mark it as a file type.  TEMPLATETYPE is substituted by sepolicy generate.
te_types="""
type TEMPLATETYPE_cache_t;
files_type(TEMPLATETYPE_cache_t)
"""

# Rules letting the main domain fully manage its cache dirs/files/symlinks
# and create them under /var with the cache label.
te_rules="""
manage_dirs_pattern(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
manage_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
manage_lnk_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
files_var_filetrans(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, { dir file lnk_file })
"""

# Extra rules when the domain exposes unix stream sockets in its cache area.
te_stream_rules="""\
manage_sock_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
files_var_filetrans(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, sock_file)
"""
########################### Interface File #############################
if_rules="""
########################################
## <summary>
## Search TEMPLATETYPE cache directories.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_search_cache',`
gen_require(`
type TEMPLATETYPE_cache_t;
')
allow $1 TEMPLATETYPE_cache_t:dir search_dir_perms;
files_search_var($1)
')
########################################
## <summary>
## Read TEMPLATETYPE cache files.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_read_cache_files',`
gen_require(`
type TEMPLATETYPE_cache_t;
')
files_search_var($1)
read_files_pattern($1, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
')
########################################
## <summary>
## Create, read, write, and delete
## TEMPLATETYPE cache files.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_manage_cache_files',`
gen_require(`
type TEMPLATETYPE_cache_t;
')
files_search_var($1)
manage_files_pattern($1, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
')
########################################
## <summary>
## Manage TEMPLATETYPE cache dirs.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_manage_cache_dirs',`
gen_require(`
type TEMPLATETYPE_cache_t;
')
files_search_var($1)
manage_dirs_pattern($1, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
')
"""
if_stream_rules="""
########################################
## <summary>
## Connect to TEMPLATETYPE over a unix stream socket.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_stream_connect',`
gen_require(`
type TEMPLATETYPE_t, TEMPLATETYPE_cache_t;
')
stream_connect_pattern($1, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
')
"""
if_admin_types="""
type TEMPLATETYPE_cache_t;"""
if_admin_rules="""
files_search_var($1)
admin_pattern($1, TEMPLATETYPE_cache_t)
"""
########################### File Context ##################################
fc_file="""\
FILENAME -- gen_context(system_u:object_r:TEMPLATETYPE_cache_t,s0)
"""
fc_dir="""\
FILENAME(/.*)? gen_context(system_u:object_r:TEMPLATETYPE_cache_t,s0)
"""
| gpl-2.0 |
idoru9/serverpi | serverpi/users/migrations/0001_initial.py | 25 | 2693 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
    """Initial migration: create the project's custom ``User`` model.

    The fields mirror Django's stock ``auth.User`` so the custom model is a
    drop-in replacement.
    """

    # auth's initial migration must exist first: the Group/Permission
    # many-to-many fields below target its models.
    dependencies = [
        ('auth', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            bases=(models.Model,),
        )
    ]
rdeheele/odoo | addons/point_of_sale/wizard/pos_discount.py | 382 | 2266 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class pos_discount(osv.osv_memory):
    """Transient wizard that applies one global discount percentage to every
    line of the currently selected point-of-sale order(s)."""
    _name = 'pos.discount'
    _description = 'Add a Global Discount'

    _columns = {
        'discount': fields.float('Discount (%)', required=True, digits=(16,2)),
    }
    _defaults = {
        'discount': 5,
    }

    def apply_discount(self, cr, uid, ids, context=None):
        """Write the wizard's discount onto all lines of the active order(s).

        :param cr: database cursor
        :param uid: id of the user currently logged in
        :param ids: ids of this wizard record (only the first is used)
        :param context: standard context dictionary (reads ``active_id``)
        :return: an empty dict (closes the wizard)
        """
        if context is None:
            context = {}
        order_model = self.pool.get('pos.order')
        line_model = self.pool.get('pos.order.line')
        wizard = self.browse(cr, uid, ids[0], context=context)
        # active_id may be a single id; normalize to a list for browse()
        active_ids = context and context.get('active_id', False)
        if isinstance(active_ids, (int, long)):
            active_ids = [active_ids]
        for order in order_model.browse(cr, uid, active_ids, context=context):
            line_ids = [line.id for line in order.lines]
            line_model.write(cr, uid, line_ids,
                             {'discount': wizard.discount}, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
h4ck3rm1k3/ansible | lib/ansible/runner/lookup_plugins/together.py | 174 | 2135 | # (c) 2013, Bradley Young <young.bradley@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.utils as utils
from ansible.utils import safe_eval
import ansible.errors as errors
from itertools import izip_longest
def flatten(terms):
    """Flatten one level: splice list/tuple items into the result, append
    everything else unchanged."""
    flat = []
    for item in terms:
        if isinstance(item, (list, tuple)):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
class LookupModule(object):
    """
    Transpose a list of arrays:
        [1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
    Replace any empty spots in 2nd array with None:
        [1, 2], [3] -> [1, 3], [2, None]
    """

    def __init__(self, basedir=None, **kwargs):
        # basedir is supplied by the lookup runner; extra kwargs are ignored
        # for interface compatibility with other lookup plugins
        self.basedir = basedir

    def __lookup_injects(self, terms, inject):
        """Resolve nested lookup-plugin references inside each term list."""
        results = []
        for x in terms:
            intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
            results.append(intermediate)
        return results

    def run(self, terms, inject=None, **kwargs):
        # this code is common with 'items.py' consider moving to utils if we need it again
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        terms = self.__lookup_injects(terms, inject)

        my_list = terms[:]
        if len(my_list) == 0:
            raise errors.AnsibleError("with_together requires at least one element in each list")
        # izip_longest pads the shorter lists with None; flatten splices any
        # nested list/tuple terms into each transposed row
        return [flatten(x) for x in izip_longest(*my_list, fillvalue=None)]
| gpl-3.0 |
dfalt974/SickRage | lib/sqlalchemy/dialects/mysql/types.py | 8 | 25137 | # mysql/types.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import datetime
from ... import exc, util
from ... import types as sqltypes
class _NumericType(object):
    """Base for MySQL numeric types.

    This is the base both for NUMERIC as well as INTEGER, hence
    it's a mixin.
    """

    def __init__(self, unsigned=False, zerofill=False, **kw):
        # MySQL-specific column modifiers; remaining keywords are passed to
        # the next class in the MRO (cooperative multiple inheritance).
        self.unsigned = unsigned
        self.zerofill = zerofill
        super(_NumericType, self).__init__(**kw)

    def __repr__(self):
        return util.generic_repr(self,
                                 to_inspect=[_NumericType, sqltypes.Numeric])
class _FloatType(_NumericType, sqltypes.Float):
    """Mixin base for MySQL floating-point types (REAL, DOUBLE, FLOAT)."""

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        # REAL and DOUBLE render "(precision, scale)" as a pair in DDL, so
        # specifying exactly one of the two cannot produce valid SQL.
        if isinstance(self, (REAL, DOUBLE)) and \
            (
                (precision is None and scale is not None) or
                (precision is not None and scale is None)
            ):
            raise exc.ArgumentError(
                "You must specify both precision and scale or omit "
                "both altogether.")
        super(_FloatType, self).__init__(
            precision=precision, asdecimal=asdecimal, **kw)
        # scale is kept locally; sqltypes.Float only accepts precision
        self.scale = scale

    def __repr__(self):
        return util.generic_repr(self, to_inspect=[_FloatType,
                                                   _NumericType,
                                                   sqltypes.Float])
class _IntegerType(_NumericType, sqltypes.Integer):
    """Mixin base for MySQL integer types carrying a display width."""

    def __init__(self, display_width=None, **kw):
        # display_width renders as e.g. INT(11); it does not affect storage
        self.display_width = display_width
        super(_IntegerType, self).__init__(**kw)

    def __repr__(self):
        return util.generic_repr(self, to_inspect=[_IntegerType,
                                                   _NumericType,
                                                   sqltypes.Integer])
class _StringType(sqltypes.String):
    """Base for MySQL string types.

    Carries the MySQL-specific character set / collation / flag keywords
    shared by CHAR, VARCHAR, TEXT etc.  The ``ascii``/``unicode`` parameter
    names shadow builtins but are part of the public dialect interface.
    """

    def __init__(self, charset=None, collation=None,
                 ascii=False, binary=False, unicode=False,
                 national=False, **kw):
        self.charset = charset

        # allow collate= or collation=
        kw.setdefault('collation', kw.pop('collate', collation))

        self.ascii = ascii
        self.unicode = unicode
        self.binary = binary
        self.national = national
        super(_StringType, self).__init__(**kw)

    def __repr__(self):
        return util.generic_repr(self,
                                 to_inspect=[_StringType, sqltypes.String])
class _MatchType(sqltypes.Float, sqltypes.MatchType):
    """Return type of MySQL MATCH ... AGAINST: a float relevance score that
    is also truthy, hence the dual Float/MatchType bases."""

    def __init__(self, **kw):
        # TODO: float arguments?
        # kw is deliberately ignored; both bases are initialized with their
        # defaults (non-cooperative init on purpose).
        sqltypes.Float.__init__(self)
        sqltypes.MatchType.__init__(self)
class NUMERIC(_NumericType, sqltypes.NUMERIC):
    """MySQL NUMERIC type."""

    __visit_name__ = 'NUMERIC'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a NUMERIC.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not effect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(NUMERIC, self).__init__(precision=precision,
                                      scale=scale, asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
    """MySQL DECIMAL type."""

    __visit_name__ = 'DECIMAL'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DECIMAL.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not effect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(DECIMAL, self).__init__(precision=precision, scale=scale,
                                      asdecimal=asdecimal, **kw)
class DOUBLE(_FloatType):
    """MySQL DOUBLE type."""

    __visit_name__ = 'DOUBLE'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DOUBLE.

        .. note::

            The :class:`.DOUBLE` type by default converts from float
            to Decimal, using a truncation that defaults to 10 digits.
            Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
            to change this scale, or ``asdecimal=False`` to return values
            directly as Python floating points.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(DOUBLE, self).__init__(precision=precision, scale=scale,
                                     asdecimal=asdecimal, **kw)
class REAL(_FloatType, sqltypes.REAL):
    """MySQL REAL type."""

    __visit_name__ = 'REAL'

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a REAL.

        .. note::

            The :class:`.REAL` type by default converts from float
            to Decimal, using a truncation that defaults to 10 digits.
            Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
            to change this scale, or ``asdecimal=False`` to return values
            directly as Python floating points.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(REAL, self).__init__(precision=precision, scale=scale,
                                   asdecimal=asdecimal, **kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
    """MySQL FLOAT type."""

    __visit_name__ = 'FLOAT'

    def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
        """Construct a FLOAT.

        :param precision: Total digits in this number.  If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(FLOAT, self).__init__(precision=precision, scale=scale,
                                    asdecimal=asdecimal, **kw)

    def bind_processor(self, dialect):
        # Returning None disables bind-parameter conversion: Python floats
        # are passed through to the DBAPI unchanged.
        return None
class INTEGER(_IntegerType, sqltypes.INTEGER):
    """MySQL INTEGER type."""

    __visit_name__ = 'INTEGER'

    def __init__(self, display_width=None, **kw):
        """Construct an INTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(INTEGER, self).__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
    """MySQL BIGINTEGER type."""

    __visit_name__ = 'BIGINT'

    def __init__(self, display_width=None, **kw):
        """Construct a BIGINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(BIGINT, self).__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
    """MySQL MEDIUMINTEGER type."""

    __visit_name__ = 'MEDIUMINT'

    def __init__(self, display_width=None, **kw):
        """Construct a MEDIUMINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
    """MySQL TINYINT type."""

    __visit_name__ = 'TINYINT'

    def __init__(self, display_width=None, **kw):
        """Construct a TINYINT.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(TINYINT, self).__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
    """MySQL SMALLINTEGER type."""

    __visit_name__ = 'SMALLINT'

    def __init__(self, display_width=None, **kw):
        """Construct a SMALLINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super(SMALLINT, self).__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
    """MySQL BIT type.

    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
    for MyISAM, MEMORY, InnoDB and BDB.  For older versions, use a
    MSTinyInteger() type.

    """

    __visit_name__ = 'BIT'

    def __init__(self, length=None):
        """Construct a BIT.

        :param length: Optional, number of bits.

        """
        self.length = length

    def result_processor(self, dialect, coltype):
        """Convert a MySQL's 64 bit, variable length binary string to a long.

        TODO: this is MySQL-db, pyodbc specific.  OurSQL and mysqlconnector
        already do this, so this logic should be moved to those dialects.

        """
        def process(value):
            if value is not None:
                # Fold the big-endian byte string into a single integer,
                # most significant byte first.
                v = 0
                for i in value:
                    if not isinstance(i, int):
                        i = ord(i)  # convert byte to int on Python 2
                    v = v << 8 | i
                return v
            return value
        return process
class TIME(sqltypes.TIME):
    """MySQL TIME type. """

    __visit_name__ = 'TIME'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL TIME type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.
         MySQL 5.6 supports storage of fractional seconds;
         this parameter will be used when emitting DDL
         for the TIME type.

         .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        .. versionadded:: 0.8 The MySQL-specific TIME
           type as well as fractional seconds support.

        """
        super(TIME, self).__init__(timezone=timezone)
        self.fsp = fsp

    def result_processor(self, dialect, coltype):
        # DBAPIs return MySQL TIME columns as datetime.timedelta; convert
        # to datetime.time here.  ``datetime`` is presumably imported at
        # module level (not visible in this chunk).
        time = datetime.time

        def process(value):
            # convert from a timedelta value
            if value is not None:
                microseconds = value.microseconds
                seconds = value.seconds
                minutes = seconds // 60
                # NOTE(review): negative TIME values (timedelta with
                # days < 0) are not handled here -- confirm whether the
                # supported DBAPIs can return them.
                return time(minutes // 60,
                            minutes % 60,
                            seconds - minutes * 60,
                            microsecond=microseconds)
            else:
                return None
        return process
class TIMESTAMP(sqltypes.TIMESTAMP):
    """MySQL TIMESTAMP type.

    """

    __visit_name__ = 'TIMESTAMP'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL TIMESTAMP type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.
         MySQL 5.6.4 supports storage of fractional seconds;
         this parameter will be used when emitting DDL
         for the TIMESTAMP type.

         .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
           with fractional seconds support.

        """
        super(TIMESTAMP, self).__init__(timezone=timezone)
        self.fsp = fsp
class DATETIME(sqltypes.DATETIME):
    """MySQL DATETIME type.

    """

    __visit_name__ = 'DATETIME'

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL DATETIME type.

        :param timezone: not used by the MySQL dialect.

        :param fsp: fractional seconds precision value.
         MySQL 5.6.4 supports storage of fractional seconds;
         this parameter will be used when emitting DDL
         for the DATETIME type.

         .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
           with fractional seconds support.

        """
        super(DATETIME, self).__init__(timezone=timezone)
        self.fsp = fsp
class YEAR(sqltypes.TypeEngine):
    """MySQL YEAR type, for single byte storage of years 1901-2155."""

    __visit_name__ = 'YEAR'

    def __init__(self, display_width=None):
        # Optional display width used when rendering DDL, e.g. YEAR(4).
        self.display_width = display_width
class TEXT(_StringType, sqltypes.TEXT):
    """MySQL TEXT type, for text up to 2^16 characters."""

    __visit_name__ = 'TEXT'

    def __init__(self, length=None, **kw):
        """Construct a TEXT.

        :param length: Optional, if provided the server may optimize storage
          by substituting the smallest TEXT type sufficient to store
          ``length`` characters.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(TEXT, self).__init__(length=length, **kw)
class TINYTEXT(_StringType):
    """MySQL TINYTEXT type, for text up to 2^8 characters."""

    __visit_name__ = 'TINYTEXT'

    def __init__(self, **kwargs):
        """Construct a TINYTEXT.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(TINYTEXT, self).__init__(**kwargs)
class MEDIUMTEXT(_StringType):
    """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""

    __visit_name__ = 'MEDIUMTEXT'

    def __init__(self, **kwargs):
        """Construct a MEDIUMTEXT.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(MEDIUMTEXT, self).__init__(**kwargs)
class LONGTEXT(_StringType):
    """MySQL LONGTEXT type, for text up to 2^32 characters."""

    __visit_name__ = 'LONGTEXT'

    def __init__(self, **kwargs):
        """Construct a LONGTEXT.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(LONGTEXT, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
    """MySQL VARCHAR type, for variable-length character data."""

    __visit_name__ = 'VARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a VARCHAR.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
    """MySQL CHAR type, for fixed-length character data."""

    __visit_name__ = 'CHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a CHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.

        """
        super(CHAR, self).__init__(length=length, **kwargs)

    @classmethod
    def _adapt_string_for_cast(cls, type_):
        # Copy the given string type into a CHAR for the purposes of
        # rendering a CAST expression.  First parameter renamed from the
        # misleading ``self`` to the conventional ``cls``.
        type_ = sqltypes.to_instance(type_)
        if isinstance(type_, sqltypes.CHAR):
            return type_
        elif isinstance(type_, _StringType):
            # Carry over all MySQL-specific string attributes except
            # ``national``, which CAST does not support.
            return CHAR(
                length=type_.length,
                charset=type_.charset,
                collation=type_.collation,
                ascii=type_.ascii,
                binary=type_.binary,
                unicode=type_.unicode,
                national=False  # not supported in CAST
            )
        else:
            # Generic String type: only the length is meaningful here.
            return CHAR(length=type_.length)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
    """MySQL NVARCHAR type.

    For variable-length character data in the server's configured national
    character set.

    """

    __visit_name__ = 'NVARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct an NVARCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.

        """
        # Force the national-character-set flag; that is what makes this
        # type render as NVARCHAR rather than VARCHAR.
        kwargs['national'] = True
        super(NVARCHAR, self).__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
    """MySQL NCHAR type.

    For fixed-length character data in the server's configured national
    character set.

    """

    __visit_name__ = 'NCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct an NCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set.  This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation.  Must be
          compatible with the national character set.

        """
        # Force the national-character-set flag; that is what makes this
        # type render as NCHAR rather than CHAR.
        kwargs['national'] = True
        super(NCHAR, self).__init__(length=length, **kwargs)
class TINYBLOB(sqltypes._Binary):
    """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""

    __visit_name__ = 'TINYBLOB'
class MEDIUMBLOB(sqltypes._Binary):
    """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""

    __visit_name__ = 'MEDIUMBLOB'
class LONGBLOB(sqltypes._Binary):
    """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""

    __visit_name__ = 'LONGBLOB'
| gpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/migrations/test_base.py | 25 | 2508 | import os
from django.db import connection
from django.test import TransactionTestCase
from django.utils._os import upath
class MigrationTestBase(TransactionTestCase):
    """
    Contains an extended set of asserts for testing migrations and schema operations.
    """

    # Limit the installed-apps registry to the app under test.
    available_apps = ["migrations"]
    # Directory containing this module, used to locate test migration files.
    test_dir = os.path.abspath(os.path.dirname(upath(__file__)))

    def get_table_description(self, table):
        # Introspect column metadata for ``table`` via the default connection.
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)

    def assertTableExists(self, table):
        with connection.cursor() as cursor:
            self.assertIn(table, connection.introspection.table_names(cursor))

    def assertTableNotExists(self, table):
        with connection.cursor() as cursor:
            self.assertNotIn(table, connection.introspection.table_names(cursor))

    def assertColumnExists(self, table, column):
        self.assertIn(column, [c.name for c in self.get_table_description(table)])

    def assertColumnNotExists(self, table, column):
        self.assertNotIn(column, [c.name for c in self.get_table_description(table)])

    def assertColumnNull(self, table, column):
        # Raises IndexError if the column does not exist, which also fails the test.
        self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], True)

    def assertColumnNotNull(self, table, column):
        self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], False)

    def assertIndexExists(self, table, columns, value=True):
        # ``value=False`` inverts the assertion; see assertIndexNotExists.
        with connection.cursor() as cursor:
            self.assertEqual(
                value,
                any(
                    c["index"]
                    for c in connection.introspection.get_constraints(cursor, table).values()
                    if c['columns'] == list(columns)
                ),
            )

    def assertIndexNotExists(self, table, columns):
        return self.assertIndexExists(table, columns, False)

    def assertFKExists(self, table, columns, to, value=True):
        # ``to`` is a (table, column) pair as reported by get_constraints().
        with connection.cursor() as cursor:
            self.assertEqual(
                value,
                any(
                    c["foreign_key"] == to
                    for c in connection.introspection.get_constraints(cursor, table).values()
                    if c['columns'] == list(columns)
                ),
            )

    def assertFKNotExists(self, table, columns, to, value=True):
        # ``value`` is accepted for signature symmetry but always overridden.
        return self.assertFKExists(table, columns, to, False)
| mit |
keepkey/python-keepkey | tests/test_msg_verifymessage_segwit_native.py | 1 | 4324 | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import unittest
import common
from binascii import unhexlify
class TestMsgVerifymessageSegwitNative(common.KeepKeyTest):
    """Device tests for verify_message against native-segwit (bech32)
    addresses.  Signatures below are fixed test vectors (hex-encoded,
    65-byte recoverable signatures)."""

    def test_message_long(self):
        # A message longer than a single signing chunk.
        self.setup_mnemonic_nopin_nopassphrase()
        ret = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('285ff795c29aef7538f8b3bdb2e8add0d0722ad630a140b6aefd504a5a895cbd867cbb00981afc50edd0398211e8d7c304bb8efa461181bc0afa67ea4a720a89ed'),
            "VeryLongMessage!" * 64
        )
        assert ret is True

    def test_message_testnet(self):
        # Same flow against a testnet bech32 address (tb1 prefix).
        self.setup_mnemonic_nopin_nopassphrase()
        ret = self.client.verify_message(
            'Testnet',
            'tb1qyjjkmdpu7metqt5r36jf872a34syws336p3n3p',
            unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
            'This is an example of a signed message.'
        )
        assert ret is True

    def test_message_verify(self):
        self.setup_mnemonic_nopin_nopassphrase()
        # trezor pubkey - OK
        res = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
            'This is an example of a signed message.'
        )
        assert res is True
        # trezor pubkey - FAIL - wrong sig (last byte altered)
        res = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be00'),
            'This is an example of a signed message.'
        )
        assert res is False
        # trezor pubkey - FAIL - wrong msg (trailing '!' instead of '.')
        res = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('289e23edf0e4e47ff1dec27f32cd78c50e74ef018ee8a6adf35ae17c7a9b0dd96f48b493fd7dbab03efb6f439c6383c9523b3bbc5f1a7d158a6af90ab154e9be80'),
            'This is an example of a signed message!'
        )
        assert res is False

    def test_verify_utf(self):
        # The same Czech pangram in NFKD and NFC normalization forms must
        # both verify against the same signature (device normalizes input).
        self.setup_mnemonic_nopin_nopassphrase()
        words_nfkd = u'Pr\u030ci\u0301s\u030cerne\u030c z\u030clut\u030couc\u030cky\u0301 ku\u030an\u030c u\u0301pe\u030cl d\u030ca\u0301belske\u0301 o\u0301dy za\u0301ker\u030cny\u0301 uc\u030cen\u030c be\u030cz\u030ci\u0301 pode\u0301l zo\u0301ny u\u0301lu\u030a'
        words_nfc = u'P\u0159\xed\u0161ern\u011b \u017elu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy z\xe1ke\u0159n\xfd u\u010de\u0148 b\u011b\u017e\xed pod\xe9l z\xf3ny \xfal\u016f'
        res_nfkd = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('28d0ec02ed8da8df23e7fe9e680e7867cc290312fe1c970749d8306ddad1a1eda41c6a771b13d495dd225b13b0a9d0f915a984ee3d0703f92287bf8009fbb9f7d6'),
            words_nfkd
        )
        res_nfc = self.client.verify_message(
            'Bitcoin',
            'bc1qyjjkmdpu7metqt5r36jf872a34syws33s82q2j',
            unhexlify('28d0ec02ed8da8df23e7fe9e680e7867cc290312fe1c970749d8306ddad1a1eda41c6a771b13d495dd225b13b0a9d0f915a984ee3d0703f92287bf8009fbb9f7d6'),
            words_nfc
        )
        assert res_nfkd is True
        assert res_nfc is True


if __name__ == '__main__':
    unittest.main()
| lgpl-3.0 |
Orochimarufan/youtube-dl | youtube_dl/extractor/clipsyndicate.py | 64 | 1812 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
fix_xml_ampersands
)
class ClipsyndicateIE(InfoExtractor):
    """Extractor for clipsyndicate.com video pages."""

    _VALID_URL = r'https?://(?:chic|www)\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe',
        'md5': '4d7d549451bad625e0ff3d7bd56d776c',
        'info_dict': {
            'id': '4629301',
            'ext': 'mp4',
            'title': 'Brick Briscoe',
            'duration': 612,
            'thumbnail': r're:^https?://.+\.jpg',
        },
    }, {
        'url': 'http://chic.clipsyndicate.com/video/play/5844117/shark_attack',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        js_player = self._download_webpage(
            'http://eplayer.clipsyndicate.com/embed/player.js?va_id=%s' % video_id,
            video_id, 'Downloading player')  # fixed typo: was 'Downlaoding'
        # it includes a required token
        flvars = self._search_regex(r'flvars: "(.*?)"', js_player, 'flvars')

        pdoc = self._download_xml(
            'http://eplayer.clipsyndicate.com/osmf/playlist?%s' % flvars,
            video_id, 'Downloading video info',
            transform_source=fix_xml_ampersands)

        track_doc = pdoc.find('trackList/track')

        def find_param(name):
            # Look up a <param name="..."> value on the track; None if absent.
            node = find_xpath_attr(track_doc, './/param', 'name', name)
            if node is not None:
                return node.attrib['value']

        return {
            'id': video_id,
            'title': find_param('title'),
            'url': track_doc.find('location').text,
            'thumbnail': find_param('thumbnail'),
            'duration': int(find_param('duration')),
        }
| unlicense |
efabless/openlane | scripts/tksimpledialog.py | 1 | 2996 | #!/ef/efabless/opengalaxy/venv/bin/python3
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Dialog class for tkinter
import os
import tkinter
from tkinter import ttk
class Dialog(tkinter.Toplevel):
    """Modal dialog base class for tkinter.

    Subclasses override ``body`` to build the form, and optionally
    ``validate``/``apply`` for input checking and result extraction.
    Construction blocks (``wait_window``) until the dialog is closed;
    the outcome is then available in ``self.result``.
    """

    def __init__(self, parent, message = None, title = None, seed = None, border = 'blue', **kwargs):
        tkinter.Toplevel.__init__(self, parent)
        # Keep the dialog on top of its parent window.
        self.transient(parent)

        if title:
            self.title(title)

        # The colored border is simulated by padding the toplevel itself.
        self.configure(background=border, padx=2, pady=2)
        self.obox = ttk.Frame(self)
        self.obox.pack(side = 'left', fill = 'both', expand = 'true')

        self.parent = parent
        self.result = None  # set by apply() when the user confirms

        body = ttk.Frame(self.obox)
        self.initial_focus = self.body(body, message, seed, **kwargs)
        body.pack(padx = 5, pady = 5)

        self.buttonbox()

        # Make the dialog modal: route all events here.
        self.grab_set()

        if not self.initial_focus:
            self.initial_focus = self

        # Closing via the window manager is treated as Cancel.
        self.protocol("WM_DELETE_WINDOW", self.cancel)

        # Position slightly offset from the parent window.
        self.geometry("+%d+%d" % (parent.winfo_rootx() + 50,
                                  parent.winfo_rooty() + 50))

        self.initial_focus.focus_set()

        # Block until the dialog window is destroyed.
        self.wait_window(self)

    # Construction hooks

    def body(self, master, **kwargs):
        # Create dialog body.  Return widget that should have
        # initial focus.  This method should be overridden
        pass

    def buttonbox(self):
        # Add standard button box. Override if you don't want the
        # standard buttons
        box = ttk.Frame(self.obox)
        self.okb = ttk.Button(box, text="OK", width=10, command=self.ok, default='active')
        self.okb.pack(side='left', padx=5, pady=5)
        w = ttk.Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side='left', padx=5, pady=5)
        # Keyboard shortcuts mirroring the two buttons.
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack(fill='x', expand='true')

    # Standard button semantics

    def ok(self, event=None):
        if not self.validate():
            self.initial_focus.focus_set()  # put focus back
            return
        self.withdraw()
        self.update_idletasks()
        self.result = self.apply()
        self.cancel()

    def cancel(self, event=None):
        # Put focus back to the parent window
        self.parent.focus_set()
        self.destroy()

    def validate(self):
        return 1  # Override this

    def apply(self):
        return None  # Override this
| apache-2.0 |
HalcyonChimera/osf.io | api_tests/registrations/views/test_registration_embeds.py | 2 | 3140 | import pytest
from nose.tools import * # noqa:
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
@pytest.mark.enable_quickfiles_creation
class TestRegistrationEmbeds(ApiTestCase):
    """API tests for the ``embed`` query parameter on registration detail
    endpoints (children, contributors, identifiers, and invalid embeds)."""

    def setUp(self):
        super(TestRegistrationEmbeds, self).setUp()
        self.user = AuthUserFactory()
        self.auth = Auth(self.user)
        # Despite the name, these nodes are created private (is_public=False).
        make_public_node = functools.partial(
            ProjectFactory, is_public=False, creator=self.user)
        self.root_node = make_public_node()
        self.child1 = make_public_node(parent=self.root_node)
        self.child2 = make_public_node(parent=self.root_node)
        # Two extra contributors with read/write on root and child1 only.
        self.contribs = [AuthUserFactory() for i in range(2)]
        for contrib in self.contribs:
            self.root_node.add_contributor(
                contrib, ['read', 'write'], auth=self.auth, save=True)
            self.child1.add_contributor(
                contrib, ['read', 'write'], auth=self.auth, save=True)
        self.contrib1 = self.contribs[0]
        self.contrib2 = self.contribs[1]
        self.subchild = ProjectFactory(
            parent=self.child2, creator=self.contrib1)
        # Public registrations of the root project and of child1.
        self.registration = RegistrationFactory(
            project=self.root_node, is_public=True)
        self.registration_child = RegistrationFactory(
            project=self.child1, is_public=True)

    def test_embed_children(self):
        url = '/{0}registrations/{1}/?embed=children'.format(
            API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        json = res.json
        embeds = json['data']['embeds']
        assert_equal(len(embeds['children']['data']), 2)
        titles = [self.child1.title, self.child2.title]
        for child in embeds['children']['data']:
            assert_in(child['attributes']['title'], titles)

    def test_embed_contributors(self):
        url = '/{0}registrations/{1}/?embed=contributors'.format(
            API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        embeds = res.json['data']['embeds']
        # Contributor ids are namespaced as "<registration_id>-<user_id>".
        ids = [c._id for c in self.contribs] + [self.user._id]
        ids = ['{}-{}'.format(self.registration._id, id_) for id_ in ids]
        for contrib in embeds['contributors']['data']:
            assert_in(contrib['id'], ids)

    def test_embed_identifiers(self):
        url = '/{0}registrations/{1}/?embed=identifiers'.format(
            API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)

    def test_embed_attributes_not_relationships(self):
        # Embedding a plain attribute (title) must be rejected with a 400.
        url = '/{}registrations/{}/?embed=title'.format(
            API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['errors'][0]['detail'],
            'The following fields are not embeddable: title'
        )
| apache-2.0 |
GenePeeks/py-rest-orm | tests/test_fields.py | 1 | 1036 | from datetime import date, datetime
from unittest import TestCase
from pyrestorm.fields import Field, DateField
from pyrestorm import exceptions
class FieldsTestCase(TestCase):
    """Unit tests for the base ``Field`` and ``DateField`` serializers."""

    def test_field_clean(self):
        # The base Field is abstract: clean() must not be implemented.
        field = Field()
        self.assertRaises(NotImplementedError, field.clean, 1)

    def test_field_restore(self):
        # The base Field restores values unchanged (identity passthrough).
        d = '2016-01-01'
        field = Field()
        self.assertEqual(field.restore(d), d)

    def test_datefield_clean(self):
        # clean() serializes a date to its ISO-8601 string form.
        d = date(2016, 1, 1)
        field = DateField()
        self.assertEqual(field.clean(d), '2016-01-01')

    def test_datefield_restore(self):
        # restore() parses the string back -- note it yields a datetime,
        # not a date.
        d = '2016-01-01'
        field = DateField()
        self.assertEqual(field.restore(d), datetime(2016, 1, 1))

    def test_datefield_validate_true(self):
        # validate() returns None (no exception) for a valid date.
        d = date(2016, 1, 1)
        field = DateField()
        self.assertIsNone(field.validate(d))

    def test_datefield_validate_false(self):
        # Non-date input raises the library's ValidationError.
        field = DateField()
        self.assertRaises(exceptions.ValidationError, field.validate, 'tomato')
| mit |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_2/tests/regressiontests/dispatch/tests/test_dispatcher.py | 50 | 3724 | from django.dispatch import Signal
import unittest
import sys
import gc
import django.utils.copycompat as copy
if not sys.platform.startswith('java'):
    def garbage_collect():
        """Trigger an immediate garbage-collection pass."""
        gc.collect()
else:
    def garbage_collect():
        """Trigger garbage collection, then pause briefly.

        Jython's collector runs asynchronously, so give it a moment to
        finish reclaiming objects before the test continues.
        """
        import time
        gc.collect()
        time.sleep(0.1)
def receiver_1_arg(val, **kwargs):
    """Signal receiver that simply echoes back the received value."""
    result = val
    return result
class Callable(object):
    """Helper receiver: instances are callable and echo back ``val``,
    both directly and via the bound method ``a``."""

    def __call__(self, val, **kwargs):
        echoed = val
        return echoed

    def a(self, val, **kwargs):
        echoed = val
        return echoed
a_signal = Signal(providing_args=["val"])
class DispatcherTests(unittest.TestCase):
    """Test suite for dispatcher (barely started)"""

    def _testIsClean(self, signal):
        """Assert that everything has been cleaned up automatically"""
        self.assertEqual(signal.receivers, [])
        # force cleanup just in case
        signal.receivers = []

    def testExact(self):
        """A receiver connected for a specific sender fires for that sender."""
        a_signal.connect(receiver_1_arg, sender=self)
        expected = [(receiver_1_arg, "test")]
        result = a_signal.send(sender=self, val="test")
        self.assertEqual(result, expected)
        a_signal.disconnect(receiver_1_arg, sender=self)
        self._testIsClean(a_signal)

    def testIgnoredSender(self):
        """A receiver connected without a sender fires for any sender."""
        a_signal.connect(receiver_1_arg)
        expected = [(receiver_1_arg, "test")]
        result = a_signal.send(sender=self, val="test")
        self.assertEqual(result, expected)
        a_signal.disconnect(receiver_1_arg)
        self._testIsClean(a_signal)

    def testGarbageCollected(self):
        """A receiver held only by weak reference drops out once collected."""
        a = Callable()
        a_signal.connect(a.a, sender=self)
        expected = []
        del a
        garbage_collect()
        result = a_signal.send(sender=self, val="test")
        self.assertEqual(result, expected)
        self._testIsClean(a_signal)

    def testMultipleRegistration(self):
        """Connecting the same receiver repeatedly registers it only once."""
        a = Callable()
        a_signal.connect(a)
        a_signal.connect(a)
        a_signal.connect(a)
        a_signal.connect(a)
        a_signal.connect(a)
        a_signal.connect(a)
        result = a_signal.send(sender=self, val="test")
        self.assertEqual(len(result), 1)
        self.assertEqual(len(a_signal.receivers), 1)
        del a
        del result
        garbage_collect()
        self._testIsClean(a_signal)

    def testUidRegistration(self):
        """Receivers sharing a dispatch_uid occupy a single slot."""
        def uid_based_receiver_1(**kwargs):
            pass

        def uid_based_receiver_2(**kwargs):
            pass

        a_signal.connect(uid_based_receiver_1, dispatch_uid="uid")
        a_signal.connect(uid_based_receiver_2, dispatch_uid="uid")
        self.assertEqual(len(a_signal.receivers), 1)
        a_signal.disconnect(dispatch_uid="uid")
        self._testIsClean(a_signal)

    def testRobust(self):
        """Test the sendRobust function"""
        def fails(val, **kwargs):
            raise ValueError('this')

        a_signal.connect(fails)
        result = a_signal.send_robust(sender=self, val="test")
        err = result[0][1]
        # assert_ is a deprecated unittest alias; use assertTrue instead.
        self.assertTrue(isinstance(err, ValueError))
        self.assertEqual(err.args, ('this',))
        a_signal.disconnect(fails)
        self._testIsClean(a_signal)

    def testDisconnection(self):
        """Both explicit disconnect and garbage collection remove receivers."""
        receiver_1 = Callable()
        receiver_2 = Callable()
        receiver_3 = Callable()
        a_signal.connect(receiver_1)
        a_signal.connect(receiver_2)
        a_signal.connect(receiver_3)
        a_signal.disconnect(receiver_1)
        del receiver_2
        garbage_collect()
        a_signal.disconnect(receiver_3)
        self._testIsClean(a_signal)
def getSuite():
    """Build a suite of all test* methods of DispatcherTests.

    NOTE(review): unittest.makeSuite is deprecated (removed in Python 3.13);
    TestLoader().loadTestsFromTestCase is the modern replacement.
    """
    return unittest.makeSuite(DispatcherTests,'test')
if __name__ == "__main__":
unittest.main()
| mit |
asampat3090/readthedocs.org | readthedocs/rtd_tests/tests/test_redirects_utils.py | 24 | 1731 | from django.test import TestCase
from django.test.utils import override_settings
from readthedocs.projects.models import Project
from django.core.urlresolvers import reverse
from readthedocs.redirects.utils import redirect_filename
class RedirectFilenameTests(TestCase):
    """Tests for redirect_filename(): how a project+filename pair maps to
    the final redirect target under the various serving configurations."""
    # Fixtures provide the canonical "read-the-docs" project in the test DB.
    fixtures = ["eric", "test_data"]

    def setUp(self):
        self.proj = Project.objects.get(slug="read-the-docs")

    def test_http_filenames_return_themselves(self):
        # Absolute http(s) targets must pass through untouched.
        self.assertEqual(
            redirect_filename(None, 'http'),
            'http'
        )

    def test_redirects_no_subdomain(self):
        # Without subdomain serving, docs live under /docs/<slug>/<lang>/<version>/.
        self.assertEqual(
            redirect_filename(self.proj, 'index.html'),
            '/docs/read-the-docs/en/latest/index.html'
        )

    @override_settings(
        USE_SUBDOMAIN=True, PRODUCTION_DOMAIN='rtfd.org'
    )
    def test_redirects_with_subdomain(self):
        # With subdomain serving the redirect is an absolute URL on <slug>.<domain>.
        self.assertEqual(
            redirect_filename(self.proj, 'faq.html'),
            'http://read-the-docs.rtfd.org/en/latest/faq.html'
        )

    @override_settings(
        USE_SUBDOMAIN=True, PRODUCTION_DOMAIN='rtfd.org'
    )
    def test_single_version_with_subdomain(self):
        # Single-version projects drop the /<lang>/<version>/ path segment.
        self.proj.single_version = True
        self.assertEqual(
            redirect_filename(self.proj, 'faq.html'),
            'http://read-the-docs.rtfd.org/faq.html'
        )

    def test_single_version_no_subdomain(self):
        # Without subdomains, single-version targets resolve via URLconf.
        self.proj.single_version = True
        self.assertEqual(
            redirect_filename(self.proj, 'faq.html'),
            reverse(
                'docs_detail',
                kwargs={
                    'project_slug': self.proj.slug,
                    'filename': 'faq.html',
                }
            )
        )
| mit |
fedorpatlin/ansible | lib/ansible/modules/network/junos/junos_command.py | 5 | 10875 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: junos_command
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Run arbitrary commands on an Juniper JUNOS device
description:
- Sends an arbitrary set of commands to an JUNOS node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: junos
options:
commands:
description:
- The commands to send to the remote junos device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of I(retries) has been exceeded.
required: false
default: null
rpcs:
description:
- The C(rpcs) argument accepts a list of RPCs to be executed
over a netconf session and the results from the RPC execution
is return to the playbook via the modules results dictionary.
required: false
default: null
version_added: "2.3"
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
version_added: "2.2"
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditional, the interval indicates how to long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
- name: run show version on remote devices
junos_command:
commands: show version
- name: run show version and check to see if output contains Juniper
junos_command:
commands: show version
wait_for: result[0] contains Juniper
- name: run multiple commands on remote nodes
junos_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
junos_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Juniper
- result[1] contains Loopback0
- name: run commands and specify the output format
junos_command:
commands: show version
display: json
- name: run rpc on the remote device
junos_command:
rpcs: get-software-information
"""
RETURN = """
failed_conditions:
description: the conditionals that failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
import re
import shlex
from functools import partial
from xml.etree import ElementTree as etree
from xml.etree.ElementTree import Element, SubElement, tostring
from ansible.module_utils.junos import junos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcli import Conditional, FailedConditionalError
from ansible.module_utils.netconf import send_request
from ansible.module_utils.network_common import ComplexList, to_list
from ansible.module_utils.six import string_types, iteritems
try:
import jxmlease
HAS_JXMLEASE = True
except ImportError:
HAS_JXMLEASE = False
USE_PERSISTENT_CONNECTION = True
def to_lines(stdout):
    """Split every string entry of *stdout* into a list of its lines.

    Non-string entries (already-structured json/xml responses) are passed
    through unchanged.
    """
    return [str(entry).split('\n') if isinstance(entry, string_types) else entry
            for entry in stdout]
def rpc(module, items):
    """Execute each parsed command/RPC work item over the netconf session.

    Each item is a dict with ``name`` (RPC element name), ``xattrs``
    (element attributes, including the reply ``format``), and optionally
    ``args`` (child elements) or ``text`` (element text).  Returns the
    decoded responses in the same order as *items*.
    """
    responses = list()
    for item in items:
        name = item['name']
        xattrs = item['xattrs']
        args = item.get('args')
        text = item.get('text')
        # RPC names use dashes on the wire even if given with underscores.
        name = str(name).replace('_', '-')
        # In check mode only read-only (get-*) RPCs may be executed.
        if all((module.check_mode, not name.startswith('get'))):
            module.fail_json(msg='invalid rpc for running in check_mode')
        element = Element(name, xattrs)
        if text:
            element.text = text
        elif args:
            for key, value in iteritems(args):
                key = str(key).replace('_', '-')
                if isinstance(value, list):
                    # One child element per list entry; a bare True produces
                    # an empty flag-style element with no text.
                    # NOTE(review): this inner loop shadows the outer ``item``
                    # variable; harmless today because ``item`` is not read
                    # again afterwards, but worth renaming upstream.
                    for item in value:
                        child = SubElement(element, key)
                        if item is not True:
                            child.text = item
                else:
                    child = SubElement(element, key)
                    if value is not True:
                        child.text = value
        reply = send_request(module, element)
        # Decode the reply according to the requested display format.
        if xattrs['format'] == 'text':
            data = reply.find('.//output')
            responses.append(data.text.strip())
        elif xattrs['format'] == 'json':
            responses.append(module.from_json(reply.text.strip()))
        else:
            responses.append(tostring(reply))
    return responses
def split(value):
    """Tokenize *value* on whitespace while keeping double-quoted groups
    together (used to break an rpc spec into name and key=value parts)."""
    lexer = shlex.shlex(value)
    lexer.quotes = '"'
    lexer.whitespace_split = True
    lexer.commenters = ''
    return [token for token in lexer]
def parse_rpcs(module):
    """Turn the ``rpcs`` module parameter into a list of RPC work items.

    Each returned item is ``{'name': ..., 'args': {...}, 'xattrs': {...}}``
    where ``args`` holds keyword arguments parsed from ``key=value`` tokens
    (coerced to bool/int where the text allows it) and ``xattrs['format']``
    is the requested reply format (defaults to ``xml``).
    """
    items = list()
    for rpc in (module.params['rpcs'] or list()):
        parts = split(rpc)
        name = parts.pop(0)
        args = dict()
        for item in parts:
            # Split on the first '=' only, so values may themselves
            # contain '=' (e.g. url=http://host/path?a=b).
            key, value = item.split('=', 1)
            if str(value).upper() in ['TRUE', 'FALSE']:
                # bool('False') is True (any non-empty string is truthy),
                # so compare the text explicitly to get the intended bool.
                args[key] = str(value).upper() == 'TRUE'
            elif re.match(r'^[0-9]+$', value):
                args[key] = int(value)
            else:
                args[key] = str(value)
        display = module.params['display'] or 'xml'
        xattrs = {'format': display}
        items.append({'name': name, 'args': args, 'xattrs': xattrs})
    return items
def parse_commands(module, warnings):
    """Normalize the ``commands`` module parameter into RPC work items.

    Each command becomes ``{'name': 'command', 'xattrs': {...}, 'text': ...}``.
    A ``| display json`` / ``| display xml`` suffix overrides the module-wide
    ``display`` parameter (which itself defaults to ``text``).  Non-show
    commands in check mode add a warning to *warnings*.
    """
    items = list()
    commands = module.params['commands'] or list()
    for command in commands:
        if module.check_mode and not command.startswith('show'):
            warnings.append(
                'Only show commands are supported when using check_mode, not '
                'executing %s' % command
            )
        # The text sent to the device is everything before the first pipe.
        text = command.split('|')[0]
        if '| display json' in command:
            fmt = 'json'
        elif '| display xml' in command:
            fmt = 'xml'
        else:
            fmt = module.params['display'] or 'text'
        items.append({'name': 'command', 'xattrs': {'format': fmt}, 'text': text})
    return items
def main():
    """entry point for module execution

    Parses the commands/rpcs arguments, executes them over netconf, and
    retries until the wait_for conditionals are satisfied (or retries run
    out), then exits with the collected responses.
    """
    argument_spec = dict(
        commands=dict(type='list'),
        rpcs=dict(type='list'),
        display=dict(choices=['text', 'json', 'xml'], aliases=['format', 'output']),
        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['all', 'any']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )
    argument_spec.update(junos_argument_spec)
    # At least one of commands/rpcs must be supplied.
    required_one_of = [('commands', 'rpcs')]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    items = list()
    items.extend(parse_commands(module, warnings))
    items.extend(parse_rpcs(module))
    wait_for = module.params['wait_for'] or list()
    # NOTE(review): ``display`` is assigned but never used in this function.
    display = module.params['display']
    conditionals = [Conditional(c) for c in wait_for]
    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']
    while retries > 0:
        responses = rpc(module, items)
        transformed = list()
        for item, resp in zip(items, responses):
            if item['xattrs']['format'] == 'xml':
                # Conditionals are evaluated against parsed XML, which
                # requires the optional jxmlease library.
                if not HAS_JXMLEASE:
                    module.fail_json(msg='jxmlease is required but does not appear to '
                        'be installed. It can be installed using `pip install jxmlease`')
                try:
                    transformed.append(jxmlease.parse(resp))
                # NOTE(review): bare except hides the original parse error;
                # should be ``except Exception as exc: raise ... from exc``.
                except:
                    raise ValueError(resp)
            else:
                transformed.append(resp)
        # Drop each conditional once it is satisfied; with match=any a single
        # success clears them all.
        for item in list(conditionals):
            try:
                if item(transformed):
                    if match == 'any':
                        conditionals = list()
                        break
                    conditionals.remove(item)
            except FailedConditionalError:
                pass
        if not conditionals:
            break
        time.sleep(interval)
        retries -= 1
    if conditionals:
        failed_conditions = [item.raw for item in conditionals]
        # NOTE(review): message typo ("not be" -> "not been") — runtime
        # string kept as-is here; fix upstream.
        msg = 'One or more conditional statements have not be satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)
    result = {
        'changed': False,
        'warnings': warnings,
        'stdout': responses,
        'stdout_lines': to_lines(responses)
    }
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
AltSchool/django | django/core/cache/backends/base.py | 32 | 9680 | "Base Cache class."
from __future__ import unicode_literals
import time
import warnings
from django.core.exceptions import DjangoRuntimeWarning, ImproperlyConfigured
from django.utils.module_loading import import_string
class InvalidCacheBackendError(ImproperlyConfigured):
    """Raised when a configured cache backend cannot be located/imported."""
    pass
class CacheKeyWarning(DjangoRuntimeWarning):
    """Warning for cache keys that would not be portable to memcached."""
    pass
# Stub class to ensure not passing in a `timeout` argument results in
# the default timeout
DEFAULT_TIMEOUT = object()
# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
    """
    Default function to generate keys.

    Constructs the key used by all other methods. By default it prepends
    the `key_prefix'. KEY_FUNCTION can be used to specify an alternate
    function with custom key making behavior.
    """
    return '{0}:{1}:{2}'.format(key_prefix, version, key)
def get_key_func(key_func):
    """
    Function to decide which key function to use.

    Defaults to ``default_key_func``.  A dotted-path string is resolved to
    the callable it names; an already-callable value is used as given.
    """
    if key_func is None:
        return default_key_func
    return key_func if callable(key_func) else import_string(key_func)
class BaseCache(object):
    """Abstract cache backend: parses the common CACHES configuration and
    defines the key-building, bulk and versioning helpers; concrete
    backends must implement add/get/set/delete/clear."""

    def __init__(self, params):
        # ``params`` comes from the CACHES setting; accept both the
        # documented upper-case names and legacy lower-case variants, and
        # fall back to safe defaults when a value cannot be coerced.
        timeout = params.get('timeout', params.get('TIMEOUT', 300))
        if timeout is not None:
            try:
                timeout = int(timeout)
            except (ValueError, TypeError):
                timeout = 300
        self.default_timeout = timeout
        options = params.get('OPTIONS', {})
        max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self.key_prefix = params.get('KEY_PREFIX', '')
        self.version = params.get('VERSION', 1)
        self.key_func = get_key_func(params.get('KEY_FUNCTION'))

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Returns the timeout value usable by this backend based upon the provided
        timeout (an absolute expiry time, or None for "cache forever").
        """
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        elif timeout == 0:
            # ticket 21147 - avoid time.time() related precision issues
            timeout = -1
        return None if timeout is None else time.time() + timeout

    def make_key(self, key, version=None):
        """Constructs the key used by all other methods. By default it
        uses the key_func to generate a key (which, by default,
        prepends the `key_prefix' and 'version'). A different key
        function can be provided at the time of cache construction;
        alternatively, you can subclass the cache backend to provide
        custom key making behavior.
        """
        if version is None:
            version = self.version
        new_key = self.key_func(key, self.key_prefix, version)
        return new_key

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.

        Returns True if the value was stored, False otherwise.
        """
        raise NotImplementedError('subclasses of BaseCache must provide an add() method')

    def get(self, key, default=None, version=None):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a get() method')

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a set() method')

    def delete(self, key, version=None):
        """
        Delete a key from the cache, failing silently.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a delete() method')

    def get_many(self, keys, version=None):
        """
        Fetch a bunch of keys from the cache. For certain backends (memcached,
        pgsql) this can be *much* faster when fetching multiple values.

        Returns a dict mapping each key in keys to its value. If the given
        key is missing, it will be missing from the response dict.
        """
        d = {}
        for k in keys:
            val = self.get(k, version=version)
            if val is not None:
                d[k] = val
        return d

    def get_or_set(self, key, default=None, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Fetch a given key from the cache. If the key does not exist,
        the key is added and set to the default value. The default value can
        also be any callable. If timeout is given, that timeout will be used
        for the key; otherwise the default cache timeout will be used.

        Returns the value of the key stored or retrieved on success,
        False on error.
        """
        if default is None:
            raise ValueError('You need to specify a value.')
        val = self.get(key, version=version)
        if val is None:
            if callable(default):
                default = default()
            val = self.add(key, default, timeout=timeout, version=version)
            if val:
                # add() succeeded; re-fetch so the value returned matches
                # what the backend actually stored.
                return self.get(key, default, version)
        # NOTE(review): when add() loses a race (key created concurrently by
        # another process) this returns False rather than the stored value.
        return val

    def has_key(self, key, version=None):
        """
        Returns True if the key is in the cache and has not expired.
        """
        return self.get(key, version=version) is not None

    def incr(self, key, delta=1, version=None):
        """
        Add delta to value in the cache. If the key does not exist, raise a
        ValueError exception.
        """
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        # NOTE(review): this read-modify-write is not atomic; backends with
        # native increment support override it.
        new_value = value + delta
        self.set(key, new_value, version=version)
        return new_value

    def decr(self, key, delta=1, version=None):
        """
        Subtract delta from value in the cache. If the key does not exist, raise
        a ValueError exception.
        """
        return self.incr(key, -delta, version=version)

    def __contains__(self, key):
        """
        Returns True if the key is in the cache and has not expired.
        """
        # This is a separate method, rather than just a copy of has_key(),
        # so that it always has the same functionality as has_key(), even
        # if a subclass overrides it.
        return self.has_key(key)

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a bunch of values in the cache at once from a dict of key/value
        pairs.  For certain backends (memcached), this is much more efficient
        than calling set() multiple times.

        If timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        """
        for key, value in data.items():
            self.set(key, value, timeout=timeout, version=version)

    def delete_many(self, keys, version=None):
        """
        Delete a bunch of values in the cache at once. For certain backends
        (memcached), this is much more efficient than calling delete() multiple
        times.
        """
        for key in keys:
            self.delete(key, version=version)

    def clear(self):
        """Remove *all* values from the cache at once."""
        raise NotImplementedError('subclasses of BaseCache must provide a clear() method')

    def validate_key(self, key):
        """
        Warn about keys that would not be portable to the memcached
        backend. This encourages (but does not force) writing backend-portable
        cache code.
        """
        if len(key) > MEMCACHE_MAX_KEY_LENGTH:
            warnings.warn('Cache key will cause errors if used with memcached: '
                    '%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
                    CacheKeyWarning)
        for char in key:
            # memcached rejects control characters and whitespace (< 33)
            # and DEL (127).
            if ord(char) < 33 or ord(char) == 127:
                warnings.warn('Cache key contains characters that will cause '
                        'errors if used with memcached: %r' % key,
                        CacheKeyWarning)

    def incr_version(self, key, delta=1, version=None):
        """Adds delta to the cache version for the supplied key. Returns the
        new version.
        """
        # Copy the value to the new version's key, then drop the old entry.
        if version is None:
            version = self.version
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        self.set(key, value, version=version + delta)
        self.delete(key, version=version)
        return version + delta

    def decr_version(self, key, delta=1, version=None):
        """Subtracts delta from the cache version for the supplied key. Returns
        the new version.
        """
        return self.incr_version(key, -delta, version)

    def close(self, **kwargs):
        """Close the cache connection"""
        pass
| bsd-3-clause |
pimier15/PyGUI | Kivy/Kivy/Bk_Interractive/sample/Chapter_03_code/02 - Basic widget events - dragging the stickman/comicwidgets.py | 8 | 1605 | # File name: comicwidgets.py
import kivy
kivy.require('1.9.0')
from kivy.uix.relativelayout import RelativeLayout
from kivy.graphics import Line
class DraggableWidget(RelativeLayout):
    """Widget that becomes selected on touch-down (drawing a dashed
    rectangle around itself) and can then be dragged around its parent."""

    def __init__(self, **kwargs):
        super(DraggableWidget, self).__init__(**kwargs)
        # Canvas Line instruction shown while selected; None when unselected.
        self.selected = None

    def on_touch_down(self, touch):
        # Claim the touch (return True) only if it landed on this widget.
        if self.collide_point(touch.x, touch.y):
            self.select()
            return True
        return super(DraggableWidget, self).on_touch_down(touch)

    def select(self):
        if not self.selected:
            # Remember the center at selection time; on_touch_move applies
            # drags as deltas relative to this reference point.
            self.ix = self.center_x
            self.iy = self.center_y
            with self.canvas:
                self.selected = Line(rectangle=(0, 0, self.width, self.height),
                                     dash_offset=2)

    def on_touch_move(self, touch):
        (x, y) = self.parent.to_parent(touch.x, touch.y)
        # Only drag while the widget would remain inside the parent's bounds.
        if self.selected and self.parent.collide_point(x - self.width / 2,
                                                       y - self.height / 2):
            self.translate(touch.x - self.ix, touch.y - self.iy)
            return True
        return super(DraggableWidget, self).on_touch_move(touch)

    def translate(self, x, y):
        # Move by (x, y) and advance the reference point for the next delta.
        self.center_x = self.ix = self.ix + x
        self.center_y = self.iy = self.iy + y

    def on_touch_up(self, touch):
        if self.selected:
            self.unselect()
            return True
        return super(DraggableWidget, self).on_touch_up(touch)

    def unselect(self):
        if self.selected:
            # Remove the selection rectangle from the canvas.
            self.canvas.remove(self.selected)
            self.selected = None
class StickMan(DraggableWidget):
    """Stick-figure widget; all drag behaviour comes from DraggableWidget
    (its look is presumably defined in the accompanying .kv rule — TODO
    confirm)."""
    pass
| mit |
mas-dse-greina/neon | luna16/LUNA16_CADIMI.py | 1 | 4381 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2017 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
CADIMI on LUNA16 data.
Based on the model from the paper:
FALSE POSITIVE REDUCTION FOR NODULE DETECTION IN CT SCANS USING A CONVOLUTIONAL NEURAL NETWORK: APPLICATION TO THE LUNA16 CHALLENGE
Thomas de Bel, Cas van den Bogaard, Valentin Kotov, Luuk Scholten, Nicole Walasek
https://luna16.grand-challenge.org/serve/public_html/pdfs/CADIMI-TEAM1_160816.pdf/
"""
import itertools as itt
from neon import logger as neon_logger
from neon.optimizers import Adam, Adadelta
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Cost, Softmax
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
from neon.models import Model
from aeon import DataLoader
from neon.callbacks.callbacks import Callbacks, MetricCallback
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.backends import gen_backend
from neon.data.dataloader_transformers import TypeCast
import numpy as np
from neon.initializers import Kaiming, IdentityInit, Constant
from neon.layers import Conv, Pooling, GeneralizedCost, Affine, Activation, Dropout
from neon.data.datasets import Dataset
from neon.util.persist import load_obj
import os
from neon.data import HDF5IteratorOneHot
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--subset', type=int, default=9)
args = parser.parse_args()
# hyperparameters
num_epochs = args.epochs
# Next line gets rid of the deterministic warning
args.deterministic = None
if (args.rng_seed is None):
    args.rng_seed = 16
print('Batch size = {}'.format(args.batch_size))
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
#be.enable_winograd = 4 # default to winograd 4 for fast autotune
SUBSET = args.subset
# Standard LUNA16 cross-validation split: train on every subset except
# SUBSET and hold SUBSET out for validation; augmentation (flips/rotations)
# is enabled only for the training iterator.
train_set = HDF5IteratorOneHot('/mnt/data/medical/luna16/luna16_roi_except_subset{}_augmented.h5'.format(SUBSET), \
        flip_enable=True, rot90_enable=True, crop_enable=False, border_size=5)
valid_set = HDF5IteratorOneHot('/mnt/data/medical/luna16/luna16_roi_subset{}_augmented.h5'.format(SUBSET), \
        flip_enable=False, rot90_enable=False, crop_enable=False, border_size=5)
print('Using subset{}'.format(SUBSET))
init_uni = Kaiming()
relu = Rectlin()
bn = True
convp1 = dict(init=init_uni, batch_norm=bn, activation=relu, padding=1)
# Three conv/pool stages followed by a fully-connected head and a
# two-class softmax (nodule vs. non-nodule), as in the CADIMI paper.
layers = [Conv((5, 5, 24), **convp1),
          Pooling(2, op='max'),
          Conv((3, 3, 32), **convp1),
          Pooling(2, op='max'),
          Conv((3, 3, 48), **convp1),
          Pooling('all', op='avg'),
          Affine(512, init=init_uni, batch_norm=True, activation=relu),
          Affine(2, init=init_uni, activation=Softmax())]
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
lunaModel = Model(layers=layers)
modelFileName = 'LUNA16_CADIMI_subset{}.prm'.format(SUBSET)
# If the model file exists, then load it and start from there.
# if (os.path.isfile(modelFileName)):
#     lunaModel = Model(modelFileName)
# Nesterov accelerated gradient descent with a learning rate of 0.01, a decay of 10^-3 and a momentum of 0.9
#opt = GradientDescentMomentum(0.01, 0.9, wdecay=0.001, nesterov=True)
opt = Adadelta(decay=0.95, epsilon=1e-6)
# configure callbacks: evaluate on the validation set every epoch by default
if args.callback_args['eval_freq'] is None:
    args.callback_args['eval_freq'] = 1
# configure callbacks
callbacks = Callbacks(lunaModel, eval_set=valid_set, **args.callback_args)
# add a callback that saves the best model state
callbacks.add_save_best_state_callback(modelFileName)
lunaModel.fit(train_set, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
| apache-2.0 |
tukan/poker-utils | filter_hands_by_player.py | 1 | 6755 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014 Timothy N. Tsvetkov (email: timothy.tsvetkov@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import argparse
import os
import re
import codecs
import progressbar
def build_money_regex(amount='amount', currency='currency'):
    """Return a regex fragment with named groups for a (possibly empty)
    currency symbol followed by a comma-separated decimal amount."""
    return (r'(?P<{0}>[^\d]*)\s*'
            r'(?P<{1}>(\d*,?\d*)*\.?\d+)').format(currency, amount)
# Matches a PokerStars hand-history header line, capturing the optional game
# sub-type (Home Game / Zoom), game id, optional Home Game club id, game
# type, small/big blinds with their currency symbols, and the date.
HEADER_REGEX = re.compile(
    r'PokerStars\s+(?P<game_sub_type>Home Game|Zoom)?\s*(Hand|Game)\s+'
    r'\#(?P<game_id>\d+)\:\s+'
    r'(\"\{o_club}Club \#(?P<club_id>\d+)\{c_club})?\s*'
    r'(?P<game_type>.*)\s+'
    r'\({sb}/{bb}\s*(?P<currency>\w+)?\)\s+'
    r'-\s+'
    r'(?P<date>.*)'.
    format(
        o_club='{',
        c_club='}',
        sb=build_money_regex('sb','sb_c'),
        bb=build_money_regex('bb','bb_c')),
    re.UNICODE
)
# Matches a seat line ("Seat 3: player (stack in chips)"), capturing the
# seat number, player name and starting stack.
SEAT_REGEX = re.compile(
    r'Seat\s+(?P<seat>\d+)\s*\:\s*(?P<player>.*)\s+\({stack}.*\)'.
    format(stack=build_money_regex('stack')),
    re.UNICODE
)
def open_out(out_name, out_ext, batch, ix):
    """Open (truncating) the UTF-8 output file for batch index *ix*.

    When batching is enabled (batch > 0) the index is embedded in the file
    name as ``<name>_<ix><ext>``; otherwise the plain ``<name><ext>`` is used.
    The caller is responsible for closing the returned stream.
    """
    if batch is not None and batch > 0:
        fname = '{0}_{1}{2}'.format(out_name, ix, out_ext)
    else:
        fname = out_name + out_ext
    return codecs.open(fname, 'wb', encoding='utf-8', errors='strict')
def filter_hands(hand_files, player, out_name, out_ext, batch):
    """Scan the given hand-history files and write every hand whose seat
    list contains *player* to the output file(s), rotating output files
    every *batch* hands when batching is enabled.

    Returns the tuple ``(total_hands, hands_filtered)``.  Implemented as a
    line-by-line state machine: a HEADER_REGEX match starts a hand, seat
    lines are checked for the player, and an empty line ends the hand.
    """
    total_hands, hands_filtered = 0, 0
    batch_counter, batch_ix = 0, 0
    out = open_out(out_name, out_ext, batch, batch_ix)
    for fname in hand_files:
        # Per-file parser state.
        in_hand = False
        in_seats = False
        after_seats = False
        found = False
        hand = u''
        f = codecs.open(fname, 'rb', encoding='utf-8', errors='replace')
        line = f.readline(1000)
        while len(line) > 0:
            if in_hand:
                # Reading hand
                if len(line.strip()) > 0:
                    # Not empty line, so we're still in hand
                    if not after_seats and not found:
                        # Haven't found player and haven't finished reading seats
                        match = SEAT_REGEX.match(line)
                        if match is not None:
                            in_seats = True
                            found = player == match.group('player')
                    else:
                        # First non-seat line after the seat block: stop
                        # looking for the player in this hand.
                        after_seats = in_seats
                        in_seats = False
                    hand += line.rstrip() + u"\n"
                else:
                    # Empty line, finishing reading current hand
                    if found:
                        print >>out, hand
                        hands_filtered += 1
                        batch_counter += 1
                    in_hand = False
                    in_seats = False
                    after_seats = False
                    found = False
                    hand = u''
            else:
                # Waiting for a new hand
                match = HEADER_REGEX.match(line)
                in_hand = match is not None
                if in_hand:
                    # Rotate the output file when the current batch is full.
                    if batch is not None and batch > 0 and batch_counter >= batch:
                        batch_ix += 1
                        batch_counter = 0
                        out.close()
                        out = open_out(out_name, out_ext, batch, batch_ix)
                    hand += u"Found in file: %s\n" % os.path.abspath(fname)
                    hand += line.rstrip() + u"\n"
                    total_hands += 1
            line = f.readline(1000)
        f.close()
    out.close()
    return total_hands, hands_filtered
def main():
    """CLI entry point: parse arguments, collect the input files, run
    filter_hands() (optionally behind a progress bar) and print a summary."""
    parser = argparse.ArgumentParser(
        prog='filter_hands_by_player',
        description='Filters hands by player. Hands in file must be divided by an empty line.'
    )
    parser.add_argument('player', metavar='player', type=str, help='Player to filter by')
    parser.add_argument('-d', '--dir', type=str, dest='dir', help='directory with hand history files')
    parser.add_argument('-f', '--file', type=str, dest='file', help='hand history file')
    parser.add_argument('-o', type=str, dest='out', default='out.txt',
                        help='file name to print found hands (default: out.txt)')
    parser.add_argument('-b', '--batch', type=int, dest='batch', help='save by BATCH hands per file')
    parser.add_argument('--no-progressbar', action="store_true", dest='no_progressbar',
                        help='don\'t print a progressbar')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0.0')
    args = parser.parse_args()
    # Gather input files: either a whole directory tree or a single file.
    files = []
    if args.dir is not None:
        for root, dirs, fs in os.walk(args.dir):
            files += [os.path.join(root, f) for f in fs]
    elif args.file is not None:
        files = [args.file]
    else:
        parser.print_help()
        exit(1)
    # Make sure the output directory exists, then split the output path into
    # (name, extension) so batched files can be numbered.
    out_dir = os.path.dirname(args.out)
    if len(out_dir) > 0 and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    basename = os.path.basename(args.out)
    fname, out_ext = os.path.splitext(basename)
    out_name = os.path.join(out_dir, fname)
    if not args.no_progressbar:
        widgets = [
            progressbar.Percentage(),
            ' ',
            progressbar.Bar(),
            ' ',
            progressbar.ETA()
        ]
        files_iter = progressbar.ProgressBar(widgets=widgets)(files)
    else:
        files_iter = files
    t0 = time.time()
    # Python 2: decode the CLI argument so names compare against the
    # unicode lines read by filter_hands().
    player = args.player.decode('utf-8')
    total_hands, hands_filtered = filter_hands(files_iter, player, out_name, out_ext, args.batch)
    t1 = time.time()
    print
    print "Filtered %d from %d hands" % (hands_filtered, total_hands)
    print "Total time: %f sec." % (t1 - t0)
    print
print
if __name__ == "__main__":
main()
| mit |
luxnovalabs/enjigo_door | web_interface/django/utils/ipv6.py | 113 | 7908 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import xrange
def clean_ipv6_address(ip_str, unpack_ipv4=False,
                       error_message="This is not a valid IPv6 address"):
    """
    Clean an IPv6 address string.

    Validity is checked by calling is_valid_ipv6_address() - if an
    invalid address is passed, ValidationError is raised.

    Replaces the longest contiguous zero-sequence with "::", removes
    leading zeroes and makes sure all hextets are lowercase.

    Args:
        ip_str: A valid IPv6 address.
        unpack_ipv4: if an IPv4-mapped address is found,
            return the plain IPv4 address (default=False).
        error_message: An error message used in the ValidationError.

    Returns:
        A compressed IPv6 address (or the plain IPv4 address when
        unpack_ipv4 is set and the input is an IPv4-mapped address).
    """
    # Trackers for the best (longest) run of all-zero hextets seen so far,
    # and for the run currently being scanned.
    best_doublecolon_start = -1
    best_doublecolon_len = 0
    doublecolon_start = -1
    doublecolon_len = 0

    if not is_valid_ipv6_address(ip_str):
        raise ValidationError(error_message)

    # This algorithm can only handle fully exploded
    # IP strings
    ip_str = _explode_shorthand_ip_string(ip_str)
    ip_str = _sanitize_ipv4_mapping(ip_str)

    # If needed, unpack the IPv4 and return straight away
    # - no need in running the rest of the algorithm
    if unpack_ipv4:
        ipv4_unpacked = _unpack_ipv4(ip_str)
        if ipv4_unpacked:
            return ipv4_unpacked

    hextets = ip_str.split(":")

    for index in range(len(hextets)):
        # Remove leading zeroes
        hextets[index] = hextets[index].lstrip('0')
        if not hextets[index]:
            hextets[index] = '0'

        # Determine best hextet to compress
        if hextets[index] == '0':
            doublecolon_len += 1
            if doublecolon_start == -1:
                # Start of a sequence of zeros.
                doublecolon_start = index
            if doublecolon_len > best_doublecolon_len:
                # This is the longest sequence of zeros so far.
                best_doublecolon_len = doublecolon_len
                best_doublecolon_start = doublecolon_start
        else:
            doublecolon_len = 0
            doublecolon_start = -1

    # Compress the most suitable hextet (runs of length 1 are left alone:
    # "::" must replace at least two zero hextets).
    if best_doublecolon_len > 1:
        best_doublecolon_end = (best_doublecolon_start +
                                best_doublecolon_len)
        # For zeros at the end of the address.
        if best_doublecolon_end == len(hextets):
            hextets += ['']
        hextets[best_doublecolon_start:best_doublecolon_end] = ['']
        # For zeros at the beginning of the address.
        if best_doublecolon_start == 0:
            hextets = [''] + hextets

    # Joining around the inserted '' produces the "::" marker.
    result = ":".join(hextets)

    return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in a expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
hextets = ip_str.split(':')
return hextets[-1]
def is_valid_ipv6_address(ip_str):
    """
    Ensure we have a valid IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.

    Returns:
        A boolean, True if this is a valid IPv6 address.
    """
    # Imported lazily to avoid a circular import between validators and ipv6.
    from django.core.validators import validate_ipv4_address

    # We need to have at least one ':'.
    if ':' not in ip_str:
        return False

    # We can only have one '::' shortener.
    if ip_str.count('::') > 1:
        return False

    # '::' should be encompassed by start, digits or end.
    if ':::' in ip_str:
        return False

    # A single colon can neither start nor end an address.
    if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
            (ip_str.endswith(':') and not ip_str.endswith('::'))):
        return False

    # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
    if ip_str.count(':') > 7:
        return False

    # If we have no concatenation, we need to have 8 fields with 7 ':'.
    if '::' not in ip_str and ip_str.count(':') != 7:
        # We might have an IPv4 mapped address.
        if ip_str.count('.') != 3:
            return False

    ip_str = _explode_shorthand_ip_string(ip_str)

    # Now that we have that all squared away, let's check that each of the
    # hextets are between 0x0 and 0xFFFF.
    for hextet in ip_str.split(':'):
        if hextet.count('.') == 3:
            # If we have an IPv4 mapped address, the IPv4 portion has to
            # be at the end of the IPv6 portion.
            if not ip_str.split(':')[-1] == hextet:
                return False
            try:
                validate_ipv4_address(hextet)
            except ValidationError:
                return False
        else:
            try:
                # a value error here means that we got a bad hextet,
                # something like 0xzzzz
                if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
                    return False
            except ValueError:
                return False
    return True
def _explode_shorthand_ip_string(ip_str):
    """
    Expand a shortened IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.

    Returns:
        A string, the expanded IPv6 address: 8 colon-separated hextets of
        4 lowercase hex digits each (7 hextets plus a dotted quad for
        IPv4-mapped addresses).
    """
    if not _is_shorthand_ip(ip_str):
        # We've already got a longhand ip_str.
        return ip_str

    new_ip = []
    hextet = ip_str.split('::')

    # If there is a ::, we need to expand it with zeroes
    # to get to 8 hextets - unless there is a dot in the last hextet,
    # meaning we're doing v4-mapping
    if '.' in ip_str.split(':')[-1]:
        fill_to = 7
    else:
        fill_to = 8

    if len(hextet) > 1:
        # Number of hextets already present on both sides of the '::'.
        sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
        new_ip = hextet[0].split(':')

        for _ in xrange(fill_to - sep):
            new_ip.append('0000')
        new_ip += hextet[1].split(':')
    else:
        # No '::' present; only leading zeroes need restoring.
        new_ip = ip_str.split(':')

    # Now need to make sure every hextet is 4 lower case characters.
    # If a hextet is < 4 characters, we've got missing leading 0's.
    ret_ip = []
    for hextet in new_ip:
        ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
    return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
| unlicense |
trabacus-softapps/openerp-8.0-cc | openerp/addons/hr_holidays/report/available_holidays.py | 892 | 1046 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zanderle/django | django/utils/translation/trans_null.py | 467 | 1408 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
def ngettext(singular, plural, number):
    """Return *singular* for a count of one, *plural* otherwise.

    No translation is performed in this no-op backend.
    """
    return singular if number == 1 else plural
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
    # Same as ngettext(), coerced to text with force_text (Python 2 legacy).
    return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
    # The disambiguation context is ignored in the no-op implementation.
    return ugettext(message)
def npgettext(context, singular, plural, number):
    # Contextual plural form; the context is ignored in the no-op backend.
    return ungettext(singular, plural, number)
# No-op implementations of the language-activation API: with USE_I18N = False
# there is nothing to (de)activate and language data comes straight from
# settings.
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
def gettext(message):
    # Identity "translation": the message is returned unchanged.
    return message

def ugettext(message):
    # Text-coerced variant of gettext() (Python 2 unicode legacy).
    return force_text(gettext(message))

# All the noop/lazy aliases resolve to the same identity function.
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
    """Convert a language name (e.g. ``en-us``) to a locale name (``en_US``)."""
    lang, dash, territory = language.partition('-')
    if dash:
        return lang.lower() + '_' + territory.upper()
    return language.lower()
def get_language_from_request(request, check_path=False):
    # Language negotiation is disabled: always the configured LANGUAGE_CODE.
    return settings.LANGUAGE_CODE

def get_language_from_path(request):
    # No URL language-prefix handling when i18n is off.
    return None
| bsd-3-clause |
SpheMakh/msutils | MSUtils/ClassESW.py | 1 | 7690 | import matplotlib
matplotlib.use('Agg')
import sys
import os
import numpy
import numpy.ma as ma
import pylab
from scipy.interpolate import interp1d
from scipy import interpolate
from MSUtils import msutils
from pyrap.tables import table
import matplotlib.cm as cm
# System Equivalent Flux Density of the MeerKAT array sampled across the
# L band: column 0 is frequency in Hz, column 1 the SEFD value
# (presumably Jy -- TODO confirm against the MeerKAT specification).
MEERKAT_SEFD = numpy.array([
    [ 856e6, 580.],
    [ 900e6, 578.],
    [ 950e6, 559.],
    [1000e6, 540.],
    [1050e6, 492.],
    [1100e6, 443.],
    [1150e6, 443.],
    [1200e6, 443.],
    [1250e6, 443.],
    [1300e6, 453.],
    [1350e6, 443.],
    [1400e6, 424.],
    [1450e6, 415.],
    [1500e6, 405.],
    [1550e6, 405.],
    [1600e6, 405.],
    [1650e6, 424.],
    [1711e6, 421.]], dtype=numpy.float32)
class MSNoise(object):
    """
    Estimates visibility noise statistics as a function of frequency for a
    measurement set (MS). The statistics can be turned into weights and
    written back into the MS.
    """

    def __init__(self, ms):
        """
        Args:
            ms (str):
                Path to a CASA measurement set (directory).
        """
        self.ms = ms
        # Basic info about the MS: row count, correlation count and the
        # per-SPW channel layout.
        self.msinfo = msutils.summary(self.ms, display=False)
        self.nrows = self.msinfo['NROW']
        self.ncor = self.msinfo['NCOR']
        self.spw = {
            "freqs": self.msinfo['SPW']['CHAN_FREQ'],
            "nchan": self.msinfo['SPW']['NUM_CHAN'],
        }
        self.nspw = len(self.spw['freqs'])

    def estimate_noise(self, corr=None, autocorr=False):
        """
        Estimate visibility noise.

        NOTE(review): not implemented yet; placeholder for the 'calc' mode
        of estimate_weights().
        """
        return

    def estimate_weights(self, mode='specs',
                         stats_data=None, normalise=True,
                         smooth='polyn', fit_order=9,
                         plot_stats=True):
        """
        Estimate per-channel noise and weights for every spectral window.

        Args:
            mode (str, optional):
                Mode for estimating noise statistics:
                - specs : a file or array whose column 0 is frequency (Hz)
                  and column 1 a value proportional to the sensitivity
                  (e.g. SEFD).
                - calc : calculate the noise internally (not implemented).
            stats_data (file, list, numpy.ndarray):
                Sensitivity-vs-frequency data used in 'specs' mode.
            normalise (bool, optional):
                Normalise the sensitivity data by its maximum.
            smooth (str, optional):
                Smoothing used before evaluation: 'polyn' (polynomial fit,
                default) or 'spline'.
            fit_order (int, optional):
                Order of the smoothing function. Default is 9.
            plot_stats (bool or str, optional):
                If truthy, plot the noise/weights; a string is used as the
                output file name, otherwise '<ms>-noise_weights.png'.

        Returns:
            (noise, weights): two lists with one array per spectral window.
        """
        # TODO(sphe): add function to estimate noise for the other mode.
        # For now, fix the mode
        mode = 'specs'
        if mode == 'specs':
            if isinstance(stats_data, str):
                __data = numpy.load(stats_data)
            else:
                __data = numpy.array(stats_data, dtype=numpy.float32)
            x, y = __data[:, 0], __data[:, 1]
        elif mode == 'calc':
            # x,y = self.estimate_noise()
            pass

        if normalise:
            y /= y.max()
        # lets work in MHz
        x = x * 1e-6

        if smooth == 'polyn':
            fit_parms = numpy.polyfit(x, y, fit_order)
            fit_func = lambda freqs: numpy.poly1d(fit_parms)(freqs)
        elif smooth == 'spline':
            fit_parms = interpolate.splrep(x, y, s=fit_order)
            fit_func = lambda freqs: interpolate.splev(freqs, fit_parms, der=0)

        # Get noise from the parameterised functions for each spectral window
        fig, ax1 = pylab.subplots(figsize=(12, 9))
        ax2 = ax1.twinx()
        color = iter(cm.rainbow(numpy.linspace(0, 1, self.nspw)))
        noise = []
        weights = []
        for i in range(self.nspw):
            freqs = numpy.array(self.spw['freqs'][i], dtype=numpy.float32) * 1e-6
            _noise = fit_func(freqs)
            _weights = 1.0 / _noise ** 2
            if plot_stats:
                # Use a differnet color to mark a new SPW
                ax1.axvspan(freqs[0] / 1e3, freqs[-1] / 1e3, facecolor=next(color), alpha=0.25)
                # Plot noise/weights
                l1, = ax1.plot(x / 1e3, y, 'rx')
                l2, = ax1.plot(freqs / 1e3, _noise, 'k-')
                ax1.set_xlabel('Freq [GHz]')
                ax1.set_ylabel('Norm Noise')
                l3, = ax2.plot(freqs / 1e3, _weights, 'g-')
                ax2.set_ylabel('Weight')
            noise.append(_noise)
            weights.append(_weights)

        # Only finalise/save the figure when plotting was requested.
        # (The legend references line handles that exist only in that case;
        # previously this raised NameError when plot_stats was falsy.)
        if plot_stats:
            # Set limits based on non-smooth noise
            ylims = 1 / y ** 2
            ax2.set_ylim(ylims.min() * 0.9, ylims.max() * 1.1)
            pylab.legend([l1, l2, l3],
                         ['Norm. Noise', 'Polynomial fit: n={0:d}'.format(fit_order), 'Weights'], loc=1)
            if isinstance(plot_stats, str):
                pylab.savefig(plot_stats)
            else:
                pylab.savefig(self.ms + '-noise_weights.png')
            pylab.clf()

        return noise, weights

    def write_toms(self, data,
                   columns=['WEIGHT', 'WEIGHT_SPECTRUM'],
                   stat='sum', rowchunk=None, multiply_old_weights=False):
        """
        Write noise or weights into an MS.

        Args:
            data (list):
                Per-SPW arrays of per-channel values (e.g. from
                estimate_weights()).
            columns (list):
                Columns to write weights/noise and spectral counterparts
                into. Default is ['WEIGHT', 'WEIGHT_SPECTRUM'].
            stat (str):
                Statistic used to collapse the frequency axis of the
                spectral column into the scalar column: 'sum' or 'stddev'.
            rowchunk (int, optional):
                Number of rows written per chunk; defaults to a tenth of
                the MS.
            multiply_old_weights (bool):
                Multiply existing WEIGHT values into the new spectrum.
        """
        # Initialise relevant columns. addcol exits with zero status if the
        # column already exists.
        for i, column in enumerate(columns):
            msutils.addcol(self.ms, colname=column,
                           valuetype='float',
                           clone='WEIGHT' if i == 0 else 'DATA',
                           )

        # Integer chunk size: true division would yield a float under
        # Python 3 and break range(); also guard against a zero step for
        # very small MSs.
        rowchunk = int(rowchunk) if rowchunk else max(1, self.nrows // 10)

        # Open the MS once. (Previously a new table handle was opened for
        # every SPW but only the last one was closed, leaking handles.)
        tab = table(self.ms, readonly=False)
        try:
            for spw in range(self.nspw):
                # Write data into MS in chunks
                for row0 in range(0, self.nrows, rowchunk):
                    nr = min(rowchunk, self.nrows - row0)
                    # Shape for this chunk
                    dshape = [nr, self.spw['nchan'][spw], self.ncor]
                    __data = numpy.ones(dshape, dtype=numpy.float32) * data[spw][numpy.newaxis, :, numpy.newaxis]
                    # Consider old weights if user wants to
                    if multiply_old_weights:
                        old_weight = tab.getcol('WEIGHT', row0, nr)
                        print("Multiplying old weights into WEIGHT_SPECTRUM")
                        __data *= old_weight[:, numpy.newaxis, :]
                    # make a masked array to compute stats using unflagged data
                    flags = tab.getcol('FLAG', row0, nr)
                    mdata = ma.masked_array(__data, mask=flags)
                    print(("Populating {0:s} column (rows {1:d} to {2:d})".format(columns[1], row0, row0 + nr - 1)))
                    tab.putcol(columns[1], __data, row0, nr)
                    print(("Populating {0:s} column (rows {1:d} to {2:d})".format(columns[0], row0, row0 + nr - 1)))
                    if stat == "stddev":
                        tab.putcol(columns[0], mdata.std(axis=1).data, row0, nr)
                    elif stat == "sum":
                        tab.putcol(columns[0], mdata.sum(axis=1).data, row0, nr)
        finally:
            # Done
            tab.close()
| gpl-2.0 |
whn09/tensorflow | tensorflow/contrib/graph_editor/util.py | 23 | 17095 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for the graph_editor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from six import iteritems
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.ops import array_ops as tf_array_ops
__all__ = [
"make_list_of_op",
"get_tensors",
"make_list_of_t",
"get_generating_ops",
"get_consuming_ops",
"ControlOutputs",
"placeholder_name",
"make_placeholder_from_tensor",
"make_placeholder_from_dtype_and_shape",
]
def concatenate_unique(la, lb):
  """Add all the elements of `lb` to `la` if they are not there already.

  The elements added to `la` maintain ordering with respect to `lb`.

  Args:
    la: List of Python objects.
    lb: List of Python objects.
  Returns:
    `la`: The list `la` with missing elements from `lb`.
  """
  seen = set(la)
  for elem in lb:
    if elem in seen:
      continue
    seen.add(elem)
    la.append(elem)
  return la
# TODO(fkp): very generic code, it should be moved in a more generic place.
class ListView(object):
  """Immutable, read-only view over a Python list.

  This class is strongly inspired by the one in tf.Operation.
  """

  def __init__(self, list_):
    if not isinstance(list_, list):
      raise TypeError("Expected a list, got: {}.".format(type(list_)))
    self._list = list_

  def __iter__(self):
    return iter(self._list)

  def __len__(self):
    return len(self._list)

  def __bool__(self):
    return bool(self._list)

  # Python 3 wants __bool__, Python 2.7 wants __nonzero__
  __nonzero__ = __bool__

  def __getitem__(self, i):
    return self._list[i]

  def __add__(self, other):
    tail = other if isinstance(other, list) else list(other)
    return self._list + tail
# TODO(fkp): very generic code, it should be moved in a more generic place.
def is_iterable(obj):
  """Return true if the object is iterable."""
  try:
    iter(obj)
  except Exception:  # pylint: disable=broad-except
    return False
  return True


def flatten_tree(tree, leaves=None):
  """Flatten a tree into a list.

  Args:
    tree: iterable or not. If iterable, its elements (child) can also be
      iterable or not.
    leaves: list to which the tree leaves are appended (None by default).
  Returns:
    A list of all the leaves in the tree.
  """
  if leaves is None:
    leaves = []
  if isinstance(tree, dict):
    # Only the values are leaves; keys are treated as structure.
    for child in tree.values():
      flatten_tree(child, leaves)
  elif is_iterable(tree):
    for child in tree:
      flatten_tree(child, leaves)
  else:
    leaves.append(tree)
  return leaves


def transform_tree(tree, fn, iterable_type=tuple):
  """Transform all the nodes of a tree.

  Args:
    tree: iterable or not. If iterable, its elements (child) can also be
      iterable or not.
    fn: function to apply to each leaves.
    iterable_type: type use to construct the resulting tree for unknown
      iterable, typically `list` or `tuple`.
  Returns:
    A tree whose leaves has been transformed by `fn`.
    The hierarchy of the output tree mimics the one of the input tree.
  """
  if not is_iterable(tree):
    return fn(tree)
  if isinstance(tree, dict):
    res = tree.__new__(type(tree))
    res.__init__(
        (k, transform_tree(child, fn)) for k, child in tree.items())
    return res
  if isinstance(tree, tuple):
    # NamedTuple?
    if hasattr(tree, "_asdict"):
      return tree.__new__(type(tree), **transform_tree(tree._asdict(), fn))
    return tree.__new__(type(tree),
                        (transform_tree(child, fn) for child in tree))
  # collections.Sequence was removed from the top-level collections
  # namespace in Python 3.10; prefer collections.abc when available.
  sequence_cls = getattr(collections, "abc", collections).Sequence
  if isinstance(tree, sequence_cls):
    res = tree.__new__(type(tree))
    res.__init__(transform_tree(child, fn) for child in tree)
    return res
  return iterable_type(transform_tree(child, fn) for child in tree)
def check_graphs(*args):
  """Check that all the element in args belong to the same graph.

  Args:
    *args: a list of object with a obj.graph property.
  Raises:
    ValueError: if all the elements do not belong to the same graph.
  """
  seen_graph = None
  for pos, sgv in enumerate(args):
    current = sgv.graph
    if current is None:
      # Elements without a graph are ignored.
      continue
    if seen_graph is None:
      seen_graph = current
    elif current is not seen_graph:
      raise ValueError("Argument[{}]: Wrong graph!".format(pos))
def get_unique_graph(tops, check_types=None, none_if_empty=False):
  """Return the unique graph used by the all the elements in tops.

  Args:
    tops: list of elements to check (usually a list of tf.Operation and/or
      tf.Tensor). Or a tf.Graph.
    check_types: check that the element in tops are of given type(s). If None,
      the types (tf.Operation, tf.Tensor) are used.
    none_if_empty: don't raise an error if tops is an empty list, just return
      None.
  Returns:
    The unique graph used by all the tops.
  Raises:
    TypeError: if tops is not a iterable of tf.Operation.
    ValueError: if the graph is not unique.
  """
  # A graph is its own (trivially unique) graph.
  if isinstance(tops, tf_ops.Graph):
    return tops
  if not is_iterable(tops):
    raise TypeError("{} is not iterable".format(type(tops)))
  if check_types is None:
    check_types = (tf_ops.Operation, tf_ops.Tensor)
  elif not is_iterable(check_types):
    # Allow a single type to be passed instead of a tuple.
    check_types = (check_types,)
  g = None
  for op in tops:
    if not isinstance(op, check_types):
      raise TypeError("Expected a type in ({}), got: {}".format(", ".join([str(
          t) for t in check_types]), type(op)))
    # First element fixes the graph; all others must match it identically.
    if g is None:
      g = op.graph
    elif g is not op.graph:
      raise ValueError("Operation {} does not belong to given graph".format(op))
  if g is None and not none_if_empty:
    raise ValueError("Can't find the unique graph of an empty list")
  return g
def make_list_of_op(ops, check_graph=True, allow_graph=True, ignore_ts=False):
  """Convert ops to a list of `tf.Operation`.

  Args:
    ops: can be an iterable of `tf.Operation`, a `tf.Graph` or a single
      operation.
    check_graph: if `True` check if all the operations belong to the same graph.
    allow_graph: if `False` a `tf.Graph` cannot be converted.
    ignore_ts: if True, silently ignore `tf.Tensor`.
  Returns:
    A newly created list of `tf.Operation`.
  Raises:
    TypeError: if ops cannot be converted to a list of `tf.Operation` or,
     if `check_graph` is `True`, if all the ops do not belong to the
     same graph.
  """
  if isinstance(ops, tf_ops.Graph):
    # A graph converts to all of its operations.
    if allow_graph:
      return ops.get_operations()
    else:
      raise TypeError("allow_graph is False: cannot convert a tf.Graph.")
  else:
    if not is_iterable(ops):
      # Wrap a single operation into a list.
      ops = [ops]
    if not ops:
      return []
    if check_graph:
      # When tensors are ignored they must not take part in the graph check.
      check_types = None if ignore_ts else tf_ops.Operation
      get_unique_graph(ops, check_types=check_types)
    return [op for op in ops if isinstance(op, tf_ops.Operation)]
# TODO(fkp): move this function in tf.Graph?
def get_tensors(graph):
  """get all the tensors which are input or output of an op in the graph.

  Args:
    graph: a `tf.Graph`.
  Returns:
    A list of `tf.Tensor`.
  Raises:
    TypeError: if graph is not a `tf.Graph`.
  """
  if not isinstance(graph, tf_ops.Graph):
    raise TypeError("Expected a graph, got: {}".format(type(graph)))
  ts = []
  # Every tensor in a graph is the output of exactly one op, so collecting
  # op outputs covers all tensors.
  for op in graph.get_operations():
    ts += op.outputs
  return ts
def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):
  """Convert ts to a list of `tf.Tensor`.

  Args:
    ts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor.
    check_graph: if `True` check if all the tensors belong to the same graph.
    allow_graph: if `False` a `tf.Graph` cannot be converted.
    ignore_ops: if `True`, silently ignore `tf.Operation`.
  Returns:
    A newly created list of `tf.Tensor`.
  Raises:
    TypeError: if `ts` cannot be converted to a list of `tf.Tensor` or,
     if `check_graph` is `True`, if all the ops do not belong to the same graph.
  """
  if isinstance(ts, tf_ops.Graph):
    # A graph converts to all of its tensors.
    if allow_graph:
      return get_tensors(ts)
    else:
      raise TypeError("allow_graph is False: cannot convert a tf.Graph.")
  else:
    if not is_iterable(ts):
      # Wrap a single tensor into a list.
      ts = [ts]
    if not ts:
      return []
    if check_graph:
      # When operations are ignored they must not take part in the check.
      check_types = None if ignore_ops else tf_ops.Tensor
      get_unique_graph(ts, check_types=check_types)
    return [t for t in ts if isinstance(t, tf_ops.Tensor)]
def get_generating_ops(ts):
  """Return all the generating ops of the tensors in `ts`.

  Args:
    ts: a list of `tf.Tensor`
  Returns:
    A list of all the generating `tf.Operation` of the tensors in `ts`.
  Raises:
    TypeError: if `ts` cannot be converted to a list of `tf.Tensor`.
  """
  ts = make_list_of_t(ts, allow_graph=False)
  # Each tensor's .op is the operation that produced it.
  return [t.op for t in ts]
def get_consuming_ops(ts):
  """Return all the consuming ops of the tensors in ts.

  Args:
    ts: a list of `tf.Tensor`
  Returns:
    A list of all the consuming `tf.Operation` of the tensors in `ts`,
    deduplicated while preserving first-seen order.
  Raises:
    TypeError: if ts cannot be converted to a list of `tf.Tensor`.
  """
  ts = make_list_of_t(ts, allow_graph=False)
  ops = []
  for t in ts:
    for op in t.consumers():
      # Linear membership test keeps the original encounter order.
      if op not in ops:
        ops.append(op)
  return ops
class ControlOutputs(object):
  """The control outputs topology.

  Maintains a reverse index of control dependencies: for each op, the list
  of ops that name it as one of their control inputs.
  """

  def __init__(self, graph):
    """Create a dictionary of control-output dependencies.

    Args:
      graph: a `tf.Graph`.
    Raises:
      TypeError: graph is not a `tf.Graph`.
    """
    if not isinstance(graph, tf_ops.Graph):
      raise TypeError("Expected a tf.Graph, got: {}".format(type(graph)))
    # Maps a tf.Operation to the list of ops that control-depend on it.
    self._control_outputs = {}
    self._graph = graph
    # Graph version at the time of the last build; used to detect staleness.
    self._version = None
    self._build()

  def update(self):
    """Update the control outputs if the graph has changed."""
    if self._version != self._graph.version:
      self._build()
    return self

  def _build(self):
    """Build the control outputs dictionary."""
    self._control_outputs.clear()
    ops = self._graph.get_operations()
    for op in ops:
      for control_input in op.control_inputs:
        if control_input not in self._control_outputs:
          self._control_outputs[control_input] = []
        if op not in self._control_outputs[control_input]:
          self._control_outputs[control_input].append(op)
    self._version = self._graph.version

  def get_all(self):
    """Return the whole control-output dictionary (op -> list of ops)."""
    return self._control_outputs

  def get(self, op):
    """return the control outputs of op."""
    if op in self._control_outputs:
      return self._control_outputs[op]
    else:
      return ()

  @property
  def graph(self):
    """The `tf.Graph` this index was built from."""
    return self._graph
def scope_finalize(scope):
  """Append a trailing "/" to a non-empty scope that lacks one."""
  if scope and not scope.endswith("/"):
    scope += "/"
  return scope
def scope_dirname(scope):
  """Return everything up to and including the last "/", or "" if none."""
  idx = scope.rfind("/")
  return "" if idx == -1 else scope[:idx + 1]
def scope_basename(scope):
  """Return the part after the last "/" (the whole scope if there is none)."""
  return scope.rsplit("/", 1)[-1]
def placeholder_name(t=None, scope=None):
  """Create placeholder name for the graph editor.

  Args:
    t: optional tensor on which the placeholder operation's name will be based
      on
    scope: absolute scope with which to prefix the placeholder's name. None
      means that the scope of t is preserved. "" means the root scope.
  Returns:
    A new placeholder name prefixed by "geph". Note that "geph" stands for
    Graph Editor PlaceHolder. This convention allows to quickly identify the
    placeholder generated by the Graph Editor.
  Raises:
    TypeError: if t is not None or a tf.Tensor.
  """
  if scope is not None:
    scope = scope_finalize(scope)
  if t is not None:
    if not isinstance(t, tf_ops.Tensor):
      # Bug fix: the message previously read "tf.Tenfor".
      raise TypeError("Expected a tf.Tensor, got: {}".format(type(t)))
    op_dirname = scope_dirname(t.op.name)
    op_basename = scope_basename(t.op.name)
    if scope is None:
      # Preserve the scope of the source tensor's op.
      scope = op_dirname
    if op_basename.startswith("geph__"):
      # Already a graph-editor placeholder name; don't prefix twice.
      ph_name = op_basename
    else:
      ph_name = "geph__{}_{}".format(op_basename, t.value_index)
    return scope + ph_name
  else:
    if scope is None:
      scope = ""
    return scope + "geph"
def make_placeholder_from_tensor(t, scope=None):
  """Create a `tf.placeholder` for the Graph Editor.

  Note that the correct graph scope must be set by the calling function.

  Args:
    t: a `tf.Tensor` whose name will be used to create the placeholder
      (see function placeholder_name).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of `t` is preserved. `""` means the root scope.
  Returns:
    A newly created `tf.placeholder` with the same dtype and shape as `t`.
  Raises:
    TypeError: if `t` is not `None` or a `tf.Tensor`.
  """
  return tf_array_ops.placeholder(
      dtype=t.dtype, shape=t.get_shape(), name=placeholder_name(
          t, scope=scope))
def make_placeholder_from_dtype_and_shape(dtype, shape=None, scope=None):
  """Create a tf.placeholder for the Graph Editor.

  Note that the correct graph scope must be set by the calling function.
  The placeholder is named using the function placeholder_name (with no
  tensor argument).

  Args:
    dtype: the tensor type.
    shape: the tensor shape (optional).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of t is preserved. "" means the root scope.
  Returns:
    A newly created tf.placeholder.
  """
  return tf_array_ops.placeholder(
      dtype=dtype, shape=shape, name=placeholder_name(scope=scope))
# Matches dunder attribute names (e.g. "__doc__") so they can be excluded.
_INTERNAL_VARIABLE_RE = re.compile(r"^__\w+__$")


def get_predefined_collection_names():
  """Return all the predefined collection names (values of tf.GraphKeys)."""
  return [getattr(tf_ops.GraphKeys, key) for key in dir(tf_ops.GraphKeys)
          if not _INTERNAL_VARIABLE_RE.match(key)]
def find_corresponding_elem(target, dst_graph, dst_scope="", src_scope=""):
  """Find corresponding op/tensor in a different graph.

  Args:
    target: A `tf.Tensor` or a `tf.Operation` belonging to the original graph.
    dst_graph: The graph in which the corresponding graph element must be found.
    dst_scope: A scope which is prepended to the name to look for.
    src_scope: A scope which is removed from the original of `target` name.
  Returns:
    The corresponding tf.Tensor` or a `tf.Operation`.
  Raises:
    ValueError: if `src_name` does not start with `src_scope`.
    TypeError: if `target` is not a `tf.Tensor` or a `tf.Operation`
    KeyError: If the corresponding graph element cannot be found.
  """
  src_name = target.name
  if src_scope:
    src_scope = scope_finalize(src_scope)
    # Bug fix: this used to call the non-existent str.startswidth(), which
    # raised AttributeError instead of the documented ValueError.
    if not src_name.startswith(src_scope):
      raise ValueError("{} does not start with {}".format(src_name, src_scope))
    src_name = src_name[len(src_scope):]

  dst_name = src_name
  if dst_scope:
    dst_scope = scope_finalize(dst_scope)
    dst_name = dst_scope + dst_name

  if isinstance(target, tf_ops.Tensor):
    return dst_graph.get_tensor_by_name(dst_name)
  if isinstance(target, tf_ops.Operation):
    return dst_graph.get_operation_by_name(dst_name)
  # Bug fix: format the message instead of passing a two-element tuple
  # as the TypeError arguments.
  raise TypeError("Expected tf.Tensor or tf.Operation, got: {}".format(
      type(target)))
def find_corresponding(targets, dst_graph, dst_scope="", src_scope=""):
  """Find corresponding ops/tensors in a different graph.

  `targets` is a Python tree, that is, a nested structure of iterable
  (list, tupple, dictionary) whose leaves are instances of
  `tf.Tensor` or `tf.Operation`

  Args:
    targets: A Python tree containing `tf.Tensor` or `tf.Operation`
      belonging to the original graph.
    dst_graph: The graph in which the corresponding graph element must be found.
    dst_scope: A scope which is prepended to the name to look for.
    src_scope: A scope which is removed from the original of `top` name.
  Returns:
    A Python tree containing the corresponding tf.Tensor` or a `tf.Operation`.
  Raises:
    ValueError: if `src_name` does not start with `src_scope`.
    TypeError: if `top` is not a `tf.Tensor` or a `tf.Operation`
    KeyError: If the corresponding graph element cannot be found.
  """
  # Map every leaf through find_corresponding_elem, preserving structure.
  def func(top):
    return find_corresponding_elem(top, dst_graph, dst_scope, src_scope)
  return transform_tree(targets, func)
| apache-2.0 |
robbwagoner/ansible-modules-core | files/replace.py | 22 | 5459 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Evan Kaufman <evan@digitalflophouse.com
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import tempfile
DOCUMENTATION = """
---
module: replace
author: "Evan Kaufman (@EvanK)"
extends_documentation_fragment: files
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression.
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: true
description:
- The regular expression to look for in the contents of the file.
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
Uses multiline mode, which means C(^) and C($) match the beginning
and end respectively of I(each line) of the file.
replace:
required: false
description:
- The string to replace regexp matches. May contain backreferences
that will get expanded with the regexp capture groups if the regexp
matches. If not set, matches are removed entirely.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
validate:
required: false
description:
- validation to run before copying into place
required: false
default: None
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes
- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644
- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t'
"""
def write_changes(module, contents, dest):
    """Write `contents` to a temporary file, optionally validate it, and
    atomically move it over `dest`.

    The temporary file is removed before any fatal ``fail_json()`` call so
    that failed validations do not leak files in the temp directory
    (``tempfile.mkstemp`` makes the caller responsible for deletion).
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    f = os.fdopen(tmpfd, 'wb')
    f.write(contents)
    f.close()

    validate = module.params.get('validate', None)
    valid = not validate
    if validate:
        if "%s" not in validate:
            # clean up before bailing out; fail_json() never returns
            os.unlink(tmpfile)
            module.fail_json(msg="validate must contain %%s: %s" % (validate))
        (rc, out, err) = module.run_command(validate % tmpfile)
        valid = rc == 0
        if rc != 0:
            # remove the rejected temp file instead of leaking it
            os.unlink(tmpfile)
            module.fail_json(msg='failed to validate: '
                                 'rc:%s error:%s' % (rc, err))
    if valid:
        module.atomic_move(tmpfile, dest)
def check_file_attrs(module, changed, message):
    """Apply the file-common arguments (owner/group/mode/SELinux context)
    and fold any resulting change into the (message, changed) report.

    Returns the possibly-extended message and the updated changed flag.
    """
    file_args = module.load_file_common_arguments(module.params)
    attrs_differ = module.set_file_attributes_if_different(file_args, False)
    if attrs_differ:
        prefix = " and " if changed else ""
        message += prefix + "ownership, perms or SE linux context changed"
        changed = True
    return message, changed
def main():
    # Module entry point. add_file_common_args pulls in the owner/group/mode/
    # se*/follow options that check_file_attrs() applies afterwards.
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(required=True, aliases=['name', 'destfile']),
            regexp=dict(required=True),
            replace=dict(default='', type='str'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        add_file_common_args=True,
        supports_check_mode=True
    )
    params = module.params
    dest = os.path.expanduser(params['dest'])
    if os.path.isdir(dest):
        module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
    if not os.path.exists(dest):
        module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
    else:
        # Read the whole file at once: the regexp is applied to the full
        # contents in MULTILINE mode, not line by line.
        f = open(dest, 'rb')
        contents = f.read()
        f.close()
    mre = re.compile(params['regexp'], re.MULTILINE)
    # result is a (new_contents, number_of_substitutions) tuple
    result = re.subn(mre, params['replace'], contents, 0)
    # Only report "changed" when a substitution actually altered the text;
    # a pattern can match yet replace with identical content.
    if result[1] > 0 and contents != result[0]:
        msg = '%s replacements made' % result[1]
        changed = True
    else:
        msg = ''
        changed = False
    # In check mode we report what would change without touching the file.
    if changed and not module.check_mode:
        if params['backup'] and os.path.exists(dest):
            module.backup_local(dest)
        if params['follow'] and os.path.islink(dest):
            # write through the symlink to the real file
            dest = os.path.realpath(dest)
        write_changes(module, result[0], dest)
    msg, changed = check_file_attrs(module, changed, msg)
    module.exit_json(changed=changed, msg=msg)
# this is magic, see lib/ansible/module_common.py
# (at packaging time Ansible replaces this wildcard import with the module
# boilerplate that provides the AnsibleModule class used in main() above)
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
ZellMechanik-Dresden/dclab | dclab/rtdc_dataset/filter.py | 1 | 9287 | """RT-DC dataset core classes and methods"""
import warnings
import numpy as np
from dclab import definitions as dfn
from .. import downsampling
from ..polygon_filter import PolygonFilter
class NanWarning(UserWarning):
    """Warning category emitted when box filters encounter nan-values."""
class Filter(object):
    def __init__(self, rtdc_ds):
        """Boolean filter arrays for RT-DC measurements

        Parameters
        ----------
        rtdc_ds: instance of RTDCBase
            The RT-DC dataset the filter applies to
        """
        # dictionary of boolean array for box filters
        self._box_filters = {}
        # dictionary of (hash, boolean array) for polygon filters
        self._poly_filters = {}
        # initialize important parameters
        self._init_rtdc_ds(rtdc_ds)
        # initialize properties
        self.reset()

    def __getitem__(self, key):
        """Return the filter for a feature in `self.features`"""
        if key in self.features and dfn.scalar_feature_exists(key):
            if key not in self._box_filters:
                # Generate filters on-the-fly
                self._box_filters[key] = np.ones(self.size, dtype=bool)
        else:
            raise KeyError("Feature not available: '{}'".format(key))
        return self._box_filters[key]

    def _init_rtdc_ds(self, rtdc_ds):
        """Cache dataset size/features and drop filters for removed features"""
        #: Available feature names
        self.features = rtdc_ds.features_scalar
        if hasattr(self, "size") and self.size != len(rtdc_ds):
            raise ValueError("Change of RTDCBase size not supported!")
        self.size = len(rtdc_ds)
        # determine box filters that have been removed
        for key in list(self._box_filters.keys()):
            if key not in self.features:
                self._box_filters.pop(key)
        # determine polygon filters that have been removed
        for pf_id in list(self._poly_filters.keys()):
            pf = PolygonFilter.get_instance_from_id(pf_id)
            if (pf_id in rtdc_ds.config["filtering"]["polygon filters"]
                    and pf.axes[0] in self.features
                    and pf.axes[1] in self.features):
                pass
            else:
                # filter has been removed
                self._poly_filters.pop(pf_id)

    def reset(self):
        """Reset all filters"""
        self._box_filters = {}
        self._poly_filters = {}
        #: All filters combined (see :func:`Filter.update`);
        #: Use this property to filter the features of
        #: :class:`dclab.rtdc_dataset.RTDCBase` instances
        self.all = np.ones(self.size, dtype=bool)
        #: All box filters
        self.box = np.ones(self.size, dtype=bool)
        #: Invalid (nan/inf) events
        self.invalid = np.ones(self.size, dtype=bool)
        #: 1D boolean array for manually excluding events; `False` values
        #: are excluded.
        self.manual = np.ones(self.size, dtype=bool)
        #: Polygon filters
        self.polygon = np.ones(self.size, dtype=bool)
        # old filter configuration of `rtdc_ds`
        self._old_config = {}

    def update(self, rtdc_ds, force=None):
        """Update the filters according to `rtdc_ds.config["filtering"]`

        Parameters
        ----------
        rtdc_ds: dclab.rtdc_dataset.core.RTDCBase
            The measurement to which the filter is applied
        force : list
            A list of feature names that must be refiltered with
            min/max values; defaults to an empty list.

        Notes
        -----
        This function is called when
        :func:`ds.apply_filter <dclab.rtdc_dataset.RTDCBase.apply_filter>`
        is called.
        """
        if force is None:
            # `None` sentinel instead of a mutable `[]` default argument
            # (a shared list default would be the classic Python pitfall).
            force = []
        # re-initialize important parameters
        self._init_rtdc_ds(rtdc_ds)
        # These lists may help us become very fast in the future
        newkeys = []
        oldvals = []
        newvals = []
        cfg_cur = rtdc_ds.config["filtering"]
        cfg_old = self._old_config
        # Determine which data was updated
        for skey in list(cfg_cur.keys()):
            if skey not in cfg_old:
                cfg_old[skey] = None
            if cfg_cur[skey] != cfg_old[skey]:
                newkeys.append(skey)
                oldvals.append(cfg_old[skey])
                newvals.append(cfg_cur[skey])
        # 1. Invalid filters
        self.invalid[:] = True
        if cfg_cur["remove invalid events"]:
            for feat in self.features:
                data = rtdc_ds[feat]
                invalid = np.isinf(data) | np.isnan(data)
                self.invalid &= ~invalid
        # 2. Filter all feature min/max values.
        feat2filter = []
        for k in newkeys:
            # k[:-4] because we want to crop " min" and " max"
            if (dfn.scalar_feature_exists(k[:-4])
                    and (k.endswith(" min") or k.endswith(" max"))):
                feat2filter.append(k[:-4])
        for f in force:
            # add forced features
            if dfn.scalar_feature_exists(f):
                feat2filter.append(f)
            else:
                # Make sure the feature name is valid.
                raise ValueError("Unknown scalar feature name '{}'!".format(f))
        feat2filter = np.unique(feat2filter)
        for feat in feat2filter:
            fstart = feat + " min"
            fend = feat + " max"
            must_be_filtered = (fstart in cfg_cur
                                and fend in cfg_cur
                                and cfg_cur[fstart] != cfg_cur[fend])
            if ((fstart in cfg_cur and fend not in cfg_cur)
                    or (fstart not in cfg_cur and fend in cfg_cur)):
                # User is responsible for setting min and max values!
                raise ValueError("Box filter: Please make sure that both "
                                 "'{}' and '{}' are set!".format(fstart, fend))
            if feat in self.features:
                # Get the current feature filter
                feat_filt = self[feat]
                feat_filt[:] = True
                # If min and max exist and if they are not identical:
                if must_be_filtered:
                    ivalstart = cfg_cur[fstart]
                    ivalend = cfg_cur[fend]
                    if ivalstart > ivalend:
                        msg = "inverting filter: {} > {}".format(fstart, fend)
                        warnings.warn(msg)
                        ivalstart, ivalend = ivalend, ivalstart
                    data = rtdc_ds[feat]
                    # treat nan-values in a special way
                    disnan = np.isnan(data)
                    if np.sum(disnan):
                        # this avoids RuntimeWarnings (invalid value
                        # encountered due to nan-values)
                        feat_filt[disnan] = False
                        idx = ~disnan
                        if not cfg_cur["remove invalid events"]:
                            msg = "Feature '{}' contains ".format(feat) \
                                  + "nan-values! Box filters remove those."
                            warnings.warn(msg, NanWarning)
                    else:
                        idx = slice(0, len(self.all))  # place-holder for [:]
                    feat_filt[idx] &= ivalstart <= data[idx]
                    feat_filt[idx] &= data[idx] <= ivalend
            elif must_be_filtered:
                warnings.warn("Dataset '{}' does ".format(rtdc_ds.identifier)
                              + "not contain the feature '{}'! ".format(feat)
                              + "A box filter has been ignored.")
        # store box filters
        self.box[:] = True
        for feat in self._box_filters:
            self.box &= self._box_filters[feat]
        # 3. Filter with polygon filters
        # check if something has changed
        # perform polygon filtering
        for pf_id in cfg_cur["polygon filters"]:
            pf = PolygonFilter.get_instance_from_id(pf_id)
            if (pf_id not in self._poly_filters
                    or pf.hash != self._poly_filters[pf_id][0]):
                datax = rtdc_ds[pf.axes[0]]
                datay = rtdc_ds[pf.axes[1]]
                self._poly_filters[pf_id] = (pf.hash, pf.filter(datax, datay))
        # store polygon filters
        self.polygon[:] = True
        for pf_id in self._poly_filters:
            self.polygon &= self._poly_filters[pf_id][1]
        # 4. Finally combine all filters and apply "limit events"
        # get a list of all filters
        if cfg_cur["enable filters"]:
            self.all[:] = self.invalid & self.manual & self.polygon & self.box
            # Filter with configuration keyword argument "limit events".
            # This additional step limits the total number of events in
            # self.all.
            if cfg_cur["limit events"] > 0:
                limit = cfg_cur["limit events"]
                sub = self.all[self.all]
                _f, idx = downsampling.downsample_rand(sub,
                                                       samples=limit,
                                                       ret_idx=True)
                sub[~idx] = False
                self.all[self.all] = sub
        else:
            self.all[:] = True
            # Actual filtering is then done during plotting
        self._old_config = rtdc_ds.config.copy()["filtering"]
| gpl-2.0 |
imply/chuu | third_party/closure_linter/closure_linter/common/simplefileflags.py | 285 | 5107 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Determines the list of files to be checked from command line arguments."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import glob
import os
import re
import gflags as flags
FLAGS = flags.FLAGS

flags.DEFINE_multistring(
    'recurse',
    None,
    'Recurse in to the subdirectories of the given path',
    short_name='r')
flags.DEFINE_list(
    'exclude_directories',
    # NOTE: this default was originally written ('_demos') -- a plain string,
    # not a tuple (missing trailing comma). gflags happened to accept it
    # because string defaults to DEFINE_list are split on commas, yielding
    # ['_demos']. A real tuple states the intent; the parsed default value
    # is identical.
    ('_demos',),
    'Exclude the specified directories (only applicable along with -r or '
    '--presubmit)',
    short_name='e')
flags.DEFINE_list(
    'exclude_files',
    # Same missing-comma fix as above: ('deps.js') was just the string.
    ('deps.js',),
    'Exclude the specified files',
    short_name='x')
def MatchesSuffixes(filename, suffixes):
  """Returns whether the given filename matches one of the given suffixes.

  Args:
    filename: Filename to check.
    suffixes: Sequence of suffixes to check.

  Returns:
    Whether the given filename matches one of the given suffixes.
  """
  # rfind returns -1 when there is no dot, in which case the "suffix" is the
  # final character of the name (preserved quirk of the original code).
  return filename[filename.rfind('.'):] in suffixes
def _GetUserSpecifiedFiles(argv, suffixes):
  """Returns files to be linted, specified directly on the command line.

  Can handle the '*' wildcard in filenames, but no other wildcards.

  Args:
    argv: Sequence of command line arguments. The second and following
      arguments are assumed to be files that should be linted.
    suffixes: Expected suffixes for the file type being checked.

  Returns:
    A sequence of files to be linted.
  """
  candidates = []
  for arg in argv[1:]:
    # Expand shell-style '*' patterns; everything else is taken verbatim.
    if '*' in arg:
      candidates.extend(glob.glob(arg))
    else:
      candidates.append(arg)
  return [name for name in candidates if MatchesSuffixes(name, suffixes)]
def _GetRecursiveFiles(suffixes):
  """Returns files to be checked specified by the --recurse flag.

  Args:
    suffixes: Expected suffixes for the file type being checked.

  Returns:
    A list of files to be checked.
  """
  lint_files = []
  # --recurse may be unset (None); treat that the same as an empty list.
  for start in (FLAGS.recurse or []):
    for root, _, filenames in os.walk(start):
      lint_files.extend(os.path.join(root, name)
                        for name in filenames
                        if MatchesSuffixes(name, suffixes))
  return lint_files
def GetAllSpecifiedFiles(argv, suffixes):
  """Returns all files specified by the user on the commandline.

  Args:
    argv: Sequence of command line arguments. The second and following
      arguments are assumed to be files that should be linted.
    suffixes: Expected suffixes for the file type

  Returns:
    All files specified directly or indirectly (via flags) on the command
    line by the user, after exclusion filtering.
  """
  all_files = list(_GetUserSpecifiedFiles(argv, suffixes))
  if FLAGS.recurse:
    all_files.extend(_GetRecursiveFiles(suffixes))
  return FilterFiles(all_files)
def FilterFiles(files):
  """Filters the list of files to be linted by removing any excluded files.

  Filters out files excluded using --exclude_files and --exclude_directories.

  Args:
    files: Sequence of files that needs filtering.

  Returns:
    Set of absolute paths of the files to be linted. (Note: a set, not a
    list -- duplicates are removed.)
  """
  num_files = len(files)

  # Pre-compile one directory-matching regex per excluded directory name.
  ignore_dirs_regexs = []
  for ignore in FLAGS.exclude_directories:
    ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))

  result_files = []
  for f in files:
    add_file = True
    for exclude in FLAGS.exclude_files:
      if f.endswith('/' + exclude) or f == exclude:
        add_file = False
        break
    for ignore in ignore_dirs_regexs:
      if ignore.search(f):
        # Break out of ignore loop so we don't add to
        # filtered files.
        add_file = False
        break
    if add_file:
      # Convert everything to absolute paths so we can easily remove duplicates
      # using a set.
      result_files.append(os.path.abspath(f))

  skipped = num_files - len(result_files)
  if skipped:
    # print() as a function call: identical output under Python 2 for a
    # single argument, and keeps the module importable under Python 3
    # (the original used the py2-only print statement).
    print('Skipping %d file(s).' % skipped)

  return set(result_files)
def GetFileList(argv, file_type, suffixes):
  """Parse the flags and return the list of files to check.

  Args:
    argv: Sequence of command line arguments.
    file_type: Description of the file type being checked. NOTE(review):
      currently unused by this function; kept for caller compatibility.
    suffixes: Sequence of acceptable suffixes for the file type.

  Returns:
    The list of files to check.
  """
  return sorted(GetAllSpecifiedFiles(argv, suffixes))
def IsEmptyArgumentList(argv):
  """Returns whether no files were given on the command line or via --recurse."""
  # De Morgan form of: not (len(argv[1:]) or FLAGS.recurse)
  return not argv[1:] and not FLAGS.recurse
| bsd-3-clause |
Rapptz/discord.py | discord/guild.py | 1 | 94374 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import copy
from typing import (
Any,
ClassVar,
Dict,
List,
NamedTuple,
Sequence,
Set,
Literal,
Optional,
TYPE_CHECKING,
Tuple,
Union,
overload,
)
from . import utils, abc
from .role import Role
from .member import Member, VoiceState
from .emoji import Emoji
from .errors import InvalidData
from .permissions import PermissionOverwrite
from .colour import Colour
from .errors import InvalidArgument, ClientException
from .channel import *
from .channel import _guild_channel_factory
from .enums import (
AuditLogAction,
VideoQualityMode,
VoiceRegion,
ChannelType,
try_enum,
VerificationLevel,
ContentFilter,
NotificationLevel,
NSFWLevel,
)
from .mixins import Hashable
from .user import User
from .invite import Invite
from .iterators import AuditLogIterator, MemberIterator
from .widget import Widget
from .asset import Asset
from .flags import SystemChannelFlags
from .integrations import Integration, _integration_factory
from .stage_instance import StageInstance
from .threads import Thread
__all__ = (
'Guild',
)
MISSING = utils.MISSING
if TYPE_CHECKING:
from .abc import Snowflake, SnowflakeTime
from .types.guild import Ban as BanPayload, Guild as GuildPayload, MFALevel
from .types.threads import (
Thread as ThreadPayload,
)
from .types.voice import GuildVoiceState
from .permissions import Permissions
from .channel import VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel
from .template import Template
from .webhook import Webhook
from .state import ConnectionState
from .voice_client import VoiceProtocol
import datetime
VocalGuildChannel = Union[VoiceChannel, StageChannel]
GuildChannel = Union[VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel]
ByCategoryItem = Tuple[Optional[CategoryChannel], List[GuildChannel]]
class BanEntry(NamedTuple):
    # A single guild ban: the audit-log reason (may be None) + the banned user.
    reason: Optional[str]
    user: User
class _GuildLimit(NamedTuple):
    # Per-premium-tier guild limits as used in Guild._PREMIUM_GUILD_LIMITS:
    # emoji slot count, max voice bitrate, and max upload filesize.
    emoji: int
    bitrate: float
    filesize: int
class Guild(Hashable):
"""Represents a Discord guild.
This is referred to as a "server" in the official Discord UI.
.. container:: operations
.. describe:: x == y
Checks if two guilds are equal.
.. describe:: x != y
Checks if two guilds are not equal.
.. describe:: hash(x)
Returns the guild's hash.
.. describe:: str(x)
Returns the guild's name.
Attributes
----------
name: :class:`str`
The guild name.
emojis: Tuple[:class:`Emoji`, ...]
All emojis that the guild owns.
region: :class:`VoiceRegion`
The region the guild belongs on. There is a chance that the region
will be a :class:`str` if the value is not recognised by the enumerator.
afk_timeout: :class:`int`
The timeout to get sent to the AFK channel.
afk_channel: Optional[:class:`VoiceChannel`]
The channel that denotes the AFK channel. ``None`` if it doesn't exist.
id: :class:`int`
The guild's ID.
owner_id: :class:`int`
The guild owner's ID. Use :attr:`Guild.owner` instead.
unavailable: :class:`bool`
Indicates if the guild is unavailable. If this is ``True`` then the
reliability of other attributes outside of :attr:`Guild.id` is slim and they might
all be ``None``. It is best to not do anything with the guild if it is unavailable.
Check the :func:`on_guild_unavailable` and :func:`on_guild_available` events.
max_presences: Optional[:class:`int`]
The maximum amount of presences for the guild.
max_members: Optional[:class:`int`]
The maximum amount of members for the guild.
.. note::
This attribute is only available via :meth:`.Client.fetch_guild`.
max_video_channel_users: Optional[:class:`int`]
The maximum amount of users in a video channel.
.. versionadded:: 1.4
description: Optional[:class:`str`]
The guild's description.
mfa_level: :class:`int`
Indicates the guild's two factor authorisation level. If this value is 0 then
the guild does not require 2FA for their administrative members. If the value is
1 then they do.
verification_level: :class:`VerificationLevel`
The guild's verification level.
explicit_content_filter: :class:`ContentFilter`
The guild's explicit content filter.
default_notifications: :class:`NotificationLevel`
The guild's notification settings.
features: List[:class:`str`]
A list of features that the guild has. They are currently as follows:
- ``VIP_REGIONS``: Guild has VIP voice regions
- ``VANITY_URL``: Guild can have a vanity invite URL (e.g. discord.gg/discord-api)
- ``INVITE_SPLASH``: Guild's invite page can have a special splash.
- ``VERIFIED``: Guild is a verified server.
- ``PARTNERED``: Guild is a partnered server.
- ``MORE_EMOJI``: Guild is allowed to have more than 50 custom emoji.
- ``DISCOVERABLE``: Guild shows up in Server Discovery.
- ``FEATURABLE``: Guild is able to be featured in Server Discovery.
- ``COMMUNITY``: Guild is a community server.
- ``COMMERCE``: Guild can sell things using store channels.
- ``PUBLIC``: Guild is a public guild.
- ``NEWS``: Guild can create news channels.
- ``BANNER``: Guild can upload and use a banner. (i.e. :attr:`.banner`)
- ``ANIMATED_ICON``: Guild can upload an animated icon.
- ``PUBLIC_DISABLED``: Guild cannot be public.
- ``WELCOME_SCREEN_ENABLED``: Guild has enabled the welcome screen
- ``MEMBER_VERIFICATION_GATE_ENABLED``: Guild has Membership Screening enabled.
- ``PREVIEW_ENABLED``: Guild can be viewed before being accepted via Membership Screening.
premium_tier: :class:`int`
The premium tier for this guild. Corresponds to "Nitro Server" in the official UI.
The number goes from 0 to 3 inclusive.
premium_subscription_count: :class:`int`
The number of "boosts" this guild currently has.
preferred_locale: Optional[:class:`str`]
The preferred locale for the guild. Used when filtering Server Discovery
results to a specific language.
nsfw_level: :class:`NSFWLevel`
The guild's NSFW level.
.. versionadded:: 2.0
"""
__slots__ = (
'afk_timeout',
'afk_channel',
'name',
'id',
'unavailable',
'region',
'owner_id',
'mfa_level',
'emojis',
'features',
'verification_level',
'explicit_content_filter',
'default_notifications',
'description',
'max_presences',
'max_members',
'max_video_channel_users',
'premium_tier',
'premium_subscription_count',
'preferred_locale',
'nsfw_level',
'_members',
'_channels',
'_icon',
'_banner',
'_state',
'_roles',
'_member_count',
'_large',
'_splash',
'_voice_states',
'_system_channel_id',
'_system_channel_flags',
'_discovery_splash',
'_rules_channel_id',
'_public_updates_channel_id',
'_stage_instances',
'_threads',
)
_PREMIUM_GUILD_LIMITS: ClassVar[Dict[Optional[int], _GuildLimit]] = {
None: _GuildLimit(emoji=50, bitrate=96e3, filesize=8388608),
0: _GuildLimit(emoji=50, bitrate=96e3, filesize=8388608),
1: _GuildLimit(emoji=100, bitrate=128e3, filesize=8388608),
2: _GuildLimit(emoji=150, bitrate=256e3, filesize=52428800),
3: _GuildLimit(emoji=250, bitrate=384e3, filesize=104857600),
}
    def __init__(self, *, data: GuildPayload, state: ConnectionState):
        # All caches are keyed by snowflake ID; the payload is unpacked
        # afterwards by _from_data().
        self._channels: Dict[int, GuildChannel] = {}
        self._members: Dict[int, Member] = {}
        self._voice_states: Dict[int, VoiceState] = {}
        self._threads: Dict[int, Thread] = {}
        self._state: ConnectionState = state
        self._from_data(data)
    def _add_channel(self, channel: GuildChannel, /) -> None:
        # Internal cache mutators; they never raise on duplicates/missing keys.
        self._channels[channel.id] = channel
    def _remove_channel(self, channel: Snowflake, /) -> None:
        self._channels.pop(channel.id, None)
    def _voice_state_for(self, user_id: int, /) -> Optional[VoiceState]:
        # None when the user has no cached voice state in this guild.
        return self._voice_states.get(user_id)
    def _add_member(self, member: Member, /) -> None:
        self._members[member.id] = member
    def _store_thread(self, payload: ThreadPayload, /) -> Thread:
        # Build a Thread from a raw payload, cache it, and return it.
        thread = Thread(guild=self, state=self._state, data=payload)
        self._threads[thread.id] = thread
        return thread
    def _remove_member(self, member: Snowflake, /) -> None:
        self._members.pop(member.id, None)
    def _add_thread(self, thread: Thread, /) -> None:
        self._threads[thread.id] = thread
    def _remove_thread(self, thread: Snowflake, /) -> None:
        self._threads.pop(thread.id, None)
    def _clear_threads(self) -> None:
        self._threads.clear()
    def _remove_threads_by_channel(self, channel_id: int) -> None:
        # Drop every cached thread whose parent channel matches channel_id.
        to_remove = [k for k, t in self._threads.items() if t.parent_id == channel_id]
        for k in to_remove:
            del self._threads[k]
    def _filter_threads(self, channel_ids: Set[int]) -> Dict[int, Thread]:
        # Remove and return all cached threads parented to the given channels.
        to_remove: Dict[int, Thread] = {k: t for k, t in self._threads.items() if t.parent_id in channel_ids}
        for k in to_remove:
            del self._threads[k]
        return to_remove
    def __str__(self) -> str:
        # Partial/unavailable guilds may have no name; stringify to ''.
        return self.name or ''
    def __repr__(self) -> str:
        # Debug representation; member_count may be absent for unavailable
        # guilds, hence the getattr default.
        attrs = (
            ('id', self.id),
            ('name', self.name),
            ('shard_id', self.shard_id),
            ('chunked', self.chunked),
            ('member_count', getattr(self, '_member_count', None)),
        )
        inner = ' '.join('%s=%r' % t for t in attrs)
        return f'<Guild {inner}>'
    def _update_voice_state(self, data: GuildVoiceState, channel_id: int) -> Tuple[Optional[Member], VoiceState, VoiceState]:
        """Update the cached voice state for a user from a gateway payload.

        Returns a ``(member, before, after)`` tuple; ``member`` may be
        ``None`` when the user is not cached and the payload carries no
        usable member data.
        """
        user_id = int(data['user_id'])
        channel = self.get_channel(channel_id)
        try:
            # check if we should remove the voice state from cache
            if channel is None:
                after = self._voice_states.pop(user_id)
            else:
                after = self._voice_states[user_id]
            before = copy.copy(after)
            after._update(data, channel)
        except KeyError:
            # if we're here then we're getting added into the cache
            after = VoiceState(data=data, channel=channel)
            before = VoiceState(data=data, channel=None)
            self._voice_states[user_id] = after
        member = self.get_member(user_id)
        if member is None:
            try:
                # fall back to the member object embedded in the payload
                member = Member(data=data['member'], state=self._state, guild=self)
            except KeyError:
                member = None
        return member, before, after
    def _add_role(self, role: Role, /) -> None:
        # roles get added to the bottom (position 1, pos 0 is @everyone)
        # so since self.roles has the @everyone role, we can't increment
        # its position because it's stuck at position 0. Luckily x += False
        # is equivalent to adding 0. So we cast the position to a bool and
        # increment it.
        for r in self._roles.values():
            r.position += not r.is_default()
        self._roles[role.id] = role
    def _remove_role(self, role_id: int, /) -> Role:
        # this raises KeyError if it fails..
        role = self._roles.pop(role_id)
        # since it didn't, we can change the positions now
        # basically the same as above except we only decrement
        # the position if we're above the role we deleted.
        # (bool subtraction: True == 1, False == 0)
        for r in self._roles.values():
            r.position -= r.position > role.position
        return role
    def _from_data(self, guild: GuildPayload) -> None:
        """Populate guild attributes from a raw guild payload."""
        # according to Stan, this is always available even if the guild is unavailable
        # I don't have this guarantee when someone updates the guild.
        member_count = guild.get('member_count', None)
        if member_count is not None:
            self._member_count: int = member_count
        self.name: str = guild.get('name')
        self.region: VoiceRegion = try_enum(VoiceRegion, guild.get('region'))
        self.verification_level: VerificationLevel = try_enum(VerificationLevel, guild.get('verification_level'))
        self.default_notifications: NotificationLevel = try_enum(
            NotificationLevel, guild.get('default_message_notifications')
        )
        self.explicit_content_filter: ContentFilter = try_enum(ContentFilter, guild.get('explicit_content_filter', 0))
        self.afk_timeout: int = guild.get('afk_timeout')
        self._icon: Optional[str] = guild.get('icon')
        self._banner: Optional[str] = guild.get('banner')
        self.unavailable: bool = guild.get('unavailable', False)
        self.id: int = int(guild['id'])
        self._roles: Dict[int, Role] = {}
        state = self._state  # speed up attribute access
        for r in guild.get('roles', []):
            role = Role(guild=self, data=r, state=state)
            self._roles[role.id] = role
        self.mfa_level: MFALevel = guild.get('mfa_level')
        self.emojis: Tuple[Emoji, ...] = tuple(map(lambda d: state.store_emoji(self, d), guild.get('emojis', [])))
        self.features: List[str] = guild.get('features', [])
        self._splash: Optional[str] = guild.get('splash')
        self._system_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'system_channel_id')
        self.description: Optional[str] = guild.get('description')
        self.max_presences: Optional[int] = guild.get('max_presences')
        self.max_members: Optional[int] = guild.get('max_members')
        self.max_video_channel_users: Optional[int] = guild.get('max_video_channel_users')
        self.premium_tier: int = guild.get('premium_tier', 0)
        self.premium_subscription_count: int = guild.get('premium_subscription_count') or 0
        self._system_channel_flags: int = guild.get('system_channel_flags', 0)
        self.preferred_locale: Optional[str] = guild.get('preferred_locale')
        self._discovery_splash: Optional[str] = guild.get('discovery_splash')
        self._rules_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'rules_channel_id')
        self._public_updates_channel_id: Optional[int] = utils._get_as_snowflake(guild, 'public_updates_channel_id')
        self.nsfw_level: NSFWLevel = try_enum(NSFWLevel, guild.get('nsfw_level', 0))
        self._stage_instances: Dict[int, StageInstance] = {}
        for s in guild.get('stage_instances', []):
            stage_instance = StageInstance(guild=self, data=s, state=state)
            self._stage_instances[stage_instance.id] = stage_instance
        # Only cache members when the member-cache flags request it; our own
        # member is always cached so that Guild.me works.
        cache_joined = self._state.member_cache_flags.joined
        self_id = self._state.self_id
        for mdata in guild.get('members', []):
            member = Member(data=mdata, guild=self, state=state)
            if cache_joined or member.id == self_id:
                self._add_member(member)
        self._sync(guild)
        # 250 is the library's "large guild" threshold (see Guild.large).
        self._large: Optional[bool] = None if member_count is None else self._member_count >= 250
        self.owner_id: Optional[int] = utils._get_as_snowflake(guild, 'owner_id')
        self.afk_channel: Optional[VocalGuildChannel] = self.get_channel(utils._get_as_snowflake(guild, 'afk_channel_id'))  # type: ignore
        for obj in guild.get('voice_states', []):
            self._update_voice_state(obj, int(obj['channel_id']))
    # TODO: refactor/remove?
    def _sync(self, data: GuildPayload) -> None:
        """Apply the presence/channel/thread parts of a guild payload."""
        try:
            self._large = data['large']
        except KeyError:
            pass
        # shared empty tuple avoids re-allocating for every presence update
        empty_tuple = tuple()
        for presence in data.get('presences', []):
            user_id = int(presence['user']['id'])
            member = self.get_member(user_id)
            if member is not None:
                member._presence_update(presence, empty_tuple)  # type: ignore
        if 'channels' in data:
            channels = data['channels']
            for c in channels:
                # factory is None for unknown channel types; those are skipped
                factory, ch_type = _guild_channel_factory(c['type'])
                if factory:
                    self._add_channel(factory(guild=self, data=c, state=self._state))  # type: ignore
        if 'threads' in data:
            threads = data['threads']
            for thread in threads:
                self._add_thread(Thread(guild=self, state=self._state, data=thread))
    @property
    def channels(self) -> List[GuildChannel]:
        """List[:class:`abc.GuildChannel`]: A list of channels that belongs to this guild."""
        return list(self._channels.values())
    @property
    def threads(self) -> List[Thread]:
        """List[:class:`Thread`]: A list of threads that you have permission to view.
        .. versionadded:: 2.0
        """
        return list(self._threads.values())
    @property
    def large(self) -> bool:
        """:class:`bool`: Indicates if the guild is a 'large' guild.
        A large guild is defined as having more than ``large_threshold`` count
        members, which for this library is set to the maximum of 250.
        """
        if self._large is None:
            # Fall back to the payload member count, then to the size of the
            # member cache when the count was never provided.
            try:
                return self._member_count >= 250
            except AttributeError:
                return len(self._members) >= 250
        return self._large
    @property
    def voice_channels(self) -> List[VoiceChannel]:
        """List[:class:`VoiceChannel`]: A list of voice channels that belongs to this guild.
        This is sorted by the position and are in UI order from top to bottom.
        """
        r = [ch for ch in self._channels.values() if isinstance(ch, VoiceChannel)]
        r.sort(key=lambda c: (c.position, c.id))
        return r
    @property
    def stage_channels(self) -> List[StageChannel]:
        """List[:class:`StageChannel`]: A list of stage channels that belongs to this guild.
        .. versionadded:: 1.7
        This is sorted by the position and are in UI order from top to bottom.
        """
        r = [ch for ch in self._channels.values() if isinstance(ch, StageChannel)]
        r.sort(key=lambda c: (c.position, c.id))
        return r
    @property
    def me(self) -> Member:
        """:class:`Member`: Similar to :attr:`Client.user` except an instance of :class:`Member`.
        This is essentially used to get the member version of yourself.
        """
        self_id = self._state.user.id
        # The self member is *always* cached
        return self.get_member(self_id)  # type: ignore
    @property
    def voice_client(self) -> Optional[VoiceProtocol]:
        """Optional[:class:`VoiceProtocol`]: Returns the :class:`VoiceProtocol` associated with this guild, if any."""
        return self._state._get_voice_client(self.id)
    @property
    def text_channels(self) -> List[TextChannel]:
        """List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
        This is sorted by the position and are in UI order from top to bottom.
        """
        r = [ch for ch in self._channels.values() if isinstance(ch, TextChannel)]
        r.sort(key=lambda c: (c.position, c.id))
        return r
    @property
    def categories(self) -> List[CategoryChannel]:
        """List[:class:`CategoryChannel`]: A list of categories that belongs to this guild.
        This is sorted by the position and are in UI order from top to bottom.
        """
        r = [ch for ch in self._channels.values() if isinstance(ch, CategoryChannel)]
        r.sort(key=lambda c: (c.position, c.id))
        return r
def by_category(self) -> List[ByCategoryItem]:
"""Returns every :class:`CategoryChannel` and their associated channels.
These channels and categories are sorted in the official Discord UI order.
If the channels do not have a category, then the first element of the tuple is
``None``.
Returns
--------
List[Tuple[Optional[:class:`CategoryChannel`], List[:class:`abc.GuildChannel`]]]:
The categories and their associated channels.
"""
grouped: Dict[Optional[int], List[GuildChannel]] = {}
for channel in self._channels.values():
if isinstance(channel, CategoryChannel):
grouped.setdefault(channel.id, [])
continue
try:
grouped[channel.category_id].append(channel)
except KeyError:
grouped[channel.category_id] = [channel]
def key(t: ByCategoryItem) -> Tuple[Tuple[int, int], List[GuildChannel]]:
k, v = t
return ((k.position, k.id) if k else (-1, -1), v)
_get = self._channels.get
as_list: List[ByCategoryItem] = [(_get(k), v) for k, v in grouped.items()] # type: ignore
as_list.sort(key=key)
for _, channels in as_list:
channels.sort(key=lambda c: (c._sorting_bucket, c.position, c.id))
return as_list
def _resolve_channel(self, id: Optional[int], /) -> Optional[Union[GuildChannel, Thread]]:
if id is None:
return
return self._channels.get(id) or self._threads.get(id)
def get_channel(self, channel_id: int, /) -> Optional[GuildChannel]:
"""Returns a channel with the given ID.
.. note::
This does *not* search for threads.
Parameters
-----------
channel_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`.abc.GuildChannel`]
The returned channel or ``None`` if not found.
"""
return self._channels.get(channel_id)
def get_thread(self, thread_id: int, /) -> Optional[Thread]:
"""Returns a thread with the given ID.
.. versionadded:: 2.0
Parameters
-----------
thread_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Thread`]
The returned thread or ``None`` if not found.
"""
return self._threads.get(thread_id)
@property
def system_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Returns the guild's channel used for system messages.
If no channel is set, then this returns ``None``.
"""
channel_id = self._system_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def system_channel_flags(self) -> SystemChannelFlags:
""":class:`SystemChannelFlags`: Returns the guild's system channel settings."""
return SystemChannelFlags._from_value(self._system_channel_flags)
@property
def rules_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Return's the guild's channel used for the rules.
The guild must be a Community guild.
If no channel is set, then this returns ``None``.
.. versionadded:: 1.3
"""
channel_id = self._rules_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def public_updates_channel(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: Return's the guild's channel where admins and
moderators of the guilds receive notices from Discord. The guild must be a
Community guild.
If no channel is set, then this returns ``None``.
.. versionadded:: 1.4
"""
channel_id = self._public_updates_channel_id
return channel_id and self._channels.get(channel_id) # type: ignore
@property
def emoji_limit(self) -> int:
""":class:`int`: The maximum number of emoji slots this guild has."""
more_emoji = 200 if 'MORE_EMOJI' in self.features else 50
return max(more_emoji, self._PREMIUM_GUILD_LIMITS[self.premium_tier].emoji)
@property
def bitrate_limit(self) -> float:
""":class:`float`: The maximum bitrate for voice channels this guild can have."""
vip_guild = self._PREMIUM_GUILD_LIMITS[1].bitrate if 'VIP_REGIONS' in self.features else 96e3
return max(vip_guild, self._PREMIUM_GUILD_LIMITS[self.premium_tier].bitrate)
@property
def filesize_limit(self) -> int:
""":class:`int`: The maximum number of bytes files can have when uploaded to this guild."""
return self._PREMIUM_GUILD_LIMITS[self.premium_tier].filesize
@property
def members(self) -> List[Member]:
"""List[:class:`Member`]: A list of members that belong to this guild."""
return list(self._members.values())
def get_member(self, user_id: int) -> Optional[Member]:
"""Returns a member with the given ID.
Parameters
-----------
user_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Member`]
The member or ``None`` if not found.
"""
return self._members.get(user_id)
@property
def premium_subscribers(self) -> List[Member]:
"""List[:class:`Member`]: A list of members who have "boosted" this guild."""
return [member for member in self.members if member.premium_since is not None]
@property
def roles(self) -> List[Role]:
"""List[:class:`Role`]: Returns a :class:`list` of the guild's roles in hierarchy order.
The first element of this list will be the lowest role in the
hierarchy.
"""
return sorted(self._roles.values())
def get_role(self, role_id: int, /) -> Optional[Role]:
"""Returns a role with the given ID.
Parameters
-----------
role_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`Role`]
The role or ``None`` if not found.
"""
return self._roles.get(role_id)
@property
def default_role(self) -> Role:
""":class:`Role`: Gets the @everyone role that all members have by default."""
# The @everyone role is *always* given
return self.get_role(self.id) # type: ignore
@property
def premium_subscriber_role(self) -> Optional[Role]:
"""Optional[:class:`Role`]: Gets the premium subscriber role, AKA "boost" role, in this guild.
.. versionadded:: 1.6
"""
for role in self._roles.values():
if role.is_premium_subscriber():
return role
return None
@property
def self_role(self) -> Optional[Role]:
"""Optional[:class:`Role`]: Gets the role associated with this client's user, if any.
.. versionadded:: 1.6
"""
self_id = self._state.self_id
for role in self._roles.values():
tags = role.tags
if tags and tags.bot_id == self_id:
return role
return None
@property
def stage_instances(self) -> List[StageInstance]:
"""List[:class:`StageInstance`]: Returns a :class:`list` of the guild's stage instances that
are currently running.
.. versionadded:: 2.0
"""
return list(self._stage_instances.values())
def get_stage_instance(self, stage_instance_id: int, /) -> Optional[StageInstance]:
"""Returns a stage instance with the given ID.
.. versionadded:: 2.0
Parameters
-----------
stage_instance_id: :class:`int`
The ID to search for.
Returns
--------
Optional[:class:`StageInstance`]
The stage instance or ``None`` if not found.
"""
return self._stage_instances.get(stage_instance_id)
@property
def owner(self) -> Optional[Member]:
"""Optional[:class:`Member`]: The member that owns the guild."""
return self.get_member(self.owner_id) # type: ignore
@property
def icon(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._icon is None:
return None
return Asset._from_guild_icon(self._state, self.id, self._icon)
@property
def banner(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's banner asset, if available."""
if self._banner is None:
return None
return Asset._from_guild_image(self._state, self.id, self._banner, path='banners')
@property
def splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's invite splash asset, if available."""
if self._splash is None:
return None
return Asset._from_guild_image(self._state, self.id, self._splash, path='splashes')
@property
def discovery_splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's discovery splash asset, if available."""
if self._discovery_splash is None:
return None
return Asset._from_guild_image(self._state, self.id, self._discovery_splash, path='discovery-splashes')
@property
def member_count(self) -> int:
""":class:`int`: Returns the true member count regardless of it being loaded fully or not.
.. warning::
Due to a Discord limitation, in order for this attribute to remain up-to-date and
accurate, it requires :attr:`Intents.members` to be specified.
"""
return self._member_count
@property
def chunked(self) -> bool:
""":class:`bool`: Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members.
"""
count = getattr(self, '_member_count', None)
if count is None:
return False
return count == len(self._members)
@property
def shard_id(self) -> int:
""":class:`int`: Returns the shard ID for this guild if applicable."""
count = self._state.shard_count
if count is None:
return 0
return (self.id >> 22) % count
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the guild's creation time in UTC."""
return utils.snowflake_time(self.id)
def get_member_named(self, name: str, /) -> Optional[Member]:
"""Returns the first member found that matches the name provided.
The name can have an optional discriminator argument, e.g. "Jake#0001"
or "Jake" will both do the lookup. However the former will give a more
precise result. Note that the discriminator must have all 4 digits
for this to work.
If a nickname is passed, then it is looked up via the nickname. Note
however, that a nickname + discriminator combo will not lookup the nickname
but rather the username + discriminator combo due to nickname + discriminator
not being unique.
If no member is found, ``None`` is returned.
Parameters
-----------
name: :class:`str`
The name of the member to lookup with an optional discriminator.
Returns
--------
Optional[:class:`Member`]
The member in this guild with the associated name. If not found
then ``None`` is returned.
"""
result = None
members = self.members
if len(name) > 5 and name[-5] == '#':
# The 5 length is checking to see if #0000 is in the string,
# as a#0000 has a length of 6, the minimum for a potential
# discriminator lookup.
potential_discriminator = name[-4:]
# do the actual lookup and return if found
# if it isn't found then we'll do a full name lookup below.
result = utils.get(members, name=name[:-5], discriminator=potential_discriminator)
if result is not None:
return result
def pred(m: Member) -> bool:
return m.nick == name or m.name == name
return utils.find(pred, members)
def _create_channel(
self,
name: str,
channel_type: ChannelType,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
category: Optional[Snowflake] = None,
**options: Any,
):
if overwrites is MISSING:
overwrites = {}
elif not isinstance(overwrites, dict):
raise InvalidArgument('overwrites parameter expects a dict.')
perms = []
for target, perm in overwrites.items():
if not isinstance(perm, PermissionOverwrite):
raise InvalidArgument(f'Expected PermissionOverwrite received {perm.__class__.__name__}')
allow, deny = perm.pair()
payload = {'allow': allow.value, 'deny': deny.value, 'id': target.id}
if isinstance(target, Role):
payload['type'] = abc._Overwrites.ROLE
else:
payload['type'] = abc._Overwrites.MEMBER
perms.append(payload)
parent_id = category.id if category else None
return self._state.http.create_channel(
self.id, channel_type.value, name=name, parent_id=parent_id, permission_overwrites=perms, **options
)
async def create_text_channel(
self,
name: str,
*,
reason: Optional[str] = None,
category: Optional[CategoryChannel] = None,
position: int = MISSING,
topic: str = MISSING,
slowmode_delay: int = MISSING,
nsfw: bool = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
) -> TextChannel:
"""|coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: :class:`str`
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel, in seconds.
The maximum value possible is `21600`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
options = {}
if position is not MISSING:
options['position'] = position
if topic is not MISSING:
options['topic'] = topic
if slowmode_delay is not MISSING:
options['rate_limit_per_user'] = slowmode_delay
if nsfw is not MISSING:
options['nsfw'] = nsfw
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.text, category=category, reason=reason, **options
)
channel = TextChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_voice_channel(
self,
name: str,
*,
reason: Optional[str] = None,
category: Optional[CategoryChannel] = None,
position: int = MISSING,
bitrate: int = MISSING,
user_limit: int = MISSING,
rtc_region: Optional[VoiceRegion] = MISSING,
video_quality_mode: VideoQualityMode = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
) -> VoiceChannel:
"""|coro|
This is similar to :meth:`create_text_channel` except makes a :class:`VoiceChannel` instead.
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
bitrate: :class:`int`
The channel's preferred audio bitrate in bits per second.
user_limit: :class:`int`
The channel's limit for number of members that can be in a voice channel.
rtc_region: Optional[:class:`VoiceRegion`]
The region for the voice channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
.. versionadded:: 1.7
video_quality_mode: :class:`VideoQualityMode`
The camera video quality for the voice channel's participants.
.. versionadded:: 2.0
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`VoiceChannel`
The channel that was just created.
"""
options = {}
if position is not MISSING:
options['position'] = position
if bitrate is not MISSING:
options['bitrate'] = bitrate
if user_limit is not MISSING:
options['user_limit'] = user_limit
if rtc_region is not MISSING:
options['rtc_region'] = None if rtc_region is None else str(rtc_region)
if video_quality_mode is not MISSING:
options['video_quality_mode'] = video_quality_mode.value
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.voice, category=category, reason=reason, **options
)
channel = VoiceChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_stage_channel(
self,
name: str,
*,
topic: str,
position: int = MISSING,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
category: Optional[CategoryChannel] = None,
reason: Optional[str] = None,
) -> StageChannel:
"""|coro|
This is similar to :meth:`create_text_channel` except makes a :class:`StageChannel` instead.
.. versionadded:: 1.7
Parameters
-----------
name: :class:`str`
The channel's name.
topic: :class:`str`
The new channel's topic.
overwrites: Dict[Union[:class:`Role`, :class:`Member`], :class:`PermissionOverwrite`]
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`StageChannel`
The channel that was just created.
"""
options: Dict[str, Any] = {
'topic': topic,
}
if position is not MISSING:
options['position'] = position
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.stage_voice, category=category, reason=reason, **options
)
channel = StageChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
async def create_category(
self,
name: str,
*,
overwrites: Dict[Union[Role, Member], PermissionOverwrite] = MISSING,
reason: Optional[str] = None,
position: int = MISSING,
) -> CategoryChannel:
"""|coro|
Same as :meth:`create_text_channel` except makes a :class:`CategoryChannel` instead.
.. note::
The ``category`` parameter is not supported in this function since categories
cannot have categories.
Raises
------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`CategoryChannel`
The channel that was just created.
"""
options: Dict[str, Any] = {}
if position is not MISSING:
options['position'] = position
data = await self._create_channel(
name, overwrites=overwrites, channel_type=ChannelType.category, reason=reason, **options
)
channel = CategoryChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel
create_category_channel = create_category
async def leave(self) -> None:
"""|coro|
Leaves the guild.
.. note::
You cannot leave the guild that you own, you must delete it instead
via :meth:`delete`.
Raises
--------
HTTPException
Leaving the guild failed.
"""
await self._state.http.leave_guild(self.id)
async def delete(self) -> None:
"""|coro|
Deletes the guild. You must be the guild owner to delete the
guild.
Raises
--------
HTTPException
Deleting the guild failed.
Forbidden
You do not have permissions to delete the guild.
"""
await self._state.http.delete_guild(self.id)
    async def edit(
        self,
        *,
        reason: Optional[str] = MISSING,
        name: str = MISSING,
        description: Optional[str] = MISSING,
        icon: Optional[bytes] = MISSING,
        banner: Optional[bytes] = MISSING,
        splash: Optional[bytes] = MISSING,
        discovery_splash: Optional[bytes] = MISSING,
        community: bool = MISSING,
        region: Optional[Union[str, VoiceRegion]] = MISSING,
        afk_channel: Optional[VoiceChannel] = MISSING,
        owner: Snowflake = MISSING,
        afk_timeout: int = MISSING,
        default_notifications: NotificationLevel = MISSING,
        verification_level: VerificationLevel = MISSING,
        explicit_content_filter: ContentFilter = MISSING,
        vanity_code: str = MISSING,
        system_channel: Optional[TextChannel] = MISSING,
        system_channel_flags: SystemChannelFlags = MISSING,
        preferred_locale: str = MISSING,
        rules_channel: Optional[TextChannel] = MISSING,
        public_updates_channel: Optional[TextChannel] = MISSING,
    ) -> None:
        r"""|coro|
        Edits the guild.
        You must have the :attr:`~Permissions.manage_guild` permission
        to edit the guild.
        .. versionchanged:: 1.4
            The `rules_channel` and `public_updates_channel` keyword-only parameters were added.
        .. versionchanged:: 2.0
            The `discovery_splash` and `community` keyword-only parameters were added.
        Parameters
        ----------
        name: :class:`str`
            The new name of the guild.
        description: Optional[:class:`str`]
            The new description of the guild. Could be ``None`` for no description.
            This is only available to guilds that contain ``PUBLIC`` in :attr:`Guild.features`.
        icon: :class:`bytes`
            A :term:`py:bytes-like object` representing the icon. Only PNG/JPEG is supported.
            GIF is only available to guilds that contain ``ANIMATED_ICON`` in :attr:`Guild.features`.
            Could be ``None`` to denote removal of the icon.
        banner: :class:`bytes`
            A :term:`py:bytes-like object` representing the banner.
            Could be ``None`` to denote removal of the banner. This is only available to guilds that contain
            ``BANNER`` in :attr:`Guild.features`.
        splash: :class:`bytes`
            A :term:`py:bytes-like object` representing the invite splash.
            Only PNG/JPEG supported. Could be ``None`` to denote removing the
            splash. This is only available to guilds that contain ``INVITE_SPLASH``
            in :attr:`Guild.features`.
        discovery_splash: :class:`bytes`
            A :term:`py:bytes-like object` representing the discovery splash.
            Only PNG/JPEG supported. Could be ``None`` to denote removing the
            splash. This is only available to guilds that contain ``DISCOVERABLE``
            in :attr:`Guild.features`.
        community: :class:`bool`
            Whether the guild should be a Community guild. If set to ``True``\, both ``rules_channel``
            and ``public_updates_channel`` parameters are required.
        region: Union[:class:`str`, :class:`VoiceRegion`]
            The new region for the guild's voice communication.
        afk_channel: Optional[:class:`VoiceChannel`]
            The new channel that is the AFK channel. Could be ``None`` for no AFK channel.
        afk_timeout: :class:`int`
            The number of seconds until someone is moved to the AFK channel.
        owner: :class:`Member`
            The new owner of the guild to transfer ownership to. Note that you must
            be owner of the guild to do this.
        verification_level: :class:`VerificationLevel`
            The new verification level for the guild.
        default_notifications: :class:`NotificationLevel`
            The new default notification level for the guild.
        explicit_content_filter: :class:`ContentFilter`
            The new explicit content filter for the guild.
        vanity_code: :class:`str`
            The new vanity code for the guild.
        system_channel: Optional[:class:`TextChannel`]
            The new channel that is used for the system channel. Could be ``None`` for no system channel.
        system_channel_flags: :class:`SystemChannelFlags`
            The new system channel settings to use with the new system channel.
        preferred_locale: :class:`str`
            The new preferred locale for the guild. Used as the primary language in the guild.
            If set, this must be an ISO 639 code, e.g. ``en-US`` or ``ja`` or ``zh-CN``.
        rules_channel: Optional[:class:`TextChannel`]
            The new channel that is used for rules. This is only available to
            guilds that contain ``PUBLIC`` in :attr:`Guild.features`. Could be ``None`` for no rules
            channel.
        public_updates_channel: Optional[:class:`TextChannel`]
            The new channel that is used for public updates from Discord. This is only available to
            guilds that contain ``PUBLIC`` in :attr:`Guild.features`. Could be ``None`` for no
            public updates channel.
        reason: Optional[:class:`str`]
            The reason for editing this guild. Shows up on the audit log.
        Raises
        -------
        Forbidden
            You do not have permissions to edit the guild.
        HTTPException
            Editing the guild failed.
        InvalidArgument
            The image format passed in to ``icon`` is invalid. It must be
            PNG or JPG. This is also raised if you are not the owner of the
            guild and request an ownership transfer.
        """
        http = self._state.http
        # The vanity code is changed through a dedicated endpoint, separate
        # from the main guild-edit payload built below.
        if vanity_code is not MISSING:
            await http.change_vanity_code(self.id, vanity_code, reason=reason)
        # Wire payload: only fields the caller explicitly passed (i.e. not
        # the MISSING sentinel) are included in the request.
        fields: Dict[str, Any] = {}
        if name is not MISSING:
            fields['name'] = name
        if description is not MISSING:
            fields['description'] = description
        if preferred_locale is not MISSING:
            fields['preferred_locale'] = preferred_locale
        if afk_timeout is not MISSING:
            fields['afk_timeout'] = afk_timeout
        # Image fields are sent base64-encoded; an explicit None clears the
        # image on Discord's side.
        if icon is not MISSING:
            if icon is None:
                fields['icon'] = icon
            else:
                fields['icon'] = utils._bytes_to_base64_data(icon)
        if banner is not MISSING:
            if banner is None:
                fields['banner'] = banner
            else:
                fields['banner'] = utils._bytes_to_base64_data(banner)
        if splash is not MISSING:
            if splash is None:
                fields['splash'] = splash
            else:
                fields['splash'] = utils._bytes_to_base64_data(splash)
        if discovery_splash is not MISSING:
            if discovery_splash is None:
                fields['discovery_splash'] = discovery_splash
            else:
                fields['discovery_splash'] = utils._bytes_to_base64_data(discovery_splash)
        if default_notifications is not MISSING:
            if not isinstance(default_notifications, NotificationLevel):
                raise InvalidArgument('default_notifications field must be of type NotificationLevel')
            fields['default_message_notifications'] = default_notifications.value
        # Channel fields accept either None (clear) or a channel whose ID is
        # sent on the wire.
        if afk_channel is not MISSING:
            if afk_channel is None:
                fields['afk_channel_id'] = afk_channel
            else:
                fields['afk_channel_id'] = afk_channel.id
        if system_channel is not MISSING:
            if system_channel is None:
                fields['system_channel_id'] = system_channel
            else:
                fields['system_channel_id'] = system_channel.id
        if rules_channel is not MISSING:
            if rules_channel is None:
                fields['rules_channel_id'] = rules_channel
            else:
                fields['rules_channel_id'] = rules_channel.id
        if public_updates_channel is not MISSING:
            if public_updates_channel is None:
                fields['public_updates_channel_id'] = public_updates_channel
            else:
                fields['public_updates_channel_id'] = public_updates_channel.id
        # Ownership transfer is only permitted when we are the current owner.
        if owner is not MISSING:
            if self.owner_id != self._state.self_id:
                raise InvalidArgument('To transfer ownership you must be the owner of the guild.')
            fields['owner_id'] = owner.id
        if region is not MISSING:
            fields['region'] = str(region)
        if verification_level is not MISSING:
            if not isinstance(verification_level, VerificationLevel):
                raise InvalidArgument('verification_level field must be of type VerificationLevel')
            fields['verification_level'] = verification_level.value
        if explicit_content_filter is not MISSING:
            if not isinstance(explicit_content_filter, ContentFilter):
                raise InvalidArgument('explicit_content_filter field must be of type ContentFilter')
            fields['explicit_content_filter'] = explicit_content_filter.value
        if system_channel_flags is not MISSING:
            if not isinstance(system_channel_flags, SystemChannelFlags):
                raise InvalidArgument('system_channel_flags field must be of type SystemChannelFlags')
            fields['system_channel_flags'] = system_channel_flags.value
        # Enabling COMMUNITY requires a rules channel and a public updates
        # channel in the same request; this check relies on the channel
        # fields having been populated above.
        if community is not MISSING:
            features = []
            if community:
                if 'rules_channel_id' in fields and 'public_updates_channel_id' in fields:
                    features.append('COMMUNITY')
                else:
                    raise InvalidArgument(
                        'community field requires both rules_channel and public_updates_channel fields to be provided'
                    )
            fields['features'] = features
        await http.edit_guild(self.id, reason=reason, **fields)
async def fetch_channels(self) -> Sequence[GuildChannel]:
"""|coro|
Retrieves all :class:`abc.GuildChannel` that the guild has.
.. note::
This method is an API call. For general usage, consider :attr:`channels` instead.
.. versionadded:: 1.2
Raises
-------
InvalidData
An unknown channel type was received from Discord.
HTTPException
Retrieving the channels failed.
Returns
-------
Sequence[:class:`abc.GuildChannel`]
All channels in the guild.
"""
data = await self._state.http.get_all_guild_channels(self.id)
def convert(d):
factory, ch_type = _guild_channel_factory(d['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(d))
channel = factory(guild=self, state=self._state, data=d)
return channel
return [convert(d) for d in data]
# TODO: Remove Optional typing here when async iterators are refactored
def fetch_members(self, *, limit: int = 1000, after: Optional[SnowflakeTime] = None) -> MemberIterator:
"""Retrieves an :class:`.AsyncIterator` that enables receiving the guild's members. In order to use this,
:meth:`Intents.members` must be enabled.
.. note::
This method is an API call. For general usage, consider :attr:`members` instead.
.. versionadded:: 1.3
All parameters are optional.
Parameters
----------
limit: Optional[:class:`int`]
The number of members to retrieve. Defaults to 1000.
Pass ``None`` to fetch all members. Note that this is potentially slow.
after: Optional[Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]]
Retrieve members after this date or object.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
Raises
------
ClientException
The members intent is not enabled.
HTTPException
Getting the members failed.
Yields
------
:class:`.Member`
The member with the member data parsed.
Examples
--------
Usage ::
async for member in guild.fetch_members(limit=150):
print(member.name)
Flattening into a list ::
members = await guild.fetch_members(limit=150).flatten()
# members is now a list of Member...
"""
if not self._state._intents.members:
raise ClientException('Intents.members must be enabled to use this.')
return MemberIterator(self, limit=limit, after=after)
async def fetch_member(self, member_id: int, /) -> Member:
"""|coro|
Retrieves a :class:`Member` from a guild ID, and a member ID.
.. note::
This method is an API call. If you have :attr:`Intents.members` and member cache enabled, consider :meth:`get_member` instead.
Parameters
-----------
member_id: :class:`int`
The member's ID to fetch from.
Raises
-------
Forbidden
You do not have access to the guild.
HTTPException
Fetching the member failed.
Returns
--------
:class:`Member`
The member from the member ID.
"""
data = await self._state.http.get_member(self.id, member_id)
return Member(data=data, state=self._state, guild=self)
async def fetch_ban(self, user: Snowflake) -> BanEntry:
"""|coro|
Retrieves the :class:`BanEntry` for a user.
You must have the :attr:`~Permissions.ban_members` permission
to get this information.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to get ban information from.
Raises
------
Forbidden
You do not have proper permissions to get the information.
NotFound
This user is not banned.
HTTPException
An error occurred while fetching the information.
Returns
-------
:class:`BanEntry`
The :class:`BanEntry` object for the specified user.
"""
data: BanPayload = await self._state.http.get_ban(user.id, self.id)
return BanEntry(user=User(state=self._state, data=data['user']), reason=data['reason'])
async def fetch_channel(self, channel_id: int, /) -> GuildChannel:
    """|coro|

    Retrieves a :class:`.abc.GuildChannel` with the specified ID.

    .. note::

        This is an API call. For general usage, prefer :meth:`get_channel`.

    .. versionadded:: 2.0

    Raises
    -------
    :exc:`.InvalidData`
        An unknown channel type was received from Discord, or the channel
        belongs to a different guild than this one.
    :exc:`.HTTPException`
        Retrieving the channel failed.
    :exc:`.NotFound`
        Invalid Channel ID.
    :exc:`.Forbidden`
        You do not have permission to fetch this channel.

    Returns
    --------
    :class:`.abc.GuildChannel`
        The channel from the ID.
    """
    payload = await self._state.http.get_channel(channel_id)

    factory, ch_type = _guild_channel_factory(payload['type'])
    # Guard clauses: unknown types, DM/group channels, and channels owned
    # by another guild are all rejected before construction.
    if factory is None:
        raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(payload))
    if ch_type in (ChannelType.group, ChannelType.private):
        raise InvalidData('Channel ID resolved to a private channel')
    if self.id != int(payload['guild_id']):
        raise InvalidData('Guild ID resolved to a different guild')

    result: GuildChannel = factory(guild=self, state=self._state, data=payload)  # type: ignore
    return result
async def bans(self) -> List[BanEntry]:
    """|coro|

    Retrieves all users banned from the guild as a :class:`list` of
    :class:`BanEntry`.

    Requires the :attr:`~Permissions.ban_members` permission.

    Raises
    -------
    Forbidden
        You do not have proper permissions to get the information.
    HTTPException
        An error occurred while fetching the information.

    Returns
    --------
    List[:class:`BanEntry`]
        A list of :class:`BanEntry` objects.
    """
    payload: List[BanPayload] = await self._state.http.get_bans(self.id)
    entries = []
    for entry in payload:
        banned = User(state=self._state, data=entry['user'])
        entries.append(BanEntry(user=banned, reason=entry['reason']))
    return entries
async def prune_members(
    self,
    *,
    days: int,
    compute_prune_count: bool = True,
    roles: List[Snowflake] = MISSING,
    reason: Optional[str] = None,
) -> Optional[int]:
    r"""|coro|

    Prunes the guild of members inactive for ``days`` days who hold no
    roles. Requires the :attr:`~Permissions.kick_members` permission.

    To check how many members would be pruned without pruning, see
    :meth:`estimate_pruned_members`. Use ``roles`` to include members
    with specific roles.

    .. versionchanged:: 1.4
        The ``roles`` keyword-only parameter was added.

    Parameters
    -----------
    days: :class:`int`
        The number of days before counting as inactive.
    reason: Optional[:class:`str`]
        The reason for doing this action. Shows up on the audit log.
    compute_prune_count: :class:`bool`
        Whether to compute the prune count. Defaults to ``True``, which
        is prone to timeouts in very large guilds; set to ``False`` to
        avoid timeouts, in which case this function returns ``None``.
    roles: List[:class:`abc.Snowflake`]
        Roles to include in the pruning process. Members holding a role
        not in this list are excluded.

    Raises
    -------
    Forbidden
        You do not have permissions to prune members.
    HTTPException
        An error occurred while pruning members.
    InvalidArgument
        An integer was not passed for ``days``.

    Returns
    ---------
    Optional[:class:`int`]
        The number of members pruned, or ``None`` when
        ``compute_prune_count`` is ``False``.
    """
    if not isinstance(days, int):
        raise InvalidArgument(f'Expected int for ``days``, received {type(days).__name__} instead.')

    # The HTTP layer expects role IDs as strings.
    role_ids = [str(role.id) for role in roles] if roles else []

    payload = await self._state.http.prune_members(
        self.id, days, compute_prune_count=compute_prune_count, roles=role_ids, reason=reason
    )
    return payload['pruned']
async def templates(self) -> List[Template]:
    """|coro|

    Gets the list of templates from this guild.

    Requires :attr:`~.Permissions.manage_guild` permissions.

    .. versionadded:: 1.7

    Raises
    -------
    Forbidden
        You don't have permissions to get the templates.

    Returns
    --------
    List[:class:`Template`]
        The templates for this guild.
    """
    # Imported locally to avoid a circular import at module load time.
    from .template import Template

    payload = await self._state.http.guild_templates(self.id)
    return [Template(data=entry, state=self._state) for entry in payload]
async def webhooks(self) -> List[Webhook]:
    """|coro|

    Gets the list of webhooks from this guild.

    Requires :attr:`~.Permissions.manage_webhooks` permissions.

    Raises
    -------
    Forbidden
        You don't have permissions to get the webhooks.

    Returns
    --------
    List[:class:`Webhook`]
        The webhooks for this guild.
    """
    # Imported locally to avoid a circular import at module load time.
    from .webhook import Webhook

    payload = await self._state.http.guild_webhooks(self.id)
    return [Webhook.from_state(entry, state=self._state) for entry in payload]
async def estimate_pruned_members(self, *, days: int, roles: List[Snowflake] = MISSING) -> int:
    """|coro|

    Similar to :meth:`prune_members`, but instead of pruning, returns
    how many members would be pruned had it been called.

    Parameters
    -----------
    days: :class:`int`
        The number of days before counting as inactive.
    roles: List[:class:`abc.Snowflake`]
        Roles to include in the estimate. Members holding a role not in
        this list are excluded.

        .. versionadded:: 1.7

    Raises
    -------
    Forbidden
        You do not have permissions to prune members.
    HTTPException
        An error occurred while fetching the prune members estimate.
    InvalidArgument
        An integer was not passed for ``days``.

    Returns
    ---------
    :class:`int`
        The number of members estimated to be pruned.
    """
    if not isinstance(days, int):
        raise InvalidArgument(f'Expected int for ``days``, received {type(days).__name__} instead.')

    # The HTTP layer expects role IDs as strings.
    role_ids = [str(role.id) for role in roles] if roles else []

    payload = await self._state.http.estimate_pruned_members(self.id, days, role_ids)
    return payload['pruned']
async def invites(self) -> List[Invite]:
    """|coro|

    Returns a list of all active instant invites from the guild.

    Requires the :attr:`~Permissions.manage_guild` permission.

    Raises
    -------
    Forbidden
        You do not have proper permissions to get the information.
    HTTPException
        An error occurred while fetching the information.

    Returns
    -------
    List[:class:`Invite`]
        The list of invites that are currently active.
    """
    payload = await self._state.http.invites_from(self.id)
    invites = []
    for entry in payload:
        # Resolve the invite's channel from cache where possible.
        channel = self.get_channel(int(entry['channel']['id']))
        invites.append(Invite(state=self._state, data=entry, guild=self, channel=channel))
    return invites
async def create_template(self, *, name: str, description: str = MISSING) -> Template:
    """|coro|

    Creates a template for the guild.

    Requires the :attr:`~Permissions.manage_guild` permission.

    .. versionadded:: 1.7

    Parameters
    -----------
    name: :class:`str`
        The name of the template.
    description: :class:`str`
        The description of the template.

    Returns
    --------
    :class:`Template`
        The created template.
    """
    # Imported locally to avoid a circular import at module load time.
    from .template import Template

    payload = {'name': name}
    if description:
        payload['description'] = description

    data = await self._state.http.create_template(self.id, payload)
    return Template(state=self._state, data=data)
async def create_integration(self, *, type: str, id: int) -> None:
    """|coro|

    Attaches an integration to the guild.

    Requires the :attr:`~Permissions.manage_guild` permission.

    .. versionadded:: 1.4

    Parameters
    -----------
    type: :class:`str`
        The integration type (e.g. Twitch).
    id: :class:`int`
        The integration ID.

    Raises
    -------
    Forbidden
        You do not have permission to create the integration.
    HTTPException
        The account could not be found.
    """
    await self._state.http.create_integration(self.id, type, id)
async def integrations(self) -> List[Integration]:
    """|coro|

    Returns a list of all integrations attached to the guild.

    Requires the :attr:`~Permissions.manage_guild` permission.

    .. versionadded:: 1.4

    Raises
    -------
    Forbidden
        You do not have permission to create the integration.
    HTTPException
        Fetching the integrations failed.

    Returns
    --------
    List[:class:`Integration`]
        The list of integrations that are attached to the guild.
    """
    payload = await self._state.http.get_all_integrations(self.id)

    def _convert(entry):
        # Each integration type has a dedicated class; unknown types are
        # treated as a protocol error rather than silently dropped.
        factory, _ = _integration_factory(entry['type'])
        if factory is None:
            raise InvalidData('Unknown integration type {type!r} for integration ID {id}'.format_map(entry))
        return factory(guild=self, data=entry)

    return [_convert(entry) for entry in payload]
async def fetch_emojis(self) -> List[Emoji]:
    r"""|coro|

    Retrieves all custom :class:`Emoji`\s from the guild.

    .. note::

        This is an API call. For general usage, prefer :attr:`emojis`.

    Raises
    ---------
    HTTPException
        An error occurred fetching the emojis.

    Returns
    --------
    List[:class:`Emoji`]
        The retrieved emojis.
    """
    payload = await self._state.http.get_all_custom_emojis(self.id)
    return [Emoji(guild=self, state=self._state, data=entry) for entry in payload]
async def fetch_emoji(self, emoji_id: int, /) -> Emoji:
    """|coro|

    Retrieves a custom :class:`Emoji` from the guild.

    .. note::

        This is an API call. For general usage, prefer iterating over
        :attr:`emojis` instead.

    Parameters
    -------------
    emoji_id: :class:`int`
        The emoji's ID.

    Raises
    ---------
    NotFound
        The emoji requested could not be found.
    HTTPException
        An error occurred fetching the emoji.

    Returns
    --------
    :class:`Emoji`
        The retrieved emoji.
    """
    payload = await self._state.http.get_custom_emoji(self.id, emoji_id)
    return Emoji(guild=self, state=self._state, data=payload)
async def create_custom_emoji(
    self,
    *,
    name: str,
    image: bytes,
    roles: List[Role] = MISSING,
    reason: Optional[str] = None,
) -> Emoji:
    r"""|coro|

    Creates a custom :class:`Emoji` for the guild.

    Guilds are limited to 50 static and 50 animated emojis, unless the
    guild has the ``MORE_EMOJI`` feature (limit 200 each). Requires the
    :attr:`~Permissions.manage_emojis` permission.

    Parameters
    -----------
    name: :class:`str`
        The emoji name. Must be at least 2 characters.
    image: :class:`bytes`
        The :term:`py:bytes-like object` representing the image data.
        Only JPG, PNG and GIF images are supported.
    roles: List[:class:`Role`]
        A :class:`list` of :class:`Role`\s that can use this emoji.
        Leave empty to make it available to everyone.
    reason: Optional[:class:`str`]
        The reason for creating this emoji. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You are not allowed to create emojis.
    HTTPException
        An error occurred creating an emoji.

    Returns
    --------
    :class:`Emoji`
        The created emoji.
    """
    # The API takes the image as base64-encoded data, not raw bytes.
    img = utils._bytes_to_base64_data(image)
    role_ids = [role.id for role in roles] if roles else []

    payload = await self._state.http.create_custom_emoji(self.id, name, img, roles=role_ids, reason=reason)
    return self._state.store_emoji(self, payload)
async def delete_emoji(self, emoji: Snowflake, *, reason: Optional[str] = None) -> None:
    """|coro|

    Deletes the custom :class:`Emoji` from the guild.

    Requires the :attr:`~Permissions.manage_emojis` permission.

    Parameters
    -----------
    emoji: :class:`abc.Snowflake`
        The emoji you are deleting.
    reason: Optional[:class:`str`]
        The reason for deleting this emoji. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You are not allowed to delete emojis.
    HTTPException
        An error occurred deleting the emoji.
    """
    await self._state.http.delete_custom_emoji(self.id, emoji.id, reason=reason)
async def fetch_roles(self) -> List[Role]:
    """|coro|

    Retrieves all :class:`Role` that the guild has.

    .. note::

        This is an API call. For general usage, prefer :attr:`roles`.

    .. versionadded:: 1.3

    Raises
    -------
    HTTPException
        Retrieving the roles failed.

    Returns
    -------
    List[:class:`Role`]
        All roles in the guild.
    """
    payload = await self._state.http.get_roles(self.id)
    return [Role(guild=self, state=self._state, data=entry) for entry in payload]
# Typing-only overloads for create_role: the API accepts either the
# British ``colour`` or the American ``color`` spelling, but mypy should
# reject calls that supply both. The real implementation follows below.
@overload
async def create_role(
    self,
    *,
    reason: Optional[str] = ...,
    name: str = ...,
    permissions: Permissions = ...,
    colour: Union[Colour, int] = ...,
    hoist: bool = ...,
    mentionable: bool = ...,
) -> Role:
    ...

@overload
async def create_role(
    self,
    *,
    reason: Optional[str] = ...,
    name: str = ...,
    permissions: Permissions = ...,
    color: Union[Colour, int] = ...,
    hoist: bool = ...,
    mentionable: bool = ...,
) -> Role:
    ...
async def create_role(
    self,
    *,
    name: str = MISSING,
    permissions: Permissions = MISSING,
    color: Union[Colour, int] = MISSING,
    colour: Union[Colour, int] = MISSING,
    hoist: bool = MISSING,
    mentionable: bool = MISSING,
    reason: Optional[str] = None,
) -> Role:
    """|coro|

    Creates a :class:`Role` for the guild. All fields are optional.

    You must have the :attr:`~Permissions.manage_roles` permission to
    do this.

    .. versionchanged:: 1.6
        Can now pass ``int`` to ``colour`` keyword-only parameter.

    Parameters
    -----------
    name: :class:`str`
        The role name. Defaults to 'new role'.
    permissions: :class:`Permissions`
        The permissions to have. Defaults to no permissions.
    colour: Union[:class:`Colour`, :class:`int`]
        The colour for the role. Defaults to :meth:`Colour.default`.
        This is aliased to ``color`` as well; ``colour`` takes
        precedence when both are given.
    hoist: :class:`bool`
        Indicates if the role should be shown separately in the member
        list. Defaults to ``False``.
    mentionable: :class:`bool`
        Indicates if the role should be mentionable by others.
        Defaults to ``False``.
    reason: Optional[:class:`str`]
        The reason for creating this role. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You do not have permissions to create the role.
    HTTPException
        Creating the role failed.
    InvalidArgument
        An invalid keyword argument was given.

    Returns
    --------
    :class:`Role`
        The newly created role.
    """
    fields: Dict[str, Any] = {}

    # The API expects the permission bitfield as a string; '0' means none.
    if permissions is not MISSING:
        fields['permissions'] = str(permissions.value)
    else:
        fields['permissions'] = '0'

    # Resolve the colour by comparing against the MISSING sentinel rather
    # than by truthiness (`colour or color or default`), so an explicitly
    # passed falsy value such as ``colour=0`` is honoured instead of being
    # silently replaced by the default.
    if colour is not MISSING:
        actual_colour = colour
    elif color is not MISSING:
        actual_colour = color
    else:
        actual_colour = Colour.default()

    if isinstance(actual_colour, int):
        fields['color'] = actual_colour
    else:
        fields['color'] = actual_colour.value

    if hoist is not MISSING:
        fields['hoist'] = hoist

    if mentionable is not MISSING:
        fields['mentionable'] = mentionable

    if name is not MISSING:
        fields['name'] = name

    data = await self._state.http.create_role(self.id, reason=reason, **fields)
    role = Role(guild=self, data=data, state=self._state)

    # TODO: add to cache
    return role
async def edit_role_positions(self, positions: Dict[Snowflake, int], *, reason: Optional[str] = None) -> List[Role]:
    """|coro|

    Bulk edits a list of :class:`Role` in the guild.

    Requires the :attr:`~Permissions.manage_roles` permission.

    .. versionadded:: 1.4

    Example:

    .. code-block:: python3

        positions = {
            bots_role: 1, # penultimate role
            tester_role: 2,
            admin_role: 6
        }

        await guild.edit_role_positions(positions=positions)

    Parameters
    -----------
    positions
        A :class:`dict` of :class:`Role` to :class:`int` to change the
        positions of each given role.
    reason: Optional[:class:`str`]
        The reason for editing the role positions. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You do not have permissions to move the roles.
    HTTPException
        Moving the roles failed.
    InvalidArgument
        An invalid keyword argument was given.

    Returns
    --------
    List[:class:`Role`]
        A list of all the roles in the guild.
    """
    if not isinstance(positions, dict):
        raise InvalidArgument('positions parameter expects a dict.')

    role_positions: List[Dict[str, Any]] = [
        {'id': role.id, 'position': position} for role, position in positions.items()
    ]

    data = await self._state.http.move_role_position(self.id, role_positions, reason=reason)

    roles: List[Role] = []
    for entry in data:
        role = Role(guild=self, data=entry, state=self._state)
        roles.append(role)
        # Keep the local role cache in sync with the new ordering.
        self._roles[role.id] = role

    return roles
async def kick(self, user: Snowflake, *, reason: Optional[str] = None) -> None:
    """|coro|

    Kicks a user from the guild.

    The user must meet the :class:`abc.Snowflake` abc. Requires the
    :attr:`~Permissions.kick_members` permission.

    Parameters
    -----------
    user: :class:`abc.Snowflake`
        The user to kick from their guild.
    reason: Optional[:class:`str`]
        The reason the user got kicked.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to kick.
    HTTPException
        Kicking failed.
    """
    await self._state.http.kick(user.id, self.id, reason=reason)
async def ban(
    self,
    user: Snowflake,
    *,
    reason: Optional[str] = None,
    delete_message_days: Literal[0, 1, 2, 3, 4, 5, 6, 7] = 1,
) -> None:
    """|coro|

    Bans a user from the guild.

    The user must meet the :class:`abc.Snowflake` abc. Requires the
    :attr:`~Permissions.ban_members` permission.

    Parameters
    -----------
    user: :class:`abc.Snowflake`
        The user to ban from their guild.
    delete_message_days: :class:`int`
        The number of days worth of messages to delete from the user in
        the guild. The minimum is 0 and the maximum is 7.
    reason: Optional[:class:`str`]
        The reason the user got banned.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to ban.
    HTTPException
        Banning failed.
    """
    await self._state.http.ban(user.id, self.id, delete_message_days, reason=reason)
async def unban(self, user: Snowflake, *, reason: Optional[str] = None) -> None:
    """|coro|

    Unbans a user from the guild.

    The user must meet the :class:`abc.Snowflake` abc. Requires the
    :attr:`~Permissions.ban_members` permission.

    Parameters
    -----------
    user: :class:`abc.Snowflake`
        The user to unban.
    reason: Optional[:class:`str`]
        The reason for doing this action. Shows up on the audit log.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to unban.
    HTTPException
        Unbanning failed.
    """
    await self._state.http.unban(user.id, self.id, reason=reason)
async def vanity_invite(self) -> Optional[Invite]:
    """|coro|

    Returns the guild's special vanity invite.

    The guild must have ``VANITY_URL`` in :attr:`~Guild.features`, and
    you must have the :attr:`~Permissions.manage_guild` permission.

    Raises
    -------
    Forbidden
        You do not have the proper permissions to get this.
    HTTPException
        Retrieving the vanity invite failed.

    Returns
    --------
    Optional[:class:`Invite`]
        The special vanity invite, or ``None`` when the guild has no
        vanity invite set.
    """
    # The vanity endpoint only returns ``{ code: ... }``.
    payload = await self._state.http.get_vanity_code(self.id)
    code = payload['code']
    if not code:
        return None

    # Fetch the full invite to resolve the channel, since default
    # channels aren't reliable or a thing anymore.
    invite_data = await self._state.http.get_invite(code)
    channel = self.get_channel(int(invite_data['channel']['id']))

    # Fill in fields the partial vanity payload lacks.
    payload.update(
        revoked=False,
        temporary=False,
        max_uses=0,
        max_age=0,
        uses=payload.get('uses', 0),
    )
    return Invite(state=self._state, data=payload, guild=self, channel=channel)
# TODO: use MISSING when async iterators get refactored
def audit_logs(
    self,
    *,
    limit: int = 100,
    before: Optional[SnowflakeTime] = None,
    after: Optional[SnowflakeTime] = None,
    oldest_first: Optional[bool] = None,
    user: Snowflake = None,
    action: AuditLogAction = None,
) -> AuditLogIterator:
    """Returns an :class:`AsyncIterator` that enables receiving the guild's audit logs.

    You must have the :attr:`~Permissions.view_audit_log` permission to use this.

    Examples
    ----------

    Getting the first 100 entries: ::

        async for entry in guild.audit_logs(limit=100):
            print(f'{entry.user} did {entry.action} to {entry.target}')

    Getting entries for a specific action: ::

        async for entry in guild.audit_logs(action=discord.AuditLogAction.ban):
            print(f'{entry.user} banned {entry.target}')

    Getting entries made by a specific user: ::

        entries = await guild.audit_logs(limit=None, user=guild.me).flatten()
        await channel.send(f'I made {len(entries)} moderation actions.')

    Parameters
    -----------
    limit: Optional[:class:`int`]
        The number of entries to retrieve. If ``None`` retrieve all entries.
    before: Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]
        Retrieve entries before this date or entry. A naive datetime is
        assumed to be local time; an aware UTC datetime is recommended.
    after: Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]
        Retrieve entries after this date or entry. A naive datetime is
        assumed to be local time; an aware UTC datetime is recommended.
    oldest_first: :class:`bool`
        If ``True``, return entries in oldest->newest order. Defaults to
        ``True`` if ``after`` is specified, otherwise ``False``.
    user: :class:`abc.Snowflake`
        The moderator to filter entries from.
    action: :class:`AuditLogAction`
        The action to filter with.

    Raises
    -------
    Forbidden
        You are not allowed to fetch audit logs
    HTTPException
        An error occurred while fetching the audit logs.

    Yields
    --------
    :class:`AuditLogEntry`
        The audit log entry.
    """
    # Translate the high-level filters to what the iterator expects,
    # without rebinding the typed parameters.
    user_id = user.id if user is not None else None
    action_type = action.value if action else None

    return AuditLogIterator(
        self, before=before, after=after, limit=limit, oldest_first=oldest_first, user_id=user_id, action_type=action_type
    )
async def widget(self) -> Widget:
    """|coro|

    Returns the widget of the guild.

    .. note::

        The guild must have the widget enabled to get this information.

    Raises
    -------
    Forbidden
        The widget for this guild is disabled.
    HTTPException
        Retrieving the widget failed.

    Returns
    --------
    :class:`Widget`
        The guild's widget.
    """
    payload = await self._state.http.get_widget(self.id)
    return Widget(state=self._state, data=payload)
async def edit_widget(self, *, enabled: bool = MISSING, channel: Optional[Snowflake] = MISSING) -> None:
    """|coro|

    Edits the widget of the guild.

    Requires the :attr:`~Permissions.manage_guild` permission.

    .. versionadded:: 2.0

    Parameters
    -----------
    enabled: :class:`bool`
        Whether to enable the widget for the guild.
    channel: Optional[:class:`~discord.abc.Snowflake`]
        The new widget channel. ``None`` removes the widget channel.

    Raises
    -------
    Forbidden
        You do not have permission to edit the widget.
    HTTPException
        Editing the widget failed.
    """
    # Only send fields the caller explicitly supplied.
    payload: Dict[str, Any] = {}
    if channel is not MISSING:
        payload['channel_id'] = channel.id if channel is not None else None
    if enabled is not MISSING:
        payload['enabled'] = enabled

    await self._state.http.edit_widget(self.id, payload=payload)
async def chunk(self, *, cache: bool = True) -> None:
    """|coro|

    Requests all members that belong to this guild. In order to use
    this, :meth:`Intents.members` must be enabled.

    This is a websocket operation and can be slow.

    .. versionadded:: 1.5

    Parameters
    -----------
    cache: :class:`bool`
        Whether to cache the members as well.

    Raises
    -------
    ClientException
        The members intent is not enabled.
    """
    if not self._state._intents.members:
        raise ClientException('Intents.members must be enabled to use this.')

    # Evicted guilds can no longer be chunked; silently no-op.
    if self._state.is_guild_evicted(self):
        return

    return await self._state.chunk_guild(self, cache=cache)
async def query_members(
    self,
    query: Optional[str] = None,
    *,
    limit: int = 5,
    user_ids: Optional[List[int]] = None,
    presences: bool = False,
    cache: bool = True,
) -> List[Member]:
    """|coro|

    Request members that belong to this guild whose username starts with
    the query given.

    This is a websocket operation and can be slow.

    .. versionadded:: 1.3

    Parameters
    -----------
    query: Optional[:class:`str`]
        The string that the username's start with.
    limit: :class:`int`
        The maximum number of members to send back. This must be
        a number between 5 and 100.
    presences: :class:`bool`
        Whether to request for presences to be provided. This defaults
        to ``False``.

        .. versionadded:: 1.6

    cache: :class:`bool`
        Whether to cache the members internally. This makes operations
        such as :meth:`get_member` work for those that matched.
    user_ids: Optional[List[:class:`int`]]
        List of user IDs to search for. If the user ID is not in the
        guild then it won't be returned.

        .. versionadded:: 1.4

    Raises
    -------
    asyncio.TimeoutError
        The query timed out waiting for the members.
    ValueError
        Invalid parameters were passed to the function
    ClientException
        The presences intent is not enabled.

    Returns
    --------
    List[:class:`Member`]
        The list of members that have matched the query.
    """
    if presences and not self._state._intents.presences:
        raise ClientException('Intents.presences must be enabled to use this.')

    # Previously the empty-string check was nested under ``query is None``
    # and therefore unreachable; validate it independently so an empty
    # query actually raises as documented.
    if query == '':
        raise ValueError('Cannot pass empty query string.')

    if query is None and user_ids is None:
        raise ValueError('Must pass either query or user_ids')

    if user_ids is not None and query is not None:
        raise ValueError('Cannot pass both query and user_ids')

    if user_ids is not None and not user_ids:
        raise ValueError('user_ids must contain at least 1 value')

    # Clamp to the gateway's allowed range; 0/None fall back to 5.
    limit = min(100, limit or 5)
    return await self._state.query_members(
        self, query=query, limit=limit, user_ids=user_ids, presences=presences, cache=cache
    )
async def change_voice_state(
    self, *, channel: Optional[VocalGuildChannel], self_mute: bool = False, self_deaf: bool = False
):
    """|coro|

    Changes client's voice state in the guild.

    .. versionadded:: 1.4

    Parameters
    -----------
    channel: Optional[:class:`VoiceChannel`]
        Channel the client wants to join. Use ``None`` to disconnect.
    self_mute: :class:`bool`
        Indicates if the client should be self-muted.
    self_deaf: :class:`bool`
        Indicates if the client should be self-deafened.
    """
    websocket = self._state._get_websocket(self.id)
    target_channel_id = channel.id if channel else None
    await websocket.voice_state(self.id, target_channel_id, self_mute, self_deaf)
| mit |
Mattze96/youtube-dl | youtube_dl/extractor/cracked.py | 170 | 3213 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
str_to_int,
)
class CrackedIE(InfoExtractor):
    """Extractor for cracked.com video pages.

    Handles both natively hosted videos and pages that merely wrap a
    YouTube embed (the latter are delegated to the Youtube extractor).
    """

    _VALID_URL = r'https?://(?:www\.)?cracked\.com/video_(?P<id>\d+)_[\da-z-]+\.html'
    _TESTS = [{
        'url': 'http://www.cracked.com/video_19070_if-animal-actors-got-e21-true-hollywood-stories.html',
        'md5': '89b90b9824e3806ca95072c4d78f13f7',
        'info_dict': {
            'id': '19070',
            'ext': 'mp4',
            'title': 'If Animal Actors Got E! True Hollywood Stories',
            'timestamp': 1404954000,
            'upload_date': '20140710',
        }
    }, {
        # youtube embed
        'url': 'http://www.cracked.com/video_19006_4-plot-holes-you-didnt-notice-in-your-favorite-movies.html',
        'md5': 'ccd52866b50bde63a6ef3b35016ba8c7',
        'info_dict': {
            'id': 'EjI00A3rZD0',
            'ext': 'mp4',
            'title': "4 Plot Holes You Didn't Notice in Your Favorite Movies - The Spit Take",
            'description': 'md5:c603708c718b796fe6079e2b3351ffc7',
            'upload_date': '20140725',
            'uploader_id': 'Cracked',
            'uploader': 'Cracked',
        }
    }]

    def _real_extract(self, url):
        """Extract the video URL and metadata from a cracked.com page."""
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # Pages that only embed a YouTube player are handed off wholesale.
        youtube_url = self._search_regex(
            r'<iframe[^>]+src="((?:https?:)?//www\.youtube\.com/embed/[^"]+)"',
            webpage, 'youtube url', default=None)
        if youtube_url:
            return self.url_result(youtube_url, 'Youtube')

        # Natively hosted video: the URL lives in a JS variable or a
        # <video> tag, depending on page vintage.
        video_url = self._html_search_regex(
            [r'var\s+CK_vidSrc\s*=\s*"([^"]+)"', r'<video\s+src="([^"]+)"'],
            webpage, 'video URL')

        title = self._search_regex(
            [r'property="?og:title"?\s+content="([^"]+)"', r'class="?title"?>([^<]+)'],
            webpage, 'title')

        description = self._search_regex(
            r'name="?(?:og:)?description"?\s+content="([^"]+)"',
            webpage, 'description', default=None)

        timestamp = self._html_search_regex(
            r'"date"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False)
        if timestamp:
            # Drop the trailing timezone offset before ISO-8601 parsing.
            timestamp = parse_iso8601(timestamp[:-6])

        view_count = str_to_int(self._html_search_regex(
            r'<span\s+class="?views"? id="?viewCounts"?>([\d,\.]+) Views</span>',
            webpage, 'view count', fatal=False))
        comment_count = str_to_int(self._html_search_regex(
            r'<span\s+id="?commentCounts"?>([\d,\.]+)</span>',
            webpage, 'comment count', fatal=False))

        # Dimensions are encoded in the filename, e.g. ..._640X360.mp4.
        m = re.search(r'_(?P<width>\d+)X(?P<height>\d+)\.mp4$', video_url)
        if m:
            width = int(m.group('width'))
            height = int(m.group('height'))
        else:
            width = height = None

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'view_count': view_count,
            'comment_count': comment_count,
            'height': height,
            'width': width,
        }
| unlicense |
pschmitt/home-assistant | homeassistant/components/geizhals/sensor.py | 4 | 2874 | """Parse prices of a device from geizhals."""
from datetime import timedelta
import logging
from geizhals import Device, Geizhals
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

# Platform configuration keys.
CONF_DESCRIPTION = "description"
CONF_PRODUCT_ID = "product_id"
CONF_LOCALE = "locale"

# Frontend icon shown next to the sensor value.
ICON = "mdi:currency-usd-circle"

# Minimum interval between scrapes; enforced via @Throttle on update().
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)

# Schema for the sensor platform configuration entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_PRODUCT_ID): cv.positive_int,
        vol.Optional(CONF_DESCRIPTION, default="Price"): cv.string,
        vol.Optional(CONF_LOCALE, default="DE"): vol.In(["AT", "EU", "DE", "UK", "PL"]),
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Geizwatch sensor."""
    sensor = Geizwatch(
        config.get(CONF_NAME),
        config.get(CONF_DESCRIPTION),
        config.get(CONF_PRODUCT_ID),
        config.get(CONF_LOCALE),
    )
    add_entities([sensor], True)
class Geizwatch(Entity):
    """Sensor entity exposing the best geizhals price for one product."""

    def __init__(self, name, description, product_id, domain):
        """Initialize the sensor."""
        # internal
        self._name = name
        self._geizhals = Geizhals(product_id, domain)
        self._device = Device()

        # external
        self.description = description
        self.product_id = product_id

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Return the icon for the frontend."""
        return ICON

    @property
    def state(self):
        """Return the best (lowest/first) price of the selected product."""
        if not self._device.prices:
            return None
        return self._device.prices[0]

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # Pad a *copy* of the price list to four entries. The previous
        # implementation appended to self._device.prices in place, so a
        # read of this property made an empty price list look non-empty
        # and `state` then returned the literal string "None".
        prices = list(self._device.prices)
        while len(prices) < 4:
            prices.append("None")

        return {
            "device_name": self._device.name,
            "description": self.description,
            "unit_of_measurement": self._device.price_currency,
            "product_id": self.product_id,
            "price1": prices[0],
            "price2": prices[1],
            "price3": prices[2],
            "price4": prices[3],
        }

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest price from geizhals and update the state."""
        self._device = self._geizhals.parse()
| apache-2.0 |
shobhitmittal/textract | textract/exceptions.py | 1 | 2697 | import os
# traceback from exceptions that inherit from this class are suppressed
class CommandLineError(Exception):
    """Base class for errors whose tracebacks are suppressed on the
    command line, so users see a clean message instead of a stack trace.
    """

    def render(self, msg):
        """Interpolate this instance's attributes into *msg* via %-formatting."""
        return msg % vars(self)
class ExtensionNotSupported(CommandLineError):
    """Raised when a filename extension has no extraction backend."""

    def __init__(self, ext):
        self.ext = ext

    def __str__(self):
        message = (
            'The filename extension %(ext)s is not yet supported by\n'
            'textract. Please suggest this filename extension here:\n\n'
            '    https://github.com/deanmalmgren/textract/issues\n'
        )
        return self.render(message)
class MissingFileError(CommandLineError):
    """Raised when the input file can not be located at the given path."""

    def __init__(self, filename):
        self.filename = filename
        # Split once so the message can echo the extension back.
        self.root, self.ext = os.path.splitext(filename)

    def __str__(self):
        message = (
            'The file "%(filename)s" can not be found.\n'
            'Is this the right path/to/file/you/want/to/extract%(ext)s?'
        )
        return self.render(message)
class UnknownMethod(CommandLineError):
    """Raised when the ``--method`` given on the command line is unknown
    for the requested filetype.
    """

    def __init__(self, method):
        self.method = method

    def __str__(self):
        return self.render(
            'The method "%(method)s" can not be found for this filetype.'
        )
class ShellError(CommandLineError):
    """Raised when a shell command exits with a non-zero status."""

    def __init__(self, command, exit_code):
        self.command = command
        self.exit_code = exit_code
        # First whitespace-separated token is the program being run.
        self.executable = self.command.split()[0]

    def is_uninstalled(self):
        """Return True when the failure looks like "command not found"
        (POSIX shells report exit status 127 for that case).
        """
        return os.name == 'posix' and self.exit_code == 127

    def uninstalled_message(self):
        """Message shown when the executable appears to be missing."""
        return (
            "The command `%(command)s` failed because the executable\n"
            "`%(executable)s` is not installed on your system. Please make\n"
            "sure the appropriate dependencies are installed before using\n"
            "textract:\n\n"
            "    http://textract.readthedocs.org/en/latest/installation.html\n"
        ) % vars(self)

    def failed_message(self):
        """Generic message for any other non-zero exit."""
        return (
            "The command `%(command)s` failed with exit code %(exit_code)d"
        ) % vars(self)

    def __str__(self):
        if self.is_uninstalled():
            return self.uninstalled_message()
        return self.failed_message()
| mit |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_conversions.py | 12 | 2192 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, dec
from scipy.lib._version import NumpyVersion
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense
def test_csgraph_from_dense():
    """csgraph_from_dense should treat 0/nan/inf entries as null edges."""
    np.random.seed(1234)
    G = np.random.random((10, 10))
    # Two nested masks: some_nulls is a subset of all_nulls (0.4 < 0.8).
    some_nulls = (G < 0.4)
    all_nulls = (G < 0.8)

    for null_value in [0, np.nan, np.inf]:
        G[all_nulls] = null_value
        # Comparing with nan/inf triggers invalid-value warnings; silence
        # them just for the conversion, then restore the error state.
        olderr = np.seterr(invalid="ignore")
        try:
            G_csr = csgraph_from_dense(G, null_value=0)
        finally:
            np.seterr(**olderr)

        # Round-trip check: nulls become explicit zeros in the dense view.
        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())

    for null_value in [np.nan, np.inf]:
        # Mix real zeros (all_nulls) with nan/inf nulls (some_nulls).
        G[all_nulls] = 0
        G[some_nulls] = null_value
        olderr = np.seterr(invalid="ignore")
        try:
            G_csr = csgraph_from_dense(G, null_value=0)
        finally:
            np.seterr(**olderr)

        # all_nulls covers some_nulls, so this resets both sets to zero.
        G[all_nulls] = 0
        assert_array_almost_equal(G, G_csr.toarray())
# Older numpy can't represent/compare arrays holding infs reliably here.
@dec.skipif(NumpyVersion(np.__version__) < '1.6.0',
            "Can't test arrays with infs.")
def test_csgraph_to_dense():
    """Null edges must be rendered as the requested null_value."""
    np.random.seed(1234)
    G = np.random.random((10, 10))
    nulls = (G < 0.8)
    G[nulls] = np.inf

    G_csr = csgraph_from_dense(G)

    for null_value in [0, 10, -np.inf, np.inf]:
        G[nulls] = null_value
        assert_array_almost_equal(G, csgraph_to_dense(G_csr, null_value))
def test_multiple_edges():
    """Duplicate edges: toarray() sums them, csgraph_to_dense keeps the min."""
    # create a random square matrix with an even number of elements
    np.random.seed(1234)
    X = np.random.random((10, 10))
    Xcsr = csr_matrix(X)

    # now double-up every other column (creates duplicate column indices
    # within each row, i.e. multiple edges between the same node pair)
    Xcsr.indices[::2] = Xcsr.indices[1::2]

    # normal sparse toarray() will sum the duplicated edges
    Xdense = Xcsr.toarray()
    assert_array_almost_equal(Xdense[:, 1::2],
                              X[:, ::2] + X[:, 1::2])

    # csgraph_to_dense chooses the minimum of each duplicated edge
    Xdense = csgraph_to_dense(Xcsr)
    assert_array_almost_equal(Xdense[:, 1::2],
                              np.minimum(X[:, ::2], X[:, 1::2]))
| apache-2.0 |
taigaio/taiga-back | taiga/users/utils.py | 1 | 1620 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def attach_roles(queryset, as_field="roles_attr"):
    """Attach roles to each object of the queryset.

    :param queryset: A Django user stories queryset object.
    :param as_field: Attach the roles as an attribute with this name.

    :return: Queryset object with the additional `as_field` field.
    """
    model = queryset.model
    # Correlated subquery: for each user row ({tbl}.id) collect the
    # distinct, alphabetically ordered role names of their memberships
    # into a Postgres array (ARRAY(...) is Postgres-specific).
    sql = """SELECT ARRAY(
                    SELECT DISTINCT(users_role.name)
                      FROM projects_membership
                INNER JOIN users_role ON users_role.id = projects_membership.role_id
                     WHERE projects_membership.user_id = {tbl}.id
                  ORDER BY users_role.name)
          """
    # Only the table name is interpolated (from model metadata, not user
    # input), so this is not an injection vector.
    sql = sql.format(tbl=model._meta.db_table)
    queryset = queryset.extra(select={as_field: sql})
    return queryset
def attach_extra_info(queryset, user=None):
    """Attach all extra computed fields (currently only roles) to a users
    queryset.

    :param queryset: A Django users queryset.
    :param user: Unused; kept for interface compatibility with callers.
    :return: The annotated queryset.
    """
    return attach_roles(queryset)
| agpl-3.0 |
seninds/yeti | tests/gtest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by squre
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| bsd-2-clause |
Shanec132006/project | server/lib/werkzeug/script.py | 318 | 11249 | # -*- coding: utf-8 -*-
r'''
werkzeug.script
~~~~~~~~~~~~~~~
.. admonition:: Deprecated Functionality
``werkzeug.script`` is deprecated without replacement functionality.
Python's command line support improved greatly with :mod:`argparse`
and a bunch of alternative modules.
Most of the time you have recurring tasks while writing an application
such as starting up an interactive python interpreter with some prefilled
imports, starting the development server, initializing the database or
something similar.
For that purpose werkzeug provides the `werkzeug.script` module which
helps you writing such scripts.
Basic Usage
-----------
The following snippet is roughly the same in every werkzeug script::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from werkzeug import script
# actions go here
if __name__ == '__main__':
script.run()
Starting this script now does nothing because no actions are defined.
An action is a function in the same module starting with ``"action_"``
which takes a number of arguments where every argument has a default. The
type of the default value specifies the type of the argument.
Arguments can then be passed by position or using ``--name=value`` from
the shell.
Because a runserver and shell command is pretty common there are two
factory functions that create such commands::
def make_app():
from yourapplication import YourApplication
return YourApplication(...)
action_runserver = script.make_runserver(make_app, use_reloader=True)
action_shell = script.make_shell(lambda: {'app': make_app()})
Using The Scripts
-----------------
The script from above can be used like this from the shell now:
.. sourcecode:: text
$ ./manage.py --help
$ ./manage.py runserver localhost 8080 --debugger --no-reloader
$ ./manage.py runserver -p 4000
$ ./manage.py shell
As you can see it's possible to pass parameters as positional arguments
or as named parameters, pretty much like Python function calls.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
'''
from __future__ import print_function
import sys
import inspect
import getopt
from os.path import basename
from werkzeug._compat import iteritems
#: Maps the Python type of an action argument's default value to the
#: canonical type-name string used internally by `run` / `print_usage`.
argument_types = {
    bool: 'boolean',
    str: 'string',
    int: 'integer',
    float: 'float'
}

#: Maps a type-name string to a converter that parses the raw command
#: line string into the corresponding Python value.  Booleans accept the
#: usual truthy spellings; anything else is falsy.
converters = {
    'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
    'string': str,
    'integer': int,
    'float': float
}
def run(namespace=None, action_prefix='action_', args=None):
    """Run the script.  Participating actions are looked up in the caller's
    namespace if no namespace is given, otherwise in the dict provided.
    Only items that start with action_prefix are processed as actions.  If
    you want to use all items in the namespace provided as actions set
    action_prefix to an empty string.

    :param namespace: An optional dict where the functions are looked up in.
                      By default the local namespace of the caller is used.
    :param action_prefix: The prefix for the functions.  Everything else
                          is ignored.
    :param args: the arguments for the function.  If not specified
                 :data:`sys.argv` without the first argument is used.
    :return: whatever the selected action function returns.
    """
    if namespace is None:
        # Inspect the caller's local namespace for action_* functions.
        namespace = sys._getframe(1).f_locals
    actions = find_actions(namespace, action_prefix)

    if args is None:
        args = sys.argv[1:]
    if not args or args[0] in ('-h', '--help'):
        return print_usage(actions)
    elif args[0] not in actions:
        fail('Unknown action \'%s\'' % args[0])

    arguments = {}
    types = {}
    key_to_arg = {}
    long_options = []
    formatstring = ''
    func, doc, arg_def = actions[args.pop(0)]

    # Build the getopt format string / long-option list and the mapping
    # from every way an argument can be addressed (positional index,
    # -short, --long) to its canonical name.
    for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
        real_arg = arg.replace('-', '_')
        if shortcut:
            formatstring += shortcut
            if not isinstance(default, bool):
                # Non-boolean shortcuts take a value (getopt ':' marker).
                formatstring += ':'
            key_to_arg['-' + shortcut] = real_arg
        long_options.append(isinstance(default, bool) and arg or arg + '=')
        key_to_arg['--' + arg] = real_arg
        key_to_arg[idx] = real_arg
        types[real_arg] = option_type
        arguments[real_arg] = default

    try:
        optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
    except getopt.GetoptError as e:
        fail(str(e))

    specified_arguments = set()
    for key, value in enumerate(posargs):
        try:
            arg = key_to_arg[key]
        # BUGFIX: key_to_arg is a dict, so an out-of-range positional
        # index raises KeyError, not IndexError.  The original caught
        # IndexError, making 'Too many parameters' unreachable.
        except KeyError:
            fail('Too many parameters')
        specified_arguments.add(arg)
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for argument %s (%s): %s' % (key, arg, value))

    for key, value in optlist:
        arg = key_to_arg[key]
        if arg in specified_arguments:
            fail('Argument \'%s\' is specified twice' % arg)
        if types[arg] == 'boolean':
            # Presence of a boolean flag toggles it; 'no_' flags invert.
            if arg.startswith('no_'):
                value = 'no'
            else:
                value = 'yes'
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for \'%s\': %s' % (key, value))

    # Strip the 'no_' prefix used for inverted boolean flags before
    # calling the action with keyword arguments.
    newargs = {}
    for k, v in iteritems(arguments):
        newargs[k.startswith('no_') and k[3:] or k] = v
    arguments = newargs
    return func(**arguments)
def fail(message, code=-1):
    """Print *message* to stderr as an error and exit with *code*."""
    sys.stderr.write('Error: %s\n' % message)
    sys.exit(code)
def find_actions(namespace, action_prefix):
    """Find all the actions in the namespace.

    :param namespace: dict of names to objects (typically locals()).
    :param action_prefix: only names starting with this prefix are actions.
    :return: dict mapping the stripped action name to the
             ``(func, description, arguments)`` tuple from analyse_action.
    """
    actions = {}
    for key, value in iteritems(namespace):
        if key.startswith(action_prefix):
            # Strip the prefix: 'action_runserver' -> 'runserver'.
            actions[key[len(action_prefix):]] = analyse_action(value)
    return actions
def print_usage(actions):
    """Print the usage information.  (Help screen)

    :param actions: dict of action name -> (func, doc, arguments) as
                    produced by find_actions.
    :return: None (output goes to stdout).
    """
    # BUGFIX: dict.items() returns a view on Python 3, which has no
    # .sort() method; sorted() works on both Python 2 and 3.
    actions = sorted(actions.items())
    print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
    print('       %s --help' % basename(sys.argv[0]))
    print()
    print('actions:')
    for name, (func, doc, arguments) in actions:
        print('  %s:' % name)
        for line in doc.splitlines():
            print('    %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                # Boolean flags take no value, so no type/default column.
                print('    %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                ))
            else:
                print('    %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                ))
        print()
def analyse_action(func):
    """Analyse a function.

    Returns ``(func, description, arguments)`` where each argument entry is
    ``(display-name, shortcut, default, type-name)``.

    :raises TypeError: if the function takes *args/**kwargs, has a
        parameter without a default, or a parameter starting with '_'.
    """
    description = inspect.getdoc(func) or 'undocumented action'
    arguments = []
    # NOTE(review): inspect.getargspec was removed in Python 3.11; kept
    # here because this legacy module still supports Python 2.
    args, varargs, kwargs, defaults = inspect.getargspec(func)
    if varargs or kwargs:
        raise TypeError('variable length arguments for action not allowed.')
    if len(args) != len(defaults or ()):
        raise TypeError('not all arguments have proper definitions')

    for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
        if arg.startswith('_'):
            raise TypeError('arguments may not start with an underscore')
        # A default may be either a plain value or a (shortcut, value) pair.
        if not isinstance(definition, tuple):
            shortcut = None
            default = definition
        else:
            shortcut, default = definition
        argument_type = argument_types[type(default)]
        if isinstance(default, bool) and default is True:
            # True-by-default booleans are exposed as an inverted
            # '--no-<arg>' flag on the command line.
            arg = 'no-' + arg
        arguments.append((arg.replace('_', '-'), shortcut,
                          default, argument_type))
    return func, description, arguments
def make_shell(init_func=None, banner=None, use_ipython=True):
    """Returns an action callback that spawns a new interactive
    python shell.

    :param init_func: an optional initialization function that is
                      called before the shell is started.  The return
                      value of this function is the initial namespace.
    :param banner: the banner that is displayed before the shell.  If
                   not specified a generic banner is used instead.
    :param use_ipython: if set to `True` ipython is used if available.
    """
    if banner is None:
        banner = 'Interactive Werkzeug Shell'
    if init_func is None:
        init_func = dict

    def action(ipython=use_ipython):
        """Start a new interactive python session."""
        namespace = init_func()
        if ipython:
            # Try the modern IPython embed API first, then the legacy
            # one; silently fall back to the plain REPL if neither
            # import succeeds.
            try:
                try:
                    from IPython.frontend.terminal.embed import InteractiveShellEmbed
                    sh = InteractiveShellEmbed(banner1=banner)
                except ImportError:
                    from IPython.Shell import IPShellEmbed
                    sh = IPShellEmbed(banner=banner)
            except ImportError:
                pass
            else:
                sh(global_ns={}, local_ns=namespace)
                return
        from code import interact
        interact(banner, local=namespace)
    return action
def make_runserver(app_factory, hostname='localhost', port=5000,
                   use_reloader=False, use_debugger=False, use_evalex=True,
                   threaded=False, processes=1, static_files=None,
                   extra_files=None, ssl_context=None):
    """Returns an action callback that spawns a new development server.

    .. versionadded:: 0.5
       `static_files` and `extra_files` was added.

    ..versionadded:: 0.6.1
       `ssl_context` was added.

    :param app_factory: a function that returns a new WSGI application.
    :param hostname: the default hostname the server should listen on.
    :param port: the default port of the server.
    :param use_reloader: the default setting for the reloader.
    :param use_evalex: the default setting for the evalex flag of the debugger.
    :param threaded: the default threading setting.
    :param processes: the default number of processes to start.
    :param static_files: optional dict of static files.
    :param extra_files: optional list of extra files to track for reloading.
    :param ssl_context: optional SSL context for running server in HTTPS mode.
    """
    # (shortcut, default) tuples expose -h/-p shortcuts on the command line.
    def action(hostname=('h', hostname), port=('p', port),
               reloader=use_reloader, debugger=use_debugger,
               evalex=use_evalex, threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        # The app is created lazily, once per server start.
        app = app_factory()
        run_simple(hostname, port, app, reloader, debugger, evalex,
                   extra_files, 1, threaded, processes,
                   static_files=static_files, ssl_context=ssl_context)
    return action
| apache-2.0 |
crazyhottommy/manta | scratch/util/largeIntrachromFilter.py | 2 | 4381 | #!/usr/bin/env python
#
# Manta - Structural Variant and Indel Caller
# Copyright (c) 2013-2015 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
remove intrachomosomal events above a specified size
"""
import os, sys
import re
def isInfoFlag(infoString, key):
    """Return True if *key* occurs as a bare flag in the semicolon-delimited
    VCF INFO string; returns None (falsy) otherwise, matching the original
    implicit-return behavior.
    """
    for token in infoString.split(";"):
        if token == key:
            return True
def getKeyVal(infoString, key):
    """Return the value of a ``key=value`` entry in a VCF INFO string,
    or None when the key is absent.
    """
    found = re.search(r"%s=([^;\t]*);?" % key, infoString)
    return found.group(1) if found is not None else None
class VCFID:
    """Column indices of the fixed fields in a tab-delimited VCF line."""
    CHROM = 0
    POS = 1
    # index 2 (ID) is unused by this script
    REF = 3
    ALT = 4
    QUAL = 5
    FILTER = 6
    INFO = 7
class VcfRecord:
    """Parsed view of a single VCF data line (the raw line is kept in
    ``self.line`` so it can be written back out verbatim)."""

    def __init__(self, line):
        self.line = line
        w = line.strip().split('\t')
        self.chrom = w[VCFID.CHROM]
        self.pos = int(w[VCFID.POS])
        self.qual = w[VCFID.QUAL]
        self.isPass = (w[VCFID.FILTER] == "PASS")
        self.Filter = w[VCFID.FILTER]
        # Default end: POS + reference-allele length - 1 ...
        self.endPos = self.pos + len(w[VCFID.REF]) - 1
        # ... overridden by an explicit INFO END= value when present.
        val = getKeyVal(w[VCFID.INFO], "END")
        if val is not None:
            self.endPos = int(val)
        # Somatic quality score, None when the key is absent.
        val = getKeyVal(w[VCFID.INFO], "SOMATICSCORE")
        if val is not None:
            self.ss = int(val)
        else:
            self.ss = None
        self.svtype = getKeyVal(w[VCFID.INFO], "SVTYPE")
        # Manta inversion-orientation flags (True or None).
        self.isInv3 = isInfoFlag(w[VCFID.INFO], "INV3")
        self.isInv5 = isInfoFlag(w[VCFID.INFO], "INV5")
class Constants:
    """Namespace for module-wide precompiled regexes."""
    # NOTE: class-level import keeps `re` out of the module namespace;
    # unusual style, preserved as-is.
    import re
    # Captures the contig ID from VCF header lines like
    # '##contig=<ID=chr1,length=...>'.
    contigpat = re.compile("^##contig=<ID=([^,>]*)[,>]")
def processStream(vcfFp, chromOrder, header, recList):
    """
    read in a vcf stream

    :param vcfFp: iterable of VCF lines (e.g. an open file).
    :param chromOrder: output list, appended with contig IDs in header order.
    :param header: output list, appended with raw header lines.
    :param recList: output list, appended with VcfRecord objects.
    """
    import re

    for line in vcfFp:
        if line[0] == "#":
            header.append(line)
            # Track contig declaration order for later sorting/output.
            match = re.match(Constants.contigpat, line)
            if match is not None:
                chromOrder.append(match.group(1))
        else:
            recList.append(VcfRecord(line))
def getOptions():
    """Parse command-line options; exits with usage on error.

    :return: (options, positional-args) — positional args are always empty
             since any are rejected with a usage message.
    """
    from optparse import OptionParser

    usage = "usage: %prog [options] < vcf > filtered_vcf"
    parser = OptionParser(usage=usage)

    parser.add_option("--maxSize", type="int", dest="maxSize",
                      help="maximum intrachrom event size [required] (no default)")

    (opt, args) = parser.parse_args()

    # No positional arguments accepted — input comes from stdin.
    if len(args) != 0:
        parser.print_help()
        sys.exit(2)

    # --maxSize is mandatory.
    if opt.maxSize is None:
        parser.print_help()
        sys.exit(2)

    return (opt, args)
def resolveRec(recEqualSet, recList):
    """
    determine which of a set of vcf records presumed to refer to the same inversion are kept

    right now best is a record with PASS in the filter field, and secondarily the high quality

    Appends the chosen record to recList (no return value).
    """
    if not recEqualSet:
        return

    bestIndex = 0
    # NOTE(review): bestSS is initialized but never updated — the
    # score-based comparison below was replaced by a position comparison
    # (see the commented alternative).
    bestSS = 0.
    bestPos = 0
    bestIsPass = False
    for (index, rec) in enumerate(recEqualSet):
        assert rec.pos > 0
        # A PASS record always beats a non-PASS one.
        isNewPass = ((not bestIsPass) and rec.isPass)
        # Tie-break on smaller position; bestPos starts at 0, so this
        # only fires after a first candidate has been adopted.
        isHighQual = ((bestIsPass == rec.isPass) and (rec.pos < bestPos))  # (rec.ss > bestSS))
        if (isNewPass or isHighQual):
            bestIndex = index
            bestPos = rec.pos
            bestIsPass = rec.isPass

    # potentially could reward two non-pass inversion calls here:
    # if not bestIsPass and (len(recEqualSet) == 2) :
    #     if (recEqualSet[0].isInv3 and reEqualSet[1].isInv5) or
    #         recEqualSet[1].isInv3 and reEqualSet[0].isInv5)) :

    recList.append(recEqualSet[bestIndex])
def main():
    """Read a VCF on stdin, drop records longer than --maxSize, write the
    header and surviving records to stdout."""
    outfp = sys.stdout

    (opt, args) = getOptions()

    header = []
    recList = []
    chromOrder = []

    processStream(sys.stdin, chromOrder, header, recList)

    # Header passes through unchanged.
    for line in header:
        outfp.write(line)

    for vcfrec in recList:
        # Filter out intrachromosomal events larger than the threshold.
        if (vcfrec.endPos - vcfrec.pos) > opt.maxSize:
            continue
        outfp.write(vcfrec.line)


main()
| gpl-3.0 |
ar45/django | django/contrib/gis/geos/mutable_list.py | 238 | 10705 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://static.aryehleib.com/oldsite/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from functools import total_ordering
from django.utils import six
from django.utils.six.moves import range
@total_ordering
class ListMixin(object):
    """
    A base class which provides complete list interface.
    Derived classes must call ListMixin's __init__() function
    and implement the following:

    function _get_single_external(self, i):
        Return single item with index i for general use.
        The index i will always satisfy 0 <= i < len(self).

    function _get_single_internal(self, i):
        Same as above, but for use within the class [Optional]
        Note that if _get_single_external and _get_single_internal return
        different types of objects, _set_list must distinguish
        between the two and handle each appropriately.

    function _set_list(self, length, items):
        Recreate the entire object.

        NOTE: items may be a generator which calls _get_single_internal.
        Therefore, it is necessary to cache the values in a temporary:
            temp = list(items)
        before clobbering the original storage.

    function _set_single(self, i, value):
        Set the single item at index i to value [Optional]
        If left undefined, all mutations will result in rebuilding
        the object using _set_list.

    function __len__(self):
        Return the length

    int _minlength:
        The minimum legal length [Optional]

    int _maxlength:
        The maximum legal length [Optional]

    type or tuple _allowed:
        A type or tuple of allowed item types [Optional]
    """

    _minlength = 0
    _maxlength = None

    # ### Python initialization and special list interface methods ###

    def __init__(self, *args, **kwargs):
        # Fill in optional hooks with defaults when the subclass does not
        # provide them; without _set_single, every mutation rebuilds the
        # whole object via _set_list.
        if not hasattr(self, '_get_single_internal'):
            self._get_single_internal = self._get_single_external

        if not hasattr(self, '_set_single'):
            self._set_single = self._set_single_rebuild
            self._assign_extended_slice = self._assign_extended_slice_rebuild

        super(ListMixin, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        "Get the item(s) at the specified index/slice."
        if isinstance(index, slice):
            return [self._get_single_external(i) for i in range(*index.indices(len(self)))]
        else:
            index = self._checkindex(index)
            return self._get_single_external(index)

    def __delitem__(self, index):
        "Delete the item(s) at the specified index/slice."
        if not isinstance(index, six.integer_types + (slice,)):
            raise TypeError("%s is not a legal index" % index)

        # calculate new length and dimensions
        origLen = len(self)
        if isinstance(index, six.integer_types):
            index = self._checkindex(index)
            indexRange = [index]
        else:
            indexRange = range(*index.indices(origLen))

        newLen = origLen - len(indexRange)
        # Lazy generator of the surviving items; consumed by _rebuild.
        newItems = (self._get_single_internal(i)
                    for i in range(origLen)
                    if i not in indexRange)

        self._rebuild(newLen, newItems)

    def __setitem__(self, index, val):
        "Set the item(s) at the specified index/slice."
        if isinstance(index, slice):
            self._set_slice(index, val)
        else:
            index = self._checkindex(index)
            self._check_allowed((val,))
            self._set_single(index, val)

    # ### Special methods for arithmetic operations ###
    def __add__(self, other):
        'add another list-like object'
        return self.__class__(list(self) + list(other))

    def __radd__(self, other):
        'add to another list-like object'
        return other.__class__(list(other) + list(self))

    def __iadd__(self, other):
        'add another list-like object to self'
        self.extend(list(other))
        return self

    def __mul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __rmul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __imul__(self, n):
        'multiply'
        if n <= 0:
            del self[:]
        else:
            cache = list(self)
            for i in range(n - 1):
                self.extend(cache)
        return self

    def __eq__(self, other):
        # Element-wise comparison; self may be shorter than other, which
        # surfaces as an IndexError and means "not equal".
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] == other[i]
            except IndexError:
                # self must be shorter
                return False
            if not c:
                return False
        return len(self) == olen

    def __lt__(self, other):
        # Lexicographic comparison; total_ordering derives the rest.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] < other[i]
            except IndexError:
                # self must be shorter
                return True
            if c:
                return c
            elif other[i] < self[i]:
                return False
        return len(self) < olen

    # ### Public list interface Methods ###
    # ## Non-mutating ##
    def count(self, val):
        "Standard list count method"
        count = 0
        for i in self:
            if val == i:
                count += 1
        return count

    def index(self, val):
        "Standard list index method"
        for i in range(0, len(self)):
            if self[i] == val:
                return i
        raise ValueError('%s not found in object' % str(val))

    # ## Mutating ##
    def append(self, val):
        "Standard list append method"
        self[len(self):] = [val]

    def extend(self, vals):
        "Standard list extend method"
        self[len(self):] = vals

    def insert(self, index, val):
        "Standard list insert method"
        if not isinstance(index, six.integer_types):
            raise TypeError("%s is not a legal index" % index)
        self[index:index] = [val]

    def pop(self, index=-1):
        "Standard list pop method"
        result = self[index]
        del self[index]
        return result

    def remove(self, val):
        "Standard list remove method"
        del self[self.index(val)]

    def reverse(self):
        "Standard list reverse method"
        self[:] = self[-1::-1]

    def sort(self, cmp=None, key=None, reverse=False):
        "Standard list sort method"
        # NOTE: the cmp= parameter only works on Python 2; list.sort()
        # lost it in Python 3.
        if key:
            temp = [(key(v), v) for v in self]
            temp.sort(key=lambda x: x[0], reverse=reverse)
            self[:] = [v[1] for v in temp]
        else:
            temp = list(self)
            if cmp is not None:
                temp.sort(cmp=cmp, reverse=reverse)
            else:
                temp.sort(reverse=reverse)
            self[:] = temp

    # ### Private routines ###
    def _rebuild(self, newLen, newItems):
        # Enforce length invariants before handing off to the subclass.
        if newLen < self._minlength:
            raise ValueError('Must have at least %d items' % self._minlength)
        if self._maxlength is not None and newLen > self._maxlength:
            raise ValueError('Cannot have more than %d items' % self._maxlength)

        self._set_list(newLen, newItems)

    def _set_single_rebuild(self, index, value):
        # Fallback single-item setter: rewrite via a one-element slice.
        self._set_slice(slice(index, index + 1, 1), [value])

    def _checkindex(self, index, correct=True):
        # Normalize negative indices (when correct=True) and bounds-check.
        length = len(self)
        if 0 <= index < length:
            return index
        if correct and -length <= index < 0:
            return index + length
        raise IndexError('invalid index: %s' % str(index))

    def _check_allowed(self, items):
        # Optional type restriction declared by the subclass as _allowed.
        if hasattr(self, '_allowed'):
            if False in [isinstance(val, self._allowed) for val in items]:
                raise TypeError('Invalid type encountered in the arguments.')

    def _set_slice(self, index, values):
        "Assign values to a slice of the object"
        try:
            iter(values)
        except TypeError:
            raise TypeError('can only assign an iterable to a slice')

        self._check_allowed(values)

        origLen = len(self)
        valueList = list(values)
        start, stop, step = index.indices(origLen)

        # CAREFUL: index.step and step are not the same!
        # step will never be None
        if index.step is None:
            self._assign_simple_slice(start, stop, valueList)
        else:
            self._assign_extended_slice(start, stop, step, valueList)

    def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
        'Assign an extended slice by rebuilding entire list'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))

        # we're not changing the length of the sequence
        newLen = len(self)
        newVals = dict(zip(indexList, valueList))

        def newItems():
            for i in range(newLen):
                if i in newVals:
                    yield newVals[i]
                else:
                    yield self._get_single_internal(i)

        self._rebuild(newLen, newItems())

    def _assign_extended_slice(self, start, stop, step, valueList):
        'Assign an extended slice by re-assigning individual items'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))

        for i, val in zip(indexList, valueList):
            self._set_single(i, val)

    def _assign_simple_slice(self, start, stop, valueList):
        'Assign a simple slice; Can assign slice of any length'
        origLen = len(self)
        stop = max(start, stop)
        newLen = origLen - stop + start + len(valueList)

        def newItems():
            # Items before the slice, then the new values, then the tail.
            for i in range(origLen + 1):
                if i == start:
                    for val in valueList:
                        yield val

                if i < origLen:
                    if i < start or i >= stop:
                        yield self._get_single_internal(i)

        self._rebuild(newLen, newItems())
| bsd-3-clause |
riking/youtube-dl | test/test_YoutubeDL.py | 2 | 10850 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, assertRegexpMatches
from youtube_dl import YoutubeDL
from youtube_dl.extractor import YoutubeIE
class YDL(FakeYDL):
    """Test double for YoutubeDL that records, instead of performing,
    downloads and screen output."""

    def __init__(self, *args, **kwargs):
        super(YDL, self).__init__(*args, **kwargs)
        # Every info dict passed to process_info, in call order.
        self.downloaded_info_dicts = []
        # Every message passed to to_screen, in call order.
        self.msgs = []

    def process_info(self, info_dict):
        # Capture instead of downloading.
        self.downloaded_info_dicts.append(info_dict)

    def to_screen(self, msg):
        # Capture instead of printing.
        self.msgs.append(msg)
def _make_result(formats, **kwargs):
res = {
'formats': formats,
'id': 'testid',
'title': 'testttitle',
'extractor': 'testex',
}
res.update(**kwargs)
return res
class TestFormatSelection(unittest.TestCase):
    """Exercise YoutubeDL's format-selection logic against synthetic format lists."""

    def test_prefer_free_formats(self):
        """prefer_free_formats should only break quality ties; quality still dominates."""
        # Same resolution => download webm
        ydl = YDL()
        ydl.params['prefer_free_formats'] = True
        formats = [
            {'ext': 'webm', 'height': 460, 'url': 'x'},
            {'ext': 'mp4', 'height': 460, 'url': 'y'},
        ]
        info_dict = _make_result(formats)
        yie = YoutubeIE(ydl)
        yie._sort_formats(info_dict['formats'])
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['ext'], 'webm')
        # Different resolution => download best quality (mp4)
        ydl = YDL()
        ydl.params['prefer_free_formats'] = True
        formats = [
            {'ext': 'webm', 'height': 720, 'url': 'a'},
            {'ext': 'mp4', 'height': 1080, 'url': 'b'},
        ]
        info_dict['formats'] = formats
        yie = YoutubeIE(ydl)
        yie._sort_formats(info_dict['formats'])
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['ext'], 'mp4')
        # No prefer_free_formats => prefer mp4 and flv for greater compatibility
        ydl = YDL()
        ydl.params['prefer_free_formats'] = False
        formats = [
            {'ext': 'webm', 'height': 720, 'url': '_'},
            {'ext': 'mp4', 'height': 720, 'url': '_'},
            {'ext': 'flv', 'height': 720, 'url': '_'},
        ]
        info_dict['formats'] = formats
        yie = YoutubeIE(ydl)
        yie._sort_formats(info_dict['formats'])
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['ext'], 'mp4')
        ydl = YDL()
        ydl.params['prefer_free_formats'] = False
        formats = [
            {'ext': 'flv', 'height': 720, 'url': '_'},
            {'ext': 'webm', 'height': 720, 'url': '_'},
        ]
        info_dict['formats'] = formats
        yie = YoutubeIE(ydl)
        yie._sort_formats(info_dict['formats'])
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['ext'], 'flv')

    def test_format_limit(self):
        """format_limit caps the best selectable format by its 'preference' value."""
        formats = [
            {'format_id': 'meh', 'url': 'http://example.com/meh', 'preference': 1},
            {'format_id': 'good', 'url': 'http://example.com/good', 'preference': 2},
            {'format_id': 'great', 'url': 'http://example.com/great', 'preference': 3},
            {'format_id': 'excellent', 'url': 'http://example.com/exc', 'preference': 4},
        ]
        info_dict = _make_result(formats)
        ydl = YDL()
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'excellent')
        ydl = YDL({'format_limit': 'good'})
        assert ydl.params['format_limit'] == 'good'
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'good')
        # 'all' with a limit downloads every format up to (and including) the limit
        ydl = YDL({'format_limit': 'great', 'format': 'all'})
        ydl.process_ie_result(info_dict.copy())
        self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'meh')
        self.assertEqual(ydl.downloaded_info_dicts[1]['format_id'], 'good')
        self.assertEqual(ydl.downloaded_info_dicts[2]['format_id'], 'great')
        self.assertTrue('3' in ydl.msgs[0])
        ydl = YDL()
        ydl.params['format_limit'] = 'excellent'
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'excellent')

    def test_format_selection(self):
        """'format' spec: '/'-separated fallbacks, 'worst', default best, and by extension."""
        formats = [
            {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': '_'},
            {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': '_'},
            {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': '_'},
            {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': '_'},
        ]
        info_dict = _make_result(formats)
        ydl = YDL({'format': '20/47'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], '47')
        ydl = YDL({'format': '20/71/worst'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], '35')
        ydl = YDL()
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], '2')
        ydl = YDL({'format': 'webm/mp4'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], '47')
        ydl = YDL({'format': '3gp/40/mp4'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], '35')

    def test_format_selection_audio(self):
        """bestaudio/worstaudio pick among audio-only (vcodec 'none') formats."""
        formats = [
            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': '_'},
            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': '_'},
            {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': '_'},
            {'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': '_'},
        ]
        info_dict = _make_result(formats)
        ydl = YDL({'format': 'bestaudio'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'audio-high')
        ydl = YDL({'format': 'worstaudio'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'audio-low')
        # No audio-only formats => fall through the spec to 'best'
        formats = [
            {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': '_'},
            {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': '_'},
        ]
        info_dict = _make_result(formats)
        ydl = YDL({'format': 'bestaudio/worstaudio/best'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'vid-high')

    def test_format_selection_video(self):
        """bestvideo/worstvideo pick among video-only (acodec 'none') formats."""
        formats = [
            {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': '_'},
            {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': '_'},
            {'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': '_'},
        ]
        info_dict = _make_result(formats)
        ydl = YDL({'format': 'bestvideo'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'dash-video-high')
        ydl = YDL({'format': 'worstvideo'})
        ydl.process_ie_result(info_dict.copy())
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'dash-video-low')

    def test_youtube_format_selection(self):
        """Default selection must respect YoutubeIE's format ordering, in either input order."""
        order = [
            '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
            # Apple HTTP Live Streaming
            '96', '95', '94', '93', '92', '132', '151',
            # 3D
            '85', '84', '102', '83', '101', '82', '100',
            # Dash video
            '138', '137', '248', '136', '247', '135', '246',
            '245', '244', '134', '243', '133', '242', '160',
            # Dash audio
            '141', '172', '140', '139', '171',
        ]
        # For each adjacent pair in `order`, the earlier entry must win
        # regardless of which order the two formats are presented in.
        for f1id, f2id in zip(order, order[1:]):
            f1 = YoutubeIE._formats[f1id].copy()
            f1['format_id'] = f1id
            f1['url'] = 'url:' + f1id
            f2 = YoutubeIE._formats[f2id].copy()
            f2['format_id'] = f2id
            f2['url'] = 'url:' + f2id
            info_dict = _make_result([f1, f2], extractor='youtube')
            ydl = YDL()
            yie = YoutubeIE(ydl)
            yie._sort_formats(info_dict['formats'])
            ydl.process_ie_result(info_dict)
            downloaded = ydl.downloaded_info_dicts[0]
            self.assertEqual(downloaded['format_id'], f1id)
            info_dict = _make_result([f2, f1], extractor='youtube')
            ydl = YDL()
            yie = YoutubeIE(ydl)
            yie._sort_formats(info_dict['formats'])
            ydl.process_ie_result(info_dict)
            downloaded = ydl.downloaded_info_dicts[0]
            self.assertEqual(downloaded['format_id'], f1id)

    def test_add_extra_info(self):
        """add_extra_info must not overwrite fields already present in the dict."""
        test_dict = {
            'extractor': 'Foo',
        }
        extra_info = {
            'extractor': 'Bar',
            'playlist': 'funny videos',
        }
        YDL.add_extra_info(test_dict, extra_info)
        self.assertEqual(test_dict['extractor'], 'Foo')
        self.assertEqual(test_dict['playlist'], 'funny videos')

    def test_prepare_filename(self):
        """Output-template expansion, including 'NA' for None and missing fields."""
        info = {
            'id': '1234',
            'ext': 'mp4',
            'width': None,
        }

        def fname(templ):
            ydl = YoutubeDL({'outtmpl': templ})
            return ydl.prepare_filename(info)
        self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
        self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
        # Replace missing fields with 'NA'
        self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')

    def test_format_note(self):
        """_format_note renders an empty format dict to '' and a vbr as e.g. '10k'."""
        ydl = YoutubeDL()
        self.assertEqual(ydl._format_note({}), '')
        assertRegexpMatches(self, ydl._format_note({
            'vbr': 10,
        }), '^\s*10k$')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| unlicense |
Frankkkkk/arctic | tests/integration/scripts/test_initialize_library.py | 5 | 2561 | from mock import patch
import pytest
from arctic.auth import Credential
from arctic.arctic import Arctic
from arctic.scripts import arctic_init_library as mil
from ...util import run_as_main
def test_init_library(mongo_host):
    """arctic_init_library creates the library and applies the default 10G quota."""
    # Create the user against the current mongo database
    with patch('arctic.scripts.arctic_init_library.do_db_auth', return_value=True), \
            patch('pymongo.database.Database.authenticate', return_value=True):
        run_as_main(mil.main, '--host', mongo_host, '--library', 'arctic_user.library')
    # Should be able to write something to the library now
    store = Arctic(mongo_host)
    assert store['user.library']._arctic_lib.get_library_metadata('QUOTA') == 10240 * 1024 * 1024
    store['user.library'].write('key', {'a': 'b'})
    assert store['user.library'].read('key').data == {'a': 'b'}
def test_init_library_no_arctic_prefix(mongo_host):
    """Initializing with an unprefixed name behaves like the 'arctic_'-prefixed case above."""
    # Create the user against the current mongo database
    with patch('arctic.scripts.arctic_init_library.do_db_auth', return_value=True), \
            patch('pymongo.database.Database.authenticate', return_value=True):
        run_as_main(mil.main, '--host', mongo_host, '--library', 'user.library')
    # Should be able to write something to the library now
    store = Arctic(mongo_host)
    assert store['user.library']._arctic_lib.get_library_metadata('QUOTA') == 10240 * 1024 * 1024
    store['user.library'].write('key', {'a': 'b'})
    assert store['user.library'].read('key').data == {'a': 'b'}
def test_init_library_quota(mongo_host):
    """--quota is interpreted in gigabytes (100 => 100 * 1024^3 bytes)."""
    # Create the user against the current mongo database
    with patch('arctic.scripts.arctic_init_library.do_db_auth', return_value=True), \
            patch('pymongo.database.Database.authenticate', return_value=True):
        run_as_main(mil.main, '--host', mongo_host, '--library', 'arctic_user.library', '--quota', '100')
    # Should be able to write something to the library now
    store = Arctic(mongo_host)
    assert store['user.library']._arctic_lib.get_library_metadata('QUOTA') == 100 * 1024 * 1024 * 1024
def test_init_library_bad_library(mongo_host):
    """A library name with no namespace separator is rejected via ArgumentParser.error
    (patched here to raise so the failure is observable)."""
    with pytest.raises(Exception):
        with patch('arctic.arctic.get_auth', return_value=Credential('admin', 'adminuser', 'adminpwd', 'admin')), \
                patch('pymongo.database.Database.authenticate', return_value=True), \
                patch('argparse.ArgumentParser.error', side_effect=Exception):
            # Create the user against the current mongo database
            run_as_main(mil.main, '--host', mongo_host, '--library', 'user')
| lgpl-2.1 |
petercable/mi-instrument | mi/instrument/mclane/ras/d1000/driver.py | 2 | 42602 | """
@package mi.instrument.mclane.ras.ooicore.driver
@file marine-integrations/mi/instrument/mclane/ras/ooicore/driver.py
@author Dan Mergens
@brief Driver for the D1000 (D1421)
Release notes:
initial version
"""
from datetime import datetime
__author__ = 'Dan Mergens'
__license__ = 'Apache 2.0'
import re
import math
import time
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum, Units
from mi.core.exceptions import SampleException, \
InstrumentParameterException, InstrumentProtocolException, InstrumentTimeoutException
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.chunker import StringChunker
from mi.core.driver_scheduler import \
DriverSchedulerConfigKey, \
TriggerType
from mi.core.util import dict_equal
from mi.core.instrument.instrument_driver import \
SingleConnectionInstrumentDriver, \
DriverEvent, \
DriverAsyncEvent, \
DriverConfigKey, \
DriverProtocolState, \
DriverParameter, \
ResourceAgentState
from mi.core.instrument.data_particle import \
DataParticle, \
CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, \
ParameterDictType, \
ParameterDictVisibility
NEWLINE = '\r'  # command/response line terminator used by the D1000
# default timeout.
INTER_CHARACTER_DELAY = .2  # seconds between characters when writing a command
####
# Driver Constant Definitions
####
DEFAULT_SAMPLE_RATE = 15  # sample periodicity in seconds
MIN_SAMPLE_RATE = 1  # in seconds
MAX_SAMPLE_RATE = 3600  # in seconds (1 hour)
SAMPLE_TIMEOUT = 10  # seconds to wait for a sample response
def checksum(data):
    """
    Compute the 8-bit additive checksum of *data*.
    @param data - string whose character ordinals are summed
    @retval low byte of the ordinal sum, as a base-10 integer (matches the last
            two hexadecimal digits the instrument appends to a response)
    """
    return sum(ord(ch) for ch in data) & 0xff
def valid_response(line):
    """
    Verify the checksum of a response line. The transmitted checksum is the
    trailing two ASCII-hex characters; it is compared against the additive
    checksum of everything that precedes it.
    @param line - response line from the instrument, must start with '*'
    @retval True when the embedded checksum matches the computed one
    """
    transmitted = int(line[-2:], 16)  # checksum is last two characters in ASCII hex
    computed = checksum(line[:-2])    # checksum covers everything before it
    if transmitted != computed:
        log.debug('checksum failed (%r): should be %s', line, hex(computed))
        return False
    return True
class ProtocolState(BaseEnum):
    """
    Instrument protocol states, aliased from the generic driver protocol states.
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
    """
    Protocol events, aliased from the generic driver events.
    """
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    DISCOVER = DriverEvent.DISCOVER
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
class Capability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of above).
    """
    GET = ProtocolEvent.GET
    SET = ProtocolEvent.SET
    START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    # these two alias DriverEvent directly; equivalent to the ProtocolEvent values
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    DISCOVER = DriverEvent.DISCOVER
# baud rate (9- 57600, 8- 115200, 7- 300, 6- 600, 5- 1200, 4- 2400, 3- 4800, 2-9600, 1- 19200, 0-38400)
class BaudRate(BaseEnum):
    """Setup-byte encodings for the supported baud rates (bits 3-0 of setup byte 1)."""
    BAUD_38400 = 0
    BAUD_19200 = 1
    BAUD_9600 = 2
    BAUD_4800 = 3
    BAUD_2400 = 4
    BAUD_1200 = 5
    BAUD_600 = 6
    BAUD_300 = 7
    BAUD_115200 = 8
    BAUD_57600 = 9
class AlarmType(BaseEnum):
    """Alarm latching mode encodings (momentary vs latching)."""
    MOMENTARY = 0
    LATCHING = 1
class RTDSelect(BaseEnum):
    """RTD wiring selection encodings (3-wire vs 4-wire)."""
    RTD_3_WIRE = 0
    RTD_4_WIRE = 1
class UnitPrecision(BaseEnum):
    """
    Data output precision encodings (0- 4 digits, 1- 5, 2- 6, 3- 7).
    """
    DIGITS_4 = 0
    DIGITS_5 = 1
    DIGITS_6 = 2
    DIGITS_7 = 3
# (0- no filter, 1- .25, 2- .5, 3- 1, 4- 2, 5- 4, 6- 8, 7- 16)
def filter_enum(value):
    """
    Convert a filter time constant in seconds (0, .25, .5, 1, 2, 4, 8, 16) to
    its 3-bit setup-field encoding (0-7).

    Fix: the original returned a float (``math.log(value) / math.log(2) + 3``),
    which later breaks ``_build_setup_command`` — the result is used in integer
    bit arithmetic (``<< 3``) and '%02x' formatting, neither of which accepts a
    float. Rounding before the int conversion also guards against FP error
    (e.g. log(16)/log(2) evaluating to 3.9999...).
    @param value - filter constant in seconds
    @retval integer enum encoding (0 means no filter)
    """
    if value == 0:
        return 0
    return int(round(math.log(value) / math.log(2))) + 3
def filter_value(enum_value):
    """
    Inverse of filter_enum: convert a 3-bit setup-field encoding (0-7) back to
    the filter time constant in seconds.
    @param enum_value - encoded filter constant
    @retval 0 for no filter, otherwise 2 ** (enum_value - 3)
    """
    if enum_value == 0:
        return 0
    exponent = enum_value - 3
    return 2 ** exponent
class Parameter(DriverParameter):
    """
    Device specific parameters. Apart from SAMPLE_INTERVAL (a driver-only
    value), these map onto the four configuration bytes of the D1000 'SU'
    setup command (see Protocol._build_setup_command).
    """
    # rate at which to stream temperature reports (driver-side scheduling only)
    SAMPLE_INTERVAL = 'output_period'
    # Factory default: 0x31070182
    # Lab default: 0x310214C2
    # Byte 1 - identifies the module channel address
    # default: 0x31 ('1')
    CHANNEL_ADDRESS = 'channel_address'
    # Byte 2
    # Bit 7 - when true, the D1000 will generate line feeds, should be false for driver
    # default: 0
    LINEFEED = 'linefeed'
    # Bit 6 - parity type (0 - even, 1 - odd)
    # default: 0
    PARITY_TYPE = 'parity_type'
    # Bit 5 - parity enabled flag
    # default: 0
    PARITY_ENABLE = 'parity_enable'
    # Bit 4 - addressing mode (0- normal, 1- extended)
    # default: 0
    EXTENDED_ADDRESSING = 'addressing_mode'
    # Bits 3-0 - baud rate (9- 57600, 8- 115200, 7- 300, 6- 600, 5- 1200, 4- 2400, 3- 4800, 2-9600, 1- 19200, 0-38400)
    # default: 2 (9600)
    BAUD_RATE = 'baud_rate'
    # Byte 3
    # Bit 7 - enable alarm
    # default: 0
    ALARM_ENABLE = 'alarm_enable'
    # Bit 6 - low alarm latch (0- momentary, 1- latching) see also $LO
    # default: 0
    LOW_ALARM_LATCH = 'low_alarm_latching'
    # Bit 5 - high alarm latch (0- momentary, 1- latching) see also $HI
    # default: 0
    HIGH_ALARM_LATCH = 'high_alarm_latching'
    # Bit 4 - 3/4 wire select - must match RTD configuration (0- 3 wire, 1- 4 wire)
    # default: 1
    RTD_4_WIRE = 'rtd_34_wire'
    # Bit 3 - temperature output scaling (0- celsius, 1- fahrenheit) - must change HI/LO to match after
    # default: 0
    TEMP_UNITS = 'fahrenheit_select'
    # Bit 2 - echo for daisy-chained configuration
    # default: 1
    ECHO = 'echo_commands'
    # Bits 1-0 - units of delay in the response (based on baud rate)
    # default: 0
    COMMUNICATION_DELAY = 'delay'
    # Byte 4
    # Bits 7-6 - data output precision (0- 4 digits, 1- 5, 2- 6, 3- 7)
    # default: 3 (7)
    PRECISION = 'output_precision'
    # Bits 5-3 - (0- no filter, 1- .25, 2- .5, 3- 1, 4- 2, 5- 4, 6- 8, 7- 16)
    # default: 0 (no filter)
    LARGE_SIGNAL_FILTER_C = 'large_filter_signal_constant'
    # Bits 2-0 - (0- no filter, 1- .25, 2- .5, 3- 1, 4- 2, 5- 4, 6- 8, 7- 16)
    # Should be larger than the large signal filter constant value
    # default: 2 (0.5)
    SMALL_SIGNAL_FILTER_C = 'small_filter_signal_constant'
class Command(BaseEnum):
    """
    Instrument command strings - case sensitive.
    """
    READ = 'RD'  # read the current value (temperature sample)
    ENABLE_WRITE = 'WE'  # sent before SETUP/CLEAR_ZERO (see _restore_params)
    SETUP = 'SU'  # write the 4-byte configuration (see _build_setup_command)
    READ_SETUP = 'RS'  # read back the configuration (cached in _setup)
    CLEAR_ZERO = 'CZ'  # clear command issued during restore — exact semantics per D1000 manual, TODO confirm
class Prompt(BaseEnum):
    """
    Device i/o prompts (line terminators seen in responses).
    """
    CR_NL = '\r\n'
    CR = '\r'
class ScheduledJob(BaseEnum):
    """Identifiers for scheduled driver jobs."""
    SAMPLE = 'scheduled_sample'
class Response(BaseEnum):
    """
    Expected device response patterns. Each acknowledgement begins with
    '*<unit><command>', e.g.:
    *1RD+00019.16AB
    """
    READ = re.compile(r'\*[123]RD')
    ENABLE_WRITE = re.compile(r'\*[123]WE')
    SETUP = re.compile(r'\*[123]SU')
    READ_SETUP = re.compile(r'\*[123]RS')
    CLEAR_ZERO = re.compile(r'\*[123]CZ')
class DataParticleType(BaseEnum):
    """
    Data particle types produced by this driver.
    """
    RAW = CommonDataParticleType.RAW
    D1000_PARSED = 'd1000_sample'
###############################################################################
# Data Particles
###############################################################################
class D1000TemperatureDataParticleKey(BaseEnum):
    """Particle keys: one temperature per D1000 channel (units 1-3)."""
    TEMP1 = 'temperature1'
    TEMP2 = 'temperature2'
    TEMP3 = 'temperature3'
class D1000TemperatureDataParticle(DataParticle):
    """Parsed sample particle holding one temperature reading from each of the
    three D1000 channels."""
    _data_particle_type = DataParticleType.D1000_PARSED

    @staticmethod
    def regex():
        # Data looks like this if we are sampling via the driver:
        # #1RD
        # *1RD+00019.16AB
        # #2RD
        # ...
        # Data looks like this if we are capturing the output from the RSN collection script
        # $1RD
        # *+00003.50
        # $2RD
        # *+00003.26
        # $3RD
        # *+00013.30
        # This regular expression needs to capture either format
        # exp = r'\*([123])RD([+-]\d*\.?\d+)(\w{2})' + NEWLINE
        exp = \
            r'[*$]1RD.*?([+-]\d*\.?\d+).*?' + \
            r'[*$]2RD.*?([+-]\d*\.?\d+).*?' + \
            r'[*$]3RD.*?([+-]\d*\.?\d+).*?' + NEWLINE
        return exp

    @staticmethod
    def regex_compiled():
        # DOTALL so the non-greedy '.*?' spans the newlines between channel responses
        return re.compile(D1000TemperatureDataParticle.regex(), re.DOTALL)

    def _build_parsed_values(self):
        """
        Extract the three channel temperatures from raw_data.
        Driver-collected samples (identified by the echoed '#2RD' command) carry
        checksums, which are validated; RSN log captures do not.
        @raise SampleException on regex mismatch or checksum failure
        """
        match = self.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("D1000TemperatureDataParticle: No regex match of parsed sample data: [%s]",
                                  self.raw_data)
        if '#2RD' in self.raw_data:
            # data should contain a checksum, verify it
            for line in self.raw_data.split(NEWLINE):
                if line.startswith('*'):
                    if not valid_response(line):
                        raise SampleException('Checksum failed - temperature sample is corrupt: %s', self.raw_data)
        result = [
            self._encode_value(D1000TemperatureDataParticleKey.TEMP1, match.group(1), float),
            self._encode_value(D1000TemperatureDataParticleKey.TEMP2, match.group(2), float),
            self._encode_value(D1000TemperatureDataParticleKey.TEMP3, match.group(3), float),
        ]
        return result
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
    """
    InstrumentDriver subclass.
    Subclasses SingleConnectionInstrumentDriver with connection state
    machine.
    """
    ########################################################################
    # Superclass overrides for resource query.
    ########################################################################
    @staticmethod
    def get_resource_params():
        """
        Return list of device parameters available.
        @retval list of Parameter name strings
        """
        return Parameter.list()

    ########################################################################
    # Protocol builder.
    ########################################################################
    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
    def __init__(self, prompts, newline, driver_event):
        """
        Protocol constructor: builds the FSM, command build/response handlers,
        parameter/command/driver dictionaries, chunker, and scheduler.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The newline.
        @param driver_event Driver process event callback.
        """
        # Construct protocol superclass.
        CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
        # Build protocol state machine.
        self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
        # Add event handlers for protocol state machine.
        handlers = {
            ProtocolState.UNKNOWN: [
                (ProtocolEvent.ENTER, self._handler_unknown_enter),
                (ProtocolEvent.EXIT, self._handler_unknown_exit),
                (ProtocolEvent.DISCOVER, self._handler_unknown_discover),
            ],
            ProtocolState.COMMAND: [
                (ProtocolEvent.ENTER, self._handler_command_enter),
                (ProtocolEvent.EXIT, self._handler_command_exit),
                (ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
                (ProtocolEvent.ACQUIRE_SAMPLE, self._handler_sample),
                (ProtocolEvent.START_AUTOSAMPLE, self._handler_command_autosample),
                (ProtocolEvent.GET, self._handler_get),
                (ProtocolEvent.SET, self._handler_command_set),
            ],
            ProtocolState.AUTOSAMPLE: [
                (ProtocolEvent.ENTER, self._handler_autosample_enter),
                (ProtocolEvent.ACQUIRE_SAMPLE, self._handler_sample),
                (ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop),
                (ProtocolEvent.EXIT, self._handler_autosample_exit),
            ],
            ProtocolState.DIRECT_ACCESS: [
                (ProtocolEvent.ENTER, self._handler_direct_access_enter),
                (ProtocolEvent.EXIT, self._handler_direct_access_exit),
                (ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
                (ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
            ],
        }
        for state in handlers:
            for event, handler in handlers[state]:
                self._protocol_fsm.add_handler(state, event, handler)
        # Add build handlers for device commands - we are only using simple commands
        for cmd in Command.list():
            self._add_build_handler(cmd, self._build_command)
            self._add_response_handler(cmd, self._check_command)
        # SETUP needs a dedicated builder (4 config bytes) and READ_SETUP a
        # dedicated response handler (caches the reply) - override the generic
        # handlers registered in the loop above.
        self._add_build_handler(Command.SETUP, self._build_setup_command)
        self._add_response_handler(Command.READ_SETUP, self._read_setup_response_handler)
        # Add response handlers for device commands.
        # self._add_response_handler(Command.xyz, self._parse_xyz_response)
        # Construct the parameter dictionary containing device parameters,
        # current parameter values, and set formatting functions.
        self._build_param_dict()
        self._build_command_dict()
        self._build_driver_dict()
        self._chunker = StringChunker(Protocol.sieve_function)
        # Start state machine in UNKNOWN state.
        self._protocol_fsm.start(ProtocolState.UNKNOWN)
        self._sent_cmds = None
        self.initialize_scheduler()
        # unit identifiers - must match the setup command (SU31 - '1')
        self._units = ['1', '2', '3']
        self._setup = None  # set by the read setup command handler for comparison to see if the config needs reset
@staticmethod
def sieve_function(raw_data):
"""
The method that splits samples and status
"""
matchers = []
return_list = []
matchers.append(D1000TemperatureDataParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
if not return_list:
log.debug("sieve_function: raw_data=%r, return_list=%s", raw_data, return_list)
return return_list
    def _got_chunk(self, chunk, timestamp):
        """
        The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
        with the appropriate particle objects and REGEXes.
        @param chunk - matched data span produced by sieve_function
        @param timestamp - port-agent timestamp associated with the chunk
        """
        log.debug("_got_chunk: chunk=%s", chunk)
        self._extract_sample(D1000TemperatureDataParticle, D1000TemperatureDataParticle.regex_compiled(), chunk,
                             timestamp)
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
########################################################################
# implement virtual methods from base class.
########################################################################
    def _set_params(self, *args, **kwargs):
        """
        Issue commands to the instrument to set various parameters. If
        startup is set to true that means we are setting startup values
        and immutable parameters can be set. Otherwise only READ_WRITE
        parameters can be set.
        must be overloaded in derived classes
        @param params dictionary containing parameter name and value pairs (args[0])
        @param startup - a flag, true indicates initializing, false otherwise
        @raise InstrumentParameterException (via _verify_not_readonly) on
               attempts to set read-only parameters
        """
        params = args[0]
        # check for attempt to set readonly parameters (read-only or immutable set outside startup)
        self._verify_not_readonly(*args, **kwargs)
        old_config = self._param_dict.get_config()
        for (key, val) in params.iteritems():
            log.debug("KEY = " + str(key) + " VALUE = " + str(val))
            self._param_dict.set_value(key, val)
        new_config = self._param_dict.get_config()
        # check for parameter change; only notify listeners when something moved
        if not dict_equal(old_config, new_config):
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def apply_startup_params(self):
"""
Apply startup parameters
"""
config = self.get_startup_config()
for param in Parameter.list():
if param in config:
self._param_dict.set_value(param, config[param])
########################################################################
# Private helpers.
########################################################################
    def _wakeup(self, wakeup_timeout=10, response_timeout=3):
        """
        Over-ridden because the D1000 does not go to sleep and requires no special wake-up
        commands; intentionally a no-op.
        @param wakeup_timeout The timeout to wake the device (unused).
        @param response_timeout The time to look for response to a wakeup attempt (unused).
        """
        pass
def _do_command(self, cmd, unit, **kwargs):
"""
Send command and ensure it matches appropriate response. Simply enforces sending the unit identifier as a
required argument.
@param cmd - Command to send to instrument
@param unit - unit identifier
@retval - response from instrument
"""
self._do_cmd_resp(cmd, unit, write_delay=INTER_CHARACTER_DELAY, **kwargs)
def _build_command(self, cmd, unit):
"""
@param cmd - Command to process
@param unit - unit identifier
"""
return '#' + unit + cmd + NEWLINE
    def _build_setup_command(self, cmd, unit):
        """
        Build the 'SU' setup command: '#<unit>SU' followed by the four
        configuration bytes rendered as two hex digits each.
        @param cmd - command to send - should be 'SU'
        @param unit - unit identifier - should be '1', '2', or '3', must be a single character
        @retval complete, terminated setup command string
        """
        # use defaults - in the future, may consider making some of these parameters
        # byte 0
        channel_address = unit
        # byte 1
        line_feed = self._param_dict.format(Parameter.LINEFEED)
        parity_type = self._param_dict.format(Parameter.PARITY_TYPE)
        parity_enable = self._param_dict.format(Parameter.PARITY_ENABLE)
        extended_addressing = self._param_dict.format(Parameter.EXTENDED_ADDRESSING)
        baud_rate = self._param_dict.format(Parameter.BAUD_RATE)
        # parameter holds the actual baud value (e.g. 9600); map it to its enum encoding
        baud_rate = getattr(BaudRate, 'BAUD_%d' % baud_rate, BaudRate.BAUD_9600)
        # byte 2
        alarm_enable = self._param_dict.format(Parameter.ALARM_ENABLE)
        low_alarm_latch = self._param_dict.format(Parameter.LOW_ALARM_LATCH)
        high_alarm_latch = self._param_dict.format(Parameter.HIGH_ALARM_LATCH)
        rtd_wire = self._param_dict.format(Parameter.RTD_4_WIRE)
        temp_units = self._param_dict.format(Parameter.TEMP_UNITS)
        echo = self._param_dict.format(Parameter.ECHO)
        delay_units = self._param_dict.format(Parameter.COMMUNICATION_DELAY)
        # byte 3
        precision = self._param_dict.format(Parameter.PRECISION)
        # parameter holds the digit count (e.g. 6); map it to its enum encoding
        precision = getattr(UnitPrecision, 'DIGITS_%d' % precision, UnitPrecision.DIGITS_6)
        large_signal_filter_constant = self._param_dict.format(Parameter.LARGE_SIGNAL_FILTER_C)
        large_signal_filter_constant = filter_enum(large_signal_filter_constant)
        small_signal_filter_constant = self._param_dict.format(Parameter.SMALL_SIGNAL_FILTER_C)
        small_signal_filter_constant = filter_enum(small_signal_filter_constant)
        # # Factory default: 0x31070182
        # # Lab default: 0x310214C2
        # NOTE: str.encode("hex") is Python 2 only
        byte_0 = int(channel_address.encode("hex"), 16)
        log.debug('byte 0: %s', byte_0)
        byte_1 = \
            (line_feed << 7) + \
            (parity_type << 6) + \
            (parity_enable << 5) + \
            (extended_addressing << 4) + \
            baud_rate
        log.debug('byte 1: %s', byte_1)
        byte_2 = \
            (alarm_enable << 7) + \
            (low_alarm_latch << 6) + \
            (high_alarm_latch << 5) + \
            (rtd_wire << 4) + \
            (temp_units << 3) + \
            (echo << 2) + \
            delay_units
        log.debug('byte 2: %s', byte_2)
        byte_3 = \
            (precision << 6) + \
            (large_signal_filter_constant << 3) + \
            small_signal_filter_constant
        log.debug('byte 3: %s', byte_3)
        setup_command = '#%sSU%02x%02x%02x%02x' % (unit[0], byte_0, byte_1, byte_2, byte_3) + NEWLINE
        log.debug('default setup command (%r) for unit %02x (%s)' % (setup_command, byte_0, unit[0]))
        return setup_command
def _check_command(self, resp, prompt):
"""
Perform a checksum calculation on provided data. The checksum used for comparison is the last two characters of
the line.
@param resp - response from the instrument to the command
@param prompt - expected prompt (or the joined groups from a regex match)
@retval
"""
for line in resp.split(NEWLINE):
if line.startswith('?'):
raise InstrumentProtocolException('error processing command (%r)', resp[1:])
if line.startswith('*'): # response
if not valid_response(line):
raise InstrumentProtocolException('checksum failed (%r)', line)
    def _read_setup_response_handler(self, resp, prompt):
        """
        Validate and cache the READ_SETUP response in self._setup for later
        comparison by _restore_params.
        @param resp - response from the instrument to the command
        @param prompt - expected prompt (or the joined groups from a regex match)
        """
        self._check_command(resp, prompt)
        self._setup = resp
    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options.
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_dict(self):
"""
Populate the command dictionary with commands.
"""
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="Acquire Sample")
self._cmd_dict.add(Capability.DISCOVER, display_name='Discover')
    def _add_setup_param(self, name, fmt, **kwargs):
        """
        Add a setup parameter to the parameter dictionary. All 'SU' parameters are not startup
        parameters, but should be restored upon return from direct access. These parameters are
        all part of the instrument command 'SU'.
        @param name - parameter name
        @param fmt - formatting function applied when rendering the value
        @param kwargs - forwarded to ProtocolParameterDict.add (description, range, ...)
        """
        self._param_dict.add(name, '', None, fmt,
                             startup_param=False,
                             direct_access=True,
                             visibility=ParameterDictVisibility.READ_ONLY,
                             **kwargs)
    def _build_param_dict(self):
        """
        Populate the parameter dictionary with D1000 parameters.
        For each parameter key add value formatting function for set commands.
        """
        # The parameter dictionary.
        self._param_dict = ProtocolParameterDict()
        # Add parameter handlers to parameter dictionary for instrument configuration parameters.
        self._param_dict.add(Parameter.SAMPLE_INTERVAL,
                             '',  # this is a driver only parameter
                             None,
                             int,
                             type=ParameterDictType.INT,
                             startup_param=True,
                             display_name='D1000 Sample Periodicity',
                             range=(1, 3600),
                             description='Periodicity of D1000 temperature sample in autosample mode: (1-3600)',
                             default_value=DEFAULT_SAMPLE_RATE,
                             units=Units.SECOND,
                             visibility=ParameterDictVisibility.READ_WRITE)
        self._add_setup_param(Parameter.CHANNEL_ADDRESS,
                              int,
                              type=ParameterDictType.INT,
                              display_name='Base Channel Address',
                              description='Hex value of ASCII character to ID unit, e.g. 31 is the ASCII code for 1:'
                                          ' (30-31, 41-5A, 61-7A)',
                              range=(0x30, 0x7A),
                              default_value=0x31)
        self._add_setup_param(Parameter.LINEFEED,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='Line Feed Flag',
                              range={'True': True, 'False': False},
                              description='Enable D1000 to generate a linefeed before and after each response:'
                                          ' (true | false)',
                              default_value=False)
        self._add_setup_param(Parameter.PARITY_TYPE,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='Parity Type',
                              range={'Odd': True, 'Even': False},
                              description='Sets the parity: (true:odd | false:even)',
                              default_value=False)
        self._add_setup_param(Parameter.PARITY_ENABLE,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='Parity Flag',
                              range={'True': True, 'False': False},
                              description='Enable use of parity bit, a parity error will be issued if detected:'
                                          ' (true | false)',
                              default_value=False)
        self._add_setup_param(Parameter.EXTENDED_ADDRESSING,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='Extended Addressing',
                              range={'True': True, 'False': False},
                              description='Enable extended addressing: (true | false)',
                              default_value=False)
        # NOTE(review): '57600' maps to 8 here, but BaudRate.BAUD_57600 == 9
        # (8 is BAUD_115200) - verify which encoding the UI range should carry.
        # The default is stored as the actual baud value (9600), not the enum;
        # _build_setup_command converts via getattr(BaudRate, 'BAUD_%d' % value).
        self._add_setup_param(Parameter.BAUD_RATE,
                              int,
                              type=ParameterDictType.INT,
                              display_name='Baud Rate',
                              range={'38400': 0, '19200': 1, '9600': 2, '4800': 3, '2400': 4, '1200': 5, '600': 6,
                                     '300': 7, '57600': 8},
                              description='Using ethernet interface in deployed configuration: (300, 600, '
                                          '1200, 2400, 4800, 9600, 19200, 38400, 57600)',
                              default_value=9600,
                              units=Units.BAUD)
        self._add_setup_param(Parameter.ALARM_ENABLE,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='Enable Alarms',
                              range={'True': True, 'False': False},
                              description='Enable alarms to be controlled by the Digital Output (DO) command:'
                                          ' (true | false)',
                              default_value=False)
        self._add_setup_param(Parameter.LOW_ALARM_LATCH,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='Low Alarm Latching',
                              range={'True': True, 'False': False},
                              description='Enable changing the alarm to latching mode: (true | false)',
                              default_value=False)
        self._add_setup_param(Parameter.HIGH_ALARM_LATCH,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='High Alarm Latching',
                              range={'True': True, 'False': False},
                              description='Enable changing the alarm to latching mode: (true | false)',
                              default_value=False)
        self._add_setup_param(Parameter.RTD_4_WIRE,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='4 Wire RTD Flag',
                              range={'True': True, 'False': False},
                              description='Represents a physical configuration of the instrument, '
                                          'disabling may cause data to be misaligned: (true | false)',
                              default_value=True)
        self._add_setup_param(Parameter.TEMP_UNITS,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='Fahrenheit Flag',
                              range={'Fahrenheit': True, 'Celsius': False},
                              description='Flag to control the temperature format: (true:Fahrenheit | false:Celsius)',
                              default_value=False)
        self._add_setup_param(Parameter.ECHO,
                              bool,
                              type=ParameterDictType.BOOL,
                              display_name='Daisy Chain',
                              range={'True': True, 'False': False},
                              description='If not set, only 1 out of 3 D1000s will process commands: (true | false)',
                              default_value=True)
        self._add_setup_param(Parameter.COMMUNICATION_DELAY,
                              int,
                              type=ParameterDictType.INT,
                              display_name='Communication Delay',
                              range=(0, 3),
                              description='The number of delays to add when processing commands: (0-3)',
                              default_value=0)
        # default is stored as the digit count (6), not the enum encoding;
        # _build_setup_command converts via getattr(UnitPrecision, 'DIGITS_%d' % value)
        self._add_setup_param(Parameter.PRECISION,
                              int,
                              type=ParameterDictType.INT,
                              display_name='Precision',
                              range={'4 digits': 0, '5 digits': 1, '6 digits': 2, '7 digits': 3},
                              description='Number of digits the instrument should output for temperature query: '
                                          '(0=4-3=7)',
                              default_value=6)
        self._add_setup_param(Parameter.LARGE_SIGNAL_FILTER_C,
                              float,
                              type=ParameterDictType.FLOAT,
                              display_name='Large Signal Filter Constant',
                              range={'0': 0, '.25': 1, '.5': 2, '1': 3, '2': 4, '4': 5, '8': 6, '16': 7},
                              description='Time to reach 63% of its final value: '
                                          '(0 = 0.0, 1 = 0.25, 2 = 0.5, 3 = 1.0, 4 = 2.0, 5 = 4.0, 6 = 8.0, 7 = 16.0)',
                              default_value=0.0,
                              units=Units.SECOND)
        # NOTE(review): the description text ("Smaller filter constant, should be
        # larger than large filter constant") reads contradictorily - confirm intent.
        self._add_setup_param(Parameter.SMALL_SIGNAL_FILTER_C,
                              float,
                              type=ParameterDictType.FLOAT,
                              display_name='Small Signal Filter Constant',
                              range={'0': 0, '.25': 1, '.5': 2, '1': 3, '2': 4, '4': 5, '8': 6, '16': 7},
                              description='Smaller filter constant, should be larger than large filter constant: '
                                          '(0 = 0.0, 1 = 0.25, 2 = 0.5, 3 = 1.0, 4 = 2.0, 5 = 4.0, 6 = 8.0, 7 = 16.0)',
                              default_value=0.50,
                              units=Units.SECOND)
        for key in self._param_dict.get_keys():
            self._param_dict.set_default(key)
def _update_params(self):
    """
    Update the parameter dictionary.

    The D1000 exposes no dynamically queryable configuration, so there is
    nothing to refresh here; the method exists only to satisfy the
    protocol base-class interface.
    """
    pass
def _restore_params(self):
    """
    Restore each D1000 unit, clearing any alarms and set-point.

    Reads the current setup from every unit and rewrites it only when it
    differs from the freshly built default, then clears the set-point.
    A unit that fails to answer the setup query (e.g. because it has been
    readdressed) is skipped instead of being sent further commands that
    would also time out.
    """
    # make sure the alarms are disabled - preferred over doing setup, then clear alarms commands
    self._param_dict.set_value(Parameter.ALARM_ENABLE, False)
    for i in self._units:
        current_setup = None  # set in READ_SETUP response handler
        try:
            self._do_command(Command.READ_SETUP, i, response_regex=Response.READ_SETUP)
            current_setup = self._setup[4:][:-2]  # strip off the leader and checksum
        except InstrumentTimeoutException:
            log.error('D1000 unit %s has been readdressed, unable to restore settings' % i[0])
            # unit unreachable at this address - skip it; issuing the
            # restore sequence below would only time out as well
            continue
        new_setup = self._build_setup_command(Command.SETUP, i)[4:]  # strip leader (no checksum)
        if not current_setup == new_setup:
            log.debug('restoring setup to default state (%s) from current state (%s)', new_setup, current_setup)
            self._do_command(Command.ENABLE_WRITE, i)
            self._do_command(Command.SETUP, i)
        # writes must be re-enabled before each privileged command
        self._do_command(Command.ENABLE_WRITE, i)
        self._do_command(Command.CLEAR_ZERO, i)
########################################################################
# Event handlers for UNKNOWN state.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
    """
    Entry action for the UNKNOWN state: notify the driver layer so the
    superclass can query and publish the new state.
    """
    self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
    """Exit action for the UNKNOWN state; nothing to clean up."""
    pass
def _handler_unknown_discover(self, *args, **kwargs):
    """
    Discover the current instrument state. The D1000 has no autosample
    mode of its own, so discovery always resolves to COMMAND.
    @retval next_state, (next_state, result)
    """
    next_state = ProtocolState.COMMAND
    return next_state, (next_state, [])
########################################################################
# Event handlers for COMMAND state.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
    """
    Entry action for the COMMAND state: push the restored parameter set
    out to the instrument, then notify the driver layer of the state
    change so the superclass can query and publish it.
    """
    self._restore_params()
    self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_exit(self, *args, **kwargs):
    """Exit action for the COMMAND state; nothing to clean up."""
    pass
def _handler_command_set(self, *args, **kwargs):
    """
    Validate and apply a SET request.

    The instrument has no writable parameters, so this mainly exists to
    satisfy the framework; it still validates the supplied names/values
    and range-checks SAMPLE_INTERVAL before delegating to _set_params.

    @param args[0] dict mapping parameter name -> requested value
    @param args[1] optional startup flag (defaults to False)
    @raises InstrumentParameterException for an unknown parameter name,
            a value that cannot be converted to int, or an out-of-range
            sample interval
    """
    input_params = args[0]
    for key, value in input_params.items():
        if not Parameter.has(key):
            raise InstrumentParameterException('Invalid parameter supplied to set: %s' % key)
        try:
            value = int(value)
        # int() raises TypeError for None/objects but ValueError for
        # non-numeric strings - the original only caught TypeError,
        # letting e.g. int('abc') escape as an unhandled ValueError
        except (TypeError, ValueError):
            raise InstrumentParameterException('Invalid value [%s] for parameter %s' % (value, key))
        if key == Parameter.SAMPLE_INTERVAL:
            if value < MIN_SAMPLE_RATE or value > MAX_SAMPLE_RATE:
                raise InstrumentParameterException('Parameter %s value [%d] is out of range [%d %d]' %
                                                   (key, value, MIN_SAMPLE_RATE, MAX_SAMPLE_RATE))
    startup = False
    try:
        startup = args[1]
    except IndexError:
        pass
    self._set_params(input_params, startup)
    return None, None
def _handler_command_autosample(self, *args, **kwargs):
    """Transition from COMMAND into AUTOSAMPLE."""
    next_state = ProtocolState.AUTOSAMPLE
    return next_state, (next_state, None)
def _handler_command_start_direct(self, *args, **kwargs):
    """Transition from COMMAND into DIRECT_ACCESS."""
    next_state = ProtocolState.DIRECT_ACCESS
    return next_state, (next_state, [])
########################################################################
# Event handlers for AUTOSAMPLE state.
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
    """
    Start auto polling the temperature sensors.

    Publishes the state change, fires one immediate sample, then
    registers a recurring sampler driven by Parameter.SAMPLE_INTERVAL.
    """
    self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    # take one sample right away so data flows before the first
    # scheduled interval elapses
    self._protocol_fsm.on_event(ProtocolEvent.ACQUIRE_SAMPLE)
    job_name = ScheduledJob.SAMPLE
    config = {
        DriverConfigKey.SCHEDULER: {
            job_name: {
                DriverSchedulerConfigKey.TRIGGER: {
                    DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
                    DriverSchedulerConfigKey.SECONDS: self._param_dict.get(Parameter.SAMPLE_INTERVAL)
                }
            }
        }
    }
    # the scheduler configuration must be installed before the event is
    # registered, otherwise the job has no trigger to run on
    self.set_init_params(config)
    self._add_scheduler_event(ScheduledJob.SAMPLE, ProtocolEvent.ACQUIRE_SAMPLE)
def _handler_autosample_exit(self, *args, **kwargs):
    """
    Stop autosampling - remove the scheduled autosample job, tolerating
    the case where it was never registered.
    """
    if self._scheduler is not None:
        try:
            self._remove_scheduler(ScheduledJob.SAMPLE)
        except KeyError:
            # the job may never have been installed (autosample entered
            # and exited before the scheduler picked up the config)
            log.debug('_remove_scheduler could not find: %s', ScheduledJob.SAMPLE)
def _handler_sample(self, *args, **kwargs):
    """
    Poll each of the temperature probes once and wait for the resulting
    particles to be published.
    """
    deadline = time.time() + SAMPLE_TIMEOUT
    for unit in self._units:
        self._do_command(Command.READ, unit)
    particles = self.wait_for_particles([DataParticleType.D1000_PARSED], deadline)
    return None, (None, particles)
def _handler_autosample_stop(self, *args, **kwargs):
    """Terminate autosampling and return to COMMAND."""
    next_state = ProtocolState.COMMAND
    return next_state, (next_state, [])
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
    """
    Entry action for DIRECT_ACCESS: publish the state change and reset
    the record of commands sent while in direct access.
    """
    self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    self._sent_cmds = []
def _handler_direct_access_exit(self, *args, **kwargs):
    """Exit action for DIRECT_ACCESS; nothing to clean up."""
def _handler_direct_access_execute_direct(self, data):
    """Pass raw direct-access input straight through to the instrument."""
    self._do_cmd_direct(data)
    return None, (None, [])
def _handler_direct_access_stop_direct(self, *args, **kwargs):
    """Leave direct access and return to COMMAND."""
    next_state = ProtocolState.COMMAND
    return next_state, (next_state, [])
class PlaybackProtocol(Protocol):
    """
    Protocol variant used to replay recorded D1000 log files.

    Timestamp marker lines embedded in the log set the publication
    timestamp for the temperature samples that follow them.
    """
    timestamp_regex = re.compile(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} UTC)')

    def __init__(self, driver_event):
        super(PlaybackProtocol, self).__init__(None, None, driver_event)
        self.log_timestamp = None
        self._chunker = StringChunker(self.sieve_function)

    @staticmethod
    def sieve_function(raw_data):
        """
        Locate temperature samples and timestamp markers in the raw data
        and return their (start, end) spans.
        """
        patterns = [
            D1000TemperatureDataParticle.regex_compiled(),
            PlaybackProtocol.timestamp_regex,
        ]
        spans = []
        for pattern in patterns:
            spans.extend((m.start(), m.end()) for m in pattern.finditer(raw_data))
        if not spans:
            log.debug("sieve_function: raw_data=%r, return_list=%s", raw_data, spans)
        return spans

    def _got_chunk(self, chunk, timestamp):
        """
        Handle one chunk from the chunker: either record a new log
        timestamp, or publish a temperature sample stamped with the most
        recently seen timestamp.
        """
        match = PlaybackProtocol.timestamp_regex.match(chunk)
        if match:
            parsed = datetime.strptime(match.group(1), '%Y-%m-%d %H:%M:%S %Z')
            # store as an NTP-style timestamp (seconds since 1900-01-01)
            self.log_timestamp = (parsed - datetime(1900, 1, 1)).total_seconds()
            return
        # only publish D1000 playback if we have a timestamp
        if self.log_timestamp is not None:
            self._extract_sample(D1000TemperatureDataParticle,
                                 D1000TemperatureDataParticle.regex_compiled(),
                                 chunk, self.log_timestamp)
def create_playback_protocol(callback):
    """Factory used by the playback harness to build a PlaybackProtocol."""
    return PlaybackProtocol(callback)
| bsd-2-clause |
pronexo-odoo/odoo-argentina | l10n_ar_wsafip_fe/wizard/query_invoices.py | 1 | 7160 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from datetime import datetime
import logging
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
class query_invoices(osv.osv_memory):
    """Wizard that pulls issued invoices back from the AFIP WSFE web
    service for a journal and invoice-number range, optionally updating
    existing local invoices or creating missing ones in draft state."""
    _name = 'l10n_ar_wsafip_fe.query_invoices'
    _description = 'Query for invoices in AFIP web services'
    _columns = {
        'journal_id': fields.many2one('account.journal', 'Journal', required=True),
        'first_invoice_number': fields.integer('First invoice number', required=True),
        'last_invoice_number': fields.integer('Last invoice number', required=True),
        'update_invoices': fields.boolean('Update CAE if invoice exists'),
        'create_invoices': fields.boolean('Create invoice in draft if not exists'),
    }
    _defaults = {
        'first_invoice_number': 1,
        'last_invoice_number': 1,
    }

    def onchange_journal_id(self, cr, uid, ids, first_invoice_number, journal_id):
        """Clamp the requested range to the number of items AFIP has
        generated for the selected journal."""
        journal_obj = self.pool.get('account.journal')
        res = {}
        if journal_id:
            num_items = journal_obj.browse(cr, uid, journal_id).afip_items_generated
            res['value'] = {
                'first_invoice_number': min(first_invoice_number, num_items),
                'last_invoice_number': num_items,
            }
        return res

    def execute(self, cr, uid, ids, context=None):
        """Query AFIP for each invoice number in the configured range and
        create or update the corresponding local invoices.

        :raises osv.except_osv: when the range is inverted, or when a
            partner has no receivable account configured.
        """
        context = context or {}
        invoice_obj = self.pool.get('account.invoice')
        partner_obj = self.pool.get('res.partner')
        document_type_obj = self.pool.get('afip.document_type')
        for qi in self.browse(cr, uid, ids):
            conn = qi.journal_id.afip_connection_id
            serv = qi.journal_id.afip_connection_id.server_id
            # rebuilds the local invoice number, e.g. "0001-%08d"
            number_format = "%s%%0%sd%s" % (qi.journal_id.sequence_id.prefix,
                                            qi.journal_id.sequence_id.padding,
                                            qi.journal_id.sequence_id.suffix)
            if qi.first_invoice_number > qi.last_invoice_number:
                # original title read "Qrong invoice range numbers" - typo fixed
                raise osv.except_osv(_(u'Wrong invoice range numbers'),
                                     _('Please, first invoice number must be less than last invoice'))

            def _fch_(s):
                # AFIP returns dates as YYYYMMDD and timestamps as
                # YYYYMMDDhhmmss; normalize both to the server format.
                if s and len(s) == 8:
                    return datetime.strptime(s, "%Y%m%d").strftime('%Y-%m-%d %H:%M:%S')
                elif s and len(s) > 8:
                    return datetime.strptime(s, "%Y%m%d%H%M%S").strftime('%Y-%m-%d %H:%M:%S')
                else:
                    return False

            for inv_number in range(qi.first_invoice_number, qi.last_invoice_number + 1):
                r = serv.wsfe_query_invoice(conn.id,
                                            qi.journal_id.journal_class_id.afip_code,
                                            inv_number,
                                            qi.journal_id.point_of_sale)
                r = r[serv.id]
                if r['EmisionTipo'] == 'CAE':
                    inv_ids = invoice_obj.search(cr, uid, [
                        ('journal_id', '=', qi.journal_id.id),
                        ('number', '=', number_format % inv_number),
                    ])
                    if inv_ids and qi.update_invoices:
                        # Update Invoice
                        # TODO: if invoice in draft complete all data.
                        # TODO: if invoice in not draft just complete cae if not set.
                        _logger.debug("Update invoice number: %s" % (number_format % inv_number))
                    elif not inv_ids and qi.create_invoices:
                        partner_id = partner_obj.search(cr, uid, [
                            ('document_type_id.afip_code', '=', r['DocTipo']),
                            ('document_number', '=', r['DocNro']),
                        ])
                        if partner_id:
                            # Take partner
                            partner_id = partner_id[0]
                        else:
                            # Create partner
                            _logger.debug("Creating partner doc number: %s" % r['DocNro'])
                            document_type_id = document_type_obj.search(cr, uid, [
                                ('afip_code', '=', r['DocTipo']),
                            ])
                            assert len(document_type_id) == 1
                            document_type_id = document_type_id[0]
                            partner_id = partner_obj.create(cr, uid, {
                                'name': r['DocNro'],
                                'document_type_id': document_type_id,
                                'document_number': r['DocNro'],
                            })
                        _logger.debug("Creating invoice number: %s" % (number_format % inv_number))
                        partner = partner_obj.browse(cr, uid, partner_id)
                        if not partner.property_account_receivable.id:
                            raise osv.except_osv(_(u'Partner has not set a receivable account'),
                                                 _('Please, first set the receivable account for %s') % partner.name)
                        inv_id = invoice_obj.create(cr, uid, {
                            'company_id': qi.journal_id.company_id.id,
                            'account_id': partner.property_account_receivable.id,
                            'internal_number': number_format % inv_number,
                            'name': 'Created from AFIP (%s)' % number_format % inv_number,
                            'journal_id': qi.journal_id.id,
                            'partner_id': partner_id,
                            'date_invoice': _fch_(r['CbteFch']),
                            'afip_cae': r['CodAutorizacion'],
                            'afip_cae_due': _fch_(r['FchProceso']),
                            'afip_service_start': _fch_(r['FchServDesde']),
                            'afip_service_end': _fch_(r['FchServHasta']),
                            'amount_total': r['ImpTotal'],
                            'state': 'draft',
                        })
                    else:
                        _logger.debug("Ignoring invoice: %s" % (number_format % inv_number))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mihbor/kafka | tests/kafkatest/tests/core/throttling_test.py | 14 | 8758 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import math
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until
from kafkatest.services.performance import ProducerPerformanceService
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.utils import is_int
class ThrottlingTest(ProduceConsumeValidateTest):
    """Tests throttled partition reassignment. This is essentially similar
    to the reassign_partitions_test, except that we throttle the reassignment
    and verify that it takes a sensible amount of time given the throttle
    and the amount of data being moved.

    Since the correctness is time dependent, this test also simplifies the
    cluster topology. In particular, we fix the number of brokers, the
    replication-factor, the number of partitions, the partition size, and
    the number of partitions being moved so that we can accurately predict
    the time throttled reassignment should take.
    """

    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ThrottlingTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        # Because we are starting the producer/consumer/validate cycle _after_
        # seeding the cluster with big data (to test throttling), we need to
        # Start the consumer from the end of the stream. further, we need to
        # ensure that the consumer is fully started before the producer starts
        # so that we don't miss any messages. This timeout ensures the sufficient
        # condition.
        self.consumer_init_timeout_sec = 20
        self.num_brokers = 6
        self.num_partitions = 3
        self.kafka = KafkaService(test_context,
                                  num_nodes=self.num_brokers,
                                  zk=self.zk,
                                  topics={
                                      self.topic: {
                                          "partitions": self.num_partitions,
                                          "replication-factor": 2,
                                          "configs": {
                                              "segment.bytes": 64 * 1024 * 1024
                                          }
                                      }
                                  })
        self.producer_throughput = 1000
        self.timeout_sec = 400
        self.num_records = 2000
        self.record_size = 4096 * 100  # 400 KB
        # 1 MB per partition on average.
        self.partition_size = (self.num_records * self.record_size) / self.num_partitions
        self.num_producers = 2
        self.num_consumers = 1
        self.throttle = 4 * 1024 * 1024  # 4 MB/s

    def setUp(self):
        # only ZooKeeper is started here; Kafka is started inside the test
        # after the bulk data has been configured
        self.zk.start()

    def min_cluster_size(self):
        # Override this since we're adding services outside of the constructor
        return super(ThrottlingTest, self).min_cluster_size() +\
            self.num_producers + self.num_consumers

    def clean_bounce_some_brokers(self):
        """Bounce every other broker"""
        for node in self.kafka.nodes[::2]:
            self.kafka.restart_node(node, clean_shutdown=True)

    def reassign_partitions(self, bounce_brokers, throttle):
        """This method reassigns partitions using a throttle. It makes an
        assertion about the minimum amount of time the reassignment should take
        given the value of the throttle, the number of partitions being moved,
        and the size of each partition.
        """
        partition_info = self.kafka.parse_describe_topic(
            self.kafka.describe_topic(self.topic))
        self.logger.debug("Partitions before reassignment:" +
                          str(partition_info))
        # rotate each partition's replica assignment to the next
        # partition's replicas, counting the maximum replicas any one
        # broker will have to fetch
        max_num_moves = 0
        for i in range(0, self.num_partitions):
            old_replicas = set(partition_info["partitions"][i]["replicas"])
            new_part = (i+1) % self.num_partitions
            new_replicas = set(partition_info["partitions"][new_part]["replicas"])
            max_num_moves = max(len(new_replicas - old_replicas), max_num_moves)
            partition_info["partitions"][i]["partition"] = new_part
        self.logger.debug("Jumbled partitions: " + str(partition_info))

        self.kafka.execute_reassign_partitions(partition_info,
                                               throttle=throttle)
        start = time.time()
        if bounce_brokers:
            # bounce a few brokers at the same time
            self.clean_bounce_some_brokers()

        # Wait until finished or timeout
        size_per_broker = max_num_moves * self.partition_size
        self.logger.debug("Max amount of data transfer per broker: %fb",
                          size_per_broker)
        estimated_throttled_time = math.ceil(float(size_per_broker) /
                                             self.throttle)
        # allow 2x the throttled estimate before declaring failure
        estimated_time_with_buffer = estimated_throttled_time * 2
        self.logger.debug("Waiting %ds for the reassignment to complete",
                          estimated_time_with_buffer)
        wait_until(lambda: self.kafka.verify_reassign_partitions(partition_info),
                   timeout_sec=estimated_time_with_buffer, backoff_sec=.5)
        stop = time.time()
        time_taken = stop - start
        self.logger.debug("Transfer took %d second. Estimated time : %ds",
                          time_taken,
                          estimated_throttled_time)
        # the reassignment must not finish meaningfully faster than the
        # throttle allows (10% slack), otherwise throttling is broken
        assert time_taken >= estimated_throttled_time * 0.9, \
            ("Expected rebalance to take at least %ds, but it took %ds" % (
                estimated_throttled_time,
                time_taken))

    @cluster(num_nodes=10)
    @parametrize(bounce_brokers=True)
    @parametrize(bounce_brokers=False)
    def test_throttled_reassignment(self, bounce_brokers):
        security_protocol = 'PLAINTEXT'
        self.kafka.security_protocol = security_protocol
        self.kafka.interbroker_security_protocol = security_protocol

        # seed the cluster with a large amount of data before the
        # produce/consume/validate cycle starts
        producer_id = 'bulk_producer'
        bulk_producer = ProducerPerformanceService(
            context=self.test_context, num_nodes=1, kafka=self.kafka,
            topic=self.topic, num_records=self.num_records,
            record_size=self.record_size, throughput=-1, client_id=producer_id)

        self.producer = VerifiableProducer(context=self.test_context,
                                           num_nodes=1,
                                           kafka=self.kafka, topic=self.topic,
                                           message_validator=is_int,
                                           throughput=self.producer_throughput)

        self.consumer = ConsoleConsumer(self.test_context,
                                        self.num_consumers,
                                        self.kafka,
                                        self.topic,
                                        consumer_timeout_ms=60000,
                                        message_validator=is_int,
                                        from_beginning=False)

        self.kafka.start()
        bulk_producer.run()
        self.run_produce_consume_validate(core_test_action=
            lambda: self.reassign_partitions(bounce_brokers, self.throttle))

        self.logger.debug("Bulk producer outgoing-byte-rates: %s",
                          (metric.value for k, metrics in
                           bulk_producer.metrics(group='producer-metrics', name='outgoing-byte-rate', client_id=producer_id) for
) | apache-2.0 |
susansls/zulip | api/integrations/perforce/zulip_perforce_config.py | 33 | 2514 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for the plugin
ZULIP_USER = "p4-bot@example.com"                   # bot account email used to authenticate
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"  # the bot's API key
ZULIP_SITE = "https://zulip.example.com"            # base URL of the Zulip server
# commit_notice_destination() lets you customize where commit notices
# are sent to with the full power of a Python function.
#
# It takes the following arguments:
# * path = the path to the Perforce depot on the server
# * changelist = the changelist id
#
# Returns a dictionary encoding the stream and topic to send the
# notification to (or None to send no notification).
#
# The default code below will send every commit except for ones in the
# "master-plan" and "secret" subdirectories of //depot/ to:
# * stream "depot_subdirectory-commits"
# * subject "change_root"
def commit_notice_destination(path, changelist):
    """Decide where a commit notification for *path* should be sent.

    Returns a dict with the target ``stream`` and ``subject``, or None to
    suppress the notification entirely (used for excluded directories).
    The changelist id is accepted for custom routing logic but unused by
    the default implementation.
    """
    segments = path.split('/')
    # depot paths look like //depot/<subdir>/...; wildcards mean the
    # change is not scoped to a subdirectory
    if len(segments) >= 4 and segments[3] not in ("*", "..."):
        directory = segments[3]
    else:
        # No subdirectory, so just use "depot"
        directory = segments[2]
    if directory in ["evil-master-plan", "my-super-secret-repository"]:
        # Return None for cases where you don't want a notice sent
        return None
    return dict(stream = "%s-commits" % (directory,),
                subject = path)
## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below (e.g. "/home/user/zulip/api");
## None means "rely on the normal import path".
ZULIP_API_PATH = None
| apache-2.0 |
secretdataz/OpenKore-Src | src/scons-local-2.0.1/SCons/Tool/packaging/rpm.py | 61 | 13485 | """SCons.Tool.Packaging.rpm
The rpm packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/rpm.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import SCons.Builder
from SCons.Environment import OverrideEnvironment
from SCons.Tool.packaging import stripinstallbuilder, src_targz
from SCons.Errors import UserError
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
            PACKAGEVERSION, DESCRIPTION, SUMMARY, X_RPM_GROUP, LICENSE,
            **kw):
    """Entry point of the rpm packager: build a source and a binary RPM.

    NOTE(review): locals() is captured below to fold the named tag
    arguments back into kw - any new local variable defined above that
    point leaks into the tag namespace, so be careful when editing.
    """
    # initialize the rpm tool
    SCons.Tool.Tool('rpm').generate(env)

    bld = env['BUILDERS']['Rpm']

    # Generate a UserError whenever the target name has been set explicitly,
    # since rpm does not allow for controlling it. This is detected by
    # checking if the target has been set to the default by the Package()
    # Environment function.
    if str(target[0])!="%s-%s"%(NAME, VERSION):
        raise UserError( "Setting target is not supported for rpm." )
    else:
        # This should be overridable from the construction environment,
        # which it is by using ARCHITECTURE=.
        # Guessing based on what os.uname() returns at least allows it
        # to work for both i386 and x86_64 Linux systems.
        archmap = {
            'i686' : 'i386',
            'i586' : 'i386',
            'i486' : 'i386',
        }

        buildarchitecture = os.uname()[4]
        buildarchitecture = archmap.get(buildarchitecture, buildarchitecture)

        if 'ARCHITECTURE' in kw:
            buildarchitecture = kw['ARCHITECTURE']

        # the two outputs: <name>-<version>-<release>.{src,<arch>}.rpm
        fmt = '%s-%s-%s.%s.rpm'
        srcrpm = fmt % (NAME, VERSION, PACKAGEVERSION, 'src')
        binrpm = fmt % (NAME, VERSION, PACKAGEVERSION, buildarchitecture)

        target = [ srcrpm, binrpm ]

    # get the correct arguments into the kw hash
    loc=locals()
    del loc['kw']
    kw.update(loc)
    del kw['source'], kw['target'], kw['env']

    # if no "SOURCE_URL" tag is given add a default one.
    if 'SOURCE_URL' not in kw:
        #kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')
        kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')

    # mangle the source and target list for the rpmbuild
    env = OverrideEnvironment(env, kw)
    target, source = stripinstallbuilder(target, source, env)
    target, source = addspecfile(target, source, env)
    target, source = collectintargz(target, source, env)

    # now call the rpm builder to actually build the packet.
    return bld(env, target, source, **kw)
def collectintargz(target, source, env):
    """ Puts all source files into a tar.gz file.

    Returns (target, tarball) where tarball is the node of the generated
    source archive that the rpmbuild step consumes.
    """
    # the rpm tool depends on a source package, until this is changed
    # this hack needs to be here that tries to pack all sources in.
    sources = env.FindSourceFiles()

    # filter out the target we are building the source list for.
    #sources = [s for s in sources if not (s in target)]
    sources = [s for s in sources if s not in target]

    # find the .spec file for rpm and add it since it is not necessarily found
    # by the FindSourceFiles function.
    #sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
    spec_file = lambda s: str(s).rfind('.spec') != -1
    sources.extend( list(filter(spec_file, source)) )

    # as the source contains the url of the source package this rpm package
    # is built from, we extract the target name
    #tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
    tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
    try:
        # prefer the basename of the user-supplied SOURCE_URL when present
        #tarball = env['SOURCE_URL'].split('/')[-1]
        tarball = env['SOURCE_URL'].split('/')[-1]
    except KeyError, e:
        raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )

    tarball = src_targz.package(env, source=sources, target=tarball,
                                PACKAGEROOT=env['PACKAGEROOT'], )

    return (target, tarball)
def addspecfile(target, source, env):
    """Generate the <NAME>-<VERSION>.spec file node and append it to the
    source list; returns the (target, source) pair for chaining."""
    spec_name = "%s-%s" % (env['NAME'], env['VERSION'])
    builder = SCons.Builder.Builder(action=build_specfile,
                                    suffix='.spec',
                                    target_factory=SCons.Node.FS.File)
    source.extend(builder(env, spec_name, source))
    return (target, source)
def build_specfile(target, source, env):
    """ Builds a RPM specfile from a dictionary with string metadata and
    by analyzing a tree of nodes.

    Writes the header, scriptlet sections and %files section to the
    target node's path, then invokes the optional CHANGE_SPECFILE hook.
    """
    file = open(target[0].abspath, 'w')
    # NOTE(review): 'str' is assigned but never used, and 'file' is only
    # closed on the success path - the handle leaks if a KeyError is
    # raised while writing. Candidate cleanup for a future change.
    str = ""

    try:
        file.write( build_specfile_header(env) )
        file.write( build_specfile_sections(env) )
        file.write( build_specfile_filesection(env, source) )
        file.close()

        # call a user specified function
        if 'CHANGE_SPECFILE' in env:
            env['CHANGE_SPECFILE'](target, source)

    except KeyError, e:
        raise SCons.Errors.UserError( '"%s" package field for RPM is missing.' % e.args[0] )
#
# mandatory and optional package tag section
#
def build_specfile_sections(spec):
    """ Builds the sections of a rpm specfile.

    Renders the mandatory %description section plus any optional
    scriptlet sections present in *spec*; default prep/build/install/
    clean rules are filled in when the caller did not supply them.
    """
    # renamed from 'str', which shadowed the builtin
    result = ""

    mandatory_sections = {
        'DESCRIPTION' : '\n%%description\n%s\n\n', }

    result = result + SimpleTagCompiler(mandatory_sections).compile( spec )

    optional_sections = {
        'DESCRIPTION_'         : '%%description -l %s\n%s\n\n',
        'CHANGELOG'            : '%%changelog\n%s\n\n',
        'X_RPM_PREINSTALL'     : '%%pre\n%s\n\n',
        'X_RPM_POSTINSTALL'    : '%%post\n%s\n\n',
        'X_RPM_PREUNINSTALL'   : '%%preun\n%s\n\n',
        'X_RPM_POSTUNINSTALL'  : '%%postun\n%s\n\n',
        'X_RPM_VERIFY'         : '%%verify\n%s\n\n',

        # These are for internal use but could possibly be overriden
        'X_RPM_PREP'           : '%%prep\n%s\n\n',
        'X_RPM_BUILD'          : '%%build\n%s\n\n',
        'X_RPM_INSTALL'        : '%%install\n%s\n\n',
        'X_RPM_CLEAN'          : '%%clean\n%s\n\n',
    }

    # Default prep, build, install and clean rules
    # TODO: optimize those build steps, to not compile the project a second time
    if 'X_RPM_PREP' not in spec:
        spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'

    if 'X_RPM_BUILD' not in spec:
        spec['X_RPM_BUILD'] = 'mkdir "$RPM_BUILD_ROOT"'

    if 'X_RPM_INSTALL' not in spec:
        spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'

    if 'X_RPM_CLEAN' not in spec:
        spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'

    result = result + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )

    return result
def build_specfile_header(spec):
    """ Builds all sections but the %files of a rpm specfile
    """
    # renamed from 'str', which shadowed the builtin
    result = ""

    # first the mandatory sections
    mandatory_header_fields = {
        'NAME'           : '%%define name %s\nName: %%{name}\n',
        'VERSION'        : '%%define version %s\nVersion: %%{version}\n',
        'PACKAGEVERSION' : '%%define release %s\nRelease: %%{release}\n',
        'X_RPM_GROUP'    : 'Group: %s\n',
        'SUMMARY'        : 'Summary: %s\n',
        'LICENSE'        : 'License: %s\n', }

    result = result + SimpleTagCompiler(mandatory_header_fields).compile( spec )

    # now the optional tags
    # NOTE(review): the original dict literal listed 'X_RPM_CONFLICTS'
    # twice with the same value; the duplicate entry has been removed
    # (a later duplicate key silently overwrites the earlier one).
    optional_header_fields = {
        'VENDOR'              : 'Vendor: %s\n',
        'X_RPM_URL'           : 'Url: %s\n',
        'SOURCE_URL'          : 'Source: %s\n',
        'SUMMARY_'            : 'Summary(%s): %s\n',
        'X_RPM_DISTRIBUTION'  : 'Distribution: %s\n',
        'X_RPM_ICON'          : 'Icon: %s\n',
        'X_RPM_PACKAGER'      : 'Packager: %s\n',
        'X_RPM_GROUP_'        : 'Group(%s): %s\n',
        'X_RPM_REQUIRES'      : 'Requires: %s\n',
        'X_RPM_PROVIDES'      : 'Provides: %s\n',
        'X_RPM_CONFLICTS'     : 'Conflicts: %s\n',
        'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\n',
        'X_RPM_SERIAL'        : 'Serial: %s\n',
        'X_RPM_EPOCH'         : 'Epoch: %s\n',
        'X_RPM_AUTOREQPROV'   : 'AutoReqProv: %s\n',
        'X_RPM_EXCLUDEARCH'   : 'ExcludeArch: %s\n',
        'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\n',
        'X_RPM_PREFIX'        : 'Prefix: %s\n',

        # internal use
        'X_RPM_BUILDROOT'     : 'BuildRoot: %s\n', }

    # fill in default values:
    # Adding a BuildRequires renders the .rpm unbuildable under System, which
    # are not managed by rpm, since the database to resolve this dependency is
    # missing (take Gentoo as an example)
    #    if not s.has_key('x_rpm_BuildRequires'):
    #        s['x_rpm_BuildRequires'] = 'scons'

    if 'X_RPM_BUILDROOT' not in spec:
        spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'

    result = result + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )
    return result
#
# mandatory and optional file tags
#
def build_specfile_filesection(spec, files):
    """ builds the %files section of the specfile

    One line per installed node, each prefixed with whatever packaging
    tags (config/doc/attr/lang/...) the node carries as attributes.
    """
    # renamed from 'str', which shadowed the builtin
    result = '%files\n'

    if 'X_RPM_DEFATTR' not in spec:
        spec['X_RPM_DEFATTR'] = '(-,root,root)'

    result = result + '%%defattr %s\n' % spec['X_RPM_DEFATTR']

    supported_tags = {
        'PACKAGING_CONFIG'           : '%%config %s',
        'PACKAGING_CONFIG_NOREPLACE' : '%%config(noreplace) %s',
        'PACKAGING_DOC'              : '%%doc %s',
        'PACKAGING_UNIX_ATTR'        : '%%attr %s',
        'PACKAGING_LANG_'            : '%%lang(%s) %s',
        'PACKAGING_X_RPM_VERIFY'     : '%%verify %s',
        'PACKAGING_X_RPM_DIR'        : '%%dir %s',
        'PACKAGING_X_RPM_DOCDIR'     : '%%docdir %s',
        'PACKAGING_X_RPM_GHOST'      : '%%ghost %s', }

    # renamed loop variable from 'file', which shadowed the builtin
    for node in files:
        # build the tagset
        tags = {}
        for k in supported_tags.keys():
            try:
                tags[k] = getattr(node, k)
            except AttributeError:
                pass

        # compile the tagset
        result = result + SimpleTagCompiler(supported_tags, mandatory=0).compile( tags )

        result = result + ' '
        result = result + node.PACKAGING_INSTALL_LOCATION
        result = result + '\n\n'

    return result
class SimpleTagCompiler(object):
    """ This class is a simple string substition utility:
    the replacement specfication is stored in the tagset dictionary, something
    like:
     { "abc"  : "cdef %s ",
       "abc_" : "cdef %s %s" }

    the compile function gets a value dictionary, which may look like:
     { "abc"   : "ghij",
       "abc_gh": "ij" }

    The resulting string will be:
     "cdef ghij cdef gh ij"

    A tag ending in '_' is "international": the two characters following
    it in a value key are treated as a country code and passed through
    to the replacement template as a second argument.
    """
    def __init__(self, tagset, mandatory=1):
        # mandatory=1 makes compile() raise KeyError when a tag from the
        # tagset has no corresponding value
        self.tagset = tagset
        self.mandatory = mandatory

    def compile(self, values):
        """ compiles the tagset and returns a str containing the result
        """
        def is_international(tag):
            #return tag.endswith('_')
            return tag[-1:] == '_'

        def get_country_code(tag):
            return tag[-2:]

        def strip_country_code(tag):
            return tag[:-2]

        replacements = list(self.tagset.items())

        # NOTE(review): 'str' shadows the builtin for the rest of this method
        str = ""
        #domestic = [ (k,v) for k,v in replacements if not is_international(k) ]
        domestic = [t for t in replacements if not is_international(t[0])]
        for key, replacement in domestic:
            try:
                str = str + replacement % values[key]
            except KeyError, e:
                if self.mandatory:
                    raise e

        #international = [ (k,v) for k,v in replacements if is_international(k) ]
        international = [t for t in replacements if is_international(t[0])]
        for key, replacement in international:
            try:
                #int_values_for_key = [ (get_country_code(k),v) for k,v in values.items() if strip_country_code(k) == key ]
                x = [t for t in values.items() if strip_country_code(t[0]) == key]
                int_values_for_key = [(get_country_code(t[0]),t[1]) for t in x]
                for v in int_values_for_key:
                    str = str + replacement % v
            except KeyError, e:
                if self.mandatory:
                    raise e

        return str
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
Maxklos/WeatherProjekt | client/client.py | 1 | 1310 | import time
def installData(s, idpi, n=None):
    """Send the sensor-name registration string for this Pi.

    Joins every name in *n* with a trailing '-' and appends the Pi id,
    e.g. ['t1', 't2'] and 'pi7' -> 't1-t2-pi7', then sends it on *s*.

    Parameters:
        s: connected socket-like object with a send() method.
        idpi: string identifier of this Pi, appended last.
        n: optional iterable of sensor names.  The original signature used
           the mutable default ``n=[]`` (shared across calls); ``None`` is
           the safe equivalent.
    """
    # NOTE(review): the original comment said "check if all names are valid"
    # but no validation was ever implemented.
    if n is None:
        n = []
    prefix = ''.join(str(name) + '-' for name in n)
    s.send(prefix + idpi)
def sendData(s, t, **kwargs):
    """Send a timestamp line followed by one "name:value" line per reading."""
    s.send("time:{}\n".format(t))
    for name, value in kwargs.items():
        s.send("{}:{}\n".format(name, value))
def recv_timeout(the_socket, timeout=2):
    """Receive from a non-blocking socket until *timeout* seconds of silence.

    Collects whatever arrives on *the_socket* and returns it joined as one
    string.  The silence clock restarts each time data arrives; if nothing
    at all arrives the call gives up after twice the timeout.

    NOTE(review): the original author flagged this as "doesn't work stable
    at all" -- the timeout heuristic can truncate slow transmissions.
    """
    import socket  # local import: the module header only imports "time"

    # make socket non blocking
    the_socket.setblocking(0)

    total_data = []  # received chunks, joined on return
    begin = time.time()
    while True:
        # if you got some data, then break after timeout of silence
        if total_data and time.time() - begin > timeout:
            break
        # if you got no data at all, wait a little longer, twice the timeout
        elif time.time() - begin > timeout * 2:
            break

        try:
            data = the_socket.recv(8192)
            if data:
                total_data.append(data)
                # change the beginning time for measurement
                begin = time.time()
            else:
                # sleep for sometime to indicate a gap
                time.sleep(0.1)
        except socket.error:
            # Was a bare "except: pass", which also swallowed
            # KeyboardInterrupt/SystemExit.  Only socket-level errors
            # (including the expected would-block error from the
            # non-blocking recv) are ignored now.
            pass

    # join all parts to make final string
    return ''.join(total_data)
| mit |
sbidoul/odoo | openerp/addons/base/tests/test_qweb.py | 289 | 4814 | # -*- coding: utf-8 -*-
import cgi
import json
import os.path
import glob
import re
import collections
from lxml import etree
import openerp.addons.base.ir.ir_qweb
import openerp.modules
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
class TestQWebTField(common.TransactionCase):
    """Unit tests for QWeb ``t-field`` rendering: branding attributes,
    HTML escaping of field values, and restrictions on the host tag."""

    def setUp(self):
        super(TestQWebTField, self).setUp()
        # ir.qweb is the rendering engine under test
        self.engine = self.registry('ir.qweb')

    def context(self, values):
        # Build a QWebContext with inherit_branding enabled so rendered
        # fields carry the data-oe-* editor attributes asserted below.
        return ir_qweb.QWebContext(
            self.cr, self.uid, values, context={'inherit_branding': True})

    def test_trivial(self):
        # A <span t-field="company.name"> renders the field value wrapped
        # in branding metadata (model, id, field, type, expression).
        field = etree.Element('span', {'t-field': u'company.name'})
        Companies = self.registry('res.company')
        company_id = Companies.create(self.cr, self.uid, {
            'name': "My Test Company"
        })

        result = self.engine.render_node(field, self.context({
            'company': Companies.browse(self.cr, self.uid, company_id),
        }))

        self.assertEqual(
            result,
            '<span data-oe-model="res.company" data-oe-id="%d" '
            'data-oe-field="name" data-oe-type="char" '
            'data-oe-expression="company.name">%s</span>' % (
                company_id,
                "My Test Company",))

    def test_i18n(self):
        # Non-ASCII and HTML-significant characters must be UTF-8 encoded
        # and HTML-escaped in the rendered output.
        field = etree.Element('span', {'t-field': u'company.name'})
        Companies = self.registry('res.company')
        s = u"Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20% off!"
        company_id = Companies.create(self.cr, self.uid, {
            'name': s,
        })

        result = self.engine.render_node(field, self.context({
            'company': Companies.browse(self.cr, self.uid, company_id),
        }))

        self.assertEqual(
            result,
            '<span data-oe-model="res.company" data-oe-id="%d" '
            'data-oe-field="name" data-oe-type="char" '
            'data-oe-expression="company.name">%s</span>' % (
                company_id,
                cgi.escape(s.encode('utf-8')),))

    def test_reject_crummy_tags(self):
        # t-field on table cells is refused (RTE widget limitation).
        field = etree.Element('td', {'t-field': u'company.name'})

        with self.assertRaisesRegexp(
                AssertionError,
                r'^RTE widgets do not work correctly'):
            self.engine.render_node(field, self.context({
                'company': None
            }))

    def test_reject_t_tag(self):
        # t-field needs a real output element; a bare <t> is refused.
        field = etree.Element('t', {'t-field': u'company.name'})

        with self.assertRaisesRegexp(
                AssertionError,
                r'^t-field can not be used on a t element'):
            self.engine.render_node(field, self.context({
                'company': None
            }))
class TestQWeb(common.TransactionCase):
    """Run the shared qweb-test-*.xml template/result fixtures against the
    server-side QWeb implementation.

    Test methods are synthesized on demand (one per fixture file) via
    __getattr__ and registered through get_cases()/load_tests.
    """
    # Raw string so '\.' is a literal regex escape; the original non-raw
    # '^qweb-test-(.*)\.xml$' relied on an invalid Python string escape
    # (a DeprecationWarning that became a SyntaxWarning in newer Pythons).
    matcher = re.compile(r'^qweb-test-(.*)\.xml$')

    @classmethod
    def get_cases(cls):
        # One TestQWeb instance per qweb-test-<name>.xml fixture, except the
        # JS-inheritance-only fixture which has no server-side equivalent.
        path = cls.qweb_test_file_path()
        return (
            cls("test_qweb_{}".format(cls.matcher.match(f).group(1)))
            for f in os.listdir(path)
            # js inheritance
            if f != 'qweb-test-extend.xml'
            if cls.matcher.match(f)
        )

    @classmethod
    def qweb_test_file_path(cls):
        # The fixtures live next to the JS qweb2 implementation in the web
        # module's static lib directory.
        path = os.path.dirname(
            openerp.modules.get_module_resource(
                'web', 'static', 'lib', 'qweb', 'qweb2.js'))
        return path

    def __getattr__(self, item):
        # Resolve the synthesized test_qweb_<name> methods to a runner bound
        # to the matching fixture file.
        if not item.startswith('test_qweb_'):
            raise AttributeError("No {} on {}".format(item, self))
        f = 'qweb-test-{}.xml'.format(item[10:])
        path = self.qweb_test_file_path()
        return lambda: self.run_test_file(os.path.join(path, f))

    def run_test_file(self, path):
        """Load every template of *path* and compare its rendering against
        the expected <result> block of the same fixture."""
        context = openerp.addons.base.ir.ir_qweb.QWebContext(self.cr, self.uid, {})
        qweb = self.env['ir.qweb']
        doc = etree.parse(path).getroot()
        qweb.load_document(doc, None, context)

        for template in context.templates:
            # templates starting with '_' are helpers for other templates
            if template.startswith('_'): continue
            param = doc.find('params[@id="{}"]'.format(template))
            # OrderedDict to ensure JSON mappings are iterated in source order
            # so output is predictable & repeatable
            params = {} if param is None else json.loads(param.text, object_pairs_hook=collections.OrderedDict)
            ctx = context.copy()
            ctx.update(params)

            result = doc.find('result[@id="{}"]'.format(template)).text
            self.assertEqual(
                qweb.render(template, qwebcontext=ctx).strip(),
                (result or u'').strip().encode('utf-8'),
                template
            )
def load_tests(loader, suite, _):
    """unittest ``load_tests`` hook: register the per-fixture QWeb cases."""
    # dir() is invoked on the *class*, not an instance, so the dynamic test
    # methods cannot be exposed by overriding TestQWeb.__dir__; the generated
    # cases are appended to the suite here instead.
    generated_cases = TestQWeb.get_cases()
    suite.addTests(generated_cases)
    return suite
| agpl-3.0 |
ulfjack/bazel | src/test/py/bazel/runfiles_test.py | 7 | 13361 | # pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import six
from src.test.py.bazel import test_base
class RunfilesTest(test_base.TestBase):
  """Integration tests for the Bazel runfiles libraries (Python, Java, Bash,
  C++) shipped with Bazel.

  Each test copies a mock workspace from testdata/runfiles_test, builds the
  relevant targets with the Bazel under test, runs the resulting binaries,
  and asserts on their stdout (a greeting line plus resolved runfile paths).
  """

  def _AssertRunfilesLibraryInBazelToolsRepo(self, family, lang_name):
    # Builds //foo:runfiles-<family> (which also runs the bar-* binaries of
    # all four languages) and checks every binary can resolve its own data
    # dependency through the runfiles library.
    # (source, destination, is_executable) triples of the mock workspace:
    for s, t, exe in [("WORKSPACE.mock", "WORKSPACE",
                       False), ("foo/BUILD.mock", "foo/BUILD",
                                False), ("foo/foo.py", "foo/foo.py", True),
                      ("foo/Foo.java", "foo/Foo.java",
                       False), ("foo/foo.sh", "foo/foo.sh",
                                True), ("foo/foo.cc", "foo/foo.cc", False),
                      ("foo/datadep/hello.txt", "foo/datadep/hello.txt",
                       False), ("bar/BUILD.mock", "bar/BUILD",
                                False), ("bar/bar.py", "bar/bar.py", True),
                      ("bar/bar-py-data.txt", "bar/bar-py-data.txt",
                       False), ("bar/Bar.java", "bar/Bar.java",
                                False), ("bar/bar-java-data.txt",
                                         "bar/bar-java-data.txt", False),
                      ("bar/bar.sh", "bar/bar.sh",
                       True), ("bar/bar-sh-data.txt", "bar/bar-sh-data.txt",
                               False), ("bar/bar.cc", "bar/bar.cc",
                                        False), ("bar/bar-cc-data.txt",
                                                 "bar/bar-cc-data.txt", False)]:
      self.CopyFile(
          self.Rlocation("io_bazel/src/test/py/bazel/testdata/runfiles_test/" +
                         s), t, exe)

    exit_code, stdout, stderr = self.RunBazel(["info", "bazel-bin"])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_bin = stdout[0]

    # TODO(brandjon): (Issue #8169) Make this test compatible with Python
    # toolchains. Blocked on the fact that there's no PY3 environment on our
    # Mac workers (bazelbuild/continuous-integration#578).
    exit_code, _, stderr = self.RunBazel([
        "build",
        "--verbose_failures",
        "--incompatible_use_python_toolchains=false",
        "//foo:runfiles-" + family
    ])
    self.AssertExitCode(exit_code, 0, stderr)

    # Binaries get a .exe suffix on Windows only.
    if test_base.TestBase.IsWindows():
      bin_path = os.path.join(bazel_bin, "foo/runfiles-%s.exe" % family)
    else:
      bin_path = os.path.join(bazel_bin, "foo/runfiles-" + family)

    self.assertTrue(os.path.exists(bin_path))

    # TEST_SRCDIR must not leak into runfiles discovery; the marker value is
    # asserted absent from the output below.
    exit_code, stdout, stderr = self.RunProgram(
        [bin_path], env_add={"TEST_SRCDIR": "__ignore_me__"})
    self.AssertExitCode(exit_code, 0, stderr)
    # 10 output lines: 2 from foo-<family>, and 2 from each of bar-<lang>.
    if len(stdout) != 10:
      self.fail("stdout: %s" % stdout)
    self.assertEqual(stdout[0], "Hello %s Foo!" % lang_name)
    six.assertRegex(self, stdout[1], "^rloc=.*/foo/datadep/hello.txt")
    self.assertNotIn("__ignore_me__", stdout[1])

    # The resolved rloc= path must point at the real one-line data file.
    with open(stdout[1].split("=", 1)[1], "r") as f:
      lines = [l.strip() for l in f.readlines()]
    if len(lines) != 1:
      self.fail("lines: %s" % lines)
    self.assertEqual(lines[0], "world")

    # Then two lines per bar-<lang> binary, in this fixed order.
    i = 2
    for lang in [("py", "Python", "bar.py"), ("java", "Java", "Bar.java"),
                 ("sh", "Bash", "bar.sh"), ("cc", "C++", "bar.cc")]:
      self.assertEqual(stdout[i], "Hello %s Bar!" % lang[1])
      six.assertRegex(self, stdout[i + 1],
                      "^rloc=.*/bar/bar-%s-data.txt" % lang[0])
      self.assertNotIn("__ignore_me__", stdout[i + 1])

      with open(stdout[i + 1].split("=", 1)[1], "r") as f:
        lines = [l.strip() for l in f.readlines()]
      if len(lines) != 1:
        self.fail("lines(%s): %s" % (lang[0], lines))
      self.assertEqual(lines[0], "data for " + lang[2])

      i += 2

  def testPythonRunfilesLibraryInBazelToolsRepo(self):
    self._AssertRunfilesLibraryInBazelToolsRepo("py", "Python")

  def testJavaRunfilesLibraryInBazelToolsRepo(self):
    self._AssertRunfilesLibraryInBazelToolsRepo("java", "Java")

  def testBashRunfilesLibraryInBazelToolsRepo(self):
    self._AssertRunfilesLibraryInBazelToolsRepo("sh", "Bash")

  def testCppRunfilesLibraryInBazelToolsRepo(self):
    self._AssertRunfilesLibraryInBazelToolsRepo("cc", "C++")

  def testRunfilesLibrariesFindRunfilesWithoutEnvvars(self):
    # The runfiles libraries must locate the runfiles tree/manifest even when
    # none of the usual RUNFILES_* / JAVA_RUNFILES env vars are set.
    for s, t, exe in [
        ("WORKSPACE.mock", "WORKSPACE", False),
        ("bar/BUILD.mock", "bar/BUILD", False),
        ("bar/bar.py", "bar/bar.py", True),
        ("bar/bar-py-data.txt", "bar/bar-py-data.txt", False),
        ("bar/Bar.java", "bar/Bar.java", False),
        ("bar/bar-java-data.txt", "bar/bar-java-data.txt", False),
        ("bar/bar.sh", "bar/bar.sh", True),
        ("bar/bar-sh-data.txt", "bar/bar-sh-data.txt", False),
        ("bar/bar.cc", "bar/bar.cc", False),
        ("bar/bar-cc-data.txt", "bar/bar-cc-data.txt", False),
    ]:
      self.CopyFile(
          self.Rlocation("io_bazel/src/test/py/bazel/testdata/runfiles_test/" +
                         s), t, exe)

    exit_code, stdout, stderr = self.RunBazel(["info", "bazel-bin"])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_bin = stdout[0]

    exit_code, _, stderr = self.RunBazel([
        "build", "--verbose_failures",
        "//bar:bar-py", "//bar:bar-java", "//bar:bar-sh", "//bar:bar-cc"
    ])
    self.AssertExitCode(exit_code, 0, stderr)

    for lang in [("py", "Python", "bar.py"), ("java", "Java", "Bar.java"),
                 ("sh", "Bash", "bar.sh"), ("cc", "C++", "bar.cc")]:
      if test_base.TestBase.IsWindows():
        bin_path = os.path.join(bazel_bin, "bar/bar-%s.exe" % lang[0])
      else:
        bin_path = os.path.join(bazel_bin, "bar/bar-" + lang[0])

      self.assertTrue(os.path.exists(bin_path))

      # Strip every runfiles-related env var so the binary must discover the
      # runfiles location purely from its own path.
      exit_code, stdout, stderr = self.RunProgram(
          [bin_path],
          env_remove=set([
              "RUNFILES_MANIFEST_FILE",
              "RUNFILES_MANIFEST_ONLY",
              "RUNFILES_DIR",
              "JAVA_RUNFILES",
          ]),
          env_add={"TEST_SRCDIR": "__ignore_me__"})
      self.AssertExitCode(exit_code, 0, stderr)
      if len(stdout) < 2:
        self.fail("stdout(%s): %s" % (lang[0], stdout))
      self.assertEqual(stdout[0], "Hello %s Bar!" % lang[1])
      six.assertRegex(self, stdout[1], "^rloc=.*/bar/bar-%s-data.txt" % lang[0])
      self.assertNotIn("__ignore_me__", stdout[1])

      with open(stdout[1].split("=", 1)[1], "r") as f:
        lines = [l.strip() for l in f.readlines()]
      if len(lines) != 1:
        self.fail("lines(%s): %s" % (lang[0], lines))
      self.assertEqual(lines[0], "data for " + lang[2])

  def testRunfilesLibrariesFindRunfilesWithRunfilesManifestEnvvar(self):
    # Verify that the runfiles libraries honor RUNFILES_MANIFEST_FILE (or
    # JAVA_RUNFILES for the Java launcher on Linux/macOS) by pointing them at
    # a doctored manifest and checking the binaries resolve through it.
    for s, t, exe in [
        ("WORKSPACE.mock", "WORKSPACE", False),
        ("bar/BUILD.mock", "bar/BUILD", False),
        # Note: do not test Python here, because py_binary always needs a
        # runfiles tree, even on Windows, because it needs __init__.py files
        # in every directory where there may be importable modules, so Bazel
        # always needs to create a runfiles tree for py_binary.
        ("bar/Bar.java", "bar/Bar.java", False),
        ("bar/bar-java-data.txt", "bar/bar-java-data.txt", False),
        ("bar/bar.sh", "bar/bar.sh", True),
        ("bar/bar-sh-data.txt", "bar/bar-sh-data.txt", False),
        ("bar/bar.cc", "bar/bar.cc", False),
        ("bar/bar-cc-data.txt", "bar/bar-cc-data.txt", False),
    ]:
      self.CopyFile(
          self.Rlocation("io_bazel/src/test/py/bazel/testdata/runfiles_test/" +
                         s), t, exe)

    exit_code, stdout, stderr = self.RunBazel(["info", "bazel-bin"])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_bin = stdout[0]

    for lang in [("java", "Java"), ("sh", "Bash"), ("cc", "C++")]:
      # --enable_runfiles=no forces manifest-based resolution.
      exit_code, _, stderr = self.RunBazel([
          "build", "--verbose_failures", "--enable_runfiles=no",
          "//bar:bar-" + lang[0]
      ])
      self.AssertExitCode(exit_code, 0, stderr)

      if test_base.TestBase.IsWindows():
        bin_path = os.path.join(bazel_bin, "bar/bar-%s.exe" % lang[0])
      else:
        bin_path = os.path.join(bazel_bin, "bar/bar-" + lang[0])
      manifest_path = bin_path + ".runfiles_manifest"
      self.assertTrue(os.path.exists(bin_path))
      self.assertTrue(os.path.exists(manifest_path))

      # Create a copy of the runfiles manifest, replacing
      # "bar/bar-<lang>-data.txt" with a custom file.
      mock_bar_dep = self.ScratchFile("bar-%s-mockdata.txt" % lang[0],
                                      ["mock %s data" % lang[0]])
      if test_base.TestBase.IsWindows():
        # Runfiles manifests use forward slashes as path separators, even on
        # Windows.
        mock_bar_dep = mock_bar_dep.replace("\\", "/")
      manifest_key = "foo_ws/bar/bar-%s-data.txt" % lang[0]
      mock_manifest_line = manifest_key + " " + mock_bar_dep
      with open(manifest_path, "rt") as f:
        # Only rstrip newlines. Do not rstrip() completely, because that would
        # remove spaces too. This is necessary in order to have at least one
        # space in every manifest line.
        # Some manifest entries don't have any path after this space, namely
        # the "__init__.py" entries. (Bazel writes such manifests on every
        # platform). The reason is that these files are never symlinks in the
        # runfiles tree, Bazel actually creates empty __init__.py files (again
        # on every platform). However to keep these manifest entries correct,
        # they need to have a space character.
        # We could probably strip these lines completely, but this test
        # doesn't aim to exercise what would happen in that case.
        mock_manifest_data = [
            mock_manifest_line
            if line.split(" ", 1)[0] == manifest_key else line.rstrip("\n\r")
            for line in f
        ]

      substitute_manifest = self.ScratchFile(
          "mock-%s.runfiles/MANIFEST" % lang[0], mock_manifest_data)

      exit_code, stdout, stderr = self.RunProgram(
          [bin_path],
          env_remove=set(["RUNFILES_DIR"]),
          env_add={
              # On Linux/macOS, the Java launcher picks up JAVA_RUNFILES and
              # ignores RUNFILES_MANIFEST_FILE.
              "JAVA_RUNFILES": substitute_manifest[:-len("/MANIFEST")],
              # On Windows, the Java launcher picks up RUNFILES_MANIFEST_FILE.
              # The C++ runfiles library picks up RUNFILES_MANIFEST_FILE on
              # all platforms.
              "RUNFILES_MANIFEST_FILE": substitute_manifest,
              "RUNFILES_MANIFEST_ONLY": "1",
              "TEST_SRCDIR": "__ignore_me__",
          })

      self.AssertExitCode(exit_code, 0, stderr)
      if len(stdout) < 2:
        self.fail("stdout: %s" % stdout)
      self.assertEqual(stdout[0], "Hello %s Bar!" % lang[1])
      # The binary must have resolved its data file through the doctored
      # manifest entry, i.e. to the mock file, not the real one.
      six.assertRegex(self, stdout[1], "^rloc=" + mock_bar_dep)
      self.assertNotIn("__ignore_me__", stdout[1])

      with open(stdout[1].split("=", 1)[1], "r") as f:
        lines = [l.strip() for l in f.readlines()]
      if len(lines) != 1:
        self.fail("lines: %s" % lines)
      self.assertEqual(lines[0], "mock %s data" % lang[0])

  def testLegacyExternalRunfilesOption(self):
    # With --nolegacy_external_runfiles, external-repo runfiles must not be
    # duplicated under <workspace>/external/<repo> in the manifest.
    self.ScratchDir("A")
    self.ScratchFile("A/WORKSPACE")
    self.ScratchFile("A/BUILD", [
        "py_library(",
        "    name = 'lib',",
        "    srcs = ['lib.py'],",
        "    visibility = ['//visibility:public'],",
        ")",
    ])
    self.ScratchFile("A/lib.py")
    work_dir = self.ScratchDir("B")
    self.ScratchFile("B/WORKSPACE",
                     ["local_repository(name = 'A', path='../A')"])
    self.ScratchFile("B/bin.py")
    self.ScratchFile("B/BUILD", [
        "py_binary(",
        "    name = 'bin',",
        "    srcs = ['bin.py'],",
        "    deps = ['@A//:lib'],",
        ")",
        "",
        "genrule(",
        "    name = 'gen',",
        "    outs = ['output'],",
        "    cmd = 'echo $(location //:bin) > $@',",
        "    tools = ['//:bin'],",
        ")",
    ])

    exit_code, stdout, stderr = self.RunBazel(
        args=["info", "output_path"], cwd=work_dir)
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_output = stdout[0]

    exit_code, _, stderr = self.RunBazel(
        args=["build", "--nolegacy_external_runfiles", ":gen"], cwd=work_dir)
    self.AssertExitCode(exit_code, 0, stderr)
    if self.IsWindows():
      manifest_path = os.path.join(bazel_output,
                                   "host/bin/bin.exe.runfiles_manifest")
    else:
      manifest_path = os.path.join(bazel_output,
                                   "host/bin/bin.runfiles_manifest")
    self.AssertFileContentNotContains(manifest_path, "__main__/external/A")
if __name__ == "__main__":
  # Allow running this module directly as a test script.
  unittest.main()
| apache-2.0 |
tartavull/google-cloud-python | storage/tests/unit/test_acl.py | 4 | 26727 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_ACLEntity(unittest.TestCase):
    """Unit tests for ``google.cloud.storage.acl._ACLEntity``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.storage.acl import _ACLEntity

        return _ACLEntity

    def _make_one(self, *args, **kw):
        return self._get_target_class()(*args, **kw)

    def test_ctor_default_identifier(self):
        entity = self._make_one('type')
        self.assertEqual(entity.type, 'type')
        self.assertIsNone(entity.identifier)
        self.assertEqual(entity.get_roles(), set())

    def test_ctor_w_identifier(self):
        entity = self._make_one('type', 'id')
        self.assertEqual(entity.type, 'type')
        self.assertEqual(entity.identifier, 'id')
        self.assertEqual(entity.get_roles(), set())

    def test___str__no_identifier(self):
        # With no identifier, str() is just the type.
        entity = self._make_one('type')
        self.assertEqual(str(entity), 'type')

    def test___str__w_identifier(self):
        # With an identifier, str() is "<type>-<identifier>".
        entity = self._make_one('type', 'id')
        self.assertEqual(str(entity), 'type-id')

    def test_grant_simple(self):
        entity = self._make_one('type')
        entity.grant('role')
        self.assertEqual(entity.get_roles(), {'role'})

    def test_grant_duplicate(self):
        # Granting the same role twice must not duplicate it.
        entity = self._make_one('type')
        entity.grant('role1')
        entity.grant('role2')
        entity.grant('role1')
        self.assertEqual(entity.get_roles(), {'role1', 'role2'})

    def test_revoke_miss(self):
        # Revoking an absent role is a no-op.
        entity = self._make_one('type')
        entity.revoke('nonesuch')
        self.assertEqual(entity.get_roles(), set())

    def test_revoke_hit(self):
        entity = self._make_one('type')
        entity.grant('role1')
        entity.grant('role2')
        entity.revoke('role1')
        self.assertEqual(entity.get_roles(), {'role2'})

    def test_grant_read(self):
        entity = self._make_one('type')
        entity.grant_read()
        self.assertEqual(entity.get_roles(), {entity.READER_ROLE})

    def test_grant_write(self):
        entity = self._make_one('type')
        entity.grant_write()
        self.assertEqual(entity.get_roles(), {entity.WRITER_ROLE})

    def test_grant_owner(self):
        entity = self._make_one('type')
        entity.grant_owner()
        self.assertEqual(entity.get_roles(), {entity.OWNER_ROLE})

    def test_revoke_read(self):
        entity = self._make_one('type')
        entity.grant(entity.READER_ROLE)
        entity.revoke_read()
        self.assertEqual(entity.get_roles(), set())

    def test_revoke_write(self):
        entity = self._make_one('type')
        entity.grant(entity.WRITER_ROLE)
        entity.revoke_write()
        self.assertEqual(entity.get_roles(), set())

    def test_revoke_owner(self):
        entity = self._make_one('type')
        entity.grant(entity.OWNER_ROLE)
        entity.revoke_owner()
        self.assertEqual(entity.get_roles(), set())
class Test_ACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import ACL
return ACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
acl = self._make_one()
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
def test__ensure_loaded(self):
acl = self._make_one()
def _reload():
acl._really_loaded = True
acl.reload = _reload
acl._ensure_loaded()
self.assertTrue(acl._really_loaded)
def test_client_is_abstract(self):
acl = self._make_one()
self.assertRaises(NotImplementedError, lambda: acl.client)
def test_reset(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
acl.reset()
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
def test___iter___empty_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertEqual(list(acl), [])
def test___iter___empty_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertEqual(list(acl), [])
self.assertTrue(acl.loaded)
def test___iter___non_empty_no_roles(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertEqual(list(acl), [])
def test___iter___non_empty_w_roles(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
entity.grant(ROLE)
self.assertEqual(list(acl),
[{'entity': '%s-%s' % (TYPE, ID), 'role': ROLE}])
def test___iter___non_empty_w_empty_role(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
entity.grant('')
self.assertEqual(list(acl), [])
def test_entity_from_dict_allUsers_eager(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'allUsers', 'role': ROLE})
self.assertEqual(entity.type, 'allUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'allUsers', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_allAuthenticatedUsers(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'allAuthenticatedUsers',
'role': ROLE})
self.assertEqual(entity.type, 'allAuthenticatedUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'allAuthenticatedUsers', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_string_w_hyphen(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'type-id', 'role': ROLE})
self.assertEqual(entity.type, 'type')
self.assertEqual(entity.identifier, 'id')
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_string_wo_hyphen(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
self.assertRaises(ValueError,
acl.entity_from_dict,
{'entity': 'bogus', 'role': ROLE})
self.assertEqual(list(acl.get_entities()), [])
def test_has_entity_miss_str_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertFalse(acl.has_entity('nonesuch'))
def test_has_entity_miss_str_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertFalse(acl.has_entity('nonesuch'))
self.assertTrue(acl.loaded)
def test_has_entity_miss_entity(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertFalse(acl.has_entity(entity))
def test_has_entity_hit_str(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID)))
def test_has_entity_hit_entity(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity(entity))
def test_get_entity_miss_str_no_default_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertIsNone(acl.get_entity('nonesuch'))
def test_get_entity_miss_str_no_default_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertIsNone(acl.get_entity('nonesuch'))
self.assertTrue(acl.loaded)
def test_get_entity_miss_entity_no_default(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertIsNone(acl.get_entity(entity))
def test_get_entity_miss_str_w_default(self):
DEFAULT = object()
acl = self._make_one()
acl.loaded = True
self.assertIs(acl.get_entity('nonesuch', DEFAULT), DEFAULT)
def test_get_entity_miss_entity_w_default(self):
from google.cloud.storage.acl import _ACLEntity
DEFAULT = object()
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertIs(acl.get_entity(entity, DEFAULT), DEFAULT)
def test_get_entity_hit_str(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID)))
def test_get_entity_hit_entity(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity(entity))
def test_add_entity_miss_eager(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
acl.loaded = True
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_add_entity_miss_lazy(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
self.assertTrue(acl.loaded)
def test_add_entity_hit(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ENTITY_VAL = '%s-%s' % (TYPE, ID)
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
acl.loaded = True
before = acl.entity(TYPE, ID)
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertIsNot(acl.get_entity(ENTITY_VAL), before)
self.assertIs(acl.get_entity(ENTITY_VAL), entity)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_miss(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.loaded)
entity.grant(ROLE)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_hit(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
before = acl.entity(TYPE, ID)
before.grant(ROLE)
entity = acl.entity(TYPE, ID)
self.assertIs(entity, before)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_user(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.user(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'user')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'user-%s' % ID, 'role': ROLE}])
def test_group(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.group(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'group')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'group-%s' % ID, 'role': ROLE}])
def test_domain(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.domain(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'domain')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'domain-%s' % ID, 'role': ROLE}])
def test_all(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.all()
entity.grant(ROLE)
self.assertEqual(entity.type, 'allUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(list(acl),
[{'entity': 'allUsers', 'role': ROLE}])
def test_all_authenticated(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.all_authenticated()
entity.grant(ROLE)
self.assertEqual(entity.type, 'allAuthenticatedUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(list(acl),
[{'entity': 'allAuthenticatedUsers', 'role': ROLE}])
def test_get_entities_empty_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertEqual(acl.get_entities(), [])
def test_get_entities_empty_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertEqual(acl.get_entities(), [])
self.assertTrue(acl.loaded)
def test_get_entities_nonempty(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertEqual(acl.get_entities(), [entity])
def test_reload_missing(self):
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/652
ROLE = 'role'
connection = _Connection({})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.entity('allUsers', ROLE)
acl.reload(client=client)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/testing/acl')
def test_reload_empty_result_clears_local(self):
ROLE = 'role'
connection = _Connection({'items': []})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.entity('allUsers', ROLE)
acl.reload(client=client)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/testing/acl')
def test_reload_nonempty_result(self):
ROLE = 'role'
connection = _Connection(
{'items': [{'entity': 'allUsers', 'role': ROLE}]})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.reload(client=client)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl), [{'entity': 'allUsers', 'role': ROLE}])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/testing/acl')
def test_save_none_set_none_passed(self):
connection = _Connection()
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.save(client=client)
kw = connection._requested
self.assertEqual(len(kw), 0)
def test_save_existing_missing_none_passed(self):
connection = _Connection({})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.save(client=client)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': []})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_save_no_acl(self):
    """save() without an explicit ACL sends the locally-granted entries."""
    ROLE = 'role'
    expected = [{'entity': 'allUsers', 'role': ROLE}]
    conn = _Connection({'acl': expected})
    acl = self._make_one()
    acl.save_path = '/testing'
    acl.loaded = True
    acl.entity('allUsers').grant(ROLE)

    acl.save(client=_Client(conn))

    self.assertEqual(list(acl), expected)
    sent = conn._requested
    self.assertEqual(len(sent), 1)
    request = sent[0]
    self.assertEqual(request['method'], 'PATCH')
    self.assertEqual(request['path'], '/testing')
    self.assertEqual(request['data'], {'acl': expected})
    self.assertEqual(request['query_params'], {'projection': 'full'})
def test_save_w_acl(self):
    """save(acl) sends only the passed entries; the server response wins."""
    ROLE1 = 'role1'
    ROLE2 = 'role2'
    sticky = {'entity': 'allUsers', 'role': ROLE2}
    new_acl = [{'entity': 'allUsers', 'role': ROLE1}]
    conn = _Connection({'acl': [sticky] + new_acl})
    acl = self._make_one()
    acl.save_path = '/testing'
    acl.loaded = True

    acl.save(new_acl, client=_Client(conn))

    entries = list(acl)
    self.assertEqual(len(entries), 2)
    self.assertIn(sticky, entries)
    self.assertIn(new_acl[0], entries)
    sent = conn._requested
    self.assertEqual(len(sent), 1)
    request = sent[0]
    self.assertEqual(request['method'], 'PATCH')
    self.assertEqual(request['path'], '/testing')
    # Only the explicitly-passed entries are transmitted.
    self.assertEqual(request['data'], {'acl': new_acl})
    self.assertEqual(request['query_params'], {'projection': 'full'})
def test_save_prefefined_invalid(self):
    """save_predefined() rejects an unknown predefined-ACL name."""
    # NOTE(review): the method name has a typo ("prefefined"); kept so
    # the test id stays stable — consider renaming in a dedicated change.
    conn = _Connection()
    acl = self._make_one()
    acl.save_path = '/testing'
    acl.loaded = True

    with self.assertRaises(ValueError):
        acl.save_predefined('bogus', client=_Client(conn))
def test_save_predefined_valid(self):
    """save_predefined() sends predefinedAcl and clears local entries."""
    PREDEFINED = 'private'
    conn = _Connection({'acl': []})
    acl = self._make_one()
    acl.save_path = '/testing'
    acl.loaded = True

    acl.save_predefined(PREDEFINED, client=_Client(conn))

    self.assertEqual(list(acl), [])
    sent = conn._requested
    self.assertEqual(len(sent), 1)
    request = sent[0]
    self.assertEqual(request['method'], 'PATCH')
    self.assertEqual(request['path'], '/testing')
    self.assertEqual(request['data'], {'acl': []})
    self.assertEqual(
        request['query_params'],
        {'projection': 'full', 'predefinedAcl': PREDEFINED})
def test_save_predefined_w_XML_alias(self):
    """XML-style predefined names are translated to their JSON form."""
    XML_NAME = 'project-private'
    JSON_NAME = 'projectPrivate'
    conn = _Connection({'acl': []})
    acl = self._make_one()
    acl.save_path = '/testing'
    acl.loaded = True

    acl.save_predefined(XML_NAME, client=_Client(conn))

    self.assertEqual(list(acl), [])
    sent = conn._requested
    self.assertEqual(len(sent), 1)
    request = sent[0]
    self.assertEqual(request['method'], 'PATCH')
    self.assertEqual(request['path'], '/testing')
    self.assertEqual(request['data'], {'acl': []})
    # The wire format carries the JSON alias, not the XML one passed in.
    self.assertEqual(
        request['query_params'],
        {'projection': 'full', 'predefinedAcl': JSON_NAME})
def test_save_predefined_valid_w_alternate_query_param(self):
    """A subclass override of _PREDEFINED_QUERY_PARAM is honored."""
    PREDEFINED = 'publicRead'
    conn = _Connection({'acl': []})
    acl = self._make_one()
    acl.save_path = '/testing'
    acl.loaded = True
    acl._PREDEFINED_QUERY_PARAM = 'alternate'

    acl.save_predefined(PREDEFINED, client=_Client(conn))

    self.assertEqual(list(acl), [])
    sent = conn._requested
    self.assertEqual(len(sent), 1)
    request = sent[0]
    self.assertEqual(request['method'], 'PATCH')
    self.assertEqual(request['path'], '/testing')
    self.assertEqual(request['data'], {'acl': []})
    self.assertEqual(
        request['query_params'],
        {'projection': 'full', 'alternate': PREDEFINED})
def test_clear(self):
    """clear() PATCHes an empty ACL; entries the server keeps remain."""
    ROLE1 = 'role1'
    ROLE2 = 'role2'
    sticky = {'entity': 'allUsers', 'role': ROLE2}
    conn = _Connection({'acl': [sticky]})
    acl = self._make_one()
    acl.save_path = '/testing'
    acl.loaded = True
    acl.entity('allUsers', ROLE1)

    acl.clear(client=_Client(conn))

    self.assertEqual(list(acl), [sticky])
    sent = conn._requested
    self.assertEqual(len(sent), 1)
    request = sent[0]
    self.assertEqual(request['method'], 'PATCH')
    self.assertEqual(request['path'], '/testing')
    self.assertEqual(request['data'], {'acl': []})
    self.assertEqual(request['query_params'], {'projection': 'full'})
class Test_BucketACL(unittest.TestCase):
    """Tests for the bucket-level ACL wrapper."""

    @staticmethod
    def _get_target_class():
        from google.cloud.storage.acl import BucketACL
        return BucketACL

    def _make_one(self, *args, **kw):
        return self._get_target_class()(*args, **kw)

    def test_ctor(self):
        """A new BucketACL starts empty/unloaded and derives API paths."""
        bucket = _Bucket('name')
        acl = self._make_one(bucket)
        self.assertEqual(acl.entities, {})
        self.assertFalse(acl.loaded)
        self.assertIs(acl.bucket, bucket)
        self.assertEqual(acl.reload_path, '/b/name/acl')
        self.assertEqual(acl.save_path, '/b/name')
class Test_DefaultObjectACL(unittest.TestCase):
    """Tests for the bucket default-object ACL wrapper."""

    @staticmethod
    def _get_target_class():
        from google.cloud.storage.acl import DefaultObjectACL
        return DefaultObjectACL

    def _make_one(self, *args, **kw):
        return self._get_target_class()(*args, **kw)

    def test_ctor(self):
        """A new DefaultObjectACL uses the defaultObjectAcl reload path."""
        bucket = _Bucket('name')
        acl = self._make_one(bucket)
        self.assertEqual(acl.entities, {})
        self.assertFalse(acl.loaded)
        self.assertIs(acl.bucket, bucket)
        self.assertEqual(acl.reload_path, '/b/name/defaultObjectAcl')
        self.assertEqual(acl.save_path, '/b/name')
class Test_ObjectACL(unittest.TestCase):
    """Tests for the blob-level ACL wrapper."""

    @staticmethod
    def _get_target_class():
        from google.cloud.storage.acl import ObjectACL
        return ObjectACL

    def _make_one(self, *args, **kw):
        return self._get_target_class()(*args, **kw)

    def test_ctor(self):
        """A new ObjectACL starts empty/unloaded and derives API paths."""
        blob = _Blob(_Bucket('name'), 'blob-name')
        acl = self._make_one(blob)
        self.assertEqual(acl.entities, {})
        self.assertFalse(acl.loaded)
        self.assertIs(acl.blob, blob)
        self.assertEqual(acl.reload_path, '/b/name/o/blob-name/acl')
        self.assertEqual(acl.save_path, '/b/name/o/blob-name')
class _Blob(object):
def __init__(self, bucket, blob):
self.bucket = bucket
self.blob = blob
@property
def path(self):
return '%s/o/%s' % (self.bucket.path, self.blob)
class _Bucket(object):
def __init__(self, name):
self.name = name
@property
def path(self):
return '/b/%s' % self.name
class _Connection(object):
_delete_ok = False
def __init__(self, *responses):
self._responses = responses
self._requested = []
self._deleted = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _Client(object):
    """Stub client exposing only the ``_connection`` the ACL code uses."""

    def __init__(self, connection):
        # ACL methods reach the transport through ``client._connection``.
        self._connection = connection
| apache-2.0 |
Intel-tensorflow/tensorflow | tensorflow/python/kernel_tests/softplus_op_test.py | 14 | 5094 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):
  """Functional and gradient tests for ``tf.nn.softplus``."""

  def _npSoftplus(self, np_features):
    """NumPy reference implementation: softplus(x) = log(1 + exp(x))."""
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    # logaddexp(0, x) == log(exp(0) + exp(x)) == log(1 + exp(x)), and is
    # numerically stable for large-magnitude x.
    return np.logaddexp(zero, np_features)

  def _testSoftplus(self, np_features, use_gpu=False):
    """Compare the TF op against the NumPy reference for one input."""
    np_softplus = self._npSoftplus(np_features)
    with self.cached_session(use_gpu=use_gpu):
      softplus = nn_ops.softplus(np_features)
      tf_softplus = self.evaluate(softplus)
    self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
    # softplus is strictly positive everywhere.
    self.assertTrue(np.all(tf_softplus > 0))
    self.assertShapeEqual(np_softplus, softplus)

  def testNumbers(self):
    """Check moderate values plus values around the dtype's eps boundary."""
    for t in [np.float16, np.float32, np.float64]:
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=False)
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=True)
      log_eps = np.log(np.finfo(t).eps)
      one = t(1)
      ten = t(10)
      # BUG FIX: the GPU list previously read "log_eps + ten - log_eps,"
      # (a dropped comma fused two intended values into one and silently
      # skipped testing -log_eps).  Use the identical boundary list on
      # both CPU and GPU.
      boundary = [
          log_eps, log_eps - one, log_eps + one, log_eps - ten,
          log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
          -log_eps - ten, -log_eps + ten
      ]
      self._testSoftplus(boundary, use_gpu=False)
      self._testSoftplus(boundary, use_gpu=True)

  @test_util.run_deprecated_v1
  def testGradient(self):
    """First-order gradient agrees with numeric differentiation."""
    with self.cached_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    print("softplus (float) gradient err = ", err)
    self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def testGradGrad(self):
    """Second-order gradient agrees with numeric differentiation."""
    with self.cached_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      (grad,) = gradients_impl.gradients(y, x)
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], grad, [2, 5], x_init_value=x_init)
    print("softplus (float) gradient of gradient err = ", err)
    self.assertLess(err, 5e-5)

  @test_util.run_deprecated_v1
  def testGradGradGrad(self):
    """Third-order gradient agrees with numeric differentiation."""
    with self.cached_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      (grad,) = gradients_impl.gradients(y, x)
      (grad_grad,) = gradients_impl.gradients(grad, x)
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], grad_grad, [2, 5], x_init_value=x_init)
    print("softplus (float) third-order gradient err = ", err)
    self.assertLess(err, 5e-5)

  @test_util.run_deprecated_v1
  def testNoInts(self):
    """Integer inputs are rejected by the op's dtype constraints."""
    with self.cached_session():
      with self.assertRaisesRegex(
          TypeError,
          "'features' has DataType int32 not in list of allowed values"):
        nn_ops.softplus(constant_op.constant(42)).eval()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
doganaltunbay/odoo | addons/procurement_jit/procurement_jit.py | 130 | 1446 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class procurement_order(osv.osv):
    """Just-in-time procurement: run and check each procurement as soon
    as it is created, instead of waiting for the scheduler."""
    _inherit = "procurement.order"

    def create(self, cr, uid, vals, context=None):
        """Create the procurement, then immediately run and check it."""
        new_id = super(procurement_order, self).create(
            cr, uid, vals, context=context)
        ids = [new_id]
        self.run(cr, uid, ids, context=context)
        self.check(cr, uid, ids, context=context)
        return new_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tcmitchell/geni-ch | plugins/chapiv1rpc/plugin.py | 2 | 2848 | #----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
import tools.pluginmanager as pm
from chapi.Clearinghouse import CHv1Handler, CHv1DelegateBase
from chapi.MemberAuthority import MAv1Handler, MAv1DelegateBase
from chapi.SliceAuthority import SAv1Handler, SAv1DelegateBase
from chapi.GuardBase import GuardBase
from chapi.Parameters import set_parameters, configure_logging
def _register_authority(xmlrpc, handler, delegate, service_name,
                        endpoint_name, path):
    """Wire one authority handler: attach the given delegate and a default
    guard, register it as a plugin service, and expose it over XML-RPC at
    *path*.

    The defaults are placeholders — subsequent plugins are expected to
    replace them with proper guard and delegate implementations.
    """
    handler.setDelegate(delegate)
    handler.setGuard(GuardBase())
    pm.registerService(service_name, handler)
    # registerXMLRPC(name, handlerObj, endpoint)
    xmlrpc.registerXMLRPC(endpoint_name, handler, path)


def setup():
    """Plugin entry point: load configuration, set up logging, and register
    the CH, SA and MA authorities with default delegates and guards."""
    # load all the parameter values into the config database
    set_parameters()

    # Configure logging
    configure_logging()

    # register xmlrpc endpoint
    xmlrpc = pm.getService('xmlrpc')

    # The three authorities are wired identically; the repeated
    # handler/delegate/guard boilerplate lives in _register_authority.
    _register_authority(xmlrpc, CHv1Handler(), CHv1DelegateBase(),
                        'chv1handler', 'ch1', '/CH')
    _register_authority(xmlrpc, SAv1Handler(), SAv1DelegateBase(),
                        'sav1handler', 'sa1', '/SA')
    _register_authority(xmlrpc, MAv1Handler(), MAv1DelegateBase(),
                        'mav1handler', 'ma1', '/MA')
| mit |
johndpope/tensorflow | tensorflow/contrib/saved_model/python/saved_model/signature_def_utils.py | 113 | 1664 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_signature_def_by_key(meta_graph_def, signature_def_key):
  """Look up a SignatureDef in a MetaGraphDef by key.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer whose SignatureDefMap is
      searched.
    signature_def_key: Key identifying the SignatureDef to return.

  Returns:
    The SignatureDef protocol buffer stored under the supplied key.

  Raises:
    ValueError: If the SignatureDefMap has no entry for the key.
  """
  signature_def_map = meta_graph_def.signature_def
  # Explicit membership check (rather than indexing and catching KeyError)
  # preserves the original behavior exactly.
  if signature_def_key in signature_def_map:
    return signature_def_map[signature_def_key]
  raise ValueError("No SignatureDef with key '%s' found in MetaGraphDef." %
                   signature_def_key)
| apache-2.0 |
kubernetes-client/python | kubernetes/client/models/v1beta1_rule_with_operations.py | 1 | 9404 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1RuleWithOperations(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type string.
    openapi_types = {
        'api_groups': 'list[str]',
        'api_versions': 'list[str]',
        'operations': 'list[str]',
        'resources': 'list[str]',
        'scope': 'str'
    }

    # Python attribute name -> JSON field name on the wire.
    attribute_map = {
        'api_groups': 'apiGroups',
        'api_versions': 'apiVersions',
        'operations': 'operations',
        'resources': 'resources',
        'scope': 'scope'
    }

    def __init__(self, api_groups=None, api_versions=None, operations=None, resources=None, scope=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1RuleWithOperations - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_groups = None
        self._api_versions = None
        self._operations = None
        self._resources = None
        self._scope = None
        self.discriminator = None

        # All fields are optional: only explicitly-passed values are set,
        # so the property setters run only for provided arguments.
        if api_groups is not None:
            self.api_groups = api_groups
        if api_versions is not None:
            self.api_versions = api_versions
        if operations is not None:
            self.operations = operations
        if resources is not None:
            self.resources = resources
        if scope is not None:
            self.scope = scope

    @property
    def api_groups(self):
        """Gets the api_groups of this V1beta1RuleWithOperations.  # noqa: E501

        APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.  # noqa: E501

        :return: The api_groups of this V1beta1RuleWithOperations.  # noqa: E501
        :rtype: list[str]
        """
        return self._api_groups

    @api_groups.setter
    def api_groups(self, api_groups):
        """Sets the api_groups of this V1beta1RuleWithOperations.

        APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.  # noqa: E501

        :param api_groups: The api_groups of this V1beta1RuleWithOperations.  # noqa: E501
        :type: list[str]
        """

        self._api_groups = api_groups

    @property
    def api_versions(self):
        """Gets the api_versions of this V1beta1RuleWithOperations.  # noqa: E501

        APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.  # noqa: E501

        :return: The api_versions of this V1beta1RuleWithOperations.  # noqa: E501
        :rtype: list[str]
        """
        return self._api_versions

    @api_versions.setter
    def api_versions(self, api_versions):
        """Sets the api_versions of this V1beta1RuleWithOperations.

        APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.  # noqa: E501

        :param api_versions: The api_versions of this V1beta1RuleWithOperations.  # noqa: E501
        :type: list[str]
        """

        self._api_versions = api_versions

    @property
    def operations(self):
        """Gets the operations of this V1beta1RuleWithOperations.  # noqa: E501

        Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.  # noqa: E501

        :return: The operations of this V1beta1RuleWithOperations.  # noqa: E501
        :rtype: list[str]
        """
        return self._operations

    @operations.setter
    def operations(self, operations):
        """Sets the operations of this V1beta1RuleWithOperations.

        Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.  # noqa: E501

        :param operations: The operations of this V1beta1RuleWithOperations.  # noqa: E501
        :type: list[str]
        """

        self._operations = operations

    @property
    def resources(self):
        """Gets the resources of this V1beta1RuleWithOperations.  # noqa: E501

        Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required.  # noqa: E501

        :return: The resources of this V1beta1RuleWithOperations.  # noqa: E501
        :rtype: list[str]
        """
        return self._resources

    @resources.setter
    def resources(self, resources):
        """Sets the resources of this V1beta1RuleWithOperations.

        Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required.  # noqa: E501

        :param resources: The resources of this V1beta1RuleWithOperations.  # noqa: E501
        :type: list[str]
        """

        self._resources = resources

    @property
    def scope(self):
        """Gets the scope of this V1beta1RuleWithOperations.  # noqa: E501

        scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".  # noqa: E501

        :return: The scope of this V1beta1RuleWithOperations.  # noqa: E501
        :rtype: str
        """
        return self._scope

    @scope.setter
    def scope(self, scope):
        """Sets the scope of this V1beta1RuleWithOperations.

        scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".  # noqa: E501

        :param scope: The scope of this V1beta1RuleWithOperations.  # noqa: E501
        :type: str
        """

        self._scope = scope

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1beta1RuleWithOperations):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1beta1RuleWithOperations):
            return True

        return self.to_dict() != other.to_dict()
| apache-2.0 |
damirda/ansible-modules-core | utilities/logic/include.py | 11 | 1970 | #!/usr/bin/python
# -*- mode: python -*-
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author:
- "Ansible Core Team (@ansible)"
module: include
short_description: include a play or task list.
description:
- Loads a file with a list of plays or tasks to be executed in the current playbook.
- Files with a list of plays can only be included at the top level, lists of tasks can only be included where tasks normally run (in play).
- Before 2.0 all includes were 'static', executed at play load time.
- Since 2.0 task includes are dynamic and behave more like real tasks. This means they can be looped, skipped and use variables from any source.
Ansible tries to auto detect this, use the `static` directive (new in 2.1) to bypass autodetection.
version_added: "0.6"
options:
free-form:
description:
- This module allows you to specify the name of the file directly w/o any other options.
notes:
- This is really not a module, though it appears as such, this is a feature of the Ansible Engine, as such it cannot be overridden the same way a module can.
'''
EXAMPLES = """
# include a play after another play
- hosts: localhost
tasks:
- debug: msg="play1"
- include: otherplays.yml
# include task list in play
- hosts: all
tasks:
- debug: msg=task1
- include: stuff.yml
- debug: msg=task10
# dyanmic include task list in play
- hosts: all
tasks:
- debug: msg=task1
- include: {{hostvar}}.yml
static: no
when: hostvar is defined
"""
RETURN = """
# this module does not return anything except plays or tasks to execute
"""
| gpl-3.0 |
ParthGanatra/mitmproxy | mitmproxy/cmdline.py | 1 | 25259 | from __future__ import absolute_import
import os
import re
import configargparse
from netlib.tcp import Address, sslversion_choices
import netlib.utils
from . import filt, utils, version
from .proxy import config
# Host/port for the built-in mitmproxy app (see the app/app_host/app_port
# options consumed in get_common_options below).
APP_HOST = "mitm.it"
APP_PORT = 80
class ParseException(Exception):
    """Raised when a hook/header specifier string is malformed."""
    pass
def _parse_hook(s):
    """Split a separator-delimited hook spec into (pattern, a, b).

    The first character of *s* is the separator.  Two clauses mean the
    filter pattern defaults to ".*"; three mean an explicit pattern.
    Raises ParseException on too few clauses, an empty middle clause,
    or a filter pattern that filt.parse rejects.
    """
    # The caller-chosen separator is the spec's first character.
    sep, rem = s[0], s[1:]
    # maxsplit=2: extra separators stay inside the final clause.
    parts = rem.split(sep, 2)
    if len(parts) == 2:
        patt = ".*"
        a, b = parts
    elif len(parts) == 3:
        patt, a, b = parts
    else:
        raise ParseException(
            "Malformed hook specifier - too few clauses: %s" % s
        )

    # Only the middle clause is required to be non-empty; the final
    # clause (e.g. a replacement string) may legitimately be "".
    if not a:
        raise ParseException("Empty clause: %s" % str(patt))

    if not filt.parse(patt):
        raise ParseException("Malformed filter pattern: %s" % patt)
    return patt, a, b
def parse_replace_hook(s):
    """
    Parse a replacement hook specifier into (pattern, regex, replacement).

    The general form is /patt/regex/replacement, where the first
    character of *s* picks the separator (e.g. ":~q:foo:bar").  With
    only two clauses the pattern defaults to ".*".  Clauses are parsed
    left to right, so extra separators belong to the final clause:
    in "/one/two/foo/bar/" the replacement is "foo/bar/".

    Raises ParseException if the spec, filter pattern, or regex is
    malformed.
    """
    pattern, rex, repl = _parse_hook(s)
    try:
        re.compile(rex)
    except re.error as e:
        raise ParseException("Malformed replacement regex: %s" % str(e.message))
    return pattern, rex, repl
def parse_setheader(s):
    """
    Returns a (pattern, header, value) tuple.

    The general form for a setheader hook is as follows:

        /patt/header/value

    The first character specifies the separator. Example:

        :~q:foo:bar

    If only two clauses are specified, the pattern is set to match
    universally (i.e. ".*"). Example:

        /foo/bar/

    Clauses are parsed from left to right. Extra separators are taken to be
    part of the final clause. For instance, the value clause below is
    "foo/bar/":

        /one/two/foo/bar/

    Checks that the filter pattern is well-formed (unlike replace hooks,
    no regex compilation is performed here). Raises ParseException on
    error.
    """
    return _parse_hook(s)
def parse_server_spec(url):
    """Parse an upstream/reverse server URL into a config.ServerSpec.

    Only http/https schemes are accepted; anything else raises
    configargparse.ArgumentTypeError (suitable for argparse `type=`).
    """
    try:
        p = netlib.utils.parse_url(url)
        # Funnel unsupported schemes into the same error path as an
        # unparseable URL.
        if p[0] not in ("http", "https"):
            raise ValueError()
    except ValueError:
        raise configargparse.ArgumentTypeError(
            "Invalid server specification: %s" % url
        )
    address = Address(p[1:3])
    scheme = p[0].lower()
    return config.ServerSpec(scheme, address)
def get_common_options(options):
    """Translate the parsed command-line *options* namespace into the
    keyword dict shared by the mitmproxy tools.

    Validates replacement and setheader hook specifiers, reading
    replacement files eagerly.  Raises configargparse.ArgumentTypeError
    on malformed specifiers or unreadable files.
    """
    stickycookie, stickyauth = None, None
    if options.stickycookie_filt:
        stickycookie = options.stickycookie_filt

    if options.stickyauth_filt:
        stickyauth = options.stickyauth_filt

    stream_large_bodies = utils.parse_size(options.stream_large_bodies)

    reps = []
    for i in options.replace:
        try:
            p = parse_replace_hook(i)
        except ParseException as e:
            raise configargparse.ArgumentTypeError(e.message)
        reps.append(p)
    for i in options.replace_file:
        try:
            patt, rex, path = parse_replace_hook(i)
        except ParseException as e:
            raise configargparse.ArgumentTypeError(e.message)
        try:
            # BUG FIX: use a context manager so the file handle is closed
            # promptly (was a bare open(path, "rb").read(), leaking the
            # handle).
            with open(path, "rb") as f:
                v = f.read()
        except IOError:
            raise configargparse.ArgumentTypeError(
                "Could not read replace file: %s" % path
            )
        reps.append((patt, rex, v))

    setheaders = []
    for i in options.setheader:
        try:
            p = parse_setheader(i)
        except ParseException as e:
            raise configargparse.ArgumentTypeError(e.message)
        setheaders.append(p)

    return dict(
        app=options.app,
        app_host=options.app_host,
        app_port=options.app_port,

        anticache=options.anticache,
        anticomp=options.anticomp,
        client_replay=options.client_replay,
        kill=options.kill,
        no_server=options.no_server,
        refresh_server_playback=not options.norefresh,
        rheaders=options.rheaders,
        rfile=options.rfile,
        replacements=reps,
        setheaders=setheaders,
        server_replay=options.server_replay,
        scripts=options.scripts,
        stickycookie=stickycookie,
        stickyauth=stickyauth,
        stream_large_bodies=stream_large_bodies,
        showhost=options.showhost,
        outfile=options.outfile,
        verbosity=options.verbose,
        nopop=options.nopop,
        replay_ignore_content=options.replay_ignore_content,
        replay_ignore_params=options.replay_ignore_params,
        replay_ignore_payload_params=options.replay_ignore_payload_params,
        replay_ignore_host=options.replay_ignore_host
    )
def basic_options(parser):
    """Register the options shared by all mitmproxy tools on *parser*."""
    parser.add_argument(
        '--version',
        action='version',
        version="%(prog)s" + " " + version.VERSION
    )
    parser.add_argument(
        '--shortversion',
        action='version',
        help="show program's short version number and exit",
        version=version.VERSION
    )
    parser.add_argument(
        "--anticache",
        action="store_true", dest="anticache", default=False,
        help="""
            Strip out request headers that might cause the server to return
            304-not-modified.
        """
    )
    parser.add_argument(
        "--cadir",
        action="store", type=str, dest="cadir", default=config.CA_DIR,
        help="Location of the default mitmproxy CA files. (%s)" % config.CA_DIR
    )
    parser.add_argument(
        "--host",
        action="store_true", dest="showhost", default=False,
        help="Use the Host header to construct URLs for display."
    )
    parser.add_argument(
        "-q", "--quiet",
        action="store_true", dest="quiet",
        help="Quiet."
    )
    parser.add_argument(
        "-r", "--read-flows",
        action="store", dest="rfile", default=None,
        help="Read flows from file."
    )
    parser.add_argument(
        "-s", "--script",
        action="append", type=str, dest="scripts", default=[],
        metavar='"script.py --bar"',
        help="""
            Run a script. Surround with quotes to pass script arguments. Can be
            passed multiple times.
        """
    )
    parser.add_argument(
        "-t", "--stickycookie",
        action="store",
        dest="stickycookie_filt",
        default=None,
        metavar="FILTER",
        help="Set sticky cookie filter. Matched against requests."
    )
    parser.add_argument(
        "-u", "--stickyauth",
        action="store", dest="stickyauth_filt", default=None, metavar="FILTER",
        help="Set sticky auth filter. Matched against requests."
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_const", dest="verbose", default=1, const=2,
        help="Increase event log verbosity."
    )
    # -w and -a share the "outfile" destination; the type callable pairs the
    # path with the open() mode ("wb" overwrite vs "ab" append).
    outfile = parser.add_mutually_exclusive_group()
    outfile.add_argument(
        "-w", "--wfile",
        action="store", dest="outfile", type=lambda f: (f, "wb"),
        help="Write flows to file."
    )
    outfile.add_argument(
        "-a", "--afile",
        action="store", dest="outfile", type=lambda f: (f, "ab"),
        help="Append flows to file."
    )
    parser.add_argument(
        "-z", "--anticomp",
        action="store_true", dest="anticomp", default=False,
        help="Try to convince servers to send us un-compressed data."
    )
    parser.add_argument(
        "-Z", "--body-size-limit",
        action="store", dest="body_size_limit", default=None,
        metavar="SIZE",
        help="Byte size limit of HTTP request and response bodies."
             " Understands k/m/g suffixes, i.e. 3m for 3 megabytes."
    )
    parser.add_argument(
        "--stream",
        action="store", dest="stream_large_bodies", default=None,
        metavar="SIZE",
        help="""
            Stream data to the client if response body exceeds the given
            threshold. If streamed, the body will not be stored in any way.
            Understands k/m/g suffixes, i.e. 3m for 3 megabytes.
        """
    )
def proxy_modes(parser):
    """Register the mutually exclusive proxy operating modes on *parser*."""
    group = parser.add_argument_group("Proxy Modes").add_mutually_exclusive_group()
    group.add_argument(
        "-R", "--reverse",
        action="store",
        type=parse_server_spec,
        dest="reverse_proxy",
        default=None,
        help="""
            Forward all requests to upstream HTTP server:
            http[s][2http[s]]://host[:port]
        """
    )
    group.add_argument(
        "--socks",
        action="store_true", dest="socks_proxy", default=False,
        help="Set SOCKS5 proxy mode."
    )
    group.add_argument(
        "-T", "--transparent",
        action="store_true", dest="transparent_proxy", default=False,
        help="Set transparent proxy mode."
    )
    group.add_argument(
        "-U", "--upstream",
        action="store",
        type=parse_server_spec,
        dest="upstream_proxy",
        default=None,
        help="Forward all requests to upstream proxy server: http://host[:port]"
    )
def proxy_options(parser):
    """Register the generic proxy-server options on *parser*.

    Same flags, destinations and defaults as before; only the code style
    differs.
    """
    opts = parser.add_argument_group("Proxy Options")
    opts.add_argument(
        "-b", "--bind-address",
        dest="addr", action="store", type=str, default='',
        help="Address to bind proxy to (defaults to all interfaces)"
    )
    opts.add_argument(
        "-I", "--ignore",
        dest="ignore_hosts", metavar="HOST",
        action="append", type=str, default=[],
        help="""
            Ignore host and forward all traffic without processing it. In
            transparent mode, it is recommended to use an IP address (range),
            not the hostname. In regular mode, only SSL traffic is ignored and
            the hostname should be used. The supplied value is interpreted as a
            regular expression and matched on the ip or the hostname. Can be
            passed multiple times.
        """
    )
    opts.add_argument(
        "--tcp",
        dest="tcp_hosts", metavar="HOST",
        action="append", type=str, default=[],
        help="""
            Generic TCP SSL proxy mode for all hosts that match the pattern.
            Similar to --ignore, but SSL connections are intercepted. The
            communication contents are printed to the event log in verbose mode.
        """
    )
    opts.add_argument(
        "-n", "--no-server",
        dest="no_server", action="store_true",
        help="Don't start a proxy server."
    )
    opts.add_argument(
        "-p", "--port",
        dest="port", action="store", type=int, default=8080,
        help="Proxy service port."
    )
    opts.add_argument(
        "--no-http2",
        dest="http2", action="store_false",
        help="""
            Explicitly disable HTTP/2 support.
            If your OpenSSL version supports ALPN, HTTP/2 is enabled by default.
        """
    )
    # --raw-tcp / --no-raw-tcp toggle one destination; the first registered
    # action supplies the default (disabled).
    raw_tcp_group = opts.add_mutually_exclusive_group()
    raw_tcp_group.add_argument("--raw-tcp", action="store_true", dest="rawtcp")
    raw_tcp_group.add_argument(
        "--no-raw-tcp", action="store_false", dest="rawtcp",
        help="Explicitly enable/disable experimental raw tcp support. "
             "Disabled by default. "
             "Default value will change in a future version."
    )
def proxy_ssl_options(parser):
    """Register SSL/TLS options for client- and server-side connections."""
    # TODO: Agree to consistently either use "upstream" or "server".
    group = parser.add_argument_group("SSL")
    group.add_argument(
        "--cert",
        dest='certs',
        default=[],
        type=str,
        metavar="SPEC",
        action="append",
        help='Add an SSL certificate. SPEC is of the form "[domain=]path". '
        'The domain may include a wildcard, and is equal to "*" if not specified. '
        'The file at path is a certificate in PEM format. If a private key is included '
        'in the PEM, it is used, else the default key in the conf dir is used. '
        'The PEM file should contain the full certificate chain, with the leaf certificate '
        'as the first entry. Can be passed multiple times.')
    group.add_argument(
        "--ciphers-client", action="store",
        type=str, dest="ciphers_client", default=config.DEFAULT_CLIENT_CIPHERS,
        help="Set supported ciphers for client connections. (OpenSSL Syntax)"
    )
    group.add_argument(
        "--ciphers-server", action="store",
        type=str, dest="ciphers_server", default=None,
        help="Set supported ciphers for server connections. (OpenSSL Syntax)"
    )
    group.add_argument(
        "--client-certs", action="store",
        type=str, dest="clientcerts", default=None,
        help="Client certificate file or directory."
    )
    group.add_argument(
        "--no-upstream-cert", default=False,
        action="store_true", dest="no_upstream_cert",
        help="Don't connect to upstream server to look up certificate details."
    )
    group.add_argument(
        "--verify-upstream-cert", default=False,
        action="store_true", dest="ssl_verify_upstream_cert",
        help="Verify upstream server SSL/TLS certificates and fail if invalid "
             "or not present."
    )
    group.add_argument(
        "--upstream-trusted-cadir", default=None, action="store",
        dest="ssl_verify_upstream_trusted_cadir",
        help="Path to a directory of trusted CA certificates for upstream "
             "server verification prepared using the c_rehash tool."
    )
    group.add_argument(
        "--upstream-trusted-ca", default=None, action="store",
        dest="ssl_verify_upstream_trusted_ca",
        help="Path to a PEM formatted trusted CA certificate."
    )
    # sslversion_choices maps version names to library constants; it is
    # defined elsewhere in this module.
    group.add_argument(
        "--ssl-version-client", dest="ssl_version_client",
        default="secure", action="store",
        choices=sslversion_choices.keys(),
        help="Set supported SSL/TLS versions for client connections. "
             "SSLv2, SSLv3 and 'all' are INSECURE. Defaults to secure, which is TLS1.0+."
    )
    group.add_argument(
        "--ssl-version-server", dest="ssl_version_server",
        default="secure", action="store",
        choices=sslversion_choices.keys(),
        help="Set supported SSL/TLS versions for server connections. "
             "SSLv2, SSLv3 and 'all' are INSECURE. Defaults to secure, which is TLS1.0+."
    )
def onboarding_app(parser):
    """Register options controlling the built-in onboarding web app."""
    group = parser.add_argument_group("Onboarding App")
    group.add_argument(
        "--noapp",
        action="store_false", dest="app", default=True,
        help="Disable the mitmproxy onboarding app."
    )
    group.add_argument(
        "--app-host",
        action="store", dest="app_host", default=APP_HOST, metavar="host",
        help="""
            Domain to serve the onboarding app from. For transparent mode, use
            an IP when a DNS entry for the app domain is not present. Default:
            %s
        """ % APP_HOST
    )
    group.add_argument(
        "--app-port",
        action="store",
        dest="app_port",
        default=APP_PORT,
        type=int,
        metavar="80",
        help="Port to serve the onboarding app from."
    )
def client_replay(parser):
    """Register the client-side replay option on *parser*."""
    replay_group = parser.add_argument_group("Client Replay")
    replay_group.add_argument(
        "-c", "--client-replay",
        dest="client_replay", metavar="PATH",
        action="append", default=None,
        help="Replay client requests from a saved file."
    )
def server_replay(parser):
    """Register the server-side replay options on *parser*."""
    group = parser.add_argument_group("Server Replay")
    group.add_argument(
        "-S", "--server-replay",
        action="append", dest="server_replay", default=None, metavar="PATH",
        help="Replay server responses from a saved file."
    )
    group.add_argument(
        "-k", "--kill",
        action="store_true", dest="kill", default=False,
        help="Kill extra requests during replay."
    )
    group.add_argument(
        "--rheader",
        action="append", dest="rheaders", type=str,
        help="Request headers to be considered during replay. "
             "Can be passed multiple times."
    )
    group.add_argument(
        "--norefresh",
        action="store_true", dest="norefresh", default=False,
        help="""
            Disable response refresh, which updates times in cookies and headers
            for replayed responses.
        """
    )
    group.add_argument(
        "--no-pop",
        action="store_true", dest="nopop", default=False,
        help="Disable response pop from response flow. "
             "This makes it possible to replay same response multiple times."
    )
    # Ignoring the whole payload and ignoring individual payload parameters
    # are mutually exclusive strategies.
    payload = group.add_mutually_exclusive_group()
    payload.add_argument(
        "--replay-ignore-content",
        action="store_true", dest="replay_ignore_content", default=False,
        help="""
            Ignore request's content while searching for a saved flow to replay
        """
    )
    payload.add_argument(
        "--replay-ignore-payload-param",
        action="append", dest="replay_ignore_payload_params", type=str,
        help="""
            Request's payload parameters (application/x-www-form-urlencoded or multipart/form-data) to
            be ignored while searching for a saved flow to replay.
            Can be passed multiple times.
        """
    )
    group.add_argument(
        "--replay-ignore-param",
        action="append", dest="replay_ignore_params", type=str,
        help="""
            Request's parameters to be ignored while searching for a saved flow
            to replay. Can be passed multiple times.
        """
    )
    group.add_argument(
        "--replay-ignore-host",
        action="store_true",
        dest="replay_ignore_host",
        default=False,
        help="Ignore request's destination host while searching for a saved flow to replay")
def replacements(parser):
    """Register the body-replacement options on *parser*."""
    group = parser.add_argument_group(
        "Replacements",
        """
            Replacements are of the form "/pattern/regex/replacement", where
            the separator can be any character. Please see the documentation
            for more information.
        """.strip()
    )
    group.add_argument(
        "--replace",
        action="append", type=str, dest="replace", default=[],
        metavar="PATTERN",
        help="Replacement pattern."
    )
    group.add_argument(
        "--replace-from-file",
        action="append", type=str, dest="replace_file", default=[],
        metavar="PATH",
        help="""
            Replacement pattern, where the replacement clause is a path to a
            file.
        """
    )
def set_headers(parser):
    """Register the header-rewriting option on *parser*."""
    description = """
        Header specifications are of the form "/pattern/header/value",
        where the separator can be any character. Please see the
        documentation for more information.
    """.strip()
    headers_group = parser.add_argument_group("Set Headers", description)
    headers_group.add_argument(
        "--setheader",
        dest="setheader", metavar="PATTERN",
        action="append", type=str, default=[],
        help="Header set pattern."
    )
def proxy_authentication(parser):
    """Register the mutually exclusive proxy authentication options.

    At most one of --nonanonymous, --singleuser or --htpasswd may be given.
    """
    group = parser.add_argument_group(
        "Proxy Authentication",
        """
            Specify which users are allowed to access the proxy and the method
            used for authenticating them.
        """
    ).add_mutually_exclusive_group()
    group.add_argument(
        "--nonanonymous",
        action="store_true", dest="auth_nonanonymous",
        # Help text fixed: was "any user long as a credentials are specified".
        help="Allow access to any user as long as credentials are specified."
    )
    group.add_argument(
        "--singleuser",
        action="store", dest="auth_singleuser", type=str,
        metavar="USER",
        # Help text fixed: was "a a single user".
        help="""
            Allows access to a single user, specified in the form
            username:password.
        """
    )
    group.add_argument(
        "--htpasswd",
        action="store", dest="auth_htpasswd", type=str,
        metavar="PATH",
        help="Allow access to users specified in an Apache htpasswd file."
    )
def common_options(parser):
    """Install every option group shared by mitmproxy, mitmdump and mitmweb."""
    installers = (
        basic_options,
        proxy_modes,
        proxy_options,
        proxy_ssl_options,
        onboarding_app,
        client_replay,
        server_replay,
        replacements,
        set_headers,
        proxy_authentication,
    )
    # Registration order is preserved; it determines --help output order.
    for install in installers:
        install(parser)
def mitmproxy():
    """Build and return the argument parser for the interactive console tool."""
    # Don't import mitmproxy.console for mitmdump, urwid is not available on all
    # platforms.
    from .console import palettes

    parser = configargparse.ArgumentParser(
        usage="%(prog)s [options]",
        args_for_setting_config_path=["--conf"],
        default_config_files=[
            os.path.join(config.CA_DIR, "common.conf"),
            os.path.join(config.CA_DIR, "mitmproxy.conf")
        ],
        add_config_file_help=True,
        add_env_var_help=True
    )
    common_options(parser)
    parser.add_argument(
        "--palette", type=str, default=palettes.DEFAULT,
        action="store", dest="palette",
        choices=sorted(palettes.palettes.keys()),
        help="Select color palette: " + ", ".join(palettes.palettes.keys())
    )
    parser.add_argument(
        "--palette-transparent",
        action="store_true", dest="palette_transparent", default=False,
        help="Set transparent background for palette."
    )
    parser.add_argument(
        "-e", "--eventlog",
        action="store_true", dest="eventlog",
        help="Show event log."
    )
    parser.add_argument(
        "-f", "--follow",
        action="store_true", dest="follow",
        help="Follow flow list."
    )
    parser.add_argument(
        "--no-mouse",
        action="store_true", dest="no_mouse",
        help="Disable mouse interaction."
    )
    group = parser.add_argument_group(
        "Filters",
        "See help in mitmproxy for filter expression syntax."
    )
    group.add_argument(
        "-i", "--intercept", action="store",
        type=str, dest="intercept", default=None,
        help="Intercept filter expression."
    )
    group.add_argument(
        "-l", "--limit", action="store",
        type=str, dest="limit", default=None,
        help="Limit filter expression."
    )
    return parser
def mitmdump():
    """Build and return the argument parser for the mitmdump tool."""
    parser = configargparse.ArgumentParser(
        usage="%(prog)s [options] [filter]",
        args_for_setting_config_path=["--conf"],
        default_config_files=[
            os.path.join(config.CA_DIR, "common.conf"),
            os.path.join(config.CA_DIR, "mitmdump.conf")
        ],
        add_config_file_help=True,
        add_env_var_help=True
    )
    common_options(parser)
    parser.add_argument(
        "--keepserving",
        action="store_true", dest="keepserving", default=False,
        help="""
            Continue serving after client playback or file read. We exit by
            default.
        """
    )
    parser.add_argument(
        "-d", "--detail",
        action="count", dest="flow_detail", default=1,
        help="Increase flow detail display level. Can be passed multiple times."
    )
    # Positional remainder: an optional filter expression.
    parser.add_argument('args', nargs="...")
    return parser
def mitmweb():
    """Build and return the argument parser for the mitmweb tool."""
    parser = configargparse.ArgumentParser(
        usage="%(prog)s [options]",
        args_for_setting_config_path=["--conf"],
        default_config_files=[
            os.path.join(config.CA_DIR, "common.conf"),
            os.path.join(config.CA_DIR, "mitmweb.conf")
        ],
        add_config_file_help=True,
        add_env_var_help=True
    )
    group = parser.add_argument_group("Mitmweb")
    group.add_argument(
        "--wport",
        action="store", type=int, dest="wport", default=8081,
        metavar="PORT",
        help="Mitmweb port."
    )
    group.add_argument(
        "--wiface",
        action="store", dest="wiface", default="127.0.0.1",
        metavar="IFACE",
        help="Mitmweb interface."
    )
    group.add_argument(
        "--wdebug",
        action="store_true", dest="wdebug",
        help="Turn on mitmweb debugging"
    )
    group.add_argument(
        "--wsingleuser",
        action="store", dest="wsingleuser", type=str,
        metavar="USER",
        # Help text fixed: was "a a single user".
        help="""
            Allows access to a single user, specified in the form
            username:password.
        """
    )
    group.add_argument(
        "--whtpasswd",
        action="store", dest="whtpasswd", type=str,
        metavar="PATH",
        help="Allow access to users specified in an Apache htpasswd file."
    )
    common_options(parser)
    group = parser.add_argument_group(
        "Filters",
        "See help in mitmproxy for filter expression syntax."
    )
    group.add_argument(
        "-i", "--intercept", action="store",
        type=str, dest="intercept", default=None,
        help="Intercept filter expression."
    )
    return parser
| mit |
mohamed--abdel-maksoud/chromium.src | tools/telemetry/telemetry/results/html_output_formatter_unittest.py | 15 | 7922 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import StringIO
import unittest
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.results import html_output_formatter
from telemetry.results import page_test_results
from telemetry.value import scalar
def _MakePageSet():
  """Build the three-page PageSet fixture shared by the tests below."""
  story_set = page_set.PageSet(file_path=os.path.dirname(__file__))
  for url in ('http://www.foo.com/', 'http://www.bar.com/',
              'http://www.baz.com/'):
    story_set.AddUserStory(page_module.Page(url, story_set, story_set.base_dir))
  return story_set
class DeterministicHtmlOutputFormatter(
    html_output_formatter.HtmlOutputFormatter):
  """HtmlOutputFormatter with pinned build time and revision.

  The overrides make formatter output deterministic so the tests can compare
  against literal expected dictionaries.
  """

  def _GetBuildTime(self):
    # Fixed value instead of the real build timestamp.
    return 'build_time'

  def _GetRevision(self):
    # Fixed value instead of the real source revision.
    return 'revision'
class FakeMetadataForTest(benchmark.BenchmarkMetadata):
  """Benchmark metadata stub that always reports the name 'test_name'."""

  def __init__(self):
    super(FakeMetadataForTest, self).__init__('test_name')
# Wrap string IO with a .name property so that it behaves more like a file.
class StringIOFile(StringIO.StringIO):
  # A fixed fake path lets the formatter report an output file without I/O.
  name = 'fake_output_file'
class HtmlOutputFormatterTest(unittest.TestCase):
  """Checks HtmlOutputFormatter result aggregation across repeated runs."""

  def test_basic_summary(self):
    """Formats three result sets into one file: write, append, then reset."""
    test_page_set = _MakePageSet()
    output_file = StringIOFile()

    # Run the first time and verify the results are written to the HTML file.
    results = page_test_results.PageTestResults()
    results.WillRunPage(test_page_set.pages[0])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[0], 'a', 'seconds', 3))
    results.DidRunPage(test_page_set.pages[0])

    results.WillRunPage(test_page_set.pages[1])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[1], 'a', 'seconds', 7))
    results.DidRunPage(test_page_set.pages[1])

    formatter = DeterministicHtmlOutputFormatter(
        output_file, FakeMetadataForTest(), False, False, 'browser_type')
    formatter.Format(results)
    expected = {
        "platform": "browser_type",
        "buildTime": "build_time",
        "label": None,
        "tests": {
            "test_name": {
                "metrics": {
                    "a": {
                        "current": [3, 7],
                        "units": "seconds",
                        "important": True
                    },
                    "telemetry_page_measurement_results.num_failed": {
                        "current": [0],
                        "units": "count",
                        "important": False
                    },
                    "a.http://www.bar.com/": {
                        "current": [7],
                        "units": "seconds",
                        "important": False
                    },
                    "telemetry_page_measurement_results.num_errored": {
                        "current": [0],
                        "units": "count",
                        "important": False
                    },
                    "a.http://www.foo.com/": {
                        "current": [3],
                        "units": "seconds",
                        "important": False
                    }
                }
            }
        },
        "revision": "revision"
    }
    self.assertEquals(expected, formatter.GetResults())

    # Run the second time and verify the results are appended to the HTML file.
    output_file.seek(0)
    results = page_test_results.PageTestResults()
    results.WillRunPage(test_page_set.pages[0])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[0], 'a', 'seconds', 4))
    results.DidRunPage(test_page_set.pages[0])

    results.WillRunPage(test_page_set.pages[1])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[1], 'a', 'seconds', 8))
    results.DidRunPage(test_page_set.pages[1])

    formatter = DeterministicHtmlOutputFormatter(
        output_file, FakeMetadataForTest(), False, False, 'browser_type')
    formatter.Format(results)
    # Combined results now hold both the first and the second run.
    expected = [
        {
            "platform": "browser_type",
            "buildTime": "build_time",
            "label": None,
            "tests": {
                "test_name": {
                    "metrics": {
                        "a": {
                            "current": [3, 7],
                            "units": "seconds",
                            "important": True
                        },
                        "telemetry_page_measurement_results.num_failed": {
                            "current": [0],
                            "units": "count",
                            "important": False
                        },
                        "a.http://www.bar.com/": {
                            "current": [7],
                            "units": "seconds",
                            "important": False
                        },
                        "telemetry_page_measurement_results.num_errored": {
                            "current": [0],
                            "units": "count",
                            "important": False
                        },
                        "a.http://www.foo.com/": {
                            "current": [3],
                            "units": "seconds",
                            "important": False
                        }
                    }
                }
            },
            "revision": "revision"
        },
        {
            "platform": "browser_type",
            "buildTime": "build_time",
            "label": None,
            "tests": {
                "test_name": {
                    "metrics": {
                        "a": {
                            "current": [4, 8],
                            "units": "seconds",
                            "important": True
                        },
                        "telemetry_page_measurement_results.num_failed": {
                            "current": [0],
                            "units": "count",
                            "important": False,
                        },
                        "a.http://www.bar.com/": {
                            "current": [8],
                            "units": "seconds",
                            "important": False
                        },
                        "telemetry_page_measurement_results.num_errored": {
                            "current": [0],
                            "units": "count",
                            "important": False
                        },
                        "a.http://www.foo.com/": {
                            "current": [4],
                            "units": "seconds",
                            "important": False
                        }
                    }
                }
            },
            "revision": "revision"
        }]
    self.assertEquals(expected, formatter.GetCombinedResults())
    last_output_len = len(output_file.getvalue())

    # Now reset the results and verify the old ones are gone.
    output_file.seek(0)
    results = page_test_results.PageTestResults()
    results.WillRunPage(test_page_set.pages[0])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[0], 'a', 'seconds', 5))
    results.DidRunPage(test_page_set.pages[0])

    results.WillRunPage(test_page_set.pages[1])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[1], 'a', 'seconds', 9))
    results.DidRunPage(test_page_set.pages[1])

    # reset_results=True (third constructor argument) discards prior runs.
    formatter = DeterministicHtmlOutputFormatter(
        output_file, FakeMetadataForTest(), True, False, 'browser_type')
    formatter.Format(results)
    expected = [{
        "platform": "browser_type",
        "buildTime": "build_time",
        "label": None,
        "tests": {
            "test_name": {
                "metrics": {
                    "a": {
                        "current": [5, 9],
                        "units": "seconds",
                        "important": True
                    },
                    "telemetry_page_measurement_results.num_failed": {
                        "current": [0],
                        "units": "count",
                        "important": False
                    },
                    "a.http://www.bar.com/": {
                        "current": [9],
                        "units": "seconds",
                        "important": False
                    },
                    "telemetry_page_measurement_results.num_errored": {
                        "current": [0],
                        "units": "count",
                        "important": False
                    },
                    "a.http://www.foo.com/": {
                        "current": [5],
                        "units": "seconds",
                        "important": False
                    }
                }
            }
        },
        "revision": "revision"
    }]
    self.assertEquals(expected, formatter.GetCombinedResults())
    self.assertTrue(len(output_file.getvalue()) < last_output_len)
| bsd-3-clause |
Servir-Mekong/SurfaceWaterTool | lib/pyasn1_modules/rfc5934.py | 14 | 23798 | # This file is being contributed to pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Trust Anchor Format
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc5934.txt
from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
from pyasn1_modules import rfc2985
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc5914
MAX = float('inf')
def _OID(*components):
    """Concatenate integer arcs and ObjectIdentifier values into one OID."""
    arcs = []
    for component in components:
        if isinstance(component, univ.ObjectIdentifier):
            # Splice in every arc of an existing OID.
            arcs.extend(list(component))
        else:
            arcs.append(int(component))
    return univ.ObjectIdentifier(arcs)
# Re-export types from companion RFC modules so the TAMP definitions below
# read like the ASN.1 source in RFC 5934.

# Imports from RFC 2985
SingleAttribute = rfc2985.SingleAttribute

# Imports from RFC5914
CertPathControls = rfc5914.CertPathControls
TrustAnchorChoice = rfc5914.TrustAnchorChoice
TrustAnchorTitle = rfc5914.TrustAnchorTitle

# Imports from RFC 5280
AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
AnotherName = rfc5280.AnotherName
Attribute = rfc5280.Attribute
Certificate = rfc5280.Certificate
CertificateSerialNumber = rfc5280.CertificateSerialNumber
Extension = rfc5280.Extension
Extensions = rfc5280.Extensions
KeyIdentifier = rfc5280.KeyIdentifier
Name = rfc5280.Name
SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
TBSCertificate = rfc5280.TBSCertificate
Validity = rfc5280.Validity
# Object Identifier Arc for TAMP Message Content Types
id_tamp = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.77')


# TAMP Status Query Message
id_ct_TAMP_statusQuery = _OID(id_tamp, 1)


# TAMPVersion ::= INTEGER { v1(1), v2(2) }
class TAMPVersion(univ.Integer):
    pass

TAMPVersion.namedValues = namedval.NamedValues(
    ('v1', 1),
    ('v2', 2)
)


# TerseOrVerbose ::= ENUMERATED { terse(1), verbose(2) }
class TerseOrVerbose(univ.Enumerated):
    pass

TerseOrVerbose.namedValues = namedval.NamedValues(
    ('terse', 1),
    ('verbose', 2)
)


# Selects all modules, one serial number, or an inclusive serial range.
class HardwareSerialEntry(univ.Choice):
    pass

HardwareSerialEntry.componentType = namedtype.NamedTypes(
    namedtype.NamedType('all', univ.Null()),
    namedtype.NamedType('single', univ.OctetString()),
    namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes(
        namedtype.NamedType('low', univ.OctetString()),
        namedtype.NamedType('high', univ.OctetString())
    ))
    )
)


class HardwareModules(univ.Sequence):
    pass

HardwareModules.componentType = namedtype.NamedTypes(
    namedtype.NamedType('hwType', univ.ObjectIdentifier()),
    namedtype.NamedType('hwSerialEntries', univ.SequenceOf(
        componentType=HardwareSerialEntry()).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)


class HardwareModuleIdentifierList(univ.SequenceOf):
    pass

HardwareModuleIdentifierList.componentType = HardwareModules()
HardwareModuleIdentifierList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)


class Community(univ.ObjectIdentifier):
    pass


class CommunityIdentifierList(univ.SequenceOf):
    pass

CommunityIdentifierList.componentType = Community()
CommunityIdentifierList.subtypeSpec=constraint.ValueSizeConstraint(0, MAX)


# TargetIdentifier ::= CHOICE of hardware modules, communities, all modules,
# a URI, or another GeneralName form (context tags [1]..[5]).
class TargetIdentifier(univ.Choice):
    pass

TargetIdentifier.componentType = namedtype.NamedTypes(
    namedtype.NamedType('hwModules', HardwareModuleIdentifierList().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.NamedType('communities', CommunityIdentifierList().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
    namedtype.NamedType('allModules', univ.Null().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
    namedtype.NamedType('uri', char.IA5String().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
    namedtype.NamedType('otherName', AnotherName().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)))
)


# SeqNumber ::= INTEGER (0..9223372036854775807)
class SeqNumber(univ.Integer):
    pass

SeqNumber.subtypeSpec = constraint.ValueRangeConstraint(0, 9223372036854775807)


class TAMPMsgRef(univ.Sequence):
    pass

TAMPMsgRef.componentType = namedtype.NamedTypes(
    namedtype.NamedType('target', TargetIdentifier()),
    namedtype.NamedType('seqNum', SeqNumber())
)


# TAMPStatusQuery ::= SEQUENCE with DEFAULT version v2 and DEFAULT verbose.
class TAMPStatusQuery(univ.Sequence):
    pass

TAMPStatusQuery.componentType = namedtype.NamedTypes(
    namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
        implicitTag=tag.Tag(tag.tagClassContext,
                            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.DefaultedNamedType('terse', TerseOrVerbose().subtype(
        implicitTag=tag.Tag(tag.tagClassContext,
                            tag.tagFormatSimple, 1)).subtype(value='verbose')),
    namedtype.NamedType('query', TAMPMsgRef())
)


# Template ContentInfo carrying a TAMPStatusQuery.
tamp_status_query = rfc5652.ContentInfo()
tamp_status_query['contentType'] = id_ct_TAMP_statusQuery
tamp_status_query['content'] = TAMPStatusQuery()
# TAMP Status Response Message
id_ct_TAMP_statusResponse = _OID(id_tamp, 2)


class KeyIdentifiers(univ.SequenceOf):
    pass

KeyIdentifiers.componentType = KeyIdentifier()
KeyIdentifiers.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)


class TrustAnchorChoiceList(univ.SequenceOf):
    pass

TrustAnchorChoiceList.componentType = TrustAnchorChoice()
TrustAnchorChoiceList.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)


# Pairs a trust-anchor key identifier with its last-seen sequence number.
class TAMPSequenceNumber(univ.Sequence):
    pass

TAMPSequenceNumber.componentType = namedtype.NamedTypes(
    namedtype.NamedType('keyId', KeyIdentifier()),
    namedtype.NamedType('seqNumber', SeqNumber())
)


class TAMPSequenceNumbers(univ.SequenceOf):
    pass

TAMPSequenceNumbers.componentType = TAMPSequenceNumber()
TAMPSequenceNumbers.subtypeSpec=constraint.ValueSizeConstraint(1, MAX)


# Terse form: just key identifiers (and optionally communities).
class TerseStatusResponse(univ.Sequence):
    pass

TerseStatusResponse.componentType = namedtype.NamedTypes(
    namedtype.NamedType('taKeyIds', KeyIdentifiers()),
    namedtype.OptionalNamedType('communities', CommunityIdentifierList())
)


# Verbose form: full trust-anchor data plus optional context ([0]..[2]).
class VerboseStatusResponse(univ.Sequence):
    pass

VerboseStatusResponse.componentType = namedtype.NamedTypes(
    namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
    namedtype.OptionalNamedType('continPubKeyDecryptAlg',
        AlgorithmIdentifier().subtype(implicitTag=tag.Tag(
            tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.OptionalNamedType('communities',
        CommunityIdentifierList().subtype(implicitTag=tag.Tag(
            tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('tampSeqNumbers',
        TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(
            tag.tagClassContext, tag.tagFormatSimple, 2)))
)


class StatusResponse(univ.Choice):
    pass

StatusResponse.componentType = namedtype.NamedTypes(
    namedtype.NamedType('terseResponse', TerseStatusResponse().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
    namedtype.NamedType('verboseResponse', VerboseStatusResponse().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)


class TAMPStatusResponse(univ.Sequence):
    pass

TAMPStatusResponse.componentType = namedtype.NamedTypes(
    namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
        implicitTag=tag.Tag(tag.tagClassContext,
                            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.NamedType('query', TAMPMsgRef()),
    namedtype.NamedType('response', StatusResponse()),
    namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1))
)


# Template ContentInfo carrying a TAMPStatusResponse.
tamp_status_response = rfc5652.ContentInfo()
tamp_status_response['contentType'] = id_ct_TAMP_statusResponse
tamp_status_response['content'] = TAMPStatusResponse()
# Trust Anchor Update Message
id_ct_TAMP_update = _OID(id_tamp, 3)


# Partial TBSCertificate: every field optional except subjectPublicKeyInfo.
class TBSCertificateChangeInfo(univ.Sequence):
    pass

TBSCertificateChangeInfo.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('serialNumber', CertificateSerialNumber()),
    namedtype.OptionalNamedType('signature', AlgorithmIdentifier().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.OptionalNamedType('issuer', Name().subtype(implicitTag=tag.Tag(
        tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('validity', Validity().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
    namedtype.OptionalNamedType('subject', Name().subtype(implicitTag=tag.Tag(
        tag.tagClassContext, tag.tagFormatSimple, 3))),
    namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
    namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag(
        tag.tagClassContext, tag.tagFormatSimple, 5)))
)


class TrustAnchorChangeInfo(univ.Sequence):
    pass

TrustAnchorChangeInfo.componentType = namedtype.NamedTypes(
    namedtype.NamedType('pubKey', SubjectPublicKeyInfo()),
    namedtype.OptionalNamedType('keyId', KeyIdentifier()),
    namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()),
    namedtype.OptionalNamedType('certPath', CertPathControls()),
    namedtype.OptionalNamedType('exts', Extensions().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)


class TrustAnchorChangeInfoChoice(univ.Choice):
    pass

TrustAnchorChangeInfoChoice.componentType = namedtype.NamedTypes(
    namedtype.NamedType('tbsCertChange', TBSCertificateChangeInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
    namedtype.NamedType('taChange', TrustAnchorChangeInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)


# One update operation: add, remove (keyed by public key), or change.
class TrustAnchorUpdate(univ.Choice):
    pass

TrustAnchorUpdate.componentType = namedtype.NamedTypes(
    namedtype.NamedType('add', TrustAnchorChoice().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
    namedtype.NamedType('remove', SubjectPublicKeyInfo().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
    namedtype.NamedType('change', TrustAnchorChangeInfoChoice().subtype(
        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
)


class TAMPUpdate(univ.Sequence):
    pass

TAMPUpdate.componentType = namedtype.NamedTypes(
    namedtype.DefaultedNamedType('version',
        TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.DefaultedNamedType('terse',
        TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 1)).subtype(value='verbose')),
    namedtype.NamedType('msgRef', TAMPMsgRef()),
    namedtype.NamedType('updates',
        univ.SequenceOf(componentType=TrustAnchorUpdate()).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
    namedtype.OptionalNamedType('tampSeqNumbers',
        TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(
            tag.tagClassContext, tag.tagFormatSimple, 2)))
)


# Template ContentInfo carrying a TAMPUpdate.
tamp_update = rfc5652.ContentInfo()
tamp_update['contentType'] = id_ct_TAMP_update
tamp_update['content'] = TAMPUpdate()
# Trust Anchor Update Confirm Message
id_ct_TAMP_updateConfirm = _OID(id_tamp, 4)


# Status of processing a TAMP message (or of a single update within one).
class StatusCode(univ.Enumerated):
    pass


StatusCode.namedValues = namedval.NamedValues(
    ('success', 0),
    ('decodeFailure', 1),
    ('badContentInfo', 2),
    ('badSignedData', 3),
    ('badEncapContent', 4),
    ('badCertificate', 5),
    ('badSignerInfo', 6),
    ('badSignedAttrs', 7),
    ('badUnsignedAttrs', 8),
    ('missingContent', 9),
    ('noTrustAnchor', 10),
    ('notAuthorized', 11),
    ('badDigestAlgorithm', 12),
    ('badSignatureAlgorithm', 13),
    ('unsupportedKeySize', 14),
    ('unsupportedParameters', 15),
    ('signatureFailure', 16),
    ('insufficientMemory', 17),
    ('unsupportedTAMPMsgType', 18),
    ('apexTAMPAnchor', 19),
    ('improperTAAddition', 20),
    ('seqNumFailure', 21),
    ('contingencyPublicKeyDecrypt', 22),
    ('incorrectTarget', 23),
    ('communityUpdateFailed', 24),
    ('trustAnchorNotFound', 25),
    ('unsupportedTAAlgorithm', 26),
    ('unsupportedTAKeySize', 27),
    ('unsupportedContinPubKeyDecryptAlg', 28),
    ('missingSignature', 29),
    ('resourcesBusy', 30),
    ('versionNumberMismatch', 31),
    ('missingPolicySet', 32),
    ('revokedCertificate', 33),
    ('unsupportedTrustAnchorFormat', 34),
    ('improperTAChange', 35),
    ('malformed', 36),
    ('cmsError', 37),
    ('unsupportedTargetIdentifier', 38),
    ('other', 127)
)


# SEQUENCE SIZE (1..MAX) OF StatusCode -- one entry per attempted update.
class StatusCodeList(univ.SequenceOf):
    pass


StatusCodeList.componentType = StatusCode()
StatusCodeList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)


# Terse confirmation: only the per-update status codes.
class TerseUpdateConfirm(StatusCodeList):
    pass


# Verbose confirmation: statuses plus the resulting trust anchor state.
class VerboseUpdateConfirm(univ.Sequence):
    pass


VerboseUpdateConfirm.componentType = namedtype.NamedTypes(
    namedtype.NamedType('status', StatusCodeList()),
    namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
    namedtype.OptionalNamedType('tampSeqNumbers', TAMPSequenceNumbers()),
    # DEFAULT TRUE
    namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1))
)


# CHOICE between the terse ([0]) and verbose ([1]) confirmation forms.
class UpdateConfirm(univ.Choice):
    pass


UpdateConfirm.componentType = namedtype.NamedTypes(
    namedtype.NamedType('terseConfirm', TerseUpdateConfirm().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
    namedtype.NamedType('verboseConfirm', VerboseUpdateConfirm().subtype(
        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)


# Top-level Update Confirm message referencing the update it answers.
class TAMPUpdateConfirm(univ.Sequence):
    pass


TAMPUpdateConfirm.componentType = namedtype.NamedTypes(
    # DEFAULT v2
    namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
        implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.NamedType('update', TAMPMsgRef()),
    namedtype.NamedType('confirm', UpdateConfirm())
)


# Ready-made CMS ContentInfo skeleton for an Update Confirm message.
tamp_update_confirm = rfc5652.ContentInfo()
tamp_update_confirm['contentType'] = id_ct_TAMP_updateConfirm
tamp_update_confirm['content'] = TAMPUpdateConfirm()
# Apex Trust Anchor Update Message
id_ct_TAMP_apexUpdate = _OID(id_tamp, 5)


# Replaces the single apex (root) trust anchor, optionally clearing the
# existing trust anchors and/or community memberships first.
class TAMPApexUpdate(univ.Sequence):
    pass


TAMPApexUpdate.componentType = namedtype.NamedTypes(
    # DEFAULT v2
    namedtype.DefaultedNamedType('version',
        TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    # DEFAULT verbose
    namedtype.DefaultedNamedType('terse',
        TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 1)).subtype(value='verbose')),
    namedtype.NamedType('msgRef', TAMPMsgRef()),
    namedtype.NamedType('clearTrustAnchors', univ.Boolean()),
    namedtype.NamedType('clearCommunities', univ.Boolean()),
    namedtype.OptionalNamedType('seqNumber', SeqNumber()),
    namedtype.NamedType('apexTA', TrustAnchorChoice())
)


# Ready-made CMS ContentInfo skeleton for an Apex Update message.
tamp_apex_update = rfc5652.ContentInfo()
tamp_apex_update['contentType'] = id_ct_TAMP_apexUpdate
tamp_apex_update['content'] = TAMPApexUpdate()


# Apex Trust Anchor Update Confirm Message
id_ct_TAMP_apexUpdateConfirm = _OID(id_tamp, 6)


# Terse apex confirmation is a single StatusCode (one anchor is involved).
class TerseApexUpdateConfirm(StatusCode):
    pass


# Verbose apex confirmation: status plus the resulting anchor/community state.
class VerboseApexUpdateConfirm(univ.Sequence):
    pass


VerboseApexUpdateConfirm.componentType = namedtype.NamedTypes(
    namedtype.NamedType('status', StatusCode()),
    namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
    namedtype.OptionalNamedType('communities',
        CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0))),
    namedtype.OptionalNamedType('tampSeqNumbers',
        TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 1)))
)


# CHOICE between the terse ([0]) and verbose ([1]) apex confirmation forms.
class ApexUpdateConfirm(univ.Choice):
    pass


ApexUpdateConfirm.componentType = namedtype.NamedTypes(
    namedtype.NamedType('terseApexConfirm',
        TerseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0))),
    namedtype.NamedType('verboseApexConfirm',
        VerboseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatConstructed, 1)))
)


# Top-level Apex Update Confirm message referencing the apex update.
class TAMPApexUpdateConfirm(univ.Sequence):
    pass


TAMPApexUpdateConfirm.componentType = namedtype.NamedTypes(
    # DEFAULT v2
    namedtype.DefaultedNamedType('version',
        TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.NamedType('apexReplace', TAMPMsgRef()),
    namedtype.NamedType('apexConfirm', ApexUpdateConfirm())
)


# Ready-made CMS ContentInfo skeleton for an Apex Update Confirm message.
tamp_apex_update_confirm = rfc5652.ContentInfo()
tamp_apex_update_confirm['contentType'] = id_ct_TAMP_apexUpdateConfirm
tamp_apex_update_confirm['content'] = TAMPApexUpdateConfirm()
# Community Update Message
id_ct_TAMP_communityUpdate = _OID(id_tamp, 7)


# Community membership changes: identifiers to remove ([1]) and add ([2]).
class CommunityUpdates(univ.Sequence):
    pass


CommunityUpdates.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('remove',
        CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 1))),
    namedtype.OptionalNamedType('add',
        CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 2)))
)


# Top-level Community Update message.
class TAMPCommunityUpdate(univ.Sequence):
    pass


TAMPCommunityUpdate.componentType = namedtype.NamedTypes(
    # DEFAULT v2
    namedtype.DefaultedNamedType('version',
        TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    # DEFAULT verbose
    namedtype.DefaultedNamedType('terse',
        TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 1)).subtype(value='verbose')),
    namedtype.NamedType('msgRef', TAMPMsgRef()),
    namedtype.NamedType('updates', CommunityUpdates())
)


# Ready-made CMS ContentInfo skeleton for a Community Update message.
tamp_community_update = rfc5652.ContentInfo()
tamp_community_update['contentType'] = id_ct_TAMP_communityUpdate
tamp_community_update['content'] = TAMPCommunityUpdate()


# Community Update Confirm Message
id_ct_TAMP_communityUpdateConfirm = _OID(id_tamp, 8)


# Terse community confirmation is a single StatusCode.
class TerseCommunityConfirm(StatusCode):
    pass


# Verbose community confirmation: status plus the resulting community list.
class VerboseCommunityConfirm(univ.Sequence):
    pass


VerboseCommunityConfirm.componentType = namedtype.NamedTypes(
    namedtype.NamedType('status', StatusCode()),
    namedtype.OptionalNamedType('communities', CommunityIdentifierList())
)


# CHOICE between the terse ([0]) and verbose ([1]) community confirmations.
class CommunityConfirm(univ.Choice):
    pass


CommunityConfirm.componentType = namedtype.NamedTypes(
    namedtype.NamedType('terseCommConfirm',
        TerseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0))),
    namedtype.NamedType('verboseCommConfirm',
        VerboseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatConstructed, 1)))
)


# Top-level Community Update Confirm message.
class TAMPCommunityUpdateConfirm(univ.Sequence):
    pass


TAMPCommunityUpdateConfirm.componentType = namedtype.NamedTypes(
    # DEFAULT v2
    namedtype.DefaultedNamedType('version',
        TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.NamedType('update', TAMPMsgRef()),
    namedtype.NamedType('commConfirm', CommunityConfirm())
)


# Ready-made CMS ContentInfo skeleton for a Community Update Confirm message.
tamp_community_update_confirm = rfc5652.ContentInfo()
tamp_community_update_confirm['contentType'] = id_ct_TAMP_communityUpdateConfirm
tamp_community_update_confirm['content'] = TAMPCommunityUpdateConfirm()
# Sequence Number Adjust Message
id_ct_TAMP_seqNumAdjust = _OID(id_tamp, 10)


# Requests that the recipient adjust its stored TAMP sequence numbers.
class SequenceNumberAdjust(univ.Sequence):
    pass


SequenceNumberAdjust.componentType = namedtype.NamedTypes(
    # DEFAULT v2
    namedtype.DefaultedNamedType('version',
        TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.NamedType('msgRef', TAMPMsgRef())
)


# Ready-made CMS ContentInfo skeleton for a Sequence Number Adjust message.
tamp_sequence_number_adjust = rfc5652.ContentInfo()
tamp_sequence_number_adjust['contentType'] = id_ct_TAMP_seqNumAdjust
tamp_sequence_number_adjust['content'] = SequenceNumberAdjust()


# Sequence Number Adjust Confirm Message
id_ct_TAMP_seqNumAdjustConfirm = _OID(id_tamp, 11)


# Confirms an adjust request, referencing it and reporting a status.
class SequenceNumberAdjustConfirm(univ.Sequence):
    pass


SequenceNumberAdjustConfirm.componentType = namedtype.NamedTypes(
    # DEFAULT v2
    namedtype.DefaultedNamedType('version',
        TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.NamedType('adjust', TAMPMsgRef()),
    namedtype.NamedType('status', StatusCode())
)


# Ready-made CMS ContentInfo skeleton for a Sequence Number Adjust Confirm.
tamp_sequence_number_adjust_confirm = rfc5652.ContentInfo()
tamp_sequence_number_adjust_confirm['contentType'] = id_ct_TAMP_seqNumAdjustConfirm
tamp_sequence_number_adjust_confirm['content'] = SequenceNumberAdjustConfirm()


# TAMP Error Message
# (note: arc 9, out of numeric order relative to the adjust messages above)
id_ct_TAMP_error = _OID(id_tamp, 9)


# Reports failure to process a TAMP message of the given content type.
class TAMPError(univ.Sequence):
    pass


TAMPError.componentType = namedtype.NamedTypes(
    # DEFAULT v2
    namedtype.DefaultedNamedType('version',
        TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
            tag.tagFormatSimple, 0)).subtype(value='v2')),
    namedtype.NamedType('msgType', univ.ObjectIdentifier()),
    namedtype.NamedType('status', StatusCode()),
    namedtype.OptionalNamedType('msgRef', TAMPMsgRef())
)


# Ready-made CMS ContentInfo skeleton for a TAMP Error message.
tamp_error = rfc5652.ContentInfo()
tamp_error['contentType'] = id_ct_TAMP_error
tamp_error['content'] = TAMPError()
# Object Identifier Arc for Attributes
id_attributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.5')

# contingency-public-key-decrypt-key unsigned attribute
id_aa_TAMP_contingencyPublicKeyDecryptKey = _OID(id_attributes, 63)


# Symmetric key material carried in the clear (as an OCTET STRING).
class PlaintextSymmetricKey(univ.OctetString):
    pass


# Ready-made attribute skeleton for the contingency decrypt key.
contingency_public_key_decrypt_key = Attribute()
contingency_public_key_decrypt_key['type'] = id_aa_TAMP_contingencyPublicKeyDecryptKey
contingency_public_key_decrypt_key['values'][0] = PlaintextSymmetricKey()

# id-pe-wrappedApexContinKey extension
id_pe_wrappedApexContinKey = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.20')


# Wrapped apex contingency public key: the wrapping algorithm plus the
# wrapped key bytes.
class ApexContingencyKey(univ.Sequence):
    pass


ApexContingencyKey.componentType = namedtype.NamedTypes(
    namedtype.NamedType('wrapAlgorithm', AlgorithmIdentifier()),
    namedtype.NamedType('wrappedContinPubKey', univ.OctetString())
)


# Ready-made (non-critical) certificate extension skeleton.
wrappedApexContinKey = Extension()
wrappedApexContinKey['extnID'] = id_pe_wrappedApexContinKey
wrappedApexContinKey['critical'] = 0
wrappedApexContinKey['extnValue'] = univ.OctetString()

# Add to the map of CMS Content Type OIDs to Content Types in
# rfc5652.py
_cmsContentTypesMapUpdate = {
    id_ct_TAMP_statusQuery: TAMPStatusQuery(),
    id_ct_TAMP_statusResponse: TAMPStatusResponse(),
    id_ct_TAMP_update: TAMPUpdate(),
    id_ct_TAMP_updateConfirm: TAMPUpdateConfirm(),
    id_ct_TAMP_apexUpdate: TAMPApexUpdate(),
    id_ct_TAMP_apexUpdateConfirm: TAMPApexUpdateConfirm(),
    id_ct_TAMP_communityUpdate: TAMPCommunityUpdate(),
    id_ct_TAMP_communityUpdateConfirm: TAMPCommunityUpdateConfirm(),
    id_ct_TAMP_seqNumAdjust: SequenceNumberAdjust(),
    id_ct_TAMP_seqNumAdjustConfirm: SequenceNumberAdjustConfirm(),
    id_ct_TAMP_error: TAMPError(),
}

rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)

# Add to the map of CMS Attribute OIDs to Attribute Values in
# rfc5652.py
_cmsAttributesMapUpdate = {
    id_aa_TAMP_contingencyPublicKeyDecryptKey: PlaintextSymmetricKey(),
}

rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)

# Add to the map of Certificate Extension OIDs to Extensions in
# rfc5280.py
_certificateExtensionsMap = {
    id_pe_wrappedApexContinKey: ApexContingencyKey(),
}

rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
| gpl-3.0 |
incuna/feincms-extensions | feincms_extensions/tests/factories.py | 1 | 1198 | from django.contrib.auth import get_user_model
import factory
from feincms.module.medialibrary.models import MediaFile
from feincms.module.page.models import Page
from . import content, models
# Concrete user model configured for this Django project (may be a
# custom model, hence get_user_model() rather than auth.User directly).
User = get_user_model()
class DummyFactory(factory.DjangoModelFactory):
    # Old-style (pre-factory_boy 2.4) model declaration; note that
    # MediaFileFactory below uses the newer ``class Meta`` form instead --
    # TODO confirm both styles work with the pinned factory_boy version.
    FACTORY_FOR = models.Dummy

    @factory.post_generation
    def content(self, create, extracted, **kwargs):
        # Attach a TestContent block to the instance's 'body' region, but
        # only when the instance was actually saved (create strategy) --
        # the content row needs a persisted parent.
        if not create:
            return
        Content = models.Dummy.content_type_for(content.TestContent)
        Content.objects.create(parent=self, region='body')
class PageFactory(factory.DjangoModelFactory):
    # Old-style (pre-factory_boy 2.4) model declaration.
    FACTORY_FOR = Page

    # Unique titles: 'Page 0', 'Page 1', ...
    title = factory.Sequence('Page {}'.format)
class UserFactory(factory.DjangoModelFactory):
    # Old-style (pre-factory_boy 2.4) model declaration; User is whatever
    # get_user_model() resolved to above.
    FACTORY_FOR = User

    # Unique emails: '0@example.com', '1@example.com', ...
    email = factory.Sequence('{}@example.com'.format)
class MediaFileFactory(factory.DjangoModelFactory):
    # Use a real on-disk PNG so FeinCMS file handling has actual content.
    file = factory.django.FileField(
        from_path='feincms_extensions/tests/images/image.png',
    )

    @factory.post_generation
    def file_size(self, create, extracted, **kwargs):
        """Duplicate MediaFile.save to set self.file_size in build."""
        # MediaFile.save normally records the size; mirror it here so the
        # build strategy (which never calls save()) also gets file_size.
        self.file_size = self.file.size

    # Newer factory_boy declaration style (contrast with FACTORY_FOR above).
    class Meta:
        model = MediaFile
| bsd-2-clause |
KohlsTechnology/ansible | lib/ansible/module_utils/_text.py | 167 | 12489 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Toshio Kuratomi <a.badger@gmail.com>, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
.. warn:: This module_util is currently internal implementation.
We want to evaluate this code for stability and API suitability before
making backwards compatibility guarantees. The API may change between
releases. Do not use this unless you are willing to port your module code.
"""
import codecs
from ansible.module_utils.six import PY3, text_type, binary_type
# 'surrogateescape' exists on Python 3 (and on Python 2 only if a backport
# registered it before this module was imported); remember its availability
# so to_bytes/to_text can fall back to 'strict'/'replace' when it's absent.
try:
    codecs.lookup_error('surrogateescape')
    HAS_SURROGATEESCAPE = True
except LookupError:
    HAS_SURROGATEESCAPE = False


# Error strategies that are composed out of real codecs error handlers at
# call time; None selects the default composition (surrogate_then_replace).
_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_replace',
                                      'surrogate_or_strict',
                                      'surrogate_then_replace'))
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a byte string

    :arg obj: An object to make sure is a byte string.  In most cases this
        will be either a text string or a byte string.  However, with
        ``nonstring='simplerepr'``, this can be used as a traceback-free
        version of ``str(obj)``.
    :kwarg encoding: The encoding to use to transform from a text string to
        a byte string.  Defaults to using 'utf-8'.
    :kwarg errors: The error handler to use if the text string is not
        encodable using the specified encoding.  Any valid `codecs error
        handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
        may be specified.  There are three additional error strategies
        specifically aimed at helping people to port code.  The first two are:

            :surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
                handler, otherwise it will use ``strict``
            :surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
                handler, otherwise it will use ``replace``.

        Because ``surrogateescape`` was added in Python3 this usually means that
        Python3 will use ``surrogateescape`` and Python2 will use the fallback
        error handler.  Note that the code checks for ``surrogateescape`` when the
        module is imported.  If you have a backport of ``surrogateescape`` for
        Python2, be sure to register the error handler prior to importing this
        module.

        The last error handler is:

            :surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
                handler.  If encoding with ``surrogateescape`` would traceback,
                surrogates are first replaced with a replacement characters
                and then the string is encoded using ``replace`` (which replaces
                the rest of the nonencodable bytes).  If ``surrogateescape`` is
                not present it will simply use ``replace``.  (Added in Ansible 2.3)
                This strategy is designed to never traceback when it attempts
                to encode a string.

        The default until Ansible-2.2 was ``surrogate_or_replace``
        From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.

    :kwarg nonstring: The strategy to use if a nonstring is specified in
        ``obj``.  Default is 'simplerepr'.  Valid values are:

            :simplerepr: The default.  This takes the ``str`` of the object and
                then returns the bytes version of that string.
            :empty: Return an empty byte string
            :passthru: Return the object passed in
            :strict: Raise a :exc:`TypeError`

    :returns: Typically this returns a byte string.  If a nonstring object is
        passed in this may be a different type depending on the strategy
        specified by nonstring.  This will never return a text string.

    .. note:: If passed a byte string, this function does not check that the
        string is valid in the specified encoding.  If it's important that the
        byte string is in the specified encoding do::

            encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')

    .. version_changed:: 2.3

        Added the ``surrogate_then_replace`` error handler and made it the
        default error handler.
    """
    # Byte strings pass straight through, unvalidated (see docstring note).
    if isinstance(obj, binary_type):
        return obj

    # We're given a text string
    # If it has surrogates, we know because it will decode
    original_errors = errors
    # Resolve the composed strategies into a real codecs error handler,
    # preferring surrogateescape whenever the runtime registered it.
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, text_type):
        try:
            # Try this first as it's the fastest
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            if original_errors in (None, 'surrogate_then_replace'):
                # We should only reach this if encoding was non-utf8 original_errors was
                # surrogate_then_escape and errors was surrogateescape
                # Slow but works: round-trip through utf-8 to strip the
                # surrogates, then encode with 'replace' for the remainder.
                return_string = obj.encode('utf-8', 'surrogateescape')
                return_string = return_string.decode('utf-8', 'replace')
                return return_string.encode(encoding, 'replace')
            raise

    # Note: We do these last even though we have to call to_bytes again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            # str() can fail on py2 for objects whose __str__ returns
            # non-ascii text; fall back to repr(), then give up.
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return to_bytes('')
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        # python2.4 doesn't have b''
        return to_bytes('')
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)

    # Recurse so the stringified value goes through the encoding logic above.
    return to_bytes(value, encoding, errors)
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a text string

    :arg obj: object to convert.  Usually a text or byte string; with the
        default ``nonstring='simplerepr'`` any object is accepted and this
        behaves as a traceback-free version of ``str(obj)``.
    :kwarg encoding: encoding used to decode a byte string.  Defaults to
        'utf-8'.
    :kwarg errors: codecs error handler used when decoding.  Besides the
        standard handlers, the composed strategies ``surrogate_or_strict``,
        ``surrogate_or_replace`` and ``surrogate_then_replace`` are
        accepted; each resolves to ``surrogateescape`` when that handler is
        registered and otherwise falls back to ``strict`` or ``replace``.
        ``None`` (the default) behaves like ``surrogate_then_replace``.
    :kwarg nonstring: strategy for a non-string ``obj``: ``simplerepr``
        (default, text version of ``str(obj)``), ``empty`` (return ``u''``),
        ``passthru`` (return ``obj`` unchanged) or ``strict`` (raise
        :exc:`TypeError`).
    :returns: a text string, except that ``nonstring='passthru'`` hands the
        original object back.  This never returns a byte string.
    """
    # Text passes straight through.
    if isinstance(obj, text_type):
        return obj

    # Resolve the composed strategies into a real codecs error handler.
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, binary_type):
        # No surrogate_then_replace fallback is needed when decoding: every
        # byte either decodes cleanly or becomes a surrogate.
        return obj.decode(encoding, errors)

    # Non-string handling.  The cheap outcomes are dispatched first; the
    # 'simplerepr' strategy falls through to the str()/repr() logic below.
    if nonstring == 'passthru':
        return obj
    if nonstring == 'empty':
        return u''
    if nonstring == 'strict':
        raise TypeError('obj must be a string type')
    if nonstring != 'simplerepr':
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)

    try:
        value = str(obj)
    except UnicodeError:
        try:
            value = repr(obj)
        except UnicodeError:
            # Giving up
            return u''
    # Recurse so the stringified value goes through the decoding logic above.
    return to_text(value, encoding, errors)
#: :py:func:`to_native`
#: Transform a variable into the native str type for the python version
#:
#: On Python2, this is an alias for
#: :func:`~ansible.module_utils.to_bytes`. On Python3 it is an alias for
#: :func:`~ansible.module_utils.to_text`. It makes it easier to
#: transform a variable into the native str type for the python version
#: the code is running on. Use this when constructing the message to
#: send to exceptions or when dealing with an API that needs to take
#: a native string. Example::
#:
#: try:
#: 1//0
#: except ZeroDivisionError as e:
#: raise MyException('Encountered and error: %s' % to_native(e))
# str is the text type on Python 3 and the bytes type on Python 2, so the
# native-string alias picks the matching converter.
if PY3:
    to_native = to_text
else:
    to_native = to_bytes
| gpl-3.0 |
nuclear-wizard/moose | modules/porous_flow/test/tests/thm_rehbinder/thm_rehbinder.py | 12 | 6179 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def rehbinder(r):
    """Return the analytic THM solution (T, P, u_fixed, u_free) at radius r.

    Evaluates G. Rehbinder, "Analytic solutions of stationary coupled
    thermo-hydro-mechanical problems", Int J Rock Mech Min Sci & Geomech
    Abstr 32 (1995) 453-463, with the parameters used in the MOOSE
    simulation.  Rehbinder's manuscript contains a few typos - they are
    corrected here (see the "Corrected Eqn" comments; the uncorrected
    expressions previously appeared as dead assignments and were removed).

    :param r: radius (m); meaningful between r0 = 0.1 and r1 = 1.0
    :returns: tuple (temperature, porepressure, radial displacement for a
        fixed outer boundary, radial displacement for a free outer boundary)
    """
    # Material, fluid and boundary parameters matching the MOOSE input files.
    poisson = 0.2
    thermal_expansion = 1E-6
    young = 1E10
    fluid_density = 1000
    fluid_specific_heat = 1000
    permeability = 1E-12
    fluid_viscosity = 1E-3
    thermal_conductivity = 1E6
    P0 = 1E6
    T0 = 1E3
    Tref = T0
    r0 = 0.1
    r1 = 1.0

    # Nondimensional radii and the Peclet number (perturbation parameter).
    xi = r / r0
    xi1 = r1 / r0
    Peclet = fluid_density * fluid_specific_heat * thermal_expansion * Tref * young * permeability / fluid_viscosity / thermal_conductivity / (1 - poisson)
    That0 = T0 / Tref
    sigmahat0 = -P0 * (1 - poisson) / thermal_expansion / Tref / young

    # Temperature: zeroth-order solution plus first-order Peclet correction.
    Tzeroth = That0 * (1 - np.log(xi) / np.log(xi1))
    Tfirst_pr = 2 * sigmahat0 * That0 * xi * (np.log(xi) - np.log(xi1)) / np.log(xi1)**2
    # Corrected Eqn(87): the constants printed in the paper contain typos.
    Cone = 2 * That0 * sigmahat0 / np.log(xi1)
    Done = 2 * That0 * sigmahat0 * (- 1) / np.log(xi1)**2
    Tfirst_hm = Cone + Done * np.log(xi)
    Tfirst = Tfirst_pr + Tfirst_hm
    That = Tzeroth + Peclet * Tfirst
    T = Tref * That

    # Porepressure: the first-order correction vanishes identically.
    Pzeroth = -sigmahat0 * (1 - np.log(xi) / np.log(xi1))
    Pfirst = 0
    Phat = Pzeroth + Peclet * Pfirst
    P = thermal_expansion * Tref * young * Phat / (1 - poisson)

    # Zeroth-order particular displacement and its value at the outer radius.
    uzeroth_pr = (That0 - sigmahat0 * (1 - 2 * poisson) / (1 - poisson)) * (0.5 * (xi**2 - 1) - 0.25 * (1 - xi**2 + 2 * xi**2 * np.log(xi)) / np.log(xi1)) / xi
    uzeroth_pr_xi1 = (That0 - sigmahat0 * (1 - 2 * poisson) / (1 - poisson)) * (0.5 * (xi1**2 - 1) - 0.25 * (1 - xi1**2 + 2 * xi1**2 * np.log(xi1)) / np.log(xi1)) / xi1

    # fixed outer boundary
    Bzeroth = - ((1 - 2 * poisson) * sigmahat0 + uzeroth_pr_xi1 / xi1) / (1 - 2 * poisson + 1.0 / xi1)
    Azeroth = - Bzeroth / xi1**2 - uzeroth_pr_xi1 / xi1
    fixed_uzeroth_hm = Azeroth * xi + Bzeroth / xi
    fixed_uzeroth = uzeroth_pr + fixed_uzeroth_hm

    # free outer boundary
    Bzeroth = (xi1**2 * sigmahat0 - xi1 * uzeroth_pr_xi1) / (1 - xi1**2)
    Azeroth = (1 - 2 * poisson) * (Bzeroth + sigmahat0)
    free_uzeroth_hm = Azeroth * xi + Bzeroth / xi
    free_uzeroth = uzeroth_pr + free_uzeroth_hm

    # First-order particular displacement and its value at the outer radius.
    ufirst_pr = (1.0 / xi) * (0.5 * (xi**2 - 1) * (2 * Cone - Done) + 0.5 * Done * xi**2 * np.log(xi) + 2 * sigmahat0 * That0 / np.log(xi1)**2 * (xi**3 * np.log(xi) / 3 + (1 - xi**3) / 9 + 0.5 * np.log(xi1) * (1 - xi**2)))
    ufirst_pr_xi1 = (1.0 / xi1) * (0.5 * (xi1**2 - 1) * (2 * Cone - Done) + 0.5 * Done * xi1**2 * np.log(xi1) + 2 * sigmahat0 * That0 / np.log(xi1)**2 * (xi1**3 * np.log(xi1) / 3 + (1 - xi1**3) / 9 + 0.5 * np.log(xi1) * (1 - xi1**2)))

    # fixed outer boundary
    Bfirst = - ufirst_pr_xi1 / xi1 / (1 - 2 * poisson + 1.0 / xi1**2)
    Afirst = - Bfirst / xi1**2 - ufirst_pr_xi1 / xi1
    fixed_ufirst_hm = Afirst * xi + Bfirst / xi
    fixed_ufirst = ufirst_pr + fixed_ufirst_hm

    # free outer boundary
    Bfirst = xi1 * ufirst_pr_xi1 / (1 - xi1**2)
    Afirst = (1 - 2 * poisson) * Bfirst
    free_ufirst_hm = Afirst * xi + Bfirst / xi
    free_ufirst = ufirst_pr + free_ufirst_hm

    # Total displacements, dimensionalised using Corrected Eqn(16).
    fixed_uhat = fixed_uzeroth + Peclet * fixed_ufirst
    fixed_u = thermal_expansion * Tref * r0 * fixed_uhat * (1 + poisson) / (1 - poisson)
    free_uhat = free_uzeroth + Peclet * free_ufirst
    free_u = thermal_expansion * Tref * r0 * free_uhat * (1 + poisson) / (1 - poisson)

    return (T, P, fixed_u, free_u)
def moose(fn):
    """Read a MOOSE CSV output file and return (radii, values).

    The header row and the final row are skipped (matching the layout of
    the gold files); column 0 is the radius and column 4 the sampled value.
    On any read/parse failure an error is written to stderr and the script
    exits with status 1.

    :param fn: path to the CSV file
    :returns: tuple (list of radii, list of column-4 values), both floats
    """
    try:
        # 'with' guarantees the file is closed even if parsing fails.
        with open(fn) as f:
            lines = f.readlines()[1:-1]
        # The original wrapped each row in map(float, ...) and then indexed
        # it -- a TypeError on Python 3 (map objects are not subscriptable)
        # that the broad except silently converted into a read error.
        rows = [[float(v) for v in line.strip().split(",")] for line in lines]
        data = ([row[0] for row in rows], [row[4] for row in rows])
    except Exception:
        # Narrowed from a bare 'except:' so Ctrl-C still interrupts.
        sys.stderr.write("Cannot read " + fn + ", or it contains erroneous data\n")
        sys.exit(1)
    return data
# MOOSE results (gold files) for the fixed- and free-outer-boundary runs.
mooser = [0.1 * i for i in range(1, 11)]
fixedT = moose("gold/fixed_outer_T_0001.csv")
fixedP = moose("gold/fixed_outer_P_0001.csv")
fixedu = moose("gold/fixed_outer_U_0001.csv")
freeu = moose("gold/free_outer_U_0001.csv")

# Analytic solution sampled on a fine radial grid.  Materialise the
# transpose: on Python 3 zip() returns a one-shot iterator that cannot be
# indexed, so the expected[0]/expected[1]/... lookups below would raise.
rpoints = np.arange(0.1, 1.0, 0.01)
expected = list(zip(*[rehbinder(r) for r in rpoints]))

# Temperature comparison.
plt.figure()
plt.plot(rpoints, expected[0], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(fixedT[0], fixedT[1], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'upper right')
plt.xlabel("r (m)")
plt.ylabel("Temperature (K)")
plt.title("Temperature around cavity")
plt.savefig("temperature_fig.pdf")

# Porepressure comparison (converted Pa -> MPa for plotting).
plt.figure()
plt.plot(rpoints, [1E-6 * p for p in expected[1]], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(fixedP[0], [1E-6 * p for p in fixedP[1]], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'upper right')
plt.xlabel("r (m)")
plt.ylabel("Porepressure (MPa)")
plt.title("Porepressure around cavity")
plt.savefig("porepressure_fig.pdf")

# Radial displacement comparison (converted m -> mm for plotting).
plt.figure()
plt.plot(rpoints, [1000 * u for u in expected[2]], 'k-', linewidth = 3.0, label = 'expected (fixed)')
plt.plot(fixedu[0], [1000 * u for u in fixedu[1]], 'rs', markersize = 10.0, label = 'MOOSE (fixed)')
plt.plot(rpoints, [1000 * u for u in expected[3]], 'b-', linewidth = 2.0, label = 'expected (free)')
plt.plot(freeu[0], [1000 * u for u in freeu[1]], 'g*', markersize = 13.0, label = 'MOOSE (free)')
plt.legend(loc = 'center right')
plt.xlabel("r (m)")
plt.ylabel("displacement (mm)")
plt.title("Radial displacement around cavity")
plt.savefig("displacement_fig.pdf")

sys.exit(0)
| lgpl-2.1 |
ville-k/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/numpy_io_test.py | 79 | 9338 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for numpy_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import numpy_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class NumpyIoTest(test.TestCase):
  """Unit tests for numpy_io.numpy_input_fn batching/epoch behavior."""
  def testNumpyInputFn(self):
    """One epoch, even batches: checks first batch, then exhaustion."""
    a = np.arange(4) * 1.0
    b = np.arange(32, 36)
    x = {'a': a, 'b': b}
    y = np.arange(-32, -28)
    with self.test_session() as session:
      input_fn = numpy_io.numpy_input_fn(
          x, y, batch_size=2, shuffle=False, num_epochs=1)
      features, target = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [0, 1])
      self.assertAllEqual(res[0]['b'], [32, 33])
      self.assertAllEqual(res[1], [-32, -31])
      # Drain the second (final) batch; the next run must raise.
      session.run([features, target])
      with self.assertRaises(errors.OutOfRangeError):
        session.run([features, target])
      coord.request_stop()
      coord.join(threads)
  def testNumpyInputFnWithVeryLargeBatchSizeAndMultipleEpochs(self):
    """Batch larger than data*epochs: all repeated rows come in one batch."""
    a = np.arange(2) * 1.0
    b = np.arange(32, 34)
    x = {'a': a, 'b': b}
    y = np.arange(-32, -30)
    with self.test_session() as session:
      input_fn = numpy_io.numpy_input_fn(
          x, y, batch_size=128, shuffle=False, num_epochs=2)
      features, target = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [0, 1, 0, 1])
      self.assertAllEqual(res[0]['b'], [32, 33, 32, 33])
      self.assertAllEqual(res[1], [-32, -31, -32, -31])
      with self.assertRaises(errors.OutOfRangeError):
        session.run([features, target])
      coord.request_stop()
      coord.join(threads)
  def testNumpyInputFnWithZeroEpochs(self):
    """num_epochs=0 yields no data: the very first run raises."""
    a = np.arange(4) * 1.0
    b = np.arange(32, 36)
    x = {'a': a, 'b': b}
    y = np.arange(-32, -28)
    with self.test_session() as session:
      input_fn = numpy_io.numpy_input_fn(
          x, y, batch_size=2, shuffle=False, num_epochs=0)
      features, target = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      with self.assertRaises(errors.OutOfRangeError):
        session.run([features, target])
      coord.request_stop()
      coord.join(threads)
  def testNumpyInputFnWithBatchSizeNotDividedByDataSize(self):
    """Data size not a multiple of batch size: last batch is short."""
    batch_size = 2
    a = np.arange(5) * 1.0
    b = np.arange(32, 37)
    x = {'a': a, 'b': b}
    y = np.arange(-32, -27)
    with self.test_session() as session:
      input_fn = numpy_io.numpy_input_fn(
          x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
      features, target = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [0, 1])
      self.assertAllEqual(res[0]['b'], [32, 33])
      self.assertAllEqual(res[1], [-32, -31])
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [2, 3])
      self.assertAllEqual(res[0]['b'], [34, 35])
      self.assertAllEqual(res[1], [-30, -29])
      # Final partial batch contains only the leftover element.
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [4])
      self.assertAllEqual(res[0]['b'], [36])
      self.assertAllEqual(res[1], [-28])
      with self.assertRaises(errors.OutOfRangeError):
        session.run([features, target])
      coord.request_stop()
      coord.join(threads)
  def testNumpyInputFnWithBatchSizeNotDividedByDataSizeAndMultipleEpochs(self):
    """Short data over several epochs: batches straddle epoch boundaries."""
    batch_size = 2
    a = np.arange(3) * 1.0
    b = np.arange(32, 35)
    x = {'a': a, 'b': b}
    y = np.arange(-32, -29)
    with self.test_session() as session:
      input_fn = numpy_io.numpy_input_fn(
          x, y, batch_size=batch_size, shuffle=False, num_epochs=3)
      features, target = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [0, 1])
      self.assertAllEqual(res[0]['b'], [32, 33])
      self.assertAllEqual(res[1], [-32, -31])
      # Second batch spans the end of epoch 1 and the start of epoch 2.
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [2, 0])
      self.assertAllEqual(res[0]['b'], [34, 32])
      self.assertAllEqual(res[1], [-30, -32])
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [1, 2])
      self.assertAllEqual(res[0]['b'], [33, 34])
      self.assertAllEqual(res[1], [-31, -30])
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [0, 1])
      self.assertAllEqual(res[0]['b'], [32, 33])
      self.assertAllEqual(res[1], [-32, -31])
      # Final short batch: one element left over after 3 epochs of 3.
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [2])
      self.assertAllEqual(res[0]['b'], [34])
      self.assertAllEqual(res[1], [-30])
      with self.assertRaises(errors.OutOfRangeError):
        session.run([features, target])
      coord.request_stop()
      coord.join(threads)
  def testNumpyInputFnWithBatchSizeLargerThanDataSize(self):
    """Batch larger than a single epoch: everything arrives at once."""
    batch_size = 10
    a = np.arange(4) * 1.0
    b = np.arange(32, 36)
    x = {'a': a, 'b': b}
    y = np.arange(-32, -28)
    with self.test_session() as session:
      input_fn = numpy_io.numpy_input_fn(
          x, y, batch_size=batch_size, shuffle=False, num_epochs=1)
      features, target = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [0, 1, 2, 3])
      self.assertAllEqual(res[0]['b'], [32, 33, 34, 35])
      self.assertAllEqual(res[1], [-32, -31, -30, -29])
      with self.assertRaises(errors.OutOfRangeError):
        session.run([features, target])
      coord.request_stop()
      coord.join(threads)
  def testNumpyInputFnWithDifferentDimensionsOfFeatures(self):
    """Features of different ranks (2-D and 1-D) batch together."""
    a = np.array([[1, 2], [3, 4]])
    b = np.array([5, 6])
    x = {'a': a, 'b': b}
    y = np.arange(-32, -30)
    with self.test_session() as session:
      input_fn = numpy_io.numpy_input_fn(
          x, y, batch_size=2, shuffle=False, num_epochs=1)
      features, target = input_fn()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(session, coord=coord)
      res = session.run([features, target])
      self.assertAllEqual(res[0]['a'], [[1, 2], [3, 4]])
      self.assertAllEqual(res[0]['b'], [5, 6])
      self.assertAllEqual(res[1], [-32, -31])
      coord.request_stop()
      coord.join(threads)
  def testNumpyInputFnWithXAsNonDict(self):
    """A bare ndarray for x (not a dict) must raise TypeError."""
    x = np.arange(32, 36)
    y = np.arange(4)
    with self.test_session():
      with self.assertRaisesRegexp(TypeError, 'x must be dict'):
        failing_input_fn = numpy_io.numpy_input_fn(
            x, y, batch_size=2, shuffle=False, num_epochs=1)
        failing_input_fn()
  def testNumpyInputFnWithTargetKeyAlreadyInX(self):
    """The caller's x dict must not be mutated by the target key."""
    array = np.arange(32, 36)
    x = {'__target_key__': array}
    y = np.arange(4)
    with self.test_session():
      input_fn = numpy_io.numpy_input_fn(
          x, y, batch_size=2, shuffle=False, num_epochs=1)
      input_fn()
      self.assertAllEqual(x['__target_key__'], array)
      self.assertItemsEqual(x.keys(), ['__target_key__'])
  def testNumpyInputFnWithMismatchLengthOfInputs(self):
    """Mismatched lengths (x vs y, or within x) must raise ValueError."""
    a = np.arange(4) * 1.0
    b = np.arange(32, 36)
    x = {'a': a, 'b': b}
    x_mismatch_length = {'a': np.arange(1), 'b': b}
    y_longer_length = np.arange(10)
    with self.test_session():
      with self.assertRaisesRegexp(
          ValueError, 'Length of tensors in x and y is mismatched.'):
        failing_input_fn = numpy_io.numpy_input_fn(
            x, y_longer_length, batch_size=2, shuffle=False, num_epochs=1)
        failing_input_fn()
      with self.assertRaisesRegexp(
          ValueError, 'Length of tensors in x and y is mismatched.'):
        failing_input_fn = numpy_io.numpy_input_fn(
            x=x_mismatch_length,
            y=None,
            batch_size=2,
            shuffle=False,
            num_epochs=1)
        failing_input_fn()
# Run the TensorFlow test runner when this module is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
hayderimran7/tempest | tempest/api/orchestration/stacks/test_soft_conf.py | 17 | 8007 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
# Module-level logger and handle to the global tempest configuration.
LOG = logging.getLogger(__name__)
CONF = config.CONF
class TestSoftwareConfig(base.BaseOrchestrationTest):
    """Heat software-config and software-deployment API tests."""
    def setUp(self):
        """Create two software configs and one deployment using the first."""
        super(TestSoftwareConfig, self).setUp()
        self.configs = []
        # Add 2 sets of software configuration
        self.configs.append(self._config_create('a'))
        self.configs.append(self._config_create('b'))
        # Create a deployment using config a's id
        self._deployment_create(self.configs[0]['id'])
    def _config_create(self, suffix):
        """Create one software config (cleaned up automatically) and
        return its dict, including the server-assigned 'id'."""
        configuration = {'group': 'script',
                         'inputs': [],
                         'outputs': [],
                         'options': {}}
        configuration['name'] = 'heat_soft_config_%s' % suffix
        configuration['config'] = '#!/bin/bash echo init-%s' % suffix
        api_config = self.client.create_software_config(**configuration)
        configuration['id'] = api_config['software_config']['id']
        self.addCleanup(self._config_delete, configuration['id'])
        self._validate_config(configuration, api_config)
        return configuration
    def _validate_config(self, configuration, api_config):
        """Assert the API response echoes every requested config field."""
        # Assert all expected keys are present with matching data
        for k in configuration.keys():
            self.assertEqual(configuration[k],
                             api_config['software_config'][k])
    def _deployment_create(self, config_id):
        """Create a software deployment for config_id; stores the request
        parameters on self for later comparison and registers cleanup."""
        self.server_id = data_utils.rand_name('dummy-server')
        self.action = 'ACTION_0'
        self.status = 'STATUS_0'
        self.input_values = {}
        self.output_values = []
        self.status_reason = 'REASON_0'
        self.signal_transport = 'NO_SIGNAL'
        self.deployment = self.client.create_software_deploy(
            self.server_id, config_id, self.action, self.status,
            self.input_values, self.output_values, self.status_reason,
            self.signal_transport)
        self.deployment_id = self.deployment['software_deployment']['id']
        self.addCleanup(self._deployment_delete, self.deployment_id)
    def _deployment_delete(self, deploy_id):
        """Delete a deployment and verify it can no longer be fetched."""
        self.client.delete_software_deploy(deploy_id)
        # Testing that it is really gone
        self.assertRaises(
            lib_exc.NotFound, self.client.show_software_deployment,
            self.deployment_id)
    def _config_delete(self, config_id):
        """Delete a software config and verify it can no longer be fetched."""
        self.client.delete_software_config(config_id)
        # Testing that it is really gone
        self.assertRaises(
            lib_exc.NotFound, self.client.show_software_config, config_id)
    @test.attr(type='smoke')
    @test.idempotent_id('136162ed-9445-4b9c-b7fc-306af8b5da99')
    def test_get_software_config(self):
        """Testing software config get."""
        for conf in self.configs:
            api_config = self.client.show_software_config(conf['id'])
            self._validate_config(conf, api_config)
    @test.attr(type='smoke')
    @test.idempotent_id('1275c835-c967-4a2c-8d5d-ad533447ed91')
    def test_get_deployment_list(self):
        """Getting a list of all deployments"""
        deploy_list = self.client.list_software_deployments()
        deploy_ids = [deploy['id'] for deploy in
                      deploy_list['software_deployments']]
        self.assertIn(self.deployment_id, deploy_ids)
    @test.attr(type='smoke')
    @test.idempotent_id('fe7cd9f9-54b1-429c-a3b7-7df8451db913')
    def test_get_deployment_metadata(self):
        """Testing deployment metadata get"""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        conf_ids = [conf['id'] for conf in metadata['metadata']]
        self.assertIn(self.configs[0]['id'], conf_ids)
    def _validate_deployment(self, action, status, reason, config_id):
        """Fetch the deployment and assert its mutable fields match."""
        deployment = self.client.show_software_deployment(self.deployment_id)
        self.assertEqual(action, deployment['software_deployment']['action'])
        self.assertEqual(status, deployment['software_deployment']['status'])
        self.assertEqual(reason,
                         deployment['software_deployment']['status_reason'])
        self.assertEqual(config_id,
                         deployment['software_deployment']['config_id'])
    @test.attr(type='smoke')
    @test.idempotent_id('f29d21f3-ed75-47cf-8cdc-ef1bdeb4c674')
    def test_software_deployment_create_validate(self):
        """Testing software deployment was created as expected."""
        # Asserting that all fields were created
        self.assert_fields_in_dict(
            self.deployment['software_deployment'], 'action', 'config_id',
            'id', 'input_values', 'output_values', 'server_id', 'status',
            'status_reason')
        # Testing get for this deployment and verifying parameters
        self._validate_deployment(self.action, self.status,
                                  self.status_reason, self.configs[0]['id'])
    @test.attr(type='smoke')
    @test.idempotent_id('2ac43ab3-34f2-415d-be2e-eabb4d14ee32')
    def test_software_deployment_update_no_metadata_change(self):
        """Testing software deployment update without metadata change."""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        # Updating values without changing the configuration ID
        new_action = 'ACTION_1'
        new_status = 'STATUS_1'
        new_reason = 'REASON_1'
        self.client.update_software_deploy(
            self.deployment_id, self.server_id, self.configs[0]['id'],
            new_action, new_status, self.input_values, self.output_values,
            new_reason, self.signal_transport)
        # Verifying get and that the deployment was updated as expected
        self._validate_deployment(new_action, new_status,
                                  new_reason, self.configs[0]['id'])
        # Metadata should not be changed at this point
        test_metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        for key in metadata['metadata'][0]:
            self.assertEqual(
                metadata['metadata'][0][key],
                test_metadata['metadata'][0][key])
    @test.attr(type='smoke')
    @test.idempotent_id('92c48944-d79d-4595-a840-8e1a581c1a72')
    def test_software_deployment_update_with_metadata_change(self):
        """Testing software deployment update with metadata change."""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        self.client.update_software_deploy(
            self.deployment_id, self.server_id, self.configs[1]['id'],
            self.action, self.status, self.input_values,
            self.output_values, self.status_reason, self.signal_transport)
        self._validate_deployment(self.action, self.status,
                                  self.status_reason, self.configs[1]['id'])
        # Metadata should now be changed
        new_metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        # Its enough to test the ID in this case
        meta_id = metadata['metadata'][0]['id']
        test_id = new_metadata['metadata'][0]['id']
        self.assertNotEqual(meta_id, test_id)
| apache-2.0 |
andrejb/cloudant_bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Platform/irix.py | 61 | 1658 | """SCons.Platform.irix
Platform-specific initialization for SGI IRIX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/irix.py 5134 2010/08/16 23:02:40 bdeegan"
import posix
def generate(env):
    """Add IRIX platform settings to the construction environment.

    IRIX is POSIX-like, so all defaults come from the generic POSIX setup.
    """
    posix.generate(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
zploskey/servo | tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/handshake/__init__.py | 658 | 4406 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket opening handshake processor. This class try to apply available
opening handshake processors for each protocol version until a connection is
successfully established.
"""
import logging
from mod_pywebsocket import common
from mod_pywebsocket.handshake import hybi00
from mod_pywebsocket.handshake import hybi
# Export AbortedByUserException, HandshakeException, and VersionException
# symbol from this module.
from mod_pywebsocket.handshake._base import AbortedByUserException
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import VersionException
_LOGGER = logging.getLogger(__name__)
def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
    """Performs WebSocket handshake.

    Tries each available opening-handshake processor (newest protocol
    first) until one succeeds, and raises if none can handle the request.

    Args:
        request: mod_python request.
        dispatcher: Dispatcher (dispatch.Dispatcher).
        allowDraft75: obsolete argument. ignored.
        strict: obsolete argument. ignored.

    Handshaker will add attributes such as ws_resource in performing
    handshake.

    Raises:
        HandshakeException: no processor could establish a connection, or a
            processor failed with an HTTP status that must be reported.
        AbortedByUserException: the user handler aborted the handshake.
        VersionException: version negotiation with the client failed.
    """

    _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
    # To print mimetools.Message as escaped one-line string, we converts
    # headers_in to dict object. Without conversion, if we use %r, it just
    # prints the type and address, and if we use %s, it prints the original
    # header string as multiple lines.
    #
    # Both mimetools.Message and MpTable_Type of mod_python can be
    # converted to dict.
    #
    # mimetools.Message.__str__ returns the original header string.
    # dict(mimetools.Message object) returns the map from header names to
    # header values. While MpTable_Type doesn't have such __str__ but just
    # __repr__ which formats itself as well as dictionary object.
    _LOGGER.debug(
        'Client\'s opening handshake headers: %r', dict(request.headers_in))

    # Newest protocol first; fall back to older drafts on failure.
    handshakers = []
    handshakers.append(
        ('RFC 6455', hybi.Handshaker(request, dispatcher)))
    handshakers.append(
        ('HyBi 00', hybi00.Handshaker(request, dispatcher)))

    for name, handshaker in handshakers:
        _LOGGER.debug('Trying protocol version %s', name)
        try:
            handshaker.do_handshake()
            _LOGGER.info('Established (%s protocol)', name)
            return
        except HandshakeException as e:
            # This processor could not handle the request. Fall through to
            # the next protocol version unless the error carries an HTTP
            # status that must be reported to the client immediately.
            _LOGGER.debug(
                'Failed to complete opening handshake as %s protocol: %r',
                name, e)
            if e.status:
                raise e
        except AbortedByUserException:
            # The user's handler explicitly aborted; never retry with
            # another protocol version.
            raise
        except VersionException:
            # Version negotiation failed; the client must retry with a
            # supported version, so trying older protocols is pointless.
            raise

    # TODO(toyoshim): Add a test to cover the case all handshakers fail.
    raise HandshakeException(
        'Failed to complete opening handshake for all available protocols',
        status=common.HTTP_STATUS_BAD_REQUEST)
# vi:sts=4 sw=4 et
| mpl-2.0 |
fbradyirl/home-assistant | homeassistant/components/nfandroidtv/notify.py | 1 | 9752 | """Notifications for Android TV notification service."""
import base64
import io
import logging
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.const import CONF_TIMEOUT, CONF_HOST
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
_LOGGER = logging.getLogger(__name__)
# Platform configuration option keys.
CONF_DURATION = "duration"
CONF_FONTSIZE = "fontsize"
CONF_POSITION = "position"
CONF_TRANSPARENCY = "transparency"
CONF_COLOR = "color"
CONF_INTERRUPT = "interrupt"
# Defaults applied by the config schema when an option is omitted.
DEFAULT_DURATION = 5
DEFAULT_FONTSIZE = "medium"
DEFAULT_POSITION = "bottom-right"
DEFAULT_TRANSPARENCY = "default"
DEFAULT_COLOR = "grey"
DEFAULT_INTERRUPT = False
DEFAULT_TIMEOUT = 5
# Base64-encoded 1x1 transparent PNG used as the fallback notification icon.
DEFAULT_ICON = (
    "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGP6zwAAAgcBApo"
    "cMXEAAAAASUVORK5CYII="
)
# Keys accepted in the per-message `data` payload.
ATTR_DURATION = "duration"
ATTR_FONTSIZE = "fontsize"
ATTR_POSITION = "position"
ATTR_TRANSPARENCY = "transparency"
ATTR_COLOR = "color"
ATTR_BKGCOLOR = "bkgcolor"
ATTR_INTERRUPT = "interrupt"
ATTR_IMAGE = "filename2"
ATTR_FILE = "file"
# Attributes contained in file
ATTR_FILE_URL = "url"
ATTR_FILE_PATH = "path"
ATTR_FILE_USERNAME = "username"
ATTR_FILE_PASSWORD = "password"
ATTR_FILE_AUTH = "auth"
# Any other value or absence of 'auth' lead to basic authentication being used
ATTR_FILE_AUTH_DIGEST = "digest"
# Symbolic option names mapped to the numeric codes the TV app expects.
FONTSIZES = {"small": 1, "medium": 0, "large": 2, "max": 3}
POSITIONS = {
    "bottom-right": 0,
    "bottom-left": 1,
    "top-right": 2,
    "top-left": 3,
    "center": 4,
}
TRANSPARENCIES = {"default": 0, "0%": 1, "25%": 2, "50%": 3, "75%": 4, "100%": 5}
# Color names mapped to the hex background colors sent to the TV app.
COLORS = {
    "grey": "#607d8b",
    "black": "#000000",
    "indigo": "#303F9F",
    "green": "#4CAF50",
    "red": "#F44336",
    "cyan": "#00BCD4",
    "teal": "#009688",
    "amber": "#FFC107",
    "pink": "#E91E63",
}
# Extend the shared notify schema with this platform's options; values are
# validated against the lookup tables above.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): vol.Coerce(int),
        vol.Optional(CONF_FONTSIZE, default=DEFAULT_FONTSIZE): vol.In(FONTSIZES.keys()),
        vol.Optional(CONF_POSITION, default=DEFAULT_POSITION): vol.In(POSITIONS.keys()),
        vol.Optional(CONF_TRANSPARENCY, default=DEFAULT_TRANSPARENCY): vol.In(
            TRANSPARENCIES.keys()
        ),
        vol.Optional(CONF_COLOR, default=DEFAULT_COLOR): vol.In(COLORS.keys()),
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
        vol.Optional(CONF_INTERRUPT, default=DEFAULT_INTERRUPT): cv.boolean,
    }
)
def get_service(hass, config, discovery_info=None):
    """Get the Notifications for Android TV notification service."""
    # The platform schema has already validated the config and filled in
    # defaults, so every option is guaranteed to be present here.
    return NFAndroidTVNotificationService(
        config.get(CONF_HOST),
        config.get(CONF_DURATION),
        config.get(CONF_FONTSIZE),
        config.get(CONF_POSITION),
        config.get(CONF_TRANSPARENCY),
        config.get(CONF_COLOR),
        config.get(CONF_INTERRUPT),
        config.get(CONF_TIMEOUT),
        hass.config.is_allowed_path,
    )
class NFAndroidTVNotificationService(BaseNotificationService):
    """Notification service for Notifications for Android TV."""
    def __init__(
        self,
        remoteip,
        duration,
        fontsize,
        position,
        transparency,
        color,
        interrupt,
        timeout,
        is_allowed_path,
    ):
        """Initialize the service."""
        # The "Notifications for Android TV" app listens on fixed port 7676.
        self._target = "http://{}:7676".format(remoteip)
        self._default_duration = duration
        self._default_fontsize = fontsize
        self._default_position = position
        self._default_transparency = transparency
        self._default_color = color
        self._default_interrupt = interrupt
        self._timeout = timeout
        # Decoded default icon (see DEFAULT_ICON), sent with every message.
        self._icon_file = io.BytesIO(base64.b64decode(DEFAULT_ICON))
        self.is_allowed_path = is_allowed_path
    def send_message(self, message="", **kwargs):
        """Send a message to a Android TV device."""
        _LOGGER.debug("Sending notification to: %s", self._target)
        # Multipart form payload; numeric options are sent as strings.
        payload = dict(
            filename=(
                "icon.png",
                self._icon_file,
                "application/octet-stream",
                {"Expires": "0"},
            ),
            type="0",
            title=kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
            msg=message,
            duration="%i" % self._default_duration,
            fontsize="%i" % FONTSIZES.get(self._default_fontsize),
            position="%i" % POSITIONS.get(self._default_position),
            bkgcolor="%s" % COLORS.get(self._default_color),
            transparency="%i" % TRANSPARENCIES.get(self._default_transparency),
            offset="0",
            app=ATTR_TITLE_DEFAULT,
            force="true",
            interrupt="%i" % self._default_interrupt,
        )
        data = kwargs.get(ATTR_DATA)
        if data:
            # Per-message overrides; invalid values are logged and the
            # configured default is kept.
            if ATTR_DURATION in data:
                duration = data.get(ATTR_DURATION)
                try:
                    payload[ATTR_DURATION] = "%i" % int(duration)
                except ValueError:
                    _LOGGER.warning("Invalid duration-value: %s", str(duration))
            if ATTR_FONTSIZE in data:
                fontsize = data.get(ATTR_FONTSIZE)
                if fontsize in FONTSIZES:
                    payload[ATTR_FONTSIZE] = "%i" % FONTSIZES.get(fontsize)
                else:
                    _LOGGER.warning("Invalid fontsize-value: %s", str(fontsize))
            if ATTR_POSITION in data:
                position = data.get(ATTR_POSITION)
                if position in POSITIONS:
                    payload[ATTR_POSITION] = "%i" % POSITIONS.get(position)
                else:
                    _LOGGER.warning("Invalid position-value: %s", str(position))
            if ATTR_TRANSPARENCY in data:
                transparency = data.get(ATTR_TRANSPARENCY)
                if transparency in TRANSPARENCIES:
                    payload[ATTR_TRANSPARENCY] = "%i" % TRANSPARENCIES.get(transparency)
                else:
                    _LOGGER.warning("Invalid transparency-value: %s", str(transparency))
            if ATTR_COLOR in data:
                color = data.get(ATTR_COLOR)
                if color in COLORS:
                    payload[ATTR_BKGCOLOR] = "%s" % COLORS.get(color)
                else:
                    _LOGGER.warning("Invalid color-value: %s", str(color))
            if ATTR_INTERRUPT in data:
                interrupt = data.get(ATTR_INTERRUPT)
                try:
                    payload[ATTR_INTERRUPT] = "%i" % cv.boolean(interrupt)
                except vol.Invalid:
                    _LOGGER.warning("Invalid interrupt-value: %s", str(interrupt))
        filedata = data.get(ATTR_FILE) if data else None
        if filedata is not None:
            # Load from file or URL
            file_as_bytes = self.load_file(
                url=filedata.get(ATTR_FILE_URL),
                local_path=filedata.get(ATTR_FILE_PATH),
                username=filedata.get(ATTR_FILE_USERNAME),
                password=filedata.get(ATTR_FILE_PASSWORD),
                auth=filedata.get(ATTR_FILE_AUTH),
            )
            if file_as_bytes:
                payload[ATTR_IMAGE] = (
                    "image",
                    file_as_bytes,
                    "application/octet-stream",
                    {"Expires": "0"},
                )
        try:
            _LOGGER.debug("Payload: %s", str(payload))
            response = requests.post(self._target, files=payload, timeout=self._timeout)
            if response.status_code != 200:
                _LOGGER.error("Error sending message: %s", str(response))
        except requests.exceptions.ConnectionError as err:
            _LOGGER.error("Error communicating with %s: %s", self._target, str(err))
    def load_file(
        self, url=None, local_path=None, username=None, password=None, auth=None
    ):
        """Load image/document/etc from a local path or URL.

        Returns raw bytes for URLs, an open binary file object for local
        paths, or None on failure.
        """
        try:
            if url is not None:
                # Check whether authentication parameters are provided
                if username is not None and password is not None:
                    # Use digest or basic authentication
                    if ATTR_FILE_AUTH_DIGEST == auth:
                        auth_ = HTTPDigestAuth(username, password)
                    else:
                        auth_ = HTTPBasicAuth(username, password)
                    # Load file from URL with authentication
                    req = requests.get(url, auth=auth_, timeout=DEFAULT_TIMEOUT)
                else:
                    # Load file from URL without authentication
                    req = requests.get(url, timeout=DEFAULT_TIMEOUT)
                return req.content
            if local_path is not None:
                # Check whether path is whitelisted in configuration.yaml
                if self.is_allowed_path(local_path):
                    # NOTE(review): returns an open file handle that the
                    # caller never closes explicitly — verify requests
                    # releases it after the upload.
                    return open(local_path, "rb")
                _LOGGER.warning("'%s' is not secure to load data from!", local_path)
            else:
                _LOGGER.warning("Neither URL nor local path found in params!")
        except OSError as error:
            _LOGGER.error("Can't load from url or local path: %s", error)
        return None
| apache-2.0 |
toenuff/treadmill | lib/python/treadmill/api/allocation.py | 1 | 8317 | """Implementation of allocation API."""
from __future__ import absolute_import
from .. import admin
from .. import authz
from .. import context
from .. import schema
def _set_auth_resource(cls, resource):
"""Set auth resource name for CRUD methods of the class."""
for method_name in ['get', 'create', 'update', 'delete']:
method = getattr(cls, method_name, None)
if method:
method.auth_resource = resource
def _reservation_list(allocs, cell_allocs):
"""Combine allocations and reservations into single list."""
alloc2env = {alloc['_id']: alloc['environment'] for alloc in allocs}
name2alloc = dict()
for alloc in cell_allocs:
name = '/'.join(alloc['_id'].split('/')[:2])
if name not in name2alloc:
name2alloc[name] = {'_id': name,
'environment': alloc2env[name],
'reservations': []}
name2alloc[name]['reservations'].append(alloc)
return name2alloc.values()
class API(object):
    """Treadmill Allocation REST api."""
    def __init__(self):
        # Admin objects are created lazily so that the LDAP connection is
        # only opened when a request actually needs it.
        def _admin_alloc():
            """Lazily return admin allocation object."""
            return admin.Allocation(context.GLOBAL.ldap.conn)
        def _admin_cell_alloc():
            """Lazily return admin cell allocation object."""
            return admin.CellAllocation(context.GLOBAL.ldap.conn)
        def _admin_tnt():
            """Lazily return admin tenant object."""
            return admin.Tenant(context.GLOBAL.ldap.conn)
        def _list(tenant_id=None):
            """List allocations, optionally scoped to one tenant."""
            if tenant_id is None:
                admin_alloc = _admin_alloc()
                admin_cell_alloc = _admin_cell_alloc()
                return _reservation_list(admin_alloc.list({}),
                                         admin_cell_alloc.list({}))
            else:
                admin_tnt = _admin_tnt()
                return _reservation_list(admin_tnt.allocations(tenant_id),
                                         admin_tnt.reservations(tenant_id))
        @schema.schema({'$ref': 'allocation.json#/resource_id'})
        def get(rsrc_id):
            """Get allocation configuration."""
            return _admin_alloc().get(rsrc_id)
        @schema.schema({'$ref': 'allocation.json#/resource_id'},
                       {'allOf': [{'$ref': 'allocation.json#/resource'},
                                  {'$ref': 'allocation.json#/verbs/create'}]})
        def create(rsrc_id, rsrc):
            """Create allocation."""
            _admin_alloc().create(rsrc_id, rsrc)
            return _admin_alloc().get(rsrc_id)
        @schema.schema({'$ref': 'allocation.json#/resource_id'},
                       {'allOf': [{'$ref': 'allocation.json#/resource'},
                                  {'$ref': 'allocation.json#/verbs/update'}]})
        def update(rsrc_id, rsrc):
            """Update allocation."""
            _admin_alloc().update(rsrc_id, rsrc)
            return _admin_alloc().get(rsrc_id)
        @schema.schema({'$ref': 'allocation.json#/resource_id'})
        def delete(rsrc_id):
            """Delete allocation."""
            _admin_alloc().delete(rsrc_id)
            return None
        class _ReservationAPI(object):
            """Reservation API."""
            def __init__(self):
                # Reservation ids have the form '<tenant>/<alloc>/<cell>';
                # the admin layer addresses them as [cell, allocation].
                @schema.schema({'$ref': 'reservation.json#/resource_id'})
                def get(rsrc_id):
                    """Get reservation configuration."""
                    allocation, cell = rsrc_id.rsplit('/', 1)
                    return _admin_cell_alloc().get([cell, allocation])
                @schema.schema(
                    {'$ref': 'reservation.json#/resource_id'},
                    {'allOf': [{'$ref': 'reservation.json#/resource'},
                               {'$ref': 'reservation.json#/verbs/create'}]}
                )
                def create(rsrc_id, rsrc):
                    """Create reservation."""
                    allocation, cell = rsrc_id.rsplit('/', 1)
                    _admin_cell_alloc().create([cell, allocation], rsrc)
                    return _admin_cell_alloc().get([cell, allocation])
                @schema.schema(
                    {'$ref': 'reservation.json#/resource_id'},
                    {'allOf': [{'$ref': 'reservation.json#/resource'},
                               {'$ref': 'reservation.json#/verbs/create'}]}
                )
                def update(rsrc_id, rsrc):
                    """Create reservation."""
                    allocation, cell = rsrc_id.rsplit('/', 1)
                    _admin_cell_alloc().update([cell, allocation], rsrc)
                    return _admin_cell_alloc().get([cell, allocation])
                self.get = get
                self.create = create
                self.update = update
                # Must be called last when all methods are set.
                _set_auth_resource(self, 'reservation')
        class _AssignmentAPI(object):
            """Assignment API."""
            def __init__(self):
                # Assignment ids have the form
                # '<tenant>/<alloc>/<cell>/<pattern>'.
                @schema.schema({'$ref': 'assignment.json#/resource_id'})
                def get(rsrc_id):
                    """Get assignment configuration."""
                    allocation, cell, _pattern = rsrc_id.rsplit('/', 2)
                    return _admin_cell_alloc().get(
                        [cell, allocation]).get('assignments', [])
                @schema.schema(
                    {'$ref': 'assignment.json#/resource_id'},
                    {'allOf': [{'$ref': 'assignment.json#/resource'},
                               {'$ref': 'assignment.json#/verbs/create'}]}
                )
                def create(rsrc_id, rsrc):
                    """Create assignment."""
                    allocation, cell, pattern = rsrc_id.rsplit('/', 2)
                    priority = rsrc.get('priority', 0)
                    _admin_cell_alloc().create(
                        [cell, allocation],
                        {'assignments': [{'pattern': pattern,
                                          'priority': priority}]}
                    )
                    return _admin_cell_alloc().get(
                        [cell, allocation]).get('assignments', [])
                @schema.schema(
                    {'$ref': 'assignment.json#/resource_id'},
                    {'allOf': [{'$ref': 'assignment.json#/resource'},
                               {'$ref': 'assignment.json#/verbs/create'}]}
                )
                def update(rsrc_id, rsrc):
                    """Update assignment."""
                    allocation, cell, pattern = rsrc_id.rsplit('/', 2)
                    priority = rsrc.get('priority', 0)
                    _admin_cell_alloc().update(
                        [cell, allocation],
                        {'assignments': [{'pattern': pattern,
                                          'priority': priority}]}
                    )
                    return _admin_cell_alloc().get(
                        [cell, allocation]).get('assignments', [])
                @schema.schema({'$ref': 'assignment.json#/resource_id'})
                def delete(rsrc_id):
                    """Delete assignment."""
                    # Deletion is expressed as an update carrying the
                    # '_delete' marker for the given pattern.
                    allocation, cell, pattern = rsrc_id.rsplit('/', 2)
                    _admin_cell_alloc().update(
                        [cell, allocation],
                        {'assignments': [{'pattern': pattern,
                                          'priority': 0,
                                          '_delete': True}]}
                    )
                    return None
                self.get = get
                self.create = create
                self.update = update
                self.delete = delete
                # Must be called last when all methods are set.
                _set_auth_resource(self, 'assignment')
        self.list = _list
        self.get = get
        self.create = create
        self.update = update
        self.delete = delete
        self.reservation = _ReservationAPI()
        self.assignment = _AssignmentAPI()
def init(authorizer):
    """Returns module API wrapped with authorizer function."""
    return authz.wrap(API(), authorizer)
| apache-2.0 |
MartinThoma/algorithms | perceptron/perceptron.py | 1 | 4057 | #!/usr/bin/env python
"""Example implementation for a perceptron."""
import logging
import math
import sys
import numpy as np
from sklearn.metrics import accuracy_score
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
class Activation:
    """Containing various activation functions.

    All functions are static; they accept either scalars or numpy arrays
    (element-wise for the numpy-based ones).
    """

    @staticmethod
    def sign(netOutput, threshold=0):
        """Binary threshold activation.

        Note: returns True when ``netOutput < threshold`` -- the inverse of
        the conventional sign function. ``Perceptron`` compensates by
        negating its weight-update direction, so the two must stay in sync.
        """
        return netOutput < threshold

    @staticmethod
    def sigmoid(netOutput):
        """Logistic sigmoid: 1 / (1 + e^-x)."""
        return 1 / (1 + math.e**(-1.0 * netOutput))

    @staticmethod
    def tanh(netOutput):
        """Hyperbolic tangent, mapping input to (-1, 1)."""
        return np.tanh(netOutput)

    @staticmethod
    def rectified(netOutput):
        """Rectified linear unit: max(0, x) element-wise."""
        return np.maximum(0, netOutput)

    @staticmethod
    def softmax(netOutput):
        """Softmax over a 1-D input; the result sums to 1."""
        z = np.asarray(netOutput, dtype=np.float64)
        # Subtract the maximum for numerical stability; softmax is
        # invariant under a constant shift of its input.
        e = np.exp(z - np.max(z))
        return e / e.sum()
class Perceptron:
    """
    A perceptron classifier.

    Parameters
    ----------
    train : dict
        Training set: {'x': 2-D array of samples, 'y': labels}.
    valid : dict
        Validation set, same structure as ``train``.
    test : dict
        Test set, same structure as ``train``.
    learningRate : float
    epochs : positive int

    Attributes
    ----------
    learningRate : float
    epochs : int
    trainingSet : dict
    validationSet : dict
    testSet : dict
    weight : np.ndarray of shape (n_features, 1)
    """

    def __init__(self, train, valid, test, learningRate=0.01, epochs=10):
        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test

        # Initialize the weight vector with small random values
        # in [0, 0.001); one weight per input feature.
        self.weight = np.random.rand(self.trainingSet['x'].shape[1], 1) / 1000
        self.weight = self.weight.astype(np.float32)

    def train(self, verbose=True):
        """
        Train the perceptron with the perceptron learning algorithm.

        Parameters
        ----------
        verbose : bool
            Print logging messages with validation accuracy if verbose is True.
        """
        for i in range(1, self.epochs + 1):
            pred = self.evaluate(self.validationSet['x'])
            if verbose:
                val_acc = accuracy_score(self.validationSet['y'], pred) * 100
                logging.info("Epoch: %i (Validation acc: %0.4f%%)", i, val_acc)
            for X, y in zip(self.trainingSet['x'], self.trainingSet['y']):
                pred = self.classify(X)
                # Reshape the sample into a column vector. Using -1 instead
                # of the previously hard-coded MNIST size (784) makes the
                # perceptron work with any input dimensionality.
                X = np.array(X).reshape(-1, 1)
                # Activation.sign fires for netOutput < threshold (inverted
                # convention), hence the negated update direction.
                self.weight += self.learningRate * (y - pred) * X * (-1)

    def classify(self, testInstance):
        """
        Classify a single instance.

        Parameters
        ----------
        testInstance : list of floats

        Returns
        -------
        bool :
            True if the testInstance is recognized as a 7, False otherwise.
        """
        return self.fire(testInstance)

    def evaluate(self, data=None):
        """Classify every instance in ``data`` (defaults to the test set)."""
        if data is None:
            data = self.testSet['x']
        return list(map(self.classify, data))

    def fire(self, input_):
        """Compute the activation for one input vector."""
        return Activation.sign(np.dot(np.array(input_, dtype=np.float32),
                                      self.weight))
def main():
    """Run an example: train the perceptron to recognize MNIST digit 3."""
    # Get data. fetch_mldata and the 'MNIST original' dataset were removed
    # from scikit-learn; fetch_openml is the maintained replacement.
    from sklearn.datasets import fetch_openml
    mnist = fetch_openml('mnist_784', version=1, data_home='.',
                         as_frame=False)
    x = mnist.data
    # OpenML returns string labels ('0'..'9'); convert before comparing.
    y = mnist.target.astype(np.float64)
    y = np.array([3 == el for el in y], dtype=np.float32)
    x = x / 255.0 * 2 - 1  # Scale data to [-1, 1]
    x = x.astype(np.float32)
    # sklearn.cross_validation was removed in scikit-learn 0.20;
    # train_test_split now lives in sklearn.model_selection.
    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(x, y,
                                                        test_size=0.10,
                                                        random_state=42)
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train,
                                                          test_size=0.10,
                                                          random_state=1337)
    p = Perceptron({'x': x_train, 'y': y_train},
                   {'x': x_valid, 'y': y_valid},
                   {'x': x_test, 'y': y_test})
    p.train(verbose=True)
p.train(verbose=True)
if __name__ == '__main__':
main()
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/__init__.py | 2 | 2470 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .application_gateways_operations import ApplicationGatewaysOperations
from .express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from .express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from .express_route_circuits_operations import ExpressRouteCircuitsOperations
from .express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from .load_balancers_operations import LoadBalancersOperations
from .network_interfaces_operations import NetworkInterfacesOperations
from .network_security_groups_operations import NetworkSecurityGroupsOperations
from .security_rules_operations import SecurityRulesOperations
from .public_ip_addresses_operations import PublicIPAddressesOperations
from .route_tables_operations import RouteTablesOperations
from .routes_operations import RoutesOperations
from .usages_operations import UsagesOperations
from .virtual_networks_operations import VirtualNetworksOperations
from .subnets_operations import SubnetsOperations
from .virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from .virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from .local_network_gateways_operations import LocalNetworkGatewaysOperations
# Names exported for ``from ...operations import *``; kept in sync with the
# generated operation-class imports above (2015-06-15 network API version).
__all__ = [
    'ApplicationGatewaysOperations',
    'ExpressRouteCircuitAuthorizationsOperations',
    'ExpressRouteCircuitPeeringsOperations',
    'ExpressRouteCircuitsOperations',
    'ExpressRouteServiceProvidersOperations',
    'LoadBalancersOperations',
    'NetworkInterfacesOperations',
    'NetworkSecurityGroupsOperations',
    'SecurityRulesOperations',
    'PublicIPAddressesOperations',
    'RouteTablesOperations',
    'RoutesOperations',
    'UsagesOperations',
    'VirtualNetworksOperations',
    'SubnetsOperations',
    'VirtualNetworkGatewaysOperations',
    'VirtualNetworkGatewayConnectionsOperations',
    'LocalNetworkGatewaysOperations',
]
| mit |
MattArnold/tuxtrax | penguicontrax/api/users.py | 2 | 4400 | #flask libs
from flask import Flask, request, g
from flask.ext.restful import Resource, Api, reqparse
from sqlalchemy import or_
#global libs
from sys import exit
import sys
import os
## Import Local Libs
from penguicontrax import dump_table
from functions import return_null_if_not_logged_in
from .. import db
##User = user.User
from penguicontrax.user import User
class UsersAPI(Resource):
    """REST collection resource listing/searching users."""

    @return_null_if_not_logged_in
    def get(self):
        """ Returns a list of objects to represent users in the database
        Pass a ?q=query to conduct a search by name and email
        """
        parser = reqparse.RequestParser()
        parser.add_argument('q', type=str)
        args = parser.parse_args()
        output = User.query
        if args['q']:
            # Substring match against name, email and account name.
            # NOTE(review): '%' and '_' in the user-supplied query act as
            # SQL LIKE wildcards since they are not escaped -- confirm
            # whether that is intended.
            search_string = '%' + args['q'] + '%'
            output = output.filter(
                or_(
                    User.name.like(search_string),
                    User.email.like(search_string),
                    User.account_name.like(search_string)
                )
            )
        # Serialize the query results via the shared dump_table helper.
        output = dump_table(output, User.__table__)
        # fields to show in search results
        fields = ['id', 'name', 'email']
        return [
            dict([(name, element[name]) for name in fields])
            for element in output
        ]
class UserAPI(Resource):
    """REST item resource exposing a single user."""

    def get(self, id):
        """ Returns information about the given user by id

        Reveals different information depending on the logged-in user:
        whether the logged-in user is the same as the given user, and
        whether the session has a user at all.
        """
        found = User.query.get(id)
        if not found:
            # Previously a missing user fell through and the endpoint
            # returned null with a 200; answer 404 as ``put`` does.
            return "Invalid ID", 404
        # fields revealed based on login status
        self_fields = ['id', 'name', 'staff', 'email', 'points',
                       'image_large', 'image_small',
                       'public_rsvps', 'rsvped_to', 'superuser',
                       'creation_ip']
        loggedin_fields = ['id', 'name', 'points',
                           'image_large', 'image_small',
                           'public_rsvps', 'rsvped_to']
        anon_fields = ['id', 'name', 'image_large', 'image_small']
        if g.user is None:
            fields = anon_fields
        elif g.user.id == id:
            fields = self_fields
        else:
            fields = loggedin_fields
        # Hide the RSVP list for users who keep it private. Guard with a
        # membership test: anon_fields never contains 'rsvped_to', so an
        # unconditional list.remove() raised ValueError whenever an
        # anonymous visitor viewed a private-RSVP profile.
        if not found.public_rsvps and 'rsvped_to' in fields:
            fields.remove('rsvped_to')
        return dict([(name, getattr(found, name)) for name in fields])

    def put(self, id):
        """ Updates specific information about the given user

        Currently requires that the given user is currently logged-in.
        Also only allows specific fields to be changed.
        The updated information should be sent as form-encoded data.
        """
        found = User.query.get(id)
        # Guard clauses replace the original nested if/else pyramid;
        # the status codes for each case are unchanged.
        if not g.user:
            return "Unauthenticated", 401
        if not found:
            return "Invalid ID", 404
        if g.user.id != id:
            return "Unauthorized", 403
        fields = ['name', 'email']  # fields allowed to change
        parser = reqparse.RequestParser()
        for field in fields:
            parser.add_argument(field, type=str)
        args = parser.parse_args()
        changed = False
        for field in fields:
            if args[field]:
                setattr(found, field, args[field])
                changed = True
        # Only commit when something actually changed; the original
        # ``any(args)`` iterated the dict *keys* and was therefore
        # always true.
        if changed:
            db.session.commit()
        return "Success"
class UserSubmissionsAPI(Resource):
    """REST resource listing a user's submissions."""

    def get(self, id):
        """Return id/title/description for each submission of the user.

        Responds 404 when the user does not exist (previously the endpoint
        silently returned null with a 200), consistent with UserAPI.put.
        """
        found = User.query.get(id)
        if not found:
            return "Invalid ID", 404
        fields = ['id', 'title', 'description']
        return [
            dict([(name, getattr(submission, name)) for name in fields])
            for submission in found.submissions
        ]
class UserPresentationsAPI(Resource):
    """REST resource listing a user's presentations."""

    def get(self, id):
        """Return id/title/description for each presentation of the user.

        Responds 404 when the user does not exist (previously the endpoint
        silently returned null with a 200), consistent with UserAPI.put.
        """
        found = User.query.get(id)
        if not found:
            return "Invalid ID", 404
        fields = ['id', 'title', 'description']
        return [
            dict([(name, getattr(presentation, name)) for name in fields])
            for presentation in found.presentations
        ]
| gpl-3.0 |
kboard/kboard | kboard/functional_test/test_page_view_count.py | 4 | 1598 | from .base import FunctionalTest, login_test_user_with_browser
class CountPageViewTest(FunctionalTest):
    """Functional test: a post's view counter increments on every page load."""

    @login_test_user_with_browser
    def test_count_view_of_post(self):
        self.move_to_default_board()
        # Hye-seon writes a post about 'grape'.
        self.add_post('grape', 'purple\nsweet')
        # In the post list, the view count is shown as 0.
        rows = self.browser.find_elements_by_css_selector('#id_post_list_table tbody td.page-view-count')
        self.assertEqual(rows[0].text, '0')
        # She clicks the 'grape' post and checks its view count.
        table = self.browser.find_element_by_id('id_post_list_table')
        rows = table.find_elements_by_css_selector('tbody > tr > td > a')
        rows[0].click()
        view_count = self.browser.find_element_by_id('id_page_view_count')
        # NOTE(review): assertIn's arguments look swapped (member should be
        # the substring, container the page text); this only passes while
        # the two strings are equal -- confirm intent.
        self.assertIn(view_count.text, '조회수: 1')
        # Refresh the page three times to bump the post's view count.
        self.browser.refresh()
        self.browser.refresh()
        self.browser.refresh()
        view_count = self.browser.find_element_by_id('id_page_view_count')
        self.assertIn(view_count.text, '조회수: 4')
        # Go back to the post list.
        back_button = self.browser.find_element_by_id('id_back_to_post_list_button')
        back_button.click()
        # The list now shows a view count of 4.
        rows = self.browser.find_elements_by_css_selector('#id_post_list_table tbody td.page-view-count')
        self.assertEqual(rows[0].text, '4')
| mit |
nomedeusuariodesconhecido/info3180-lab4 | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/_base.py | 915 | 13711 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
listElementsMap = {
None: (frozenset(scopingElements), False),
"button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
"list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")])), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
class Node(object):
    """Abstract base class for an item in the document tree.

    Concrete tree implementations subclass this and provide the child
    manipulation primitives (appendChild, insertText, ...).
    """

    def __init__(self, name):
        """Create a node.

        name - The tag name associated with the node
        parent - The parent of the current node (or None for the document node)
        value - The value of the current node (applies to text nodes and
            comments)
        attributes - a dict holding name, value pairs for attributes of the node
        childNodes - a list of child nodes of the current node. This must
            include all elements but not necessarily other node types
        _flags - A list of miscellaneous flags that can be set on the node
        """
        self.name = name
        self.parent = None
        self.value = None
        self.attributes = {}
        self.childNodes = []
        self._flags = []

    def __str__(self):
        rendered = " ".join('%s="%s"' % (attrName, attrValue)
                            for attrName, attrValue in self.attributes.items())
        if not rendered:
            return "<%s>" % (self.name,)
        return "<%s %s>" % (self.name, rendered)

    def __repr__(self):
        return "<%s>" % (self.name,)

    def appendChild(self, node):
        """Insert node as a child of the current node."""
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert data as text in the current node, positioned before the
        start of node insertBefore or to the end of the node's text.
        """
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert node as a child of the current node, before refNode in the
        list of child nodes. Raises ValueError if refNode is not a child of
        the current node"""
        raise NotImplementedError

    def removeChild(self, node):
        """Remove node from the children of the current node."""
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move all the children of the current node to newParent.

        This is needed so that trees that don't store text as nodes move the
        text in the correct way.
        """
        # XXX - should this method be made more general?
        for child in self.childNodes:
            newParent.appendChild(child)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy of the current node i.e. a node with the same
        name and attributes but with no parent or child nodes.
        """
        raise NotImplementedError

    def hasContent(self):
        """Return true if the node has children or text, false otherwise."""
        raise NotImplementedError
class ActiveFormattingElements(list):
    """List of active formatting elements implementing the "Noah's Ark"
    clause: at most three identical entries since the last Marker.
    """

    def append(self, node):
        if node != Marker:
            matches = 0
            # Scan backwards until the most recent Marker, counting
            # entries identical to the new node.
            for existing in reversed(self):
                if existing == Marker:
                    break
                if self.nodesEqual(existing, node):
                    matches += 1
                    if matches == 3:
                        # Three equal entries already present: drop the
                        # earliest of them before appending the new one.
                        self.remove(existing)
                        break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        """Two nodes are equal when both name tuple and attributes match."""
        if node1.nameTuple != node2.nameTuple:
            return False
        if node1.attributes != node2.attributes:
            return False
        return True
class TreeBuilder(object):
    """Base treebuilder implementation

    documentClass - the class to use for the bottommost node of a document
    elementClass - the class to use for HTML Elements
    commentClass - the class to use for comments
    doctypeClass - the class to use for doctypes
    """

    # Document class
    documentClass = None

    # The class to use for creating a node
    elementClass = None

    # The class to use for creating comments
    commentClass = None

    # The class to use for creating doctypes
    doctypeClass = None

    # Fragment class
    fragmentClass = None

    def __init__(self, namespaceHTMLElements):
        """namespaceHTMLElements - if true, HTML elements get the XHTML
        namespace as their default namespace; otherwise None.
        """
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        self.reset()

    def reset(self):
        """Reset all tree-construction state for a new parse."""
        self.openElements = []
        self.activeFormattingElements = ActiveFormattingElements()

        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None

        self.insertFromTable = False

        self.document = self.documentClass()

    def elementInScope(self, target, variant=None):
        """Return whether ``target`` is "in scope" on the stack of open
        elements; ``variant`` selects the scope definition (None, "button",
        "list", "table" or "select") via listElementsMap.
        """

        # If we pass a node in we match that. if we pass a string
        # match any node with that name
        exactNode = hasattr(target, "nameTuple")

        listElements, invert = listElementsMap[variant]

        for node in reversed(self.openElements):
            if (node.name == target and not exactNode or
                    node == target and exactNode):
                return True
            elif (invert ^ (node.nameTuple in listElements)):
                return False

        assert False  # We should never reach this point

    def reconstructActiveFormattingElements(self):
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.

        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return

        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return

        # Step 6
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]

        while True:
            # Step 7
            i += 1

            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode()  # Mainly to get a new copy of the attributes

            # Step 9
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})

            # Step 10
            self.activeFormattingElements[i] = element

            # Step 11
            if element == self.activeFormattingElements[-1]:
                break

    def clearActiveFormattingElements(self):
        # Pop entries up to and including the most recent Marker.
        entry = self.activeFormattingElements.pop()
        while self.activeFormattingElements and entry != Marker:
            entry = self.activeFormattingElements.pop()

    def elementInActiveFormattingElements(self, name):
        """Check if an element exists between the end of the active
        formatting elements and the last marker. If it does, return it, else
        return false"""

        for item in self.activeFormattingElements[::-1]:
            # Check for Marker first because if it's a Marker it doesn't have a
            # name attribute.
            if item == Marker:
                break
            elif item.name == name:
                return item
        return False

    def insertRoot(self, token):
        """Create the root element from ``token`` and attach it to the
        document node."""
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)

    def insertDoctype(self, token):
        """Append a doctype node built from ``token`` to the document."""
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = self.doctypeClass(name, publicId, systemId)
        self.document.appendChild(doctype)

    def insertComment(self, token, parent=None):
        """Append a comment node; defaults to the current open element."""
        if parent is None:
            parent = self.openElements[-1]
        parent.appendChild(self.commentClass(token["data"]))

    def createElement(self, token):
        """Create an element but don't insert it anywhere"""
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        return element

    def _getInsertFromTable(self):
        return self._insertFromTable

    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal

    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)

    def insertElementNormal(self, token):
        name = token["name"]
        assert isinstance(name, text_type), "Element %s not unicode" % name
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        self.openElements[-1].appendChild(element)
        self.openElements.append(element)
        return element

    def insertElementTable(self, token):
        """Create an element and insert it into the tree"""
        element = self.createElement(token)
        if self.openElements[-1].name not in tableInsertModeElements:
            return self.insertElementNormal(token)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            if insertBefore is None:
                parent.appendChild(element)
            else:
                parent.insertBefore(element, insertBefore)
            self.openElements.append(element)
        return element

    def insertText(self, data, parent=None):
        """Insert text data."""
        if parent is None:
            parent = self.openElements[-1]

        if (not self.insertFromTable or (self.insertFromTable and
                                         self.openElements[-1].name
                                         not in tableInsertModeElements)):
            parent.insertText(data)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            parent.insertText(data, insertBefore)

    def getTableMisnestedNodePosition(self):
        """Get the foster parent element, and sibling to insert before
        (or None) when inserting a misnested table node"""
        # The foster parent element is the one which comes before the most
        # recently opened table element
        # XXX - this is really inelegant
        lastTable = None
        fosterParent = None
        insertBefore = None
        for elm in self.openElements[::-1]:
            if elm.name == "table":
                lastTable = elm
                break
        if lastTable:
            # XXX - we should really check that this parent is actually a
            # node here
            if lastTable.parent:
                fosterParent = lastTable.parent
                insertBefore = lastTable
            else:
                fosterParent = self.openElements[
                    self.openElements.index(lastTable) - 1]
        else:
            fosterParent = self.openElements[0]
        return fosterParent, insertBefore

    def generateImpliedEndTags(self, exclude=None):
        # Recursively pop elements whose end tags may be implied, stopping
        # at ``exclude`` (used e.g. when closing a <p> explicitly).
        name = self.openElements[-1].name
        # XXX td, th and tr are not actually needed
        if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
                and name != exclude):
            self.openElements.pop()
            # XXX This is not entirely what the specification says. We should
            # investigate it more closely.
            self.generateImpliedEndTags(exclude)

    def getDocument(self):
        "Return the final tree"
        return self.document

    def getFragment(self):
        "Return the final fragment"
        # assert self.innerHTML
        fragment = self.fragmentClass()
        self.openElements[0].reparentChildren(fragment)
        return fragment

    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests

        node - the node from which to start serializing"""
        raise NotImplementedError
| mit |
xxd3vin/spp-sdk | opt/Python27/Lib/sqlite3/dbapi2.py | 161 | 2615 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/dbapi2.py: the DB-API 2.0 interface
#
# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import time
from _sqlite3 import *
paramstyle = "qmark"
threadsafety = 1
apilevel = "2.0"
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
    """Construct a DB-API date value from a POSIX timestamp (local time)."""
    parts = time.localtime(ticks)
    return Date(parts.tm_year, parts.tm_mon, parts.tm_mday)
def TimeFromTicks(ticks):
    """Construct a DB-API time value from a POSIX timestamp (local time)."""
    parts = time.localtime(ticks)
    return Time(parts.tm_hour, parts.tm_min, parts.tm_sec)
def TimestampFromTicks(ticks):
    """Construct a DB-API timestamp value from a POSIX timestamp (local time)."""
    parts = time.localtime(ticks)
    return Timestamp(parts.tm_year, parts.tm_mon, parts.tm_mday,
                     parts.tm_hour, parts.tm_min, parts.tm_sec)
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
Binary = buffer
def register_adapters_and_converters():
    """Install the default date/timestamp adapters and converters.

    Adapters serialize datetime.date/datetime.datetime to ISO strings;
    converters parse them back for columns declared as "date"/"timestamp".
    """
    def adapt_date(val):
        return val.isoformat()

    def adapt_datetime(val):
        return val.isoformat(" ")

    def convert_date(val):
        return datetime.date(*map(int, val.split("-")))

    def convert_timestamp(val):
        datepart, timepart = val.split(" ")
        year, month, day = map(int, datepart.split("-"))
        timepart_full = timepart.split(".")
        hours, minutes, seconds = map(int, timepart_full[0].split(":"))
        if len(timepart_full) == 2:
            # Right-pad (and truncate) the fractional part to exactly six
            # digits so that e.g. ".5" is read as 500000 microseconds, not
            # as 5. The previous bare int() misparsed short fractions.
            microseconds = int('{:0<6.6}'.format(timepart_full[1]))
        else:
            microseconds = 0

        val = datetime.datetime(year, month, day, hours, minutes, seconds,
                                microseconds)
        return val

    register_adapter(datetime.date, adapt_date)
    register_adapter(datetime.datetime, adapt_datetime)
    register_converter("date", convert_date)
    register_converter("timestamp", convert_timestamp)
register_adapters_and_converters()
# Clean up namespace
del(register_adapters_and_converters)
| mit |
edx/edx-platform | openedx/core/djangoapps/schedules/tests/test_content_highlights.py | 3 | 8195 | # lint-amnesty, pylint: disable=missing-module-docstring
import datetime
from unittest.mock import patch
import pytest
from openedx.core.djangoapps.schedules.content_highlights import (
course_has_highlights_from_store,
get_all_course_highlights,
get_next_section_highlights,
get_week_highlights
)
from openedx.core.djangoapps.schedules.exceptions import CourseUpdateDoesNotExist
from openedx.core.djangolib.testing.utils import skip_unless_lms
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@skip_unless_lms
class TestContentHighlights(ModuleStoreTestCase):  # lint-amnesty, pylint: disable=missing-class-docstring
    # Exercises the schedules content_highlights helpers against courses
    # built in the split modulestore; each chapter acts as one "week".
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    def setUp(self):
        super().setUp()
        self._setup_course()
        self._setup_user()

    def _setup_course(self):
        # Highlights messaging must be enabled for the helpers to report
        # highlights at all (see test_highlights_disabled_for_messaging).
        self.course = CourseFactory.create(
            highlights_enabled_for_messaging=True
        )
        self.course_key = self.course.id

    def _setup_user(self):
        # Enrollment is required so the helpers can resolve course content
        # for this user.
        self.user = UserFactory.create()
        CourseEnrollment.enroll(self.user, self.course_key)

    def _create_chapter(self, **kwargs):
        # Helper: add one chapter ("week") to the course under test.
        ItemFactory.create(
            parent=self.course,
            category='chapter',
            **kwargs
        )

    def test_non_existent_course_raises_exception(self):
        # A course key that resolves to no course must raise, not return [].
        nonexistent_course_key = self.course_key.replace(run='no_such_run')
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, nonexistent_course_key, week_num=1)

    def test_empty_course_raises_exception(self):
        # A course with no chapters has no week 1 to highlight.
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, self.course_key, week_num=1)

    def test_happy_path(self):
        highlights = ['highlights']
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(highlights=highlights)
        assert course_has_highlights_from_store(self.course_key)
        assert get_week_highlights(self.user, self.course_key, week_num=1) == highlights

    def test_get_all_course_highlights(self):
        # Weeks without highlights are returned as empty lists, in order.
        all_highlights = [["week1highlight1", "week1highlight2"], ["week1highlight1", "week1highlight2"], [], []]
        with self.store.bulk_operations(self.course_key):
            for week_highlights in all_highlights:
                self._create_chapter(highlights=week_highlights)
        assert get_all_course_highlights(self.course_key) == all_highlights

    def test_highlights_disabled_for_messaging(self):
        # Even with highlights present, the flag gates all helper output.
        highlights = ['A test highlight.']
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(highlights=highlights)

        self.course.highlights_enabled_for_messaging = False
        self.store.update_item(self.course, self.user.id)

        assert not course_has_highlights_from_store(self.course_key)
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(
                self.user,
                self.course_key,
                week_num=1,
            )

    def test_course_with_no_highlights(self):
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(display_name="Week 1")
            self._create_chapter(display_name="Week 2")

        self.course = self.store.get_course(self.course_key)  # lint-amnesty, pylint: disable=attribute-defined-outside-init
        assert len(self.course.get_children()) == 2

        assert not course_has_highlights_from_store(self.course_key)
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, self.course_key, week_num=1)

    def test_course_with_highlights(self):
        # Weeks are numbered over chapters that *have* highlights; the empty
        # chapter is skipped, so chapter 3 becomes "week 2".
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(highlights=['a', 'b', 'á'])
            self._create_chapter(highlights=[])
            self._create_chapter(highlights=['skipped a week'])

        assert course_has_highlights_from_store(self.course_key)

        assert get_week_highlights(self.user, self.course_key, week_num=1) == ['a', 'b', 'á']
        assert get_week_highlights(self.user, self.course_key, week_num=2) == ['skipped a week']
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, self.course_key, week_num=3)

    def test_staff_only(self):
        # Content hidden from learners must not leak into their highlights.
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(
                highlights=["I'm a secret!"],
                visible_to_staff_only=True,
            )

        assert course_has_highlights_from_store(self.course_key)
        with pytest.raises(CourseUpdateDoesNotExist):
            get_week_highlights(self.user, self.course_key, week_num=1)

    @patch('openedx.core.djangoapps.course_date_signals.utils.get_expected_duration')
    def test_get_next_section_highlights(self, mock_duration):
        # All of the dates chosen here are to make things easy and clean to calculate with date offsets
        # It only goes up to 6 days because we are using two_days_ago as our reference point
        # so 6 + 2 = 8 days for the duration of the course
        mock_duration.return_value = datetime.timedelta(days=8)
        today = datetime.datetime.utcnow()
        two_days_ago = today - datetime.timedelta(days=2)
        two_days = today + datetime.timedelta(days=2)
        three_days = today + datetime.timedelta(days=3)
        four_days = today + datetime.timedelta(days=4)
        six_days = today + datetime.timedelta(days=6)
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(  # Week 1
                highlights=['a', 'b', 'á'],
            )
            self._create_chapter(  # Week 2
                highlights=['skipped a week'],
            )
            self._create_chapter(  # Week 3
                highlights=[]
            )
            self._create_chapter(  # Week 4
                highlights=['final week!']
            )

        assert get_next_section_highlights(self.user, self.course_key, two_days_ago, today.date()) ==\
            (['skipped a week'], 2)
        exception_message = 'Next section [{}] has no highlights for {}'.format(  # pylint: disable=unused-variable
            'chapter 3', self.course_key
        )
        with pytest.raises(CourseUpdateDoesNotExist):
            get_next_section_highlights(self.user, self.course_key, two_days_ago, two_days.date())
        # Returns None, None if the target date does not match any due dates. This is caused by
        # making the mock_duration 8 days and there being only 4 chapters so any odd day will
        # fail to match.
        assert get_next_section_highlights(self.user, self.course_key, two_days_ago, three_days.date()) == (None, None)
        assert get_next_section_highlights(self.user, self.course_key, two_days_ago, four_days.date()) ==\
            (['final week!'], 4)
        exception_message = f'Last section was reached. There are no more highlights for {self.course_key}'
        with pytest.raises(CourseUpdateDoesNotExist):
            get_next_section_highlights(self.user, self.course_key, two_days_ago, six_days.date())

    @patch('lms.djangoapps.courseware.module_render.get_module_for_descriptor')
    def test_get_highlights_without_module(self, mock_get_module):
        # If the module system cannot build the course module, the helpers
        # must raise rather than silently return nothing.
        mock_get_module.return_value = None
        with self.store.bulk_operations(self.course_key):
            self._create_chapter(highlights=['Test highlight'])

        with self.assertRaisesRegex(CourseUpdateDoesNotExist, 'Course module .* not found'):
            get_week_highlights(self.user, self.course_key, 1)

        yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
        today = datetime.datetime.utcnow()
        with self.assertRaisesRegex(CourseUpdateDoesNotExist, 'Course module .* not found'):
            get_next_section_highlights(self.user, self.course_key, yesterday, today.date())
| agpl-3.0 |
amith01994/intellij-community | python/lib/Lib/site-packages/django/db/backends/postgresql/operations.py | 229 | 9420 | import re
from django.db.backends import BaseDatabaseOperations
# This DatabaseOperations class lives in here instead of base.py because it's
# used by both the 'postgresql' and 'postgresql_psycopg2' backends.
class DatabaseOperations(BaseDatabaseOperations):
    """PostgreSQL-specific SQL generation helpers.

    Shared by both the 'postgresql' and 'postgresql_psycopg2' backends,
    which is why it lives here instead of base.py.
    """
    def __init__(self, connection):
        super(DatabaseOperations, self).__init__()
        self._postgres_version = None  # cached server version; see postgres_version
        self.connection = connection
    def _get_postgres_version(self):
        # Lazily query the server version once and cache it for this object.
        if self._postgres_version is None:
            from django.db.backends.postgresql.version import get_version
            cursor = self.connection.cursor()
            self._postgres_version = get_version(cursor)
        return self._postgres_version
    postgres_version = property(_get_postgres_version)
    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting the date part *lookup_type* from *field_name*."""
        # http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            return "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
    def date_interval_sql(self, sql, connector, timedelta):
        """
        implements the interval functionality for expressions
        format for Postgres:
        (datefield + interval '3 days 200 seconds 5 microseconds')
        """
        modifiers = []
        if timedelta.days:
            modifiers.append(u'%s days' % timedelta.days)
        if timedelta.seconds:
            modifiers.append(u'%s seconds' % timedelta.seconds)
        if timedelta.microseconds:
            modifiers.append(u'%s microseconds' % timedelta.microseconds)
        mods = u' '.join(modifiers)
        conn = u' %s ' % connector
        return u'(%s)' % conn.join([sql, u'interval \'%s\'' % mods])
    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating *field_name* to *lookup_type* precision."""
        # http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
    def deferrable_sql(self):
        # Constraint checks are deferred until transaction commit.
        return " DEFERRABLE INITIALLY DEFERRED"
    def lookup_cast(self, lookup_type):
        """Return the value placeholder, wrapped with any cast needed for *lookup_type*."""
        lookup = '%s'
        # Cast text lookups to text to allow things like filter(x__contains=4)
        if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith'):
            lookup = "%s::text"
        # Use UPPER(x) for case-insensitive lookups; it's faster.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            lookup = 'UPPER(%s)' % lookup
        return lookup
    def field_cast_sql(self, db_type):
        # HOST() strips the netmask so inet columns compare as plain text.
        if db_type == 'inet':
            return 'HOST(%s)'
        return '%s'
    def last_insert_id(self, cursor, table_name, pk_name):
        # Use pg_get_serial_sequence to get the underlying sequence name
        # from the table name and column name (available since PostgreSQL 8)
        cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
            self.quote_name(table_name), pk_name))
        return cursor.fetchone()[0]
    def no_limit_value(self):
        # None tells Django that no LIMIT clause is needed for unbounded slices.
        return None
    def quote_name(self, name):
        """Double-quote *name* unless it is already quoted."""
        if name.startswith('"') and name.endswith('"'):
            return name # Quoting once is enough.
        return '"%s"' % name
    def sql_flush(self, style, tables, sequences):
        """Return a list of SQL statements that empty *tables* and reset *sequences*."""
        if tables:
            if self.postgres_version[0:2] >= (8,1):
                # Postgres 8.1+ can do 'TRUNCATE x, y, z...;'. In fact, it *has to*
                # in order to be able to truncate tables referenced by a foreign
                # key in any other table. The result is a single SQL TRUNCATE
                # statement.
                sql = ['%s %s;' % \
                    (style.SQL_KEYWORD('TRUNCATE'),
                     style.SQL_FIELD(', '.join([self.quote_name(table) for table in tables]))
                )]
            else:
                # Older versions of Postgres can't do TRUNCATE in a single call, so
                # they must use a simple delete.
                sql = ['%s %s %s;' % \
                        (style.SQL_KEYWORD('DELETE'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_FIELD(self.quote_name(table))
                         ) for table in tables]
            # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
            # to reset sequence indices
            for sequence_info in sequences:
                table_name = sequence_info['table']
                column_name = sequence_info['column']
                if not (column_name and len(column_name) > 0):
                    # This will be the case if it's an m2m using an autogenerated
                    # intermediate table (see BaseDatabaseIntrospection.sequence_list)
                    column_name = 'id'
                sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % \
                    (style.SQL_KEYWORD('SELECT'),
                     style.SQL_TABLE(self.quote_name(table_name)),
                     style.SQL_FIELD(column_name))
                )
            return sql
        else:
            return []
    def sequence_reset_sql(self, style, model_list):
        """Return SQL setting each model's sequence to max(pk), or 1 for empty tables."""
        from django.db import models
        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
            # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
            # if there are records (as the max pk value is already in use), otherwise set it to false.
            # Use pg_get_serial_sequence to get the underlying sequence name from the table name
            # and column name (available since PostgreSQL 8)
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
                        (style.SQL_KEYWORD('SELECT'),
                         style.SQL_TABLE(qn(model._meta.db_table)),
                         style.SQL_FIELD(f.column),
                         style.SQL_FIELD(qn(f.column)),
                         style.SQL_FIELD(qn(f.column)),
                         style.SQL_KEYWORD('IS NOT'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_TABLE(qn(model._meta.db_table))))
                    break # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                if not f.rel.through:
                    # Auto-created m2m tables always use an 'id' serial column.
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
                        (style.SQL_KEYWORD('SELECT'),
                         style.SQL_TABLE(qn(f.m2m_db_table())),
                         style.SQL_FIELD('id'),
                         style.SQL_FIELD(qn('id')),
                         style.SQL_FIELD(qn('id')),
                         style.SQL_KEYWORD('IS NOT'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_TABLE(qn(f.m2m_db_table()))))
        return output
    def savepoint_create_sql(self, sid):
        """Return SQL creating savepoint *sid*."""
        return "SAVEPOINT %s" % sid
    def savepoint_commit_sql(self, sid):
        """Return SQL releasing (committing) savepoint *sid*."""
        return "RELEASE SAVEPOINT %s" % sid
    def savepoint_rollback_sql(self, sid):
        """Return SQL rolling back to savepoint *sid*."""
        return "ROLLBACK TO SAVEPOINT %s" % sid
    def prep_for_iexact_query(self, x):
        # No transformation needed: lookup_cast already applies UPPER().
        return x
    def check_aggregate_support(self, aggregate):
        """Check that the backend fully supports the provided aggregate.
        The population and sample statistics (STDDEV_POP, STDDEV_SAMP,
        VAR_POP, VAR_SAMP) were first implemented in Postgres 8.2.
        The implementation of population statistics (STDDEV_POP and VAR_POP)
        under Postgres 8.2 - 8.2.4 is known to be faulty. Raise
        NotImplementedError if this is the database in use.
        """
        if aggregate.sql_function in ('STDDEV_POP', 'STDDEV_SAMP', 'VAR_POP', 'VAR_SAMP'):
            if self.postgres_version[0:2] < (8,2):
                raise NotImplementedError('PostgreSQL does not support %s prior to version 8.2. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
        if aggregate.sql_function in ('STDDEV_POP', 'VAR_POP'):
            if self.postgres_version[0:2] == (8,2):
                # Patch-level may be unknown (None); treat that as potentially faulty.
                if self.postgres_version[2] is None or self.postgres_version[2] <= 4:
                    raise NotImplementedError('PostgreSQL 8.2 to 8.2.4 is known to have a faulty implementation of %s. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
    def max_name_length(self):
        """
        Returns the maximum length of an identifier.
        Note that the maximum length of an identifier is 63 by default, but can
        be changed by recompiling PostgreSQL after editing the NAMEDATALEN
        macro in src/include/pg_config_manual.h .
        This implementation simply returns 63, but can easily be overridden by a
        custom database backend that inherits most of its behavior from this one.
        """
        return 63
| apache-2.0 |
ovnicraft/openerp-restaurant | product/__openerp__.py | 4 | 2604 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Products & Pricelists',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Sales Management',
'depends': ['base', 'process', 'decimal_precision', 'mail'],
'demo': [
'product_demo.xml',
'product_image_demo.xml',
],
'description': """
This is the base module for managing products and pricelists in OpenERP.
========================================================================
Products support variants, different pricing methods, suppliers information,
make to stock/order, different unit of measures, packaging and properties.
Pricelists support:
-------------------
* Multiple-level of discount (by product, category, quantities)
* Compute price based on different criteria:
* Other pricelist
* Cost price
* List price
* Supplier price
Pricelists preferences by product and/or partners.
Print product labels with barcode.
""",
'data': [
'security/product_security.xml',
'security/ir.model.access.csv',
'wizard/product_price_view.xml',
'product_data.xml',
'product_report.xml',
'product_view.xml',
'pricelist_view.xml',
'partner_view.xml',
'process/product_process.xml'
],
'test': [
'product_pricelist_demo.yml',
'test/product_pricelist.yml',
],
'installable': True,
'auto_install': False,
'images': ['images/product_uom.jpeg','images/product_pricelists.jpeg','images/products_categories.jpeg', 'images/products_form.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rosjat/python-scsi | tests/test_cdb_inquiry.py | 1 | 2022 | # coding: utf-8
# Copyright (C) 2014 by Ronnie Sahlberg <ronniesahlberg@gmail.com>
# Copyright (C) 2015 by Markus Rosjat <markus.rosjat@gmail.com>
# SPDX-FileCopyrightText: 2014 The python-scsi Authors
#
# SPDX-License-Identifier: LGPL-2.1-or-later
import unittest
from pyscsi.pyscsi.scsi_cdb_inquiry import Inquiry
from pyscsi.pyscsi.scsi_enum_command import spc
from pyscsi.utils.converter import scsi_ba_to_int
from .mock_device import MockDevice, MockSCSI
class CdbInquiryTest(unittest.TestCase):
    """Round-trip checks for INQUIRY CDB marshalling against a mock device."""

    def test_main(self):
        with MockSCSI(MockDevice(spc)) as scsi:
            opcode = scsi.device.opcodes.INQUIRY.value

            # Standard (non-EVPD) INQUIRY with a 128-byte allocation length.
            cmd = scsi.inquiry(alloclen=128)
            raw = cmd.cdb
            self.assertEqual(raw[0], opcode)
            self.assertEqual(raw[1:3], bytearray(2))
            self.assertEqual(scsi_ba_to_int(raw[3:5]), 128)
            self.assertEqual(raw[5], 0)
            decoded = cmd.unmarshall_cdb(raw)
            self.assertEqual(decoded['opcode'], opcode)
            self.assertEqual(decoded['evpd'], 0)
            self.assertEqual(decoded['page_code'], 0)
            self.assertEqual(decoded['alloc_len'], 128)
            # marshall -> unmarshall must be lossless
            self.assertEqual(Inquiry.unmarshall_cdb(Inquiry.marshall_cdb(decoded)), decoded)

            # EVPD request for page 0x88 with a 300-byte allocation length.
            cmd = scsi.inquiry(evpd=1, page_code=0x88, alloclen=300)
            raw = cmd.cdb
            self.assertEqual(raw[0], opcode)
            self.assertEqual(raw[1], 0x01)
            self.assertEqual(raw[2], 0x88)
            self.assertEqual(scsi_ba_to_int(raw[3:5]), 300)
            self.assertEqual(raw[5], 0)
            decoded = cmd.unmarshall_cdb(raw)
            self.assertEqual(decoded['opcode'], opcode)
            self.assertEqual(decoded['evpd'], 1)
            self.assertEqual(decoded['page_code'], 0x88)
            self.assertEqual(decoded['alloc_len'], 300)
            self.assertEqual(Inquiry.unmarshall_cdb(Inquiry.marshall_cdb(decoded)), decoded)
| lgpl-2.1 |
jjscarafia/odoo | addons/board/__openerp__.py | 114 | 1763 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest for the dashboard ("board") module.
{
    'name': 'Dashboards',
    'version': '1.0',
    'category': 'Hidden',
    'description': """
Lets the user create a custom dashboard.
========================================
Allows users to create custom dashboard.
""",
    'author': 'OpenERP SA',
    'depends': ['base', 'web'],
    # Files loaded on install/update; access rules first, then views.
    'data': [
        'security/ir.model.access.csv',
        'board_view.xml',
        'board_mydashboard_view.xml',
        'views/board.xml',
    ],
    # Client-side QWeb templates bundled with the module.
    'qweb': ['static/src/xml/*.xml'],
    'installable': True,
    'auto_install': False,
    'images': ['images/1_dashboard_definition.jpeg','images/2_publish_note.jpeg','images/3_admin_dashboard.jpeg',],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
astrofrog/numpy | numpy/core/tests/test_umath.py | 8 | 45076 | import sys
import platform
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
def on_powerpc():
    """ True if we are running on a Power PC platform."""
    # Either identification is sufficient; which one is populated varies by OS.
    processor_name = platform.processor()
    machine_name = platform.machine()
    return processor_name == 'powerpc' or machine_name.startswith('ppc')
class _FilterInvalids(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
class TestDivision(TestCase):
    """Division semantics: integer /, //, % and complex-division edge cases."""
    def test_division_int(self):
        # int division should follow Python
        x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
        if 5 / 10 == 0.5:
            # Python 3 / "from __future__ import division": true division.
            assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
                                   -0.05, -0.1, -0.9, -1, -1.2])
        else:
            # Python 2 classic division: / floors like //.
            assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
        assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
        assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
    def test_division_complex(self):
        # check that implementation is correct
        msg = "Complex division implementation check"
        x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
        assert_almost_equal(x**2/x, x, err_msg=msg)
        # check overflow, underflow
        msg = "Complex division overflow/underflow check"
        x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
        y = x**2/x
        assert_almost_equal(y/x, [1, 1], err_msg=msg)
    def test_zero_division_complex(self):
        # Dividing by complex zero yields inf (nan for 0/0); warnings silenced.
        err = np.seterr(invalid="ignore", divide="ignore")
        try:
            x = np.array([0.0], dtype=np.complex128)
            y = 1.0/x
            assert_(np.isinf(y)[0])
            y = complex(np.inf, np.nan)/x
            assert_(np.isinf(y)[0])
            y = complex(np.nan, np.inf)/x
            assert_(np.isinf(y)[0])
            y = complex(np.inf, np.inf)/x
            assert_(np.isinf(y)[0])
            y = 0.0/x
            assert_(np.isnan(y)[0])
        finally:
            np.seterr(**err)  # always restore the global FP error state
    def test_floor_division_complex(self):
        # check that implementation is correct
        msg = "Complex floor division implementation check"
        x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
        y = np.array([0., -1., 0., 0.], dtype=np.complex128)
        assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
        # check overflow, underflow
        msg = "Complex floor division overflow/underflow check"
        x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
        y = np.floor_divide(x**2, x)
        assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestPower(TestCase):
    """Behaviour of ** / np.power for float and complex operands."""
    def test_power_float(self):
        x = np.array([1., 2., 3.])
        assert_equal(x**0, [1., 1., 1.])
        assert_equal(x**1, x)
        assert_equal(x**2, [1., 4., 9.])
        y = x.copy()
        y **= 2  # in-place power must match the out-of-place result
        assert_equal(y, [1., 4., 9.])
        assert_almost_equal(x**(-1), [1., 0.5, 1./3])
        assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
    def test_power_complex(self):
        x = np.array([1+2j, 2+3j, 3+4j])
        assert_equal(x**0, [1., 1., 1.])
        assert_equal(x**1, x)
        assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
        assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
        assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
        assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
        assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
        assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
                                      (-117-44j)/15625])
        assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
                                       ncu.sqrt(3+4j)])
        # Scale the huge x**14 values down so the comparison is well-conditioned.
        norm = 1./((x**14)[0])
        assert_almost_equal(x**14 * norm,
                [i * norm for i in [-76443+16124j, 23161315+58317492j,
                                    5583548873 + 2465133864j]])
        # Ticket #836
        def assert_complex_equal(x, y):
            # compare real and imaginary parts exactly
            assert_array_equal(x.real, y.real)
            assert_array_equal(x.imag, y.imag)
        for z in [complex(0, np.inf), complex(1, np.inf)]:
            err = np.seterr(invalid="ignore")
            z = np.array([z], dtype=np.complex_)
            try:
                assert_complex_equal(z**1, z)
                assert_complex_equal(z**2, z*z)
                assert_complex_equal(z**3, z*z*z)
            finally:
                np.seterr(**err)
    def test_power_zero(self):
        # ticket #1271
        zero = np.array([0j])
        one = np.array([1+0j])
        cinf = np.array([complex(np.inf, 0)])  # kept for symmetry; not asserted below
        cnan = np.array([complex(np.nan, np.nan)])
        def assert_complex_equal(x, y):
            x, y = np.asarray(x), np.asarray(y)
            assert_array_equal(x.real, y.real)
            assert_array_equal(x.imag, y.imag)
        # positive powers
        for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
            assert_complex_equal(np.power(zero, p), zero)
        # zero power
        assert_complex_equal(np.power(zero, 0), one)
        assert_complex_equal(np.power(zero, 0+1j), cnan)
        # negative power
        for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
            assert_complex_equal(np.power(zero, -p), cnan)
        assert_complex_equal(np.power(zero, -1+0.2j), cnan)
    def test_fast_power(self):
        # An integral float exponent must take the fast integer-power path
        # and still produce the same result dtype as a plain float power.
        x=np.array([1,2,3], np.int16)
        assert (x**2.00001).dtype is (x**2.0).dtype
class TestLog2(TestCase):
    """np.log2 on exact powers of two."""

    def test_log2_values(self):
        # log2(2**k) == k for k = 0..10, in float32, float64 and longdouble.
        exponents = list(range(11))
        powers = [2 ** e for e in exponents]
        for typecode in 'fdg':
            inputs = np.array(powers, dtype=typecode)
            expected = np.array(exponents, dtype=typecode)
            assert_almost_equal(np.log2(inputs), expected)
class TestExp2(TestCase):
    """np.exp2 on small integer exponents."""

    def test_exp2_values(self):
        # exp2(k) == 2**k for k = 0..10, in float32, float64 and longdouble.
        exponents = list(range(11))
        powers = [2 ** e for e in exponents]
        for typecode in 'fdg':
            arguments = np.array(exponents, dtype=typecode)
            expected = np.array(powers, dtype=typecode)
            assert_almost_equal(np.exp2(arguments), expected)
class TestLogAddExp2(_FilterInvalids):
    """np.logaddexp2(x, y) == log2(2**x + 2**y), computed without overflow."""
    # Need test for intermediate precisions
    def test_logaddexp2_values(self) :
        x = [1, 2, 3, 4, 5]
        y = [5, 4, 3, 2, 1]
        z = [6, 6, 6, 6, 6]
        for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
            # In log2-space, "adding" log2(x) and log2(y) must give log2(x+y).
            xf = np.log2(np.array(x, dtype=dt))
            yf = np.log2(np.array(y, dtype=dt))
            zf = np.log2(np.array(z, dtype=dt))
            assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec)
    def test_logaddexp2_range(self) :
        # Extreme magnitudes: the larger argument must dominate exactly.
        x = [1000000, -1000000, 1000200, -1000200]
        y = [1000200, -1000200, 1000000, -1000000]
        z = [1000200, -1000000, 1000200, -1000000]
        for dt in ['f','d','g'] :
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
    def test_inf(self) :
        # IEEE infinities: +inf dominates; -inf with -inf stays -inf.
        err = np.seterr(invalid='ignore')
        inf = np.inf
        x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
        y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
        z = [inf, inf, inf, -inf, inf, inf, 1, 1]
        try:
            for dt in ['f','d','g'] :
                logxf = np.array(x, dtype=dt)
                logyf = np.array(y, dtype=dt)
                logzf = np.array(z, dtype=dt)
                assert_equal(np.logaddexp2(logxf, logyf), logzf)
        finally:
            np.seterr(**err)
    def test_nan(self):
        # nan propagates regardless of the other operand (even inf).
        assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
        assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
        assert_(np.isnan(np.logaddexp2(np.nan, 0)))
        assert_(np.isnan(np.logaddexp2(0, np.nan)))
        assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
class TestLog(TestCase):
    """np.log checked via the identity log(2**k) == k * ln(2)."""

    def test_log_values(self):
        ln2 = 0.69314718055994530943  # ln(2) to long-double precision
        exponents = list(range(11))
        powers = [2 ** e for e in exponents]
        for typecode in 'fdg':
            inputs = np.array(powers, dtype=typecode)
            expected = np.array(exponents, dtype=typecode) * ln2
            assert_almost_equal(np.log(inputs), expected)
class TestExp(TestCase):
    """np.exp checked via the identity exp(k * ln(2)) == 2**k."""

    def test_exp_values(self):
        ln2 = 0.69314718055994530943  # ln(2) to long-double precision
        exponents = list(range(11))
        powers = [2 ** e for e in exponents]
        for typecode in 'fdg':
            arguments = np.array(exponents, dtype=typecode) * ln2
            expected = np.array(powers, dtype=typecode)
            assert_almost_equal(np.exp(arguments), expected)
class TestLogAddExp(_FilterInvalids):
    """np.logaddexp(x, y) == log(exp(x) + exp(y)), computed without overflow."""
    def test_logaddexp_values(self) :
        x = [1, 2, 3, 4, 5]
        y = [5, 4, 3, 2, 1]
        z = [6, 6, 6, 6, 6]
        for dt, dec in zip(['f','d','g'],[6, 15, 15]) :
            # In log-space, "adding" log(x) and log(y) must give log(x+y).
            xf = np.log(np.array(x, dtype=dt))
            yf = np.log(np.array(y, dtype=dt))
            zf = np.log(np.array(z, dtype=dt))
            assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec)
    def test_logaddexp_range(self) :
        # Extreme magnitudes: the larger argument must dominate exactly.
        x = [1000000, -1000000, 1000200, -1000200]
        y = [1000200, -1000200, 1000000, -1000000]
        z = [1000200, -1000000, 1000200, -1000000]
        for dt in ['f','d','g'] :
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
    def test_inf(self) :
        # IEEE infinities: +inf dominates; -inf with -inf stays -inf.
        err = np.seterr(invalid='ignore')
        inf = np.inf
        x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
        y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
        z = [inf, inf, inf, -inf, inf, inf, 1, 1]
        try:
            for dt in ['f','d','g'] :
                logxf = np.array(x, dtype=dt)
                logyf = np.array(y, dtype=dt)
                logzf = np.array(z, dtype=dt)
                assert_equal(np.logaddexp(logxf, logyf), logzf)
        finally:
            np.seterr(**err)
    def test_nan(self):
        # nan propagates regardless of the other operand (even inf).
        assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
        assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
        assert_(np.isnan(np.logaddexp(np.nan, 0)))
        assert_(np.isnan(np.logaddexp(0, np.nan)))
        assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
class TestLog1p(TestCase):
    """np.log1p(x) == log(1 + x), accurate for small x."""
    def test_log1p(self):
        assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
        assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
class TestExpm1(TestCase):
    """np.expm1(x) == exp(x) - 1, accurate for small x."""
    def test_expm1(self):
        assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
        assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
class TestHypot(TestCase, object):
    """np.hypot(x, y) == sqrt(x**2 + y**2) for ordinary inputs."""
    # NOTE(review): the extra `object` base is redundant (TestCase is already
    # new-style); kept as-is to avoid changing the class's MRO.
    def test_simple(self):
        assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
        assert_almost_equal(ncu.hypot(0, 0), 0)
def assert_hypot_isnan(x, y):
    # Helper: assert that hypot(x, y) is nan.  FP 'invalid' warnings are
    # silenced while evaluating so the check itself cannot raise.
    err = np.seterr(invalid='ignore')
    try:
        assert_(np.isnan(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
    finally:
        np.seterr(**err)
def assert_hypot_isinf(x, y):
    # Helper: assert that hypot(x, y) is infinite.  FP 'invalid' warnings are
    # silenced while evaluating so the check itself cannot raise.
    err = np.seterr(invalid='ignore')
    try:
        assert_(np.isinf(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
    finally:
        np.seterr(**err)
class TestHypotSpecialValues(TestCase):
    """IEEE special-value behaviour of np.hypot.

    BUG FIX: the original class defined ``test_nan_outputs`` twice, so the
    second definition silently shadowed the first and the nan assertions
    were never executed by the test runner.  The second method is renamed
    ``test_nan_outputs2`` so both run.
    """
    def test_nan_outputs(self):
        # hypot(nan, nan) and hypot(nan, finite) are nan.
        assert_hypot_isnan(np.nan, np.nan)
        assert_hypot_isnan(np.nan, 1)

    def test_nan_outputs2(self):
        # An infinite argument dominates even nan: hypot(+-inf, anything) is inf.
        assert_hypot_isinf(np.nan, np.inf)
        assert_hypot_isinf(np.inf, np.nan)
        assert_hypot_isinf(np.inf, 0)
        assert_hypot_isinf(0, np.inf)
def assert_arctan2_isnan(x, y):
    # Helper: assert that arctan2(x, y) is nan.
    assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispinf(x, y):
    # Helper: assert that arctan2(x, y) is positive infinity.
    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isninf(x, y):
    # Helper: assert that arctan2(x, y) is negative infinity.
    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispzero(x, y):
    # Helper: assert that arctan2(x, y) is +0.0 (zero with the sign bit clear).
    assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isnzero(x, y):
    # Helper: assert that arctan2(x, y) is -0.0 (zero with the sign bit set).
    assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
class TestArctan2SpecialValues(TestCase):
    """C99 Annex F special-value behaviour of atan2 (signed zeros, infinities, nan)."""
    def test_one_one(self):
        # atan2(1, 1) returns pi/4.
        assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
        assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
        assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
    def test_zero_nzero(self):
        # atan2(+-0, -0) returns +-pi.
        assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
        assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
    def test_zero_pzero(self):
        # atan2(+-0, +0) returns +-0.
        assert_arctan2_ispzero(np.PZERO, np.PZERO)
        assert_arctan2_isnzero(np.NZERO, np.PZERO)
    def test_zero_negative(self):
        # atan2(+-0, x) returns +-pi for x < 0.
        assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
        assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
    def test_zero_positive(self):
        # atan2(+-0, x) returns +-0 for x > 0.
        assert_arctan2_ispzero(np.PZERO, 1)
        assert_arctan2_isnzero(np.NZERO, 1)
    def test_positive_zero(self):
        # atan2(y, +-0) returns +pi/2 for y > 0.
        assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
        assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
    def test_negative_zero(self):
        # atan2(y, +-0) returns -pi/2 for y < 0.
        assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
    def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
    def test_any_pinf(self):
        # atan2(+-y, +infinity) returns +-0 for finite y > 0.
        assert_arctan2_ispzero(1, np.inf)
        assert_arctan2_isnzero(-1, np.inf)
    def test_inf_any(self):
        # atan2(+-infinity, x) returns +-pi/2 for finite x.
        assert_almost_equal(ncu.arctan2( np.inf, 1),  0.5 * np.pi)
        assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
    def test_inf_ninf(self):
        # atan2(+-infinity, -infinity) returns +-3*pi/4.
        assert_almost_equal(ncu.arctan2( np.inf, -np.inf),  0.75 * np.pi)
        assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
    def test_inf_pinf(self):
        # atan2(+-infinity, +infinity) returns +-pi/4.
        assert_almost_equal(ncu.arctan2( np.inf, np.inf),  0.25 * np.pi)
        assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
    def test_nan_any(self):
        # atan2(nan, x) returns nan for any x, including inf
        assert_arctan2_isnan(np.nan, np.inf)
        assert_arctan2_isnan(np.inf, np.nan)
        assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp(TestCase):
    """np.ldexp(m, e) == m * 2**e for the various accepted exponent dtypes."""
    def _check_ldexp(self, tp):
        # 2 * 2**3 == 16 for each float dtype, with exponent dtype *tp*.
        assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
                                      np.array(3, tp)), 16.)
        assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
                                      np.array(3, tp)), 16.)
        assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
                                      np.array(3, tp)), 16.)
    def test_ldexp(self):
        # The default Python int type should work
        assert_almost_equal(ncu.ldexp(2., 3), 16.)
        # The following int types should all be accepted
        self._check_ldexp(np.int8)
        self._check_ldexp(np.int16)
        self._check_ldexp(np.int32)
        self._check_ldexp('i')
        self._check_ldexp('l')
    @dec.knownfailureif(sys.platform == 'win32' and sys.version_info < (2, 6),
                        "python.org < 2.6 binaries have broken ldexp in the "
                        "C runtime")
    def test_ldexp_overflow(self):
        # silence warning emitted on overflow
        err = np.seterr(over="ignore")
        try:
            imax = np.iinfo(np.dtype('l')).max
            imin = np.iinfo(np.dtype('l')).min
            # Exponent overflow saturates to inf; underflow flushes to zero.
            assert_equal(ncu.ldexp(2., imax), np.inf)
            assert_equal(ncu.ldexp(2., imin), 0)
        finally:
            np.seterr(**err)
class TestMaximum(_FilterInvalids):
    """np.maximum: reductions, nan propagation, complex and object arrays."""
    def test_reduce(self):
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.maximum.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            # maximum propagates nan (contrast with fmax, which skips it)
            assert_equal(func(tmp1), np.nan)
            assert_equal(func(tmp2), np.nan)
    def test_reduce_complex(self):
        # complex comparison is lexicographic: real part first, then imaginary
        assert_equal(np.maximum.reduce([1,2j]),1)
        assert_equal(np.maximum.reduce([1+3j,2j]),1+3j)
    def test_float_nans(self):
        # nan in either operand makes the elementwise result nan
        nan = np.nan
        arg1 = np.array([0,   nan, nan])
        arg2 = np.array([nan, 0,   nan])
        out  = np.array([nan, nan, nan])
        assert_equal(np.maximum(arg1, arg2), out)
    def test_complex_nans(self):
        # nan in either the real or imaginary part poisons the comparison
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
            arg1 = np.array([0, cnan, cnan], dtype=np.complex)
            arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
            out = np.array([nan, nan, nan], dtype=np.complex)
            assert_equal(np.maximum(arg1, arg2), out)
    def test_object_array(self):
        # object arrays fall back to Python comparison
        arg1 = np.arange(5, dtype=np.object)
        arg2 = arg1 + 1
        assert_equal(np.maximum(arg1, arg2), arg2)
class TestMinimum(_FilterInvalids):
    """np.minimum: reductions, nan propagation, complex and object arrays."""
    def test_reduce(self):
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.minimum.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            # minimum propagates nan (contrast with fmin, which skips it)
            assert_equal(func(tmp1), np.nan)
            assert_equal(func(tmp2), np.nan)
    def test_reduce_complex(self):
        # complex comparison is lexicographic: real part first, then imaginary
        assert_equal(np.minimum.reduce([1,2j]),2j)
        assert_equal(np.minimum.reduce([1+3j,2j]),2j)
    def test_float_nans(self):
        # nan in either operand makes the elementwise result nan
        nan = np.nan
        arg1 = np.array([0,   nan, nan])
        arg2 = np.array([nan, 0,   nan])
        out  = np.array([nan, nan, nan])
        assert_equal(np.minimum(arg1, arg2), out)
    def test_complex_nans(self):
        # nan in either the real or imaginary part poisons the comparison
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
            arg1 = np.array([0, cnan, cnan], dtype=np.complex)
            arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
            out = np.array([nan, nan, nan], dtype=np.complex)
            assert_equal(np.minimum(arg1, arg2), out)
    def test_object_array(self):
        # object arrays fall back to Python comparison
        arg1 = np.arange(5, dtype=np.object)
        arg2 = arg1 + 1
        assert_equal(np.minimum(arg1, arg2), arg1)
class TestFmax(_FilterInvalids):
    """np.fmax: like maximum, but prefers the non-nan operand when one is nan."""
    def test_reduce(self):
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.fmax.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 10)
            assert_equal(func(tmp2), 10)
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            # fmax ignores the nans, so the largest remaining number (9) wins
            assert_equal(func(tmp1), 9)
            assert_equal(func(tmp2), 9)
    def test_reduce_complex(self):
        # complex comparison is lexicographic: real part first, then imaginary
        assert_equal(np.fmax.reduce([1,2j]),1)
        assert_equal(np.fmax.reduce([1+3j,2j]),1+3j)
    def test_float_nans(self):
        # nan is only returned when both operands are nan
        nan = np.nan
        arg1 = np.array([0,   nan, nan])
        arg2 = np.array([nan, 0,   nan])
        out  = np.array([0,   0,   nan])
        assert_equal(np.fmax(arg1, arg2), out)
    def test_complex_nans(self):
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
            arg1 = np.array([0, cnan, cnan], dtype=np.complex)
            arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
            out = np.array([0,    0, nan], dtype=np.complex)
            assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(_FilterInvalids):
    """np.fmin: like minimum, but prefers the non-nan operand when one is nan."""
    def test_reduce(self):
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.fmin.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            # fmin ignores the nans, so the smallest remaining number (1) wins
            assert_equal(func(tmp1), 1)
            assert_equal(func(tmp2), 1)
    def test_reduce_complex(self):
        # complex comparison is lexicographic: real part first, then imaginary
        assert_equal(np.fmin.reduce([1,2j]),2j)
        assert_equal(np.fmin.reduce([1+3j,2j]),2j)
    def test_float_nans(self):
        # nan is only returned when both operands are nan
        nan = np.nan
        arg1 = np.array([0,   nan, nan])
        arg2 = np.array([nan, 0,   nan])
        out  = np.array([0,   0,   nan])
        assert_equal(np.fmin(arg1, arg2), out)
    def test_complex_nans(self):
        nan = np.nan
        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)] :
            arg1 = np.array([0, cnan, cnan], dtype=np.complex)
            arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
            out = np.array([0,    0, nan], dtype=np.complex)
            assert_equal(np.fmin(arg1, arg2), out)
class TestFloatingPoint(TestCase):
    """Sanity check: this build reports IEEE floating-point support."""
    def test_floating_point(self):
        assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees(TestCase):
    """np.degrees: radians -> degrees conversion."""
    def test_degrees(self):
        assert_almost_equal(ncu.degrees(np.pi), 180.0)
        assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians(TestCase):
    """np.radians: degrees -> radians conversion."""
    def test_radians(self):
        assert_almost_equal(ncu.radians(180.0), np.pi)
        assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestSign(TestCase):
    """np.sign for inf, nan, zero and finite values, with and without out=."""
    def test_sign(self):
        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
        out = np.zeros(a.shape)
        tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
        olderr = np.seterr(invalid='ignore')  # sign(nan) raises 'invalid'
        try:
            res = ncu.sign(a)
            assert_equal(res, tgt)
            res = ncu.sign(a, out)  # result must also be written into `out`
            assert_equal(res, tgt)
            assert_equal(out, tgt)
        finally:
            np.seterr(**olderr)
class TestSpecialMethods(TestCase):
    """Exercise the ufunc subclassing hooks: ``__array__``,
    ``__array_wrap__``, ``__array_prepare__`` and ``__array_priority__``."""

    def test_wrap(self):
        # __array_wrap__ receives the raw result plus a context tuple of
        # (ufunc, input arguments, output index).
        class with_wrap(object):
            def __array__(self):
                return np.zeros(1)
            def __array_wrap__(self, arr, context):
                r = with_wrap()
                r.arr = arr
                r.context = context
                return r
        a = with_wrap()
        x = ncu.minimum(a, a)
        assert_equal(x.arr, np.zeros(1))
        func, args, i = x.context
        self.assertTrue(func is ncu.minimum)
        self.assertEqual(len(args), 2)
        assert_equal(args[0], a)
        assert_equal(args[1], a)
        self.assertEqual(i, 0)

    def test_wrap_with_iterable(self):
        # test fix for bug #1026: wrapping must also work when the other
        # operand is a plain Python iterable.
        class with_wrap(np.ndarray):
            __array_priority__ = 10
            def __new__(cls):
                return np.asarray(1).view(cls).copy()
            def __array_wrap__(self, arr, context):
                return arr.view(type(self))
        a = with_wrap()
        x = ncu.multiply(a, (1, 2, 3))
        self.assertTrue(isinstance(x, with_wrap))
        assert_array_equal(x, np.array((1, 2, 3)))

    def test_priority_with_scalar(self):
        # test fix for bug #826: a high-priority subclass must win even
        # against a NumPy scalar on the left-hand side.
        class A(np.ndarray):
            __array_priority__ = 10
            def __new__(cls):
                return np.asarray(1.0, 'float64').view(cls).copy()
        a = A()
        x = np.float64(1)*a
        self.assertTrue(isinstance(x, A))
        assert_array_equal(x, np.array(1))

    def test_old_wrap(self):
        # The legacy one-argument (no context) __array_wrap__ signature
        # must still be accepted.
        class with_wrap(object):
            def __array__(self):
                return np.zeros(1)
            def __array_wrap__(self, arr):
                r = with_wrap()
                r.arr = arr
                return r
        a = with_wrap()
        x = ncu.minimum(a, a)
        assert_equal(x.arr, np.zeros(1))

    def test_priority(self):
        # The operand with the highest __array_priority__ decides the
        # type of the result, for binary and unary ufuncs alike.
        class A(object):
            def __array__(self):
                return np.zeros(1)
            def __array_wrap__(self, arr, context):
                r = type(self)()
                r.arr = arr
                r.context = context
                return r
        class B(A):
            __array_priority__ = 20.
        class C(A):
            __array_priority__ = 40.
        x = np.zeros(1)
        a = A()
        b = B()
        c = C()
        f = ncu.minimum
        self.assertTrue(type(f(x, x)) is np.ndarray)
        self.assertTrue(type(f(x, a)) is A)
        self.assertTrue(type(f(x, b)) is B)
        self.assertTrue(type(f(x, c)) is C)
        self.assertTrue(type(f(a, x)) is A)
        self.assertTrue(type(f(b, x)) is B)
        self.assertTrue(type(f(c, x)) is C)
        self.assertTrue(type(f(a, a)) is A)
        self.assertTrue(type(f(a, b)) is B)
        self.assertTrue(type(f(b, a)) is B)
        self.assertTrue(type(f(b, b)) is B)
        self.assertTrue(type(f(b, c)) is C)
        self.assertTrue(type(f(c, b)) is C)
        self.assertTrue(type(f(c, c)) is C)
        # Bug fix: these three assertions used to read
        # ``type(ncu.exp(a) is A)`` -- the ``is`` comparison was
        # evaluated first and ``type()`` of the resulting bool is always
        # truthy, so the assertions were vacuous.  Compare types properly.
        self.assertTrue(type(ncu.exp(a)) is A)
        self.assertTrue(type(ncu.exp(b)) is B)
        self.assertTrue(type(ncu.exp(c)) is C)

    def test_failing_wrap(self):
        # An exception raised inside __array_wrap__ must propagate.
        class A(object):
            def __array__(self):
                return np.zeros(1)
            def __array_wrap__(self, arr, context):
                raise RuntimeError
        a = A()
        self.assertRaises(RuntimeError, ncu.maximum, a, a)

    def test_default_prepare(self):
        # With no __array_prepare__ and a wrap that returns the bare
        # array, the result is a plain ndarray.
        class with_wrap(object):
            __array_priority__ = 10
            def __array__(self):
                return np.zeros(1)
            def __array_wrap__(self, arr, context):
                return arr
        a = with_wrap()
        x = ncu.minimum(a, a)
        assert_equal(x, np.zeros(1))
        assert_equal(type(x), np.ndarray)

    def test_prepare(self):
        class with_prepare(np.ndarray):
            __array_priority__ = 10
            def __array_prepare__(self, arr, context):
                # make sure we can return a new array from the hook
                return np.array(arr).view(type=with_prepare)
        a = np.array(1).view(type=with_prepare)
        x = np.add(a, a)
        assert_equal(x, np.array(2))
        assert_equal(type(x), with_prepare)

    def test_failing_prepare(self):
        # An exception raised inside __array_prepare__ must propagate.
        class A(object):
            def __array__(self):
                return np.zeros(1)
            def __array_prepare__(self, arr, context=None):
                raise RuntimeError
        a = A()
        self.assertRaises(RuntimeError, ncu.maximum, a, a)

    def test_array_with_context(self):
        # __array__ may optionally accept (dtype, context); the context
        # describes the calling ufunc, its arguments and the operand's
        # position among them.
        class A(object):
            def __array__(self, dtype=None, context=None):
                func, args, i = context
                self.func = func
                self.args = args
                self.i = i
                return np.zeros(1)
        class B(object):
            def __array__(self, dtype=None):
                return np.zeros(1, dtype)
        class C(object):
            def __array__(self):
                return np.zeros(1)
        a = A()
        ncu.maximum(np.zeros(1), a)
        self.assertTrue(a.func is ncu.maximum)
        assert_equal(a.args[0], 0)
        self.assertTrue(a.args[1] is a)
        self.assertTrue(a.i == 1)
        # Reduced-signature __array__ variants must also be accepted.
        assert_equal(ncu.maximum(a, B()), 0)
        assert_equal(ncu.maximum(a, C()), 0)
class TestChoose(TestCase):
    """np.choose with a boolean selector mixing an array and a scalar."""

    def test_mixed(self):
        selector = np.array([True, True])
        array_choice = np.array([True, True])
        # True selects the scalar choice (1) at every position.
        assert_equal(np.choose(selector, (array_choice, 1)), np.array([1, 1]))
def is_longdouble_finfo_bogus():
    """Return True when finfo for the long-double complex type reports
    inconsistent values (seen on platforms where long double support is
    broken), so dependent tests can be marked as known failures.
    """
    # ``np.longcomplex`` is a removed alias; ``np.clongdouble`` is the
    # canonical name for the very same type.
    info = np.finfo(np.clongdouble)
    return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
    """Consistency and accuracy checks for complex transcendental ufuncs.

    NOTE: deliberately not a TestCase subclass -- several methods are
    nose-style generator tests that ``yield`` (check, args...) tuples.
    """
    # The complex-capable transcendental ufuncs under test.
    funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
             np.arctanh, np.sin, np.cos, np.tan, np.exp,
             np.exp2, np.log, np.sqrt, np.log10, np.log2,
             np.log1p]

    def test_it(self):
        # For a real argument, the complex version must agree with the
        # real version and have (numerically) zero imaginary part.
        for f in self.funcs:
            # arccosh is only real-valued for arguments >= 1.
            if f is np.arccosh :
                x = 1.5
            else :
                x = .5
            fr = f(x)
            fz = f(np.complex(x))
            assert_almost_equal(fz.real, fr, err_msg='real part %s'%f)
            assert_almost_equal(fz.imag, 0., err_msg='imag part %s'%f)

    def test_precisions_consistent(self) :
        # Single, double and long-double results must agree to (at
        # least) the precision of the narrower type.
        z = 1 + 1j
        for f in self.funcs :
            fcf = f(np.csingle(z))
            fcd = f(np.cdouble(z))
            fcl = f(np.clongdouble(z))
            assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s'%f)
            assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s'%f)

    def test_branch_cuts(self):
        # check branch cuts and continuity on them
        yield _check_branch_cut, np.log, -0.5, 1j, 1, -1
        yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1
        yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1
        yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1
        yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1
        yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1
        yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1
        yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1
        yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1
        yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1
        yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1
        # check against bogus branch cuts: assert continuity between quadrants
        yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1
        yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1
        yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
        yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1
        yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1
        yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1

    @dec.knownfailureif(True, "These branch cuts are known to fail")
    def test_branch_cuts_failing(self):
        # Same checks but with sig_zero_ok=True (signed-zero handling).
        # XXX: signed zero not OK with ICC on 64-bit platform for log, see
        # http://permalink.gmane.org/gmane.comp.python.numeric.general/25335
        yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
        yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
        yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
        yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
        # XXX: signed zeros are not OK for sqrt or for the arc* functions
        yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
        yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True
        yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True
        yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True
        yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True
        yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
        yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True

    def test_against_cmath(self):
        # Spot-check agreement with the stdlib cmath implementations.
        import cmath, sys
        # cmath.asinh is broken in some versions of Python, see
        # http://bugs.python.org/issue1381
        broken_cmath_asinh = False
        if sys.version_info < (2,6):
            broken_cmath_asinh = True
        points = [-1-1j, -1+1j, +1-1j, +1+1j]
        # numpy spells the inverse functions arc*, cmath spells them a*.
        name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
                    'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
        atol = 4*np.finfo(np.complex).eps
        for func in self.funcs:
            fname = func.__name__.split('.')[-1]
            cname = name_map.get(fname, fname)
            try:
                cfunc = getattr(cmath, cname)
            except AttributeError:
                # No cmath counterpart (e.g. exp2, log1p); skip.
                continue
            for p in points:
                a = complex(func(np.complex_(p)))
                b = cfunc(p)
                if cname == 'asinh' and broken_cmath_asinh:
                    continue
                assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b))

    def check_loss_of_precision(self, dtype):
        """Check loss of precision in complex arc* functions"""
        # Check against known-good functions

        info = np.finfo(dtype)
        real_dtype = dtype(0.).real.dtype
        eps = info.eps

        def check(x, rtol):
            # arcsinh/arctanh of a real (or purely imaginary) argument
            # serve as the reference for relative accuracy.
            x = x.astype(real_dtype)

            z = x.astype(dtype)
            d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
                                      'arcsinh'))

            z = (1j*x).astype(dtype)
            d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
                                      'arcsin'))

            z = x.astype(dtype)
            d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
                                      'arctanh'))

            z = (1j*x).astype(dtype)
            d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
                                      'arctan'))

        # The switchover was chosen as 1e-3; hence there can be up to
        # ~eps/1e-3 of relative cancellation error before it

        x_series = np.logspace(-20, -3.001, 200)
        x_basic = np.logspace(-2.999, 0, 10, endpoint=False)

        if dtype is np.longcomplex:
            # It's not guaranteed that the system-provided arc functions
            # are accurate down to a few epsilons. (Eg. on Linux 64-bit)
            # So, give more leeway for long complex tests here:
            check(x_series, 50*eps)
        else:
            check(x_series, 2*eps)
        check(x_basic, 2*eps/1e-3)

        # Check a few points
        # (hard-coded references for arctanh/arcsinh/arctan/arcsin at
        # z = 1e-5*(1+1j))

        z = np.array([1e-5*(1+1j)], dtype=dtype)
        p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
        d = np.absolute(1-np.arctanh(z)/p)
        assert_(np.all(d < 1e-15))

        p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
        d = np.absolute(1-np.arcsinh(z)/p)
        assert_(np.all(d < 1e-15))

        p = 9.999999999333333333e-6j + 1.000000000066666666e-5
        d = np.absolute(1-np.arctan(z)/p)
        assert_(np.all(d < 1e-15))

        p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
        d = np.absolute(1-np.arcsin(z)/p)
        assert_(np.all(d < 1e-15))

        # Check continuity across switchover points

        def check(func, z0, d=1):
            # Evaluate just above and just below z0 along direction d;
            # the function must not jump by more than ~2 eps.
            z0 = np.asarray(z0, dtype=dtype)
            zp = z0 + abs(z0) * d * eps * 2
            zm = z0 - abs(z0) * d * eps * 2
            assert_(np.all(zp != zm), (zp, zm))

            # NB: the cancellation error at the switchover is at least eps
            good = (abs(func(zp) - func(zm)) < 2*eps)

            assert_(np.all(good), (func, z0[~good]))

        # NOTE(review): np.arcsinh appears twice in this tuple; one of
        # the entries may have been meant to be np.arccosh -- confirm
        # against upstream history before changing.
        for func in (np.arcsinh,np.arcsinh,np.arcsin,np.arctanh,np.arctan):
            pts = [rp+1j*ip for rp in (-1e-3,0,1e-3) for ip in(-1e-3,0,1e-3)
                   if rp != 0 or ip != 0]
            check(func, pts, 1)
            check(func, pts, 1j)
            check(func, pts, 1+1j)

    def test_loss_of_precision(self):
        # Generator test: run the precision check for each dtype.
        for dtype in [np.complex64, np.complex_]:
            yield self.check_loss_of_precision, dtype

    @dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
    def test_loss_of_precision_longcomplex(self):
        self.check_loss_of_precision(np.longcomplex)
class TestAttributes(TestCase):
    """Introspection attributes exposed by a ufunc object (np.add)."""

    def test_attributes(self):
        uf = ncu.add
        assert_equal(uf.__name__, 'add')
        # The docstring begins with the generated signature line.
        assert_(uf.__doc__.startswith('add(x1, x2[, out])\n\n'))
        self.assertTrue(uf.ntypes >= 18)  # don't fail if types added
        self.assertTrue('ii->i' in uf.types)
        assert_equal(uf.nin, 2)
        assert_equal(uf.nout, 1)
        assert_equal(uf.identity, 0)
class TestSubclass(TestCase):
    """Arithmetic on a minimal ndarray subclass must preserve values."""

    def test_subclass_op(self):
        class simple(np.ndarray):
            def __new__(subtype, shape):
                # Build an object-dtype array and zero-fill it.
                inst = np.ndarray.__new__(subtype, shape, dtype=object)
                inst.fill(0)
                return inst
        a = simple((3, 4))
        # 0 + 0 == 0 elementwise, so a + a compares equal to a.
        assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=np.complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
x = -x0
x.real[jr] = 0.*dx.real
x.imag[ji] = 0.*dx.imag
x = -x
ym = f(x)
ym = ym[jr | ji]
y0 = y0[jr | ji]
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
def test_copysign():
    """copysign: sign transfer onto finite values, signed zeros and nans."""
    assert_(np.copysign(1, -1) == -1)
    # Dividing by a signed zero raises 'divide'; silence it so the sign
    # of the resulting infinity can be inspected.
    saved = np.seterr(divide="ignore")
    try:
        assert_(1 / np.copysign(0, -1) < 0)
        assert_(1 / np.copysign(0, 1) > 0)
    finally:
        np.seterr(**saved)
    # The sign bit must be transferred onto nan as well.
    assert_(np.signbit(np.copysign(np.nan, -1)))
    assert_(not np.signbit(np.copysign(np.nan, 1)))
def _test_nextafter(t):
one = t(1)
two = t(2)
zero = t(0)
eps = np.finfo(t).eps
assert_(np.nextafter(one, two) - one == eps)
assert_(np.nextafter(one, zero) - one < 0)
assert_(np.isnan(np.nextafter(np.nan, one)))
assert_(np.isnan(np.nextafter(one, np.nan)))
assert_(np.nextafter(one, one) == one)
def test_nextafter():
    """Run the shared nextafter checks for double precision."""
    return _test_nextafter(np.float64)
def test_nextafterf():
    """Run the shared nextafter checks for single precision."""
    return _test_nextafter(np.float32)
@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(),
                    "Long double support buggy on win32 and PPC, ticket 1664.")
def test_nextafterl():
    # Long-double variant; known failure where long double is emulated.
    return _test_nextafter(np.longdouble)
def _test_spacing(t):
err = np.seterr(invalid='ignore')
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
try:
assert_(np.spacing(one) == eps)
assert_(np.isnan(np.spacing(nan)))
assert_(np.isnan(np.spacing(inf)))
assert_(np.isnan(np.spacing(-inf)))
assert_(np.spacing(t(1e30)) != 0)
finally:
np.seterr(**err)
def test_spacing():
    """Run the shared spacing checks for double precision."""
    return _test_spacing(np.float64)
def test_spacingf():
    """Run the shared spacing checks for single precision."""
    return _test_spacing(np.float32)
@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(),
                    "Long double support buggy on win32 and PPC, ticket 1664.")
def test_spacingl():
    # Long-double variant; known failure where long double is emulated.
    return _test_spacing(np.longdouble)
def test_spacing_gfortran():
    """Compare np.spacing against reference values produced by gfortran.

    Reference from this fortran file, built with gfortran 4.3.3 on linux
    32bits::

        PROGRAM test_spacing
            INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
            INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)

            WRITE(*,*) spacing(0.00001_DBL)
            WRITE(*,*) spacing(1.0_DBL)
            WRITE(*,*) spacing(1000._DBL)
            WRITE(*,*) spacing(10500._DBL)

            WRITE(*,*) spacing(0.00001_SGL)
            WRITE(*,*) spacing(1.0_SGL)
            WRITE(*,*) spacing(1000._SGL)
            WRITE(*,*) spacing(10500._SGL)
        END PROGRAM
    """
    ref = {}
    ref[np.float64] = [1.69406589450860068E-021,
                       2.22044604925031308E-016,
                       1.13686837721616030E-013,
                       1.81898940354585648E-012]
    ref[np.float32] = [
        9.09494702E-13,
        1.19209290E-07,
        6.10351563E-05,
        9.76562500E-04]

    # Bug fix: the loop variable used to be named ``dec``, shadowing the
    # ``numpy.testing.dec`` decorator module used elsewhere in this file.
    for dt, decimal in zip([np.float32, np.float64], (10, 20)):
        x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
        assert_array_almost_equal(np.spacing(x), ref[dt], decimal=decimal)
def test_nextafter_vs_spacing():
    """nextafter(x, x+1) - x must agree with spacing(x).

    XXX: spacing does not handle long double yet, hence only float32/64.
    """
    for t in [np.float32, np.float64]:
        for base in [1, 1e-5, 1000]:
            lo = t(base)
            hi = t(base + 1)
            assert_(np.nextafter(lo, hi) - lo == np.spacing(lo))
def test_pos_nan():
    """np.nan must be a positive nan, i.e. with a clear sign bit."""
    assert_(np.signbit(np.nan) == 0)
def test_reduceat():
    """Test bug in reduceat when structured arrays are not copied.

    The strided 'value' field must be handled correctly both on the
    buffered and the unbuffered code paths.
    """
    rec_dtype = np.dtype([('name', 'S11'), ('time', np.int64),
                          ('value', np.float32)])
    records = np.empty([100], dtype=rec_dtype)
    records['name'] = 'Simple'
    records['time'] = 10
    records['value'] = 100

    indx = [0, 7, 15, 25]

    # Reference result computed with plain slicing and reduce.
    expected = []
    for start, stop in zip(indx, indx[1:]):
        expected.append(np.add.reduce(records['value'][start:stop]))
    expected.append(np.add.reduce(records['value'][indx[-1]:]))
    expected = np.array(expected)

    # test buffered -- this should work
    got = np.add.reduceat(records['value'], indx)
    assert_array_almost_equal(got, expected)

    # test no buffer -- this is where the error used to occur
    np.setbufsize(32)
    got = np.add.reduceat(records['value'], indx)
    np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
    assert_array_almost_equal(got, expected)
def test_complex_nan_comparisons():
    """Every comparison involving a complex nan must be False (IEEE)."""
    nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
    fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
            complex(1, 1), complex(-1, -1), complex(0, 0)]

    # Comparing nans raises 'invalid'; suppress it for the duration.
    saved = np.seterr(invalid='ignore')
    try:
        for lhs in nans + fins:
            lhs = np.array([lhs])
            for rhs in nans + fins:
                rhs = np.array([rhs])
                # Only pairs with at least one nan are interesting.
                if np.isfinite(lhs) and np.isfinite(rhs):
                    continue

                assert_equal(lhs < rhs, False, err_msg="%r < %r" % (lhs, rhs))
                assert_equal(lhs > rhs, False, err_msg="%r > %r" % (lhs, rhs))
                assert_equal(lhs <= rhs, False, err_msg="%r <= %r" % (lhs, rhs))
                assert_equal(lhs >= rhs, False, err_msg="%r >= %r" % (lhs, rhs))
                assert_equal(lhs == rhs, False, err_msg="%r == %r" % (lhs, rhs))
    finally:
        np.seterr(**saved)
# Allow running this test file directly via numpy.testing's runner.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
CristinaCristescu/root | bindings/pyroot/_pythonization.py | 53 | 12056 | """ Pythonization API.
"""
# TODO: externalize this (have PythonizationScope and UserPythonizations as
# globals here and picked up from this module
# TODO: set explicit export list
# TODO: move cast to cppyy.lowlevel or some sort
# TODO: remove all need for accessing _backend
def _set_backend( backend ):
global _backend
_backend = backend
def set_pythonization_scope(scope):
    """Select *scope* as the active pythonization scope, creating its
    (initially empty) pythonizor list on first use."""
    registry = _backend.UserPythonizations
    if scope not in registry:
        registry[scope] = []
    _backend.PythonizationScope = scope
def add_pythonization(pythonizor):
    """Takes a callable that should take two arguments -- the class proxy,
    and its C++ name -- and which is called the first time the named
    class is bound.

    A falsy *pythonizor* is silently ignored; a truthy non-callable one
    raises TypeError.
    """
    scope = _backend.PythonizationScope
    if not pythonizor:
        return
    if not callable(pythonizor):
        raise TypeError("given '%s' object is not callable" % str(pythonizor))
    _backend.UserPythonizations[scope].append(pythonizor)
def pin_type(derived_type, base_type):
    """Always bind instances of *derived_type* as *base_type*."""
    _backend.SetTypePinning(derived_type, base_type)
def make_interface(base_type):
    """Pin *base_type* onto itself, i.e. treat it as an interface class."""
    pin_type(base_type, base_type)
def ignore_type_pinning(some_type):
    """Exempt *some_type* from any type pinning configured above."""
    _backend.IgnoreTypePinning(some_type)
def cast(some_object, new_type):
    """Forcibly reinterpret *some_object* as *new_type* and return it."""
    return _backend.Cast(some_object, new_type)
def add_exception_mapping(cpp_exception, py_exception):
    """Translate the given C++ exception into *py_exception* when raised."""
    _backend.UserExceptions[cpp_exception] = py_exception
#--- Pythonization factories --------------------------------------------
def set_gil_policy(match_class, match_method, release_gil=True):
    """Mark matching methods to release (default) or hold the GIL."""
    return set_method_property(match_class, match_method,
                               '_threaded', int(release_gil))
def set_ownership_policy(match_class, match_method, python_owns_result):
    """Declare whether Python owns (and must delete) the results of the
    matched methods."""
    return set_method_property(match_class, match_method,
                               '_creates', int(python_owns_result))
def set_smart_ptr_policy(match_class, match_method, manage_smart_ptr=False):
    """Declare whether smart pointers returned by the matched methods
    should be managed by the bindings."""
    return set_method_property(match_class, match_method,
                               '_manage_smart_ptr', bool(manage_smart_ptr))
# NB: Ideally, we'd use the version commented out below, but for now, we
# make do with the hackier version here.
def rename_attribute(match_class, orig_attribute, new_attribute, keep_orig=False):
    """Return a pythonizor that exposes attributes matching
    *orig_attribute* on classes matching *match_class* under the name
    *new_attribute*, via a property forwarding get/set/delete to the
    original attribute."""
    import re

    def _forward_get(attr):
        def _get(obj):
            return getattr(obj, attr)
        return _get

    def _forward_set(attr):
        def _set(obj, value):
            return setattr(obj, attr, value)
        return _set

    def _forward_del(attr):
        def _del(obj):
            return delattr(obj, attr)
        return _del

    class attribute_pythonizor(object):
        def __init__(self, match_class, orig_attribute, new_attribute, keep_orig):
            self.match_class = re.compile(match_class)
            self.match_attr = re.compile(orig_attribute)
            self.new_attr = new_attribute
            # Kept for future use; the original attribute is currently
            # never removed.
            self.keep_orig = keep_orig

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return
            for attr in dir(obj):
                if self.match_attr.match(attr):
                    prop = property(_forward_get(attr), _forward_set(attr),
                                    _forward_del(attr))
                    setattr(obj, self.new_attr, prop)

    return attribute_pythonizor(match_class, orig_attribute, new_attribute, keep_orig)
# def rename_attribute(match_class, orig_attribute, new_attribute, keep_orig=False):
# class method_pythonizor:
# def __init__(self, match_class, orig_attribute, new_attribute, keep_orig):
# import re
# self.match_class = re.compile(match_class)
# self.match_attr = re.compile(orig_attribute)
# self.new_attr = new_attribute
# self.keep_orig = keep_orig
# def __call__(self, obj, name):
# import sys
# if not self.match_class.match(name):
# return
# sys.stderr.write("%s %s %s %s" % ("!!!", obj, name, "\n"))
# for k in dir(obj): #obj.__dict__:
# if not self.match_attr.match(k): continue
# try:
# tmp = getattr(obj, k)
# except Exception as e:
# continue
# setattr(obj, self.new_attr, tmp)
# if not self.keep_orig: delattr(obj, k)
# return method_pythonizor(match_class, orig_attribute, new_attribute, keep_orig)
# Shared with PyPy:
def add_overload(match_class, match_method, overload):
    """Return a pythonizor that adds *overload* to every method of the
    matched class whose name matches *match_method*."""
    class method_pythonizor(object):
        def __init__(self, match_class, match_method, overload):
            import re
            self.match_class = re.compile(match_class)
            self.match_method = re.compile(match_method)
            self.overload = overload

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return
            for attr in dir(obj):
                # Attribute access may itself raise for lazy proxies;
                # simply skip such entries.
                try:
                    candidate = getattr(obj, attr)
                except:
                    continue
                if not self.match_method.match(attr):
                    continue
                try:
                    candidate.__add_overload__(overload)
                except AttributeError:
                    pass

    return method_pythonizor(match_class, match_method, overload)
def compose_method(match_class, match_method, g):
    """Return a pythonizor that replaces each matching method ``f`` of a
    matched class with the composition ``g(self, f(self, *args, **kw))``."""
    class composition_pythonizor(object):
        def __init__(self, match_class, match_method, g):
            import re
            self.match_class = re.compile(match_class)
            self.match_method = re.compile(match_method)
            self.g = g

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return
            post = self.g
            for attr in obj.__dict__:
                if not self.match_method.match(attr):
                    continue
                try:
                    original = getattr(obj, attr)
                except:
                    continue
                # Bind f and g in a factory so each wrapper captures its
                # own method rather than the loop variable.
                def make_wrapper(f, g):
                    def wrapper(self, *args, **kwargs):
                        return g(self, f(self, *args, **kwargs))
                    return wrapper
                setattr(obj, attr, make_wrapper(original, post))

    return composition_pythonizor(match_class, match_method, g)
def set_method_property(match_class, match_method, prop, value):
    """Return a pythonizor that sets attribute *prop* to *value* on every
    method of the matched class whose name matches *match_method*."""
    class method_pythonizor(object):
        def __init__(self, match_class, match_method, prop, value):
            import re
            self.match_class = re.compile(match_class)
            self.match_method = re.compile(match_method)
            self.prop = prop
            self.value = value

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return
            for attr in dir(obj):
                # Attribute access may raise for lazy proxies; skip those.
                try:
                    candidate = getattr(obj, attr)
                except:
                    continue
                if self.match_method.match(attr):
                    setattr(candidate, self.prop, self.value)

    return method_pythonizor(match_class, match_method, prop, value)
def make_property(match_class, match_get, match_set=None, match_del=None, prop_name=None):
    """Return a pythonizor that turns matched getter/setter/deleter
    methods of a matched class into Python properties.

    If the getter regex contains one capture group, *many* properties
    are created (one per captured name); otherwise a single property
    named *prop_name* is created.  In "many" mode a supplied *prop_name*
    must contain exactly one ``{}`` substitution field.
    """
    class property_pythonizor(object):
        def __init__(self, match_class, match_get, match_set, match_del, prop_name):
            import re
            self.match_class = re.compile(match_class)

            self.match_get = re.compile(match_get)
            # One capture group in the getter pattern means "match many":
            # the group captures the property name.
            match_many_getters = self.match_get.groups == 1

            if match_set:
                self.match_set = re.compile(match_set)
                match_many_setters = self.match_set.groups == 1
                # Getter and setter patterns must agree on single/many mode.
                if match_many_getters ^ match_many_setters:
                    raise ValueError('Must match getters and setters equally')
            else:
                self.match_set = None

            if match_del:
                self.match_del = re.compile(match_del)
                match_many_deleters = self.match_del.groups == 1
                if match_many_getters ^ match_many_deleters:
                    raise ValueError('Must match getters and deleters equally')
            else:
                self.match_del = None

            self.match_many = match_many_getters
            if not (self.match_many or prop_name):
                raise ValueError("If not matching properties by regex, need a property name with exactly one substitution field")
            if self.match_many and prop_name:
                # A name without a substitution field would give every
                # matched property the same name.
                if prop_name.format(').!:(') == prop_name:
                    raise ValueError("If matching properties by regex and providing a property name, the name needs exactly one substitution field")
            self.prop_name = prop_name

        def make_get_del_proxy(self, getter):
            # Property fget/fdel: call the named method with no arguments.
            class proxy(object):
                def __init__(self, getter):
                    self.getter = getter

                def __call__(self, obj):
                    return getattr(obj, self.getter)()

            return proxy(getter)

        def make_set_proxy(self, setter):
            # Property fset: call the named method with the new value.
            class proxy(object):
                def __init__(self, setter):
                    self.setter = setter

                def __call__(self, obj, arg):
                    return getattr(obj, self.setter)(arg)

            return proxy(setter)

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return

            names = []
            named_getters = {}
            named_setters = {}
            named_deleters = {}

            if not self.match_many:
                fget, fset, fdel = None, None, None

            # Collect getters: in "many" mode keyed by the captured name,
            # otherwise the first callable match wins.
            for k in dir(obj):
                match = self.match_get.match(k)
                # Attribute access may raise for lazy proxies; skip those.
                try:
                    tmp = getattr(obj, k)
                except:
                    continue
                if match and hasattr(tmp, '__call__'):
                    if self.match_many:
                        name = match.group(1)
                        named_getters[name] = k
                    else:
                        fget = self.make_get_del_proxy(k)
                        break

            # Collect setters the same way (when a pattern was given).
            if self.match_set:
                for k in dir(obj):
                    match = self.match_set.match(k)
                    try:
                        tmp = getattr(obj, k)
                    except:
                        continue
                    if match and hasattr(tmp, '__call__'):
                        if self.match_many:
                            name = match.group(1)
                            named_setters[name] = k
                        else:
                            fset = self.make_set_proxy(k)
                            break

            # Collect deleters the same way (when a pattern was given).
            if self.match_del:
                for k in dir(obj):
                    match = self.match_del.match(k)
                    try:
                        tmp = getattr(obj, k)
                    except:
                        continue
                    if match and hasattr(tmp, '__call__'):
                        if self.match_many:
                            name = match.group(1)
                            named_deleters[name] = k
                        else:
                            fdel = self.make_get_del_proxy(k)
                            break

            if not self.match_many:
                # Single-property mode: install the one property and stop.
                new_prop = property(fget, fset, fdel)
                setattr(obj, self.prop_name, new_prop)
                return

            # Many mode: build one property per distinct captured name,
            # combining whatever accessors were found for it.
            names += list(named_getters.keys())
            names += list(named_setters.keys())
            names += list(named_deleters.keys())
            names = set(names)

            properties = []
            for name in names:
                if name in named_getters:
                    fget = self.make_get_del_proxy(named_getters[name])
                else:
                    fget = None

                if name in named_setters:
                    fset = self.make_set_proxy(named_setters[name])
                else:
                    fset = None

                if name in named_deleters:
                    fdel = self.make_get_del_proxy(named_deleters[name])
                else:
                    fdel = None

                new_prop = property(fget, fset, fdel)
                if self.prop_name:
                    prop_name = self.prop_name.format(name)
                else:
                    prop_name = name
                setattr(obj, prop_name, new_prop)

    return property_pythonizor(match_class, match_get, match_set, match_del, prop_name)
| lgpl-2.1 |
jpakkane/meson | mesonbuild/compilers/mixins/intel.py | 2 | 5659 | # Copyright 2019 The meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstractions for the Intel Compiler families.
Intel provides both a posix/gcc-like compiler (ICC) and an msvc-like compiler
(ICL).
"""
import os
import typing
from ... import mesonlib
from ..compilers import CompilerType
from .gnu import GnuLikeCompiler
from .visualstudio import VisualStudioLikeCompiler
if typing.TYPE_CHECKING:
import subprocess # noqa: F401
# XXX: avoid circular dependencies
# TODO: this belongs in a posix compiler class
# Map meson optimization levels to the corresponding posix/gcc-style
# flags.  '0' and 'g' intentionally pass no flags (compiler default).
clike_optimization_args = {
    '0': [],
    'g': [],
    '1': ['-O1'],
    '2': ['-O2'],
    '3': ['-O3'],
    's': ['-Os'],
}  # type: typing.Dict[str, typing.List[str]]
# Tested on linux for ICC 14.0.3, 15.0.6, 16.0.4, 17.0.1, 19.0.0
class IntelGnuLikeCompiler(GnuLikeCompiler):
    """Mixin for ICC, the posix/gcc-like Intel compiler driver."""

    def __init__(self, compiler_type: 'CompilerType'):
        super().__init__(compiler_type)
        # As of 19.0.0 ICC doesn't have sanitizer, color, or lto support.
        #
        # It does have IPO, which serves much the same purpose as LOT, but
        # there is an unfortunate rule for using IPO (you can't control the
        # name of the output file) which break assumptions meson makes
        self.base_options = ['b_pch', 'b_lundef', 'b_asneeded', 'b_pgo',
                            'b_coverage', 'b_ndebug', 'b_staticpic', 'b_pie']
        self.id = 'intel'
        self.lang_header = 'none'

    def get_optimization_args(self, optimization_level: str) -> typing.List[str]:
        """Map a meson optimization level to the ICC flags."""
        return clike_optimization_args[optimization_level]

    def get_pch_suffix(self) -> str:
        # ICC precompiled headers use the .pchi extension.
        return 'pchi'

    def get_pch_use_args(self, pch_dir: str, header: str) -> typing.List[str]:
        """Arguments to consume an existing precompiled header, then
        switch back to normal source processing ('-x none')."""
        return ['-pch', '-pch_dir', os.path.join(pch_dir), '-x',
                self.lang_header, '-include', header, '-x', 'none']

    def get_pch_name(self, header_name: str) -> str:
        return os.path.basename(header_name) + '.' + self.get_pch_suffix()

    def openmp_flags(self) -> typing.List[str]:
        # The OpenMP flag spelling changed to -qopenmp in ICC 15.
        if mesonlib.version_compare(self.version, '>=15.0.0'):
            return ['-qopenmp']
        else:
            return ['-openmp']

    def compiles(self, *args, **kwargs) -> typing.Tuple[bool, bool]:
        # This covers a case that .get('foo', []) doesn't, that extra_args is
        # defined and is None
        extra_args = kwargs.get('extra_args') or []
        # Promote "ignored/unknown option" diagnostics to hard errors so
        # feature probes do not silently succeed on unsupported flags.
        kwargs['extra_args'] = [
            extra_args,
            '-diag-error', '10006',  # ignoring unknown option
            '-diag-error', '10148',  # Option not supported
            '-diag-error', '10155',  # ignoring argument required
            '-diag-error', '10156',  # ignoring not argument allowed
            '-diag-error', '10157',  # Ignoring argument of the wrong type
            '-diag-error', '10158',  # Argument must be separate. Can be hit by trying an option like -foo-bar=foo when -foo=bar is a valid option but -foo-bar isn't
            '-diag-error', '1292',   # unknown __attribute__
        ]
        return super().compiles(*args, **kwargs)

    def get_profile_generate_args(self) -> typing.List[str]:
        # Thread-safe profile generation for PGO.
        return ['-prof-gen=threadsafe']

    def get_profile_use_args(self) -> typing.List[str]:
        return ['-prof-use']
class IntelVisualStudioLikeCompiler(VisualStudioLikeCompiler):

    """Abstractions for ICL, the Intel compiler on Windows."""

    def __init__(self, target: str):
        super().__init__(target)
        self.compiler_type = CompilerType.ICC_WIN
        self.id = 'intel-cl'

    def compile(self, code, *, extra_args: typing.Optional[typing.List[str]] = None, **kwargs) -> typing.Iterator['subprocess.Popen']:
        # This covers a case that .get('foo', []) doesn't, that extra_args is
        # defined and is None.  As in the GNU-like variant, turn
        # "ignored option" diagnostics into hard errors during probes.
        if kwargs.get('mode', 'compile') != 'link':
            extra_args = extra_args.copy() if extra_args is not None else []
            extra_args.extend([
                '/Qdiag-error:10006',  # ignoring unknown option
                '/Qdiag-error:10148',  # Option not supported
                '/Qdiag-error:10155',  # ignoring argument required
                '/Qdiag-error:10156',  # ignoring not argument allowed
                '/Qdiag-error:10157',  # Ignoring argument of the wrong type
                '/Qdiag-error:10158',  # Argument must be separate. Can be hit by trying an option like -foo-bar=foo when -foo=bar is a valid option but -foo-bar isn't
            ])
        # NOTE(review): extra_args is keyword-only here but forwarded
        # positionally to the parent -- confirm the parent's signature
        # accepts it in that position.
        return super().compile(code, extra_args, **kwargs)

    def get_toolset_version(self) -> typing.Optional[str]:
        # Avoid circular dependencies....
        from ...environment import search_version

        # ICL provides a cl.exe that returns the version of MSVC it tries to
        # emulate, so we'll get the version from that and pass it to the same
        # function the real MSVC uses to calculate the toolset version.
        _, _, err = mesonlib.Popen_safe(['cl.exe'])
        v1, v2, *_ = search_version(err).split('.')
        version = int(v1 + v2)
        return self._calculate_toolset_version(version)

    def openmp_flags(self) -> typing.List[str]:
        return ['/Qopenmp']
| apache-2.0 |
pklaus/python-inwx-xmlrpc | example.register-domain.py | 1 | 2448 | #!/usr/bin/env python
# -*- encoding: UTF8 -*-
# author: Philipp Klaus, philipp.klaus →AT→ gmail.com
# This file is part of python-inwx-xmlrpc.
#
# python-inwx-xmlrpc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-inwx-xmlrpc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-inwx-xmlrpc. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
####### This is an example of how to use the inwx class to ########
####### list all your domains via the InterNetworX XMLRPC API. ########
from inwx import domrobot, prettyprint
from configuration import get_account_data
def main():
    # NOTE: Python 2 script (print statements, raw_input).
    # Load API endpoint and credentials; True selects interactive/test setup
    # per get_account_data's contract -- TODO confirm against configuration.py.
    api_url, username, password = get_account_data(True)
    # 'en' selects English API messages; the final False is presumably a
    # debug flag -- verify against the domrobot constructor.
    inwx_conn = domrobot(api_url, username, password, 'en', False)
    # Show the account's contacts and nameserver sets before registering.
    print prettyprint.contacts(inwx_conn.contact.list()['contact'])
    print prettyprint.nameserversets(inwx_conn.nameserverset.list()['nsset'])
    print "\nRegister a new domain\n"
    domainname = raw_input('New Domain [e.g. example.com]: ')
    # Ask the registry whether the domain can still be registered.
    check = inwx_conn.domain.check({'domain': domainname})
    if check['domain'][0]['status'] == 'free':
        # Abort unless the user explicitly confirms the registration.
        if raw_input("The domain %s is available. Do you want to register now? [yes/no]: " % domainname) != 'yes': return
        registrant_id = int(raw_input('Please give the ID for the registrant and admin contact [e.g. 1023532]: '))
        admin_id = registrant_id
        # Hard-coded tech/billing contact IDs and the default INWX nameservers.
        tech_id, billing_id = 1,1
        nameservers = ['ns.inwx.de','ns2.inwx.de','ns3.inwx.de']
        reg_result = inwx_conn.domain.create({'domain':domainname, 'registrant': registrant_id, 'admin': admin_id, 'tech': tech_id, 'billing': billing_id, 'ns': nameservers})
        # NOTE(review): success is signalled by a None result here -- confirm
        # against the domain.create API; errors may raise instead.
        if reg_result == None:
            print "Successfully registered the domain."
    else:
        print "Sorry, the domain %s is not available anymore." % domainname
        print "The current status of the domain is '%s'." % check['domain'][0]['status']

if __name__ == '__main__':
    main()
| gpl-3.0 |
ryfeus/lambda-packs | Shapely_numpy/source/numpy/ctypeslib.py | 50 | 14669 | """
============================
``ctypes`` Utility Functions
============================
See Also
---------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
Our result type, an ndarray that must be of type double, be 1-dimensional
and is C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #doctest: +SKIP
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
from __future__ import division, absolute_import, print_function
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
import sys, os
from numpy import integer, ndarray, dtype as _dtype, deprecate, array
from numpy.core.multiarray import _flagdict, flagsobj
# ctypes is optional: when it is missing, export stub callables that raise
# ImportError on use, so this module can still be imported.
try:
    import ctypes
except ImportError:
    ctypes = None

if ctypes is None:
    def _dummy(*args, **kwds):
        """
        Dummy object that raises an ImportError if ctypes is not available.

        Raises
        ------
        ImportError
            If ctypes is not available.

        """
        raise ImportError("ctypes is not available.")
    # All public entry points degrade to the raising stub.
    ctypes_load_library = _dummy
    load_library = _dummy
    as_ctypes = _dummy
    as_array = _dummy
    from numpy import intp as c_intp
    # Without ctypes there is no c_void_p to derive _ndptr from.
    _ndptr_base = object
else:
    import numpy.core._internal as nic
    # ctypes integer type wide enough to hold a pointer (matches np.intp).
    c_intp = nic._getintp_ctype()
    del nic
    _ndptr_base = ctypes.c_void_p
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
    """
    It is possible to load a library using
    >>> lib = ctypes.cdll[<full_path_name>]

    But there are cross-platform considerations, such as library file extensions,
    plus the fact Windows will just load the first library it finds with that name.
    NumPy supplies the load_library function as a convenience.

    Parameters
    ----------
    libname : str
        Name of the library, which can have 'lib' as a prefix,
        but without an extension.
    loader_path : str
        Where the library can be found.

    Returns
    -------
    ctypes.cdll[libpath] : library object
        A ctypes library object

    Raises
    ------
    OSError
        If there is no library with the expected extension, or the
        library is defective and cannot be loaded.
    """
    if ctypes.__version__ < '1.0.1':
        import warnings
        warnings.warn("All features of ctypes interface may not work " \
                      "with ctypes < 1.0.1", stacklevel=2)

    ext = os.path.splitext(libname)[1]
    if not ext:
        # Try to load library with platform-specific name, otherwise
        # default to libname.[so|pyd].  Sometimes, these files are built
        # erroneously on non-linux platforms.
        from numpy.distutils.misc_util import get_shared_lib_extension
        so_ext = get_shared_lib_extension()
        libname_ext = [libname + so_ext]
        # mac, windows and linux >= py3.2 shared library and loadable
        # module have different extensions so try both
        so_ext2 = get_shared_lib_extension(is_python_ext=True)
        if not so_ext2 == so_ext:
            # Prefer the Python-extension suffix when the two differ.
            libname_ext.insert(0, libname + so_ext2)
    else:
        # Caller supplied an explicit extension; use the name verbatim.
        libname_ext = [libname]

    # loader_path may be a directory or a file within the directory to search.
    loader_path = os.path.abspath(loader_path)
    if not os.path.isdir(loader_path):
        libdir = os.path.dirname(loader_path)
    else:
        libdir = loader_path

    # Return the first candidate that exists and loads.
    for ln in libname_ext:
        libpath = os.path.join(libdir, ln)
        if os.path.exists(libpath):
            try:
                return ctypes.cdll[libpath]
            except OSError:
                ## defective lib file
                raise
    ## if no successful return in the libname_ext loop:
    raise OSError("no file with expected extension")

# Deprecated alias kept for backwards compatibility.
ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
                                'load_library')
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
# Flag names recognized by ndpointer, in the order _flags_fromnum reports them.
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
              'OWNDATA', 'UPDATEIFCOPY']
def _flags_fromnum(num):
    """Return the list of flag names whose bits are set in the mask *num*."""
    # Inverse of _num_fromflags: keep every known flag whose bit is present.
    return [key for key in _flagnames if num & _flagdict[key]]
class _ndptr(_ndptr_base):
    """Base for the ndpointer classes: a ctypes void pointer that checks
    ndarray arguments against the class's _dtype_/_ndim_/_shape_/_flags_."""

    def _check_retval_(self):
        """This method is called when this class is used as the .restype
        attribute for a shared-library function.  It constructs a numpy
        array from a void pointer."""
        return array(self)

    @property
    def __array_interface__(self):
        # Describe the pointed-to memory so np.array(self) can wrap it;
        # self.value is the raw address held by the void pointer.
        return {'descr': self._dtype_.descr,
                '__ref': self,
                'strides': None,
                'shape': self._shape_,
                'version': 3,
                'typestr': self._dtype_.descr[0][1],
                'data': (self.value, False),
                }

    @classmethod
    def from_param(cls, obj):
        # ctypes argtype hook: validate the ndarray against every restriction
        # the class was parameterized with, then hand ctypes the .ctypes view.
        if not isinstance(obj, ndarray):
            raise TypeError("argument must be an ndarray")
        if cls._dtype_ is not None \
               and obj.dtype != cls._dtype_:
            raise TypeError("array must have data type %s" % cls._dtype_)
        if cls._ndim_ is not None \
               and obj.ndim != cls._ndim_:
            raise TypeError("array must have %d dimension(s)" % cls._ndim_)
        if cls._shape_ is not None \
               and obj.shape != cls._shape_:
            raise TypeError("array must have shape %s" % str(cls._shape_))
        if cls._flags_ is not None \
               and ((obj.flags.num & cls._flags_) != cls._flags_):
            # All requested flag bits must be set on the array.
            raise TypeError("array must have flags %s" %
                    _flags_fromnum(cls._flags_))
        return obj.ctypes
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
# Cache of previously constructed ndpointer type objects.
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
    """
    Array-checking restype/argtypes.

    An ndpointer instance is used to describe an ndarray in restypes
    and argtypes specifications.  This approach is more flexible than
    using, for example, ``POINTER(c_double)``, since several restrictions
    can be specified, which are verified upon calling the ctypes function.
    These include data type, number of dimensions, shape and flags.  If a
    given array does not satisfy the specified restrictions,
    a ``TypeError`` is raised.

    Parameters
    ----------
    dtype : data-type, optional
        Array data-type.
    ndim : int, optional
        Number of array dimensions.
    shape : tuple of ints, optional
        Array shape.
    flags : str or tuple of str
        Array flags; may be one or more of:

          - C_CONTIGUOUS / C / CONTIGUOUS
          - F_CONTIGUOUS / F / FORTRAN
          - OWNDATA / O
          - WRITEABLE / W
          - ALIGNED / A
          - UPDATEIFCOPY / U

    Returns
    -------
    klass : ndpointer type object
        A type object, which is an ``_ndptr`` instance containing
        dtype, ndim, shape and flags information.

    Raises
    ------
    TypeError
        If a given array does not satisfy the specified restrictions.

    Examples
    --------
    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
    ...                                                  ndim=1,
    ...                                                  flags='C_CONTIGUOUS')]
    ... #doctest: +SKIP
    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
    ... #doctest: +SKIP

    """
    # Normalize the dtype so equivalent specifications share a cache entry.
    if dtype is not None:
        dtype = _dtype(dtype)

    # Reduce ``flags`` to a numeric bitmask ``num`` plus a list of names.
    num = None
    if flags is not None:
        if isinstance(flags, str):
            flags = flags.split(',')
        elif isinstance(flags, (int, integer)):
            num = flags
            flags = _flags_fromnum(num)
        elif isinstance(flags, flagsobj):
            num = flags.num
            flags = _flags_fromnum(num)
        if num is None:
            try:
                flags = [x.strip().upper() for x in flags]
            except Exception:
                # ``flags`` was not an iterable of strings.
                raise TypeError("invalid flags specification")
            num = _num_fromflags(flags)

    # BUG FIX: the cache used to be written with the key
    # (dtype, shape, ndim, num) but read with (dtype, ndim, shape, num), so
    # lookups never hit whenever ndim and shape differed.  Compute the key
    # once and use it for both the lookup and the insert.
    cache_key = (dtype, ndim, shape, num)
    try:
        return _pointer_type_cache[cache_key]
    except KeyError:
        pass

    # Build a descriptive class name from the requested restrictions.
    if dtype is None:
        name = 'any'
    elif dtype.names:
        # Structured dtypes have no short string form; use the object id.
        name = str(id(dtype))
    else:
        name = dtype.str
    if ndim is not None:
        name += "_%dd" % ndim
    if shape is not None:
        try:
            strshape = [str(x) for x in shape]
        except TypeError:
            # A bare integer shape is promoted to a 1-tuple.
            strshape = [str(shape)]
            shape = (shape,)
        shape = tuple(shape)
        name += "_"+"x".join(strshape)
    if flags is not None:
        name += "_"+"_".join(flags)
    else:
        flags = []

    klass = type("ndpointer_%s" % name, (_ndptr,),
                 {"_dtype_": dtype,
                  "_shape_": shape,
                  "_ndim_": ndim,
                  "_flags_": num})
    _pointer_type_cache[cache_key] = klass
    return klass
if ctypes is not None:
    # Short alias used throughout the conversion helpers below.
    ct = ctypes
    ################################################################
    # simple types

    # maps the numpy typecodes like '<f8' to simple ctypes types like
    # c_double. Filled in by prep_simple.
    _typecodes = {}
def prep_simple(simple_type, dtype):
    """Given a ctypes simple type, construct and attach an
    __array_interface__ property to it if it does not yet have one.
    """
    # Already prepped (property present): nothing to do.
    try: simple_type.__array_interface__
    except AttributeError: pass
    else: return

    typestr = _dtype(dtype).str
    # Record the reverse mapping used by as_ctypes.
    _typecodes[typestr] = simple_type

    def __array_interface__(self):
        # Zero-dimensional view onto this ctypes value's own storage.
        return {'descr': [('', typestr)],
                '__ref': self,
                'strides': None,
                'shape': (),
                'version': 3,
                'typestr': typestr,
                'data': (ct.addressof(self), False),
                }

    simple_type.__array_interface__ = property(__array_interface__)
# (ctypes types, numpy kind character) pairs covering the numeric scalars.
simple_types = [
    ((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
    ((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
    ((ct.c_float, ct.c_double), "f"),
]

# Prep that numerical ctypes types:
# dtype string is kind + size in bytes, e.g. c_double -> "f8".
for types, code in simple_types:
    for tp in types:
        prep_simple(tp, "%c%d" % (code, ct.sizeof(tp)))
################################################################
# array types

# The metatype shared by all ctypes array types (e.g. c_int * 4).
_ARRAY_TYPE = type(ct.c_int * 1)

def prep_array(array_type):
    """Given a ctypes array type, construct and attach an
    __array_interface__ property to it if it does not yet have one.
    """
    # Already prepped: nothing to do.
    try: array_type.__array_interface__
    except AttributeError: pass
    else: return

    # Walk nested array types to collect the full shape; ``ob`` ends up as
    # the innermost element type.
    shape = []
    ob = array_type
    while type(ob) is _ARRAY_TYPE:
        shape.append(ob._length_)
        ob = ob._type_
    shape = tuple(shape)
    # Instantiate the scalar element type to borrow its interface dict.
    ai = ob().__array_interface__
    descr = ai['descr']
    typestr = ai['typestr']

    def __array_interface__(self):
        return {'descr': descr,
                '__ref': self,
                'strides': None,
                'shape': shape,
                'version': 3,
                'typestr': typestr,
                'data': (ct.addressof(self), False),
                }

    array_type.__array_interface__ = property(__array_interface__)
def prep_pointer(pointer_obj, shape):
    """Given a ctypes pointer object, construct and
    attach an __array_interface__ property to it if it does not
    yet have one.
    """
    # Already prepped: nothing to do.
    try: pointer_obj.__array_interface__
    except AttributeError: pass
    else: return

    # The pointed-to object determines the element dtype; the caller must
    # supply the shape since a bare pointer carries no length information.
    contents = pointer_obj.contents
    dtype = _dtype(type(contents))

    inter = {'version': 3,
             'typestr': dtype.str,
             'data': (ct.addressof(contents), False),
             'shape': shape}

    # Note: set on the instance (not a class property), unlike prep_simple
    # and prep_array.
    pointer_obj.__array_interface__ = inter
################################################################
# public functions

def as_array(obj, shape=None):
    """Create a numpy array from a ctypes array or a ctypes POINTER.
    The numpy array shares the memory with the ctypes object.

    The ``shape`` parameter must be given if converting from a ctypes
    POINTER; it is ignored if converting from a ctypes array.
    """
    tp = type(obj)
    try: tp.__array_interface__
    except AttributeError:
        # Lazily attach the interface: pointers are recognized by their
        # ``contents`` attribute, everything else is treated as an array type.
        if hasattr(obj, 'contents'):
            prep_pointer(obj, shape)
        else:
            prep_array(tp)
    # copy=False: the returned ndarray aliases the ctypes memory.
    return array(obj, copy=False)
def as_ctypes(obj):
    """Create and return a ctypes object from a numpy array.  Actually
    anything that exposes the __array_interface__ is accepted."""
    ai = obj.__array_interface__
    # Only contiguous (strides=None or empty), writable, v3 interfaces work.
    if ai["strides"]:
        raise TypeError("strided arrays not supported")
    if ai["version"] != 3:
        raise TypeError("only __array_interface__ version 3 supported")
    addr, readonly = ai["data"]
    if readonly:
        raise TypeError("readonly arrays unsupported")
    # Build the nested ctypes array type from the innermost dimension out.
    tp = _typecodes[ai["typestr"]]
    for dim in ai["shape"][::-1]:
        tp = tp * dim
    result = tp.from_address(addr)
    # Keep the interface dict (and via '__ref' the source object) alive as
    # long as the ctypes object exists, so the memory is not freed early.
    result.__keep = ai
    return result
| mit |
Zarokka/exaile | plugins/somafm/__init__.py | 1 | 6721 | # Copyright (C) 2012 Rocco Aliberti
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import logging
logger = logging.getLogger(__name__)
import os
from urllib2 import urlparse
import httplib
import socket
try:
import xml.etree.cElementTree as ETree
except ImportError:
import xml.etree.ElementTree as ETree
from xl import (
event,
main,
playlist,
xdg
)
from xl.radio import *
from xl.nls import gettext as _
from xlgui.panel import radio
# Module-level handle on the registered station instance; set by _enable()
# and cleared by disable().
STATION = None
def enable(exaile):
    """Plugin entry point: register the station once Exaile is ready."""
    if not exaile.loading:
        # Exaile is already up; register immediately.
        _enable(None, exaile, None)
    else:
        # Defer registration until the application has finished loading.
        event.add_callback(_enable, 'exaile_loaded')
def _enable(o1, exaile, o2):
    # Create the station singleton and register it with Exaile's radio panel.
    # o1/o2 absorb the extra arguments passed by the event callback signature.
    global STATION
    STATION = SomaFMRadioStation()
    exaile.radio.add_station(STATION)
def disable(exaile):
    # Unregister the station and drop the module-level reference.
    global STATION
    exaile.radio.remove_station(STATION)
    STATION = None
def set_status(message, timeout = 0):
    # Forward status-bar updates to the radio panel helper.
    radio.set_status(message, timeout)
class SomaFMRadioStation(RadioStation):
    """
    Radio station exposing the SomaFM channel list to Exaile.

    Channel ids and names are cached on disk (somafm.cache) so the station
    list can be shown without hitting the network on every startup; the raw
    channels.xml document is additionally cached in memory per session.
    """
    name = "somafm"

    def __init__(self):
        """
        Initializes the somafm radio station
        """
        self.user_agent = main.exaile().get_user_agent_string('somafm')
        self.somafm_url = 'http://somafm.com/'
        self.channels_xml_url = self.somafm_url + 'channels.xml'
        self.cache_file = os.path.join(xdg.get_cache_dir(),'somafm.cache')
        # FIX: this was previously spelled ``self.channelist`` here and in
        # _get_stations() while get_lists() wrote ``self.channellist``, so
        # the downloaded channels.xml was never shared between the two code
        # paths and could be fetched twice.  One consistent name is used now.
        self.channellist = ''
        self.data = {}
        self._load_cache()
        self.subs = {}
        self.playlists = {}
        self.playlist_id = 0
        logger.debug(self.user_agent)

    def get_document(self, url):
        """
        Connects to the server and retrieves the document

        :param url: full URL to fetch
        :returns: the response body as a string
        :raises radio.RadioException: on socket errors or non-200 responses
        """
        set_status(_('Contacting SomaFM server...'))
        hostinfo = urlparse.urlparse(url)
        try:
            c = httplib.HTTPConnection(hostinfo.netloc, timeout = 20)
        except TypeError:
            # Older Python versions whose HTTPConnection has no timeout arg.
            c = httplib.HTTPConnection(hostinfo.netloc)
        try:
            c.request('GET', hostinfo.path, headers={'User-Agent':
                self.user_agent})
            response = c.getresponse()
        except (socket.timeout, socket.error):
            raise radio.RadioException(_('Error connecting to SomaFM server.'))
        if response.status != 200:
            raise radio.RadioException(_('Error connecting to SomaFM server.'))
        document = response.read()
        c.close()
        set_status('')
        return document

    def _load_cache(self):
        """
        Loads somafm data from cache
        """
        self.data = {}
        if os.path.isfile(self.cache_file):
            tree = ETree.parse(self.cache_file)
            for channel in tree.findall('channel'):
                self.data[channel.get("id")] = channel.get("name")

    def _save_cache(self):
        """
        Saves cache data
        """
        channellist = ETree.Element('channellist')
        for id, name in self.data.items():
            channel = ETree.SubElement(channellist, 'channel', id=id, name=name)
        with open(self.cache_file, 'w') as h:
            # FIX: tostring() with an encoding argument already emits an XML
            # declaration; the previous code wrote a second declaration by
            # hand first, producing a file ElementTree refuses to re-parse.
            h.write(ETree.tostring(channellist, 'utf-8'))

    def get_lists(self, no_cache = False):
        """
        Returns the rlists for somafm

        :param no_cache: when True, refetch channels.xml even if a disk
            cache exists
        """
        if no_cache or not self.data:
            self.channellist = self.get_document(self.channels_xml_url)
            data = {}
            tree = ETree.fromstring(self.channellist)
            for channel in tree.findall('channel'):
                name = channel.find('title').text
                data[channel.get("id")] = name
            self.data = data
            self._save_cache()
        else:
            data = self.data

        rlists = []
        for id, name in data.items():
            rlist = RadioList(name, station = self)
            # Bind the current id as a default argument so each list keeps
            # its own channel id (late-binding closure pitfall).
            rlist.get_items = lambda no_cache, id = id: \
                self._get_subrlists(id = id, no_cache = no_cache)
            rlists.append(rlist)

        # Sort the lists alphabetically by channel name.
        sort_list = [(item.name, item) for item in rlists]
        sort_list.sort()
        rlists = [item[1] for item in sort_list]
        self.rlists = rlists
        return rlists

    def _get_subrlists(self, id, no_cache = False):
        """
        Gets the subrlists for a rlist
        """
        if no_cache or id not in self.subs:
            rlists = self._get_stations(id)
            # Sort playlist entries by display name, then memoize them.
            sort_list = [(item.name, item) for item in rlists]
            sort_list.sort()
            rlists = [item[1] for item in sort_list]
            self.subs[id] = rlists
        return self.subs[id]

    def _get_playlist(self, url, playlist_id):
        """
        Gets the playlist for the given url and id
        """
        if playlist_id not in self.playlists:
            set_status(_('Contacting SomaFM server...'))
            try:
                self.playlists[playlist_id] = playlist.import_playlist(url)
            except Exception:
                # NOTE(review): if the import fails, the lookup below raises
                # KeyError after logging -- confirm callers expect that.
                set_status(_("Error importing playlist"))
                logger.exception("Error importing playlist")
            set_status('')
        return self.playlists[playlist_id]

    def _get_stations(self, id):
        """
        Builds the RadioItems (one per playlist format/quality) for the
        channel with the given id.
        """
        if not self.channellist:
            self.channellist = self.get_document(self.channels_xml_url)
        tree = ETree.fromstring(self.channellist)
        channel = tree.find('.//channel[@id="%s"]' % id)
        # Every element carrying a "format" attribute is a playlist link.
        plss = channel.findall('.//*[@format]')
        rlists = []
        i = 1
        for pls in plss:
            type = pls.tag.replace('pls','')
            format = pls.attrib['format'].upper()
            url = pls.text
            display_name = format + " - " + type
            rlist = RadioItem(display_name, station = self)
            rlist.format = format
            # Bind url and the current playlist id as defaults (see
            # get_lists for the closure rationale).
            rlist.get_playlist = lambda url = url,\
                playlist_id = self.playlist_id :\
                self._get_playlist(url, playlist_id)
            self.playlist_id += 1
            rlists.append(rlist)
        return rlists

    def get_menu(self, parent):
        # Reuse the parent panel's context menu.
        return parent.get_menu()
| gpl-2.0 |
perkinslr/pypyjs | addedLibraries/twisted/internet/test/connectionmixins.py | 39 | 20175 | # -*- test-case-name: twisted.internet.test.test_tcp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various helpers for tests for connection-oriented transports.
"""
from __future__ import division, absolute_import
import socket
from gc import collect
from weakref import ref
from zope.interface.verify import verifyObject
from twisted.python import context, log
from twisted.python.failure import Failure
from twisted.python.runtime import platform
from twisted.python.log import ILogContext, msg, err
from twisted.internet.defer import Deferred, gatherResults
from twisted.internet.interfaces import IConnector, IReactorFDSet
from twisted.internet.protocol import ClientFactory, Protocol, ServerFactory
from twisted.trial.unittest import SkipTest
from twisted.internet.test.reactormixins import needsRunningReactor
from twisted.test.test_tcp import ClosingProtocol
def findFreePort(interface='127.0.0.1', family=socket.AF_INET,
                 type=socket.SOCK_STREAM):
    """
    Ask the platform to allocate a free port on the specified interface,
    then release the socket and return the address which was allocated.

    @param interface: The local address to try to bind the port on.
    @type interface: C{str}

    @param family: The address family of the probe socket (e.g.
        C{socket.AF_INET}).

    @param type: The socket type which will use the resulting port.

    @return: A two-tuple of address and port, like that returned by
        L{socket.getsockname}.
    """
    # Resolve the interface to a concrete sockaddr for this family.
    addressInfo = socket.getaddrinfo(interface, 0)
    sockAddr = addressInfo[0][4]
    probe = socket.socket(family, type)
    try:
        # Binding to port 0 makes the OS pick an unused port for us.
        probe.bind(sockAddr)
        return probe.getsockname()
    finally:
        # Always release the probe socket so the port is actually free.
        probe.close()
class ConnectableProtocol(Protocol):
    """
    A protocol to be used with L{runProtocolsWithReactor}.

    The protocol and its pair should eventually disconnect from each other.

    @ivar reactor: The reactor used in this test.
    @ivar disconnectReason: The L{Failure} passed to C{connectionLost}.
    @ivar _done: A L{Deferred} which will be fired when the connection is
        lost.
    """
    disconnectReason = None

    def _setAttributes(self, reactor, done):
        """
        Set attributes on the protocol that are known only externally; this
        will be called by L{runProtocolsWithReactor} when this protocol is
        instantiated.

        @param reactor: The reactor used in this test.
        @param done: A L{Deferred} which will be fired when the connection is
            lost.
        """
        self.reactor = reactor
        self._done = done

    def connectionLost(self, reason):
        # Record why we disconnected and signal completion exactly once;
        # the Deferred is dropped afterwards, so a second call would raise
        # AttributeError rather than fire twice.
        self.disconnectReason = reason
        self._done.callback(None)
        del self._done
class EndpointCreator:
    """
    Create client and server endpoints that know how to connect to each other.

    Subclasses must override both methods; this base only defines the
    interface used by the connection tests.
    """
    def server(self, reactor):
        """
        Return an object providing C{IStreamServerEndpoint} for use in creating
        a server to use to establish the connection type to be tested.
        """
        raise NotImplementedError()

    def client(self, reactor, serverAddress):
        """
        Return an object providing C{IStreamClientEndpoint} for use in creating
        a client to use to establish the connection type to be tested.
        """
        raise NotImplementedError()
class _SingleProtocolFactory(ClientFactory):
    """
    Factory to be used by L{runProtocolsWithReactor}.

    It always returns the same protocol (i.e. is intended for only a single
    connection).
    """

    def __init__(self, protocol):
        # The one protocol instance this factory will ever hand out.
        self._protocol = protocol

    def buildProtocol(self, addr):
        # addr is ignored: every connection gets the same instance.
        return self._protocol
def runProtocolsWithReactor(reactorBuilder, serverProtocol, clientProtocol,
                            endpointCreator):
    """
    Connect two protocols using endpoints and a new reactor instance.

    A new reactor will be created and run, with the client and server protocol
    instances connected to each other using the given endpoint creator. The
    protocols should run through some set of tests, then disconnect; when both
    have disconnected the reactor will be stopped and the function will
    return.

    @param reactorBuilder: A L{ReactorBuilder} instance.
    @param serverProtocol: A L{ConnectableProtocol} that will be the server.
    @param clientProtocol: A L{ConnectableProtocol} that will be the client.
    @param endpointCreator: An instance of L{EndpointCreator}.

    @return: The reactor run by this test.
    """
    reactor = reactorBuilder.buildReactor()
    # Hand each protocol the reactor and a Deferred that fires on disconnect.
    serverProtocol._setAttributes(reactor, Deferred())
    clientProtocol._setAttributes(reactor, Deferred())
    serverFactory = _SingleProtocolFactory(serverProtocol)
    clientFactory = _SingleProtocolFactory(clientProtocol)

    # Listen on a port:
    serverEndpoint = endpointCreator.server(reactor)
    d = serverEndpoint.listen(serverFactory)

    # Connect to the port:
    def gotPort(p):
        # Use the bound address so we connect to whatever port was assigned.
        clientEndpoint = endpointCreator.client(
            reactor, p.getHost())
        return clientEndpoint.connect(clientFactory)
    d.addCallback(gotPort)

    # Stop reactor when both connections are lost:
    def failed(result):
        log.err(result, "Connection setup failed.")

    disconnected = gatherResults([serverProtocol._done, clientProtocol._done])
    d.addCallback(lambda _: disconnected)
    d.addErrback(failed)

    # Schedule the stop via needsRunningReactor in case everything completed
    # synchronously before the reactor actually started.
    d.addCallback(lambda _: needsRunningReactor(reactor, reactor.stop))

    reactorBuilder.runReactor(reactor)
    return reactor
def _getWriters(reactor):
    """
    Like L{IReactorFDSet.getWriters}, but with support for IOCP reactor as
    well.
    """
    # Most reactors expose their writer set through IReactorFDSet.
    if IReactorFDSet.providedBy(reactor):
        return reactor.getWriters()
    # The IOCP reactor keeps its equivalent state in ``handles``.
    if 'IOCP' in reactor.__class__.__name__:
        return reactor.handles
    # Cannot tell what is going on.
    raise Exception("Cannot find writers on %r" % (reactor,))
class _AcceptOneClient(ServerFactory):
    """
    This factory fires a L{Deferred} with a protocol instance shortly after it
    is constructed (hopefully long enough afterwards so that it has been
    connected to a transport).

    @ivar reactor: The reactor used to schedule the I{shortly}.
    @ivar result: A L{Deferred} which will be fired with the protocol instance.
    """
    def __init__(self, reactor, result):
        self.reactor = reactor
        self.result = result

    def buildProtocol(self, addr):
        protocol = ServerFactory.buildProtocol(self, addr)
        # Fire on the next reactor iteration so the protocol has (probably)
        # been attached to its transport by the time callbacks run.
        self.reactor.callLater(0, self.result.callback, protocol)
        return protocol
class _SimplePullProducer(object):
    """
    A pull producer which writes one byte whenever it is resumed.  For use by
    C{test_unregisterProducerAfterDisconnect}.
    """
    def __init__(self, consumer):
        # The transport (or other consumer) to write into when resumed.
        self.consumer = consumer

    def stopProducing(self):
        # Nothing to tear down; this producer holds no resources.
        pass

    def resumeProducing(self):
        log.msg("Producer.resumeProducing")
        self.consumer.write(b'x')
class Stop(ClientFactory):
    """
    A client factory which stops a reactor when a connection attempt fails.

    @ivar failReason: The L{Failure} describing why the connection failed,
        or C{None} if no failure has occurred yet.
    """
    failReason = None

    def __init__(self, reactor):
        self.reactor = reactor

    def clientConnectionFailed(self, connector, reason):
        # Remember the failure for assertions, then end the test run.
        self.failReason = reason
        msg("Stop(CF) cCFailed: %s" % (reason.getErrorMessage(),))
        self.reactor.stop()
class ClosingLaterProtocol(ConnectableProtocol):
    """
    ClosingLaterProtocol exchanges one byte with its peer and then disconnects
    itself.  This is mostly a work-around for the fact that connectionMade is
    called before the SSL handshake has completed.
    """
    def __init__(self, onConnectionLost):
        # Set when the connection drops; fired into onConnectionLost.
        self.lostConnectionReason = None
        self.onConnectionLost = onConnectionLost

    def connectionMade(self):
        msg("ClosingLaterProtocol.connectionMade")

    def dataReceived(self, bytes):
        # First data from the peer: the exchange is done, hang up.
        msg("ClosingLaterProtocol.dataReceived %r" % (bytes,))
        self.transport.loseConnection()

    def connectionLost(self, reason):
        msg("ClosingLaterProtocol.connectionLost")
        self.lostConnectionReason = reason
        self.onConnectionLost.callback(self)
class ConnectionTestsMixin(object):
    """
    This mixin defines test methods which should apply to most L{ITransport}
    implementations.
    """
    # This should be a reactormixins.EndpointCreator instance.
    endpoints = None

    def test_logPrefix(self):
        """
        Client and server transports implement L{ILoggingContext.logPrefix} to
        return a message reflecting the protocol they are running.
        """
        class CustomLogPrefixProtocol(ConnectableProtocol):
            def __init__(self, prefix):
                self._prefix = prefix
                self.system = None

            def connectionMade(self):
                self.transport.write(b"a")

            def logPrefix(self):
                return self._prefix

            def dataReceived(self, bytes):
                # Capture the log system active while handling data; it
                # should include the prefix returned by logPrefix().
                self.system = context.get(ILogContext)["system"]
                self.transport.write(b"b")
                # Only close connection if both sides have received data, so
                # that both sides have system set.
                if b"b" in bytes:
                    self.transport.loseConnection()

        client = CustomLogPrefixProtocol("Custom Client")
        server = CustomLogPrefixProtocol("Custom Server")
        runProtocolsWithReactor(self, server, client, self.endpoints)

        self.assertIn("Custom Client", client.system)
        self.assertIn("Custom Server", server.system)

    def test_writeAfterDisconnect(self):
        """
        After a connection is disconnected, L{ITransport.write} and
        L{ITransport.writeSequence} are no-ops.
        """
        reactor = self.buildReactor()

        finished = []

        serverConnectionLostDeferred = Deferred()
        protocol = lambda: ClosingLaterProtocol(serverConnectionLostDeferred)
        portDeferred = self.endpoints.server(reactor).listen(
            ServerFactory.forProtocol(protocol))
        def listening(port):
            msg("Listening on %r" % (port.getHost(),))
            endpoint = self.endpoints.client(reactor, port.getHost())

            lostConnectionDeferred = Deferred()
            protocol = lambda: ClosingLaterProtocol(lostConnectionDeferred)
            client = endpoint.connect(ClientFactory.forProtocol(protocol))
            def write(proto):
                msg("About to write to %r" % (proto,))
                proto.transport.write(b'x')
            client.addCallbacks(write, lostConnectionDeferred.errback)

            def disconnected(proto):
                # Writing after the connection is gone must be silently
                # ignored rather than raising.
                msg("%r disconnected" % (proto,))
                proto.transport.write(b"some bytes to get lost")
                proto.transport.writeSequence([b"some", b"more"])
                finished.append(True)

            lostConnectionDeferred.addCallback(disconnected)
            serverConnectionLostDeferred.addCallback(disconnected)
            return gatherResults([lostConnectionDeferred,
                                  serverConnectionLostDeferred])

        def onListen():
            portDeferred.addCallback(listening)
            portDeferred.addErrback(err)
            portDeferred.addCallback(lambda ignored: reactor.stop())
        needsRunningReactor(reactor, onListen)

        self.runReactor(reactor)
        # Both the client and the server side must have reached the
        # post-disconnect write without error.
        self.assertEqual(finished, [True, True])

    def test_protocolGarbageAfterLostConnection(self):
        """
        After the connection a protocol is being used for is closed, the
        reactor discards all of its references to the protocol.
        """
        lostConnectionDeferred = Deferred()
        clientProtocol = ClosingLaterProtocol(lostConnectionDeferred)
        # A weakref lets us check liveness without keeping the object alive.
        clientRef = ref(clientProtocol)

        reactor = self.buildReactor()
        portDeferred = self.endpoints.server(reactor).listen(
            ServerFactory.forProtocol(Protocol))
        def listening(port):
            msg("Listening on %r" % (port.getHost(),))
            endpoint = self.endpoints.client(reactor, port.getHost())

            client = endpoint.connect(
                ClientFactory.forProtocol(lambda: clientProtocol))
            def disconnect(proto):
                msg("About to disconnect %r" % (proto,))
                proto.transport.loseConnection()
            client.addCallback(disconnect)
            client.addErrback(lostConnectionDeferred.errback)
            return lostConnectionDeferred

        def onListening():
            portDeferred.addCallback(listening)
            portDeferred.addErrback(err)
            portDeferred.addBoth(lambda ignored: reactor.stop())
        needsRunningReactor(reactor, onListening)

        self.runReactor(reactor)

        # Drop the reference and get the garbage collector to tell us if there
        # are no references to the protocol instance left in the reactor.
        clientProtocol = None
        collect()
        self.assertIs(None, clientRef())
class LogObserverMixin(object):
    """
    Mixin for L{TestCase} subclasses which want to observe log events.
    """
    def observe(self):
        # Collect every log event emitted during the test; the cleanup
        # removes the observer again when the test finishes.
        loggedMessages = []
        log.addObserver(loggedMessages.append)
        self.addCleanup(log.removeObserver, loggedMessages.append)
        return loggedMessages
class BrokenContextFactory(object):
    """
    A context factory whose C{getContext} method always fails, for
    exercising the error handling around broken context factories.
    """
    # The message carried by the ValueError raised from getContext.
    message = "Some path was wrong maybe"

    def getContext(self):
        # Deliberately blow up so callers can verify their error handling.
        failure = ValueError(self.message)
        raise failure
class StreamClientTestsMixin(object):
"""
This mixin defines tests applicable to SOCK_STREAM client implementations.
This must be mixed in to a L{ReactorBuilder
<twisted.internet.test.reactormixins.ReactorBuilder>} subclass, as it
depends on several of its methods.
Then the methods C{connect} and C{listen} must defined, defining a client
and a server communicating with each other.
"""
def test_interface(self):
"""
The C{connect} method returns an object providing L{IConnector}.
"""
reactor = self.buildReactor()
connector = self.connect(reactor, ClientFactory())
self.assertTrue(verifyObject(IConnector, connector))
def test_clientConnectionFailedStopsReactor(self):
"""
The reactor can be stopped by a client factory's
C{clientConnectionFailed} method.
"""
reactor = self.buildReactor()
needsRunningReactor(
reactor, lambda: self.connect(reactor, Stop(reactor)))
self.runReactor(reactor)
def test_connectEvent(self):
"""
This test checks that we correctly get notifications event for a
client. This ought to prevent a regression under Windows using the
GTK2 reactor. See #3925.
"""
reactor = self.buildReactor()
self.listen(reactor, ServerFactory.forProtocol(Protocol))
connected = []
class CheckConnection(Protocol):
def connectionMade(self):
connected.append(self)
reactor.stop()
clientFactory = Stop(reactor)
clientFactory.protocol = CheckConnection
needsRunningReactor(
reactor, lambda: self.connect(reactor, clientFactory))
reactor.run()
self.assertTrue(connected)
    def test_unregisterProducerAfterDisconnect(self):
        """
        If a producer is unregistered from a transport after the transport has
        been disconnected (by the peer) and after C{loseConnection} has been
        called, the transport is not re-added to the reactor as a writer as
        would be necessary if the transport were still connected.
        """
        reactor = self.buildReactor()
        # ClosingProtocol (defined elsewhere) presumably closes each incoming
        # connection, producing the peer disconnect this test relies on.
        self.listen(reactor, ServerFactory.forProtocol(ClosingProtocol))
        # Fires once the client has observed the disconnect and unregistered
        # its producer; stops the reactor on success or failure.
        finished = Deferred()
        finished.addErrback(log.err)
        finished.addCallback(lambda ign: reactor.stop())
        # Records whether the transport was still in the reactor's writer set
        # after unregisterProducer -- it must not be (the bug under test).
        writing = []
        class ClientProtocol(Protocol):
            """
            Protocol to connect, register a producer, try to lose the
            connection, wait for the server to disconnect from us, and then
            unregister the producer.
            """
            def connectionMade(self):
                log.msg("ClientProtocol.connectionMade")
                # streaming=False registers a pull producer.
                self.transport.registerProducer(
                    _SimplePullProducer(self.transport), False)
                self.transport.loseConnection()
            def connectionLost(self, reason):
                log.msg("ClientProtocol.connectionLost")
                self.unregister()
                writing.append(self.transport in _getWriters(reactor))
                finished.callback(None)
            def unregister(self):
                log.msg("ClientProtocol unregister")
                self.transport.unregisterProducer()
        clientFactory = ClientFactory()
        clientFactory.protocol = ClientProtocol
        self.connect(reactor, clientFactory)
        self.runReactor(reactor)
self.assertFalse(writing[0],
"Transport was writing after unregisterProducer.")
    def test_disconnectWhileProducing(self):
        """
        If C{loseConnection} is called while a producer is registered with the
        transport, the connection is closed after the producer is unregistered.
        """
        reactor = self.buildReactor()
        # For some reason, pyobject/pygtk will not deliver the close
        # notification that should happen after the unregisterProducer call in
        # this test. The selectable is in the write notification set, but no
        # notification ever arrives. Probably for the same reason #5233 led
        # win32eventreactor to be broken.
        skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
        reactorClassName = reactor.__class__.__name__
        if reactorClassName in skippedReactors and platform.isWindows():
            raise SkipTest(
                "A pygobject/pygtk bug disables this functionality "
                "on Windows.")
        # Minimal pull producer (registered with streaming=False below);
        # while registered, the transport may not finish closing.
        class Producer:
            def resumeProducing(self):
                log.msg("Producer.resumeProducing")
        self.listen(reactor, ServerFactory.forProtocol(Protocol))
        # Fires when the client's connection is actually lost (success) or
        # after a timeout (failure); stops the reactor either way.
        finished = Deferred()
        finished.addErrback(log.err)
        finished.addCallback(lambda ign: reactor.stop())
        class ClientProtocol(Protocol):
            """
            Protocol to connect, register a producer, try to lose the
            connection, unregister the producer, and wait for the connection to
            actually be lost.
            """
            def connectionMade(self):
                log.msg("ClientProtocol.connectionMade")
                self.transport.registerProducer(Producer(), False)
                self.transport.loseConnection()
                # Let the reactor tick over, in case synchronously calling
                # loseConnection and then unregisterProducer is the same as
                # synchronously calling unregisterProducer and then
                # loseConnection (as it is in several reactors).
                reactor.callLater(0, reactor.callLater, 0, self.unregister)
            def unregister(self):
                log.msg("ClientProtocol unregister")
                self.transport.unregisterProducer()
                # This should all be pretty quick.  Fail the test
                # if we don't get a connectionLost event really
                # soon.
                reactor.callLater(
                    1.0, finished.errback,
                    Failure(Exception("Connection was not lost")))
            def connectionLost(self, reason):
                log.msg("ClientProtocol.connectionLost")
                finished.callback(None)
        clientFactory = ClientFactory()
        clientFactory.protocol = ClientProtocol
        self.connect(reactor, clientFactory)
        self.runReactor(reactor)
        # If the test failed, we logged an error already and trial
        # will catch it.
| mit |
TinyOS-Camp/DDEA-DEV | REFACTORING/data_preprocess.py | 2 | 18914 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 14 01:34:41 2014
@author: deokwoo
"""
from __future__ import division # To forace float point division
import numpy as np
from numpy.linalg import norm
from scipy.interpolate import interp1d
from shared_constants import *
from data_tools import *
from scipy.stats import stats
import time
import multiprocessing as mp
def verify_sensor_data_format(tup):
    """Worker for the parallel branch of verify_data_format().

    Scans one sensor's per-time-slot sample lists and reports every sample
    that is an empty list or not a plain int/float by putting
    [key, slot_index, sample_index] on the shared queue.

    :param tup: (sensor key, per-slot sample lists, time_slots, shared queue)
    """
    key = tup[0]
    data_list = tup[1]
    time_slots = tup[2]
    q = tup[3]  # multiprocessing.Manager().Queue() shared with the parent
    print 'checking ', key, '...'
    for i, samples in enumerate(data_list):
        for j, each_sample in enumerate(samples):
            if each_sample == []:
                # Empty placeholder instead of a measurement value.
                q.put([key,i,j])
                print each_sample, 'at', time_slots[i], 'in', key
            elif ( isinstance(each_sample,int) == False and isinstance(each_sample, float) == False):
                # Only plain int/float samples are allowed downstream.
                q.put([key, i, j])
                print each_sample, 'at', time_slots[i], 'in', key
def verify_data_format(data_dict, PARALLEL=False):
    """Check that every sample of every used measurement is a plain int/float.

    Scans the weather subset and all sensors in data_dict; raises NameError
    if any sample is an empty list or a non-int/float value.

    :param data_dict: dict keyed by measurement name; data_dict[key][1] holds
        the per-time-slot sample lists (see verify_sensor_data_format).
    :param PARALLEL: when True, fan the per-key checks out over a process
        pool (CPU_CORE_NUM workers) and collect findings via a shared queue.
    :return: list of [key, slot_index, sample_index] offenders (empty on
        success; a non-empty list triggers the NameError below).
    """
    # Verify there is no [] or N/A in the list
    # Only FLoat or Int format is allowed
    print 'Checking any inconsisent data format...'
    print '-' * 40
    list_of_wrong_data_format = []
    time_slots = data_dict['time_slots']
    # Only these five weather channels are used -- TODO confirm the indices
    # against the weather_list layout defined elsewhere.
    weather_list_used = [data_dict['weather_list'][i] for i in [1, 2, 3, 10, 11]]
    key_list = weather_list_used+ data_dict['sensor_list']
    if not PARALLEL:
        for key in key_list:
            print 'checking ', key, '...'
            for i, samples in enumerate(data_dict[key][1]):
                for j, each_sample in enumerate(samples):
                    if each_sample == []:
                        list_of_wrong_data_format.append([key,i,j])
                        print each_sample, 'at', time_slots[i], 'in', key
                    elif isinstance(each_sample, int) == False and isinstance(each_sample, float) == False:
                        list_of_wrong_data_format.append([key, i, j])
                        print each_sample, 'at', time_slots[i], 'in', key
        print '-' * 40
    # PARALLEL
    else:
        manager = mp.Manager()
        q = manager.Queue()
        p = mp.Pool(CPU_CORE_NUM)
        param_list = [(key,data_dict[key][1],time_slots,q) for key in key_list]
        p.map(verify_sensor_data_format,param_list)
        p.close()
        p.join()
        # Drain the findings reported by the workers.
        while not q.empty():
            item = q.get()
            print 'queue item: ' + str(item)
            list_of_wrong_data_format.append(item)
    if len(list_of_wrong_data_format)>0:
        raise NameError('Inconsistent data format in the list of data_used')
    return list_of_wrong_data_format
def verify_data_mat(X):
    """Validate a feature matrix: every row must be NaN-free, inf-free and
    have non-zero variance.

    :param X: 2-D numpy array (rows are samples).
    :raises NameError: if any NaN, inf, or zero-variance row is found.
    """
    # Per-row error counts: [nan entries, inf entries, zero-variance flag].
    num_err_temp=np.array([[len(np.nonzero(np.isnan(sample))[0]),len(np.nonzero(sample==np.inf)[0]),len(np.nonzero(np.var(sample)==0)[0])] for sample in X])
    num_err=np.sum(num_err_temp,axis=0)
    for err_idx in np.argwhere(num_err>0):
        # BUG FIX: the exceptions below were constructed but never raised,
        # so invalid matrices silently passed this check.
        if err_idx==0:
            raise NameError('nan entry found')
        if err_idx==1:
            raise NameError('inf entry found')
        if err_idx==2:
            raise NameError('zero var found')
    # print(...) single-argument form keeps this line valid under both
    # Python 2 (parenthesized expression) and Python 3.
    print('all entry values of data matrix are verifed ok')
def normalize_data(data_input):
    """Zero-mean, unit-norm scale a feature column, ignoring np.inf entries.

    The variance test is taken over the middle 90% of the finite values, so
    a handful of outliers cannot make a constant signal look informative.

    :param data_input: 1-D numpy array; np.inf marks missing slots.
    :return: (normalized array, 0) on success, or
        (list of distinct trimmed values, -1) when the trimmed variance is
        zero and the column therefore carries no information.
    """
    vec = data_input.copy()
    finite = np.delete(vec, np.nonzero(vec == np.inf), axis=0)
    lo = int(np.ceil(len(finite) * 0.05))
    hi = int(np.floor(len(finite) * 0.95))
    trimmed = np.sort(finite)[lo:hi]
    if np.var(trimmed) > 0:  # at least two distinct finite values remain
        keep = np.nonzero(vec != np.inf)
        vec[keep] = vec[keep] - np.mean(vec[keep])
        return vec / norm(vec[keep]), 0
    return list(set(trimmed)), -1
def interploate_data(x_temp, num_type, max_num_succ_idx_for_itpl):
    """Fill missing (np.inf) entries of a feature column by interpolation.

    Boundary slots and gaps longer than max_num_succ_idx_for_itpl successive
    missing slots are left as np.inf.  FLOAT_TYPE data is interpolated
    linearly (np.interp); INT_TYPE data uses fast_nearest_interp.

    :param x_temp: (num_slots, 1) array where np.inf marks a missing slot.
    :param num_type: INT_TYPE or FLOAT_TYPE (from shared_constants).
    :param max_num_succ_idx_for_itpl: longest run of missing slots to fill.
    :return: the same array with eligible gaps filled in place.
    """
    num_of_samples = x_temp.shape[0]
    inf_idx = np.nonzero(x_temp == np.inf)[0]
    noinf_idx = np.nonzero(x_temp != np.inf)[0]
    # Dont interploate the values on bondary.
    inter_idx = np.delete(inf_idx, np.nonzero(inf_idx == 0))
    inter_idx = np.delete(inter_idx, np.nonzero(inter_idx == num_of_samples-1))
    #############################################################################################
    # Dont interploate the values unknown successively more than num_succ_idx_no_interploate
    # Then deletea any index that meet the condition above,
    # inter_idx=np.delete(inter_idx,those index)
    # Need to be completed .....
    # NOTE(review): the loop below already appears to implement this; the
    # TODO above looks stale.
    #############################################################################################
    # Find successive inf indices
    succ_inf_idx = []
    for i in range(0, len(noinf_idx) - 1):
        # number of successive inf between two non-inf indices
        num_succ_inf = noinf_idx[i+1] - noinf_idx[i] - 1
        if num_succ_inf > max_num_succ_idx_for_itpl:
            # Gap too long to trust interpolation -- exclude its indices.
            succ_inf_idx = succ_inf_idx + range(noinf_idx[i]+1, noinf_idx[i+1])
    # Remove successive inf indices
    inter_idx = list(set(inter_idx) - set(succ_inf_idx))
    if num_type == FLOAT_TYPE:
        #f = interp1d(noinf_idx,x_temp[noinf_idx,0],'linear')
        val_new = np.interp(inter_idx,noinf_idx, x_temp[noinf_idx,0])
        #val_new = np.interp(t_new, t_,val_)
    elif num_type == INT_TYPE:
        #f = interp1d(noinf_idx,x_temp[noinf_idx,0],'nearest')
        val_new=fast_nearest_interp(inter_idx,noinf_idx, x_temp[noinf_idx, 0])
    else:
        raise NameError('Sample type must either INT or FLOAT type')
    #x_temp[inter_idx,0]=f(inter_idx)
    x_temp[inter_idx,0] = val_new
    print 'No sample in time slot', inf_idx
    print len(inter_idx), '/', len(inf_idx), ' time slots are interplated'
    return x_temp
def get_feature(data_dict_samples, num_type):
    """Collapse each time slot's raw sample list into one summary value.

    FLOAT_TYPE slots are summarized by the mean, INT_TYPE slots by the mode;
    slots with no samples at all become np.inf (the missing-value marker).

    :param data_dict_samples: list of per-slot sample lists.
    :param num_type: INT_TYPE or FLOAT_TYPE (from shared_constants).
    :return: (num_slots, 1) numpy column vector of features.
    """
    features = []
    for samples in data_dict_samples:
        # Set infty if no sample is availble for this slot.
        if len(samples) == 0:
            features.append(np.inf)
        elif num_type == INT_TYPE:
            features.append(int(stats.mode(samples)[0]))
        elif num_type == FLOAT_TYPE:
            features.append(np.mean(samples))
        else:
            raise NameError('Sample type must either INT or FLOAT type')
    return np.array(features)[:, np.newaxis]
# Mean value measure
def build_feature_matrix(data_dict, sensor_list, weather_list, time_slots, interpolation=1, max_num_succ_idx_for_itpl=4):
    """Build the (time slots x measurements) average-value feature matrix.

    For each key in sensor_list + weather_list: summarize per-slot samples
    (get_feature), remove outliers, optionally interpolate short gaps, and
    normalize float-type sensor columns.  Columns whose trimmed variance is
    zero are excluded and reported via zero_var_list / zero_var_val; time
    slots that still contain any missing (inf) value are dropped at the end.

    :return: (X, new_time_slot, input_names, zero_var_list, zero_var_val,
        INT_type_list, INT_type_idx, FLOAT_type_list, FLOAT_type_idx,
        weather_type_idx, sensor_type_idx)
    """
    data_used = sensor_list + weather_list
    print 'Build data feature matrix now.....'
    if interpolation == 1:
        print 'Missing samples will be interpolated upto', max_num_succ_idx_for_itpl, 'successive time slots'
    else:
        print 'All time slots with any missing sample will be removed without interpolatoin '
    num_of_data = len(data_used)
    num_of_samples = len(time_slots)
    # Declare as 2-d list for exception.
    X = []
    INT_type_list = []
    FLOAT_type_list = []
    input_names = []
    weather_type_idx = []
    sensor_type_idx = []
    INT_type_idx = []
    FLOAT_type_idx = []
    zero_var_list = []
    zero_var_val = []
    #import pdb; pdb.set_trace()
    # whose variance is zero, hence carry no information,
    # Constrcut X matrix by summerizing hourly samples
    for j, key in enumerate(data_used):
        print '-' * 40
        print 'building for ', key
        try:
            num_type = check_data_type(data_dict[key][2][1])
            # Avg. value feature
            x_temp = get_feature(data_dict[key][1], num_type)
            non_inf_idx = np.nonzero(x_temp < np.inf)[0]
            #if non_inf_idx <len(time_slots):measurement_point_set
            # Outlier removal, different parameters for sensors and weather data
            if len(sensor_list) <= j:
                # weather data
                is_weather_data = True
                outlier_idx = outlier_detect(x_temp[non_inf_idx], 5, 10)
            else:
                is_weather_data = False
                outlier_idx = outlier_detect(x_temp[non_inf_idx], 1, 20)
            if len(outlier_idx) > 0:
                print 'outlier samples are detected: ', 'outlier_idx:', outlier_idx
                # Outliers are re-marked as missing so interpolation can fill them.
                x_temp[non_inf_idx[outlier_idx]] = np.inf
            # interplolation data, use nearest for int type, use linear for float type
            if interpolation == 1:
                x_temp = interploate_data(x_temp, num_type, max_num_succ_idx_for_itpl)
            norm_data_vec, output_status = normalize_data(x_temp[:, 0])
            # Too many slots still missing: abandon this key.  NOTE(review):
            # a bare 'raise' with no active exception raises its own error,
            # which the except clause below then catches -- effectively a
            # jump to the error path.
            if len(np.nonzero(norm_data_vec == np.inf)[0]) > num_of_samples/5:
                raise
        except Exception as e:
            print ' Error in processing data feature, excluded from analysis'
            output_status = -1
            norm_data_vec = None
        if output_status == -1:
            zero_var_list.append(key)
            zero_var_val.append(norm_data_vec)
            print 'too small variance for float type, added to zero var list'
        else:
            input_names.append(key)
            print j, 'th sensor update'
            # Float-type *sensor* columns keep their normalized values;
            # int-type and all weather columns keep the raw summarized values.
            if (num_type == FLOAT_TYPE) and (is_weather_data == False):
                X.append(norm_data_vec)
                FLOAT_type_idx.append(len(X)-1)
                FLOAT_type_list.append(key)
            elif (num_type == INT_TYPE) or (is_weather_data == True):
                X.append(x_temp[:, 0])
                INT_type_idx.append(len(X)-1)
                INT_type_list.append(key)
            else:
                raise NameError('Sample type must either INT or FLOAT type')
            if key in weather_list:
                weather_type_idx.append(len(X)-1)
            elif key in sensor_list:
                sensor_type_idx.append(len(X)-1)
            else:
                raise NameError('Sample type must either Weather or Sensor type')
    # Linear Interpolate
    X = np.array(X).T
    if X.shape[0] != num_of_samples:
        raise NameError('The numeber of rows in feature matrix and the number of the time slots are different ')
    if X.shape[1]+len(zero_var_list) != num_of_data:
        raise NameError('The sume of the numeber of column in feature matrix and the number of zero var column are different from the number of input measurements ')
    deleted_timeslot_idx=[]
    print '-' * 20
    print 'removing time slots having no sample...'
    # Collect every row (time slot) index that still contains an inf in any column.
    inf_idx_set = []
    for col_vec in X.T:
        inf_idx = np.nonzero(col_vec ==np.infty)[0]
        inf_idx_set = np.r_[inf_idx_set, inf_idx]
    inf_col_idx = list(set(list(inf_idx_set)))
    deleted_timeslot_idx = np.array([int(x) for x in inf_col_idx])
    print 'time slots', deleted_timeslot_idx, ' removed...'
    print '-' * 20
    X = np.delete(X, deleted_timeslot_idx, axis=0)
    new_time_slot = np.delete(time_slots, deleted_timeslot_idx)
    # Checking whether it has any ill entry value
    verify_data_mat(X)
    return X, new_time_slot, input_names, zero_var_list, zero_var_val, INT_type_list, INT_type_idx, FLOAT_type_list, FLOAT_type_idx, weather_type_idx, sensor_type_idx
# Abs Diff value measure
def build_diff(tup):
k = tup[0]
time_slots = tup[1]
conf_lev = tup[2]
set_val = tup[3]
set_name = tup[4]
num_type = tup[5]
print set_name
try:
diff_mean=get_diff(set_val,time_slots,num_type,conf_lev)
if num_type==FLOAT_TYPE:
#norm_diff_mean,output_status=normalize_data(diff_mean[:,0])
norm_diff_mean,output_status=normalize_data(diff_mean)
elif num_type==INT_TYPE:
#num_discrete_vals=len(set(list(diff_mean[:,0])))
num_discrete_vals=len(set(list(diff_mean)))
print 'num_discrete_vals :', num_discrete_vals
if num_discrete_vals>1:
output_status=0
norm_diff_mean=diff_mean
else:
output_status=-1
norm_diff_mean=list(set(diff_mean))
#norm_diff_mean=list(set(diff_mean[:,0]))
else:
pass
except Exception:
print ' Error in processing data feature, excluded from analysis'
output_status=-1
norm_diff_mean=None
return (k,[output_status,norm_diff_mean])
return (k,[output_status,norm_diff_mean])
def get_diff(set_val, time_slots, num_type, conf_lev):
    """Summarize the largest absolute sample-to-sample jumps per time slot.

    For every slot: take the absolute first differences of the samples whose
    unix timestamps fall inside the slot, keep the upper (1 - conf_lev)
    fraction of them, and summarize those with the mean (FLOAT_TYPE) or the
    mode (INT_TYPE).  Slots with fewer than two samples yield np.inf.

    :param set_val: pair of arrays (unix timestamps, sample values).
    :param time_slots: uniformly spaced datetime slot boundaries.
    :param num_type: INT_TYPE or FLOAT_TYPE (from shared_constants).
    :param conf_lev: fraction (0..1) of the smallest jumps to discard.
    :return: 1-D numpy array with one diff summary per time slot.
    """
    slot_starts = dtime_to_unix(time_slots)
    slot_seconds = (time_slots[1] - time_slots[0]).seconds
    diff_mean = []
    for slot_start in slot_starts:
        slot_end = slot_start + slot_seconds
        in_slot = np.nonzero((set_val[0] >= slot_start) & (set_val[0] < slot_end))[0]
        if len(in_slot) < 2:
            # Not enough samples in this slot to form a difference.
            diff_mean.append(np.inf)
            continue
        jumps = abs(np.diff(set_val[1][in_slot]))
        upper = np.sort(jumps)[int(np.floor(len(jumps) * conf_lev)):]
        if len(upper) == 0:
            diff_mean.append(np.inf)
        elif num_type == FLOAT_TYPE:
            diff_mean.append(np.mean(upper))
        elif num_type == INT_TYPE:
            diff_mean.append(int(stats.mode(upper)[0]))
        else:
            raise NameError('Sample type must either INT or FLOAT type')
    return np.array(diff_mean)
# Abs Diff value measure
def build_diff_matrix(measurement_point_set,time_slots,num_type_set,irr_data_name,conf_lev=0.5,PARALLEL=False):
#time_slots_utc = dtime_to_unix(time_slots)
Xdiff = []
input_names = []
INT_type_list = []
FLOAT_type_list = []
INT_type_idx = []
FLOAT_type_idx = []
zero_var_list = []
# whose variance is zero, hence carry no information,
zero_var_val = []
num_of_samples = len(time_slots)
#TIMELET_INV_seconds = (time_slots[1]-time_slots[0]).seconds
print '=' * 40
if not PARALLEL:
for k, (set_val, set_name) in enumerate(zip(measurement_point_set, irr_data_name)):
print irr_data_name[k]
try:
num_type = num_type_set[k]
diff_mean = get_diff(set_val, time_slots, num_type, conf_lev)
if num_type == FLOAT_TYPE:
#norm_diff_mean,output_status=normalize_data(diff_mean[:,0])
norm_diff_mean,output_status=normalize_data(diff_mean)
elif num_type == INT_TYPE:
#num_discrete_vals=len(set(list(diff_mean[:,0])))
num_discrete_vals=len(set(list(diff_mean)))
print 'num_discrete_vals :', num_discrete_vals
if num_discrete_vals>1:
output_status=0
norm_diff_mean=diff_mean
else:
output_status=-1
#norm_diff_mean=list(set(diff_mean[:,0]))
norm_diff_mean=list(set(diff_mean))
else:
pass
if len(np.nonzero(norm_diff_mean == np.inf)[0])>num_of_samples/5:
raise
except Exception as e:
print ' Error in processing data feature, excluded from analysis'
output_status=-1
norm_diff_mean=None
if output_status == -1:
zero_var_list.append(set_name);#zero_var_flag=1
zero_var_val.append(norm_diff_mean)
print 'too small variance for float type or a single value for int type, added to zero var list'
else:
input_names.append(set_name)
Xdiff.append(norm_diff_mean)
if num_type == FLOAT_TYPE:
FLOAT_type_list.append(set_name)
FLOAT_type_idx.append(len(Xdiff)-1)
elif num_type == INT_TYPE:
INT_type_list.append(set_name)
INT_type_idx.append(len(Xdiff)-1)
print '-' * 20
print '=' * 40
# PARALLEL ENABLED
else:
print 'Build diff matrix: Parallel enabled...'
# Construct param list for workers
param_list = []
for k,(set_val,set_name) in enumerate(zip(measurement_point_set,irr_data_name)):
param_list.append((k,time_slots,conf_lev,set_val,set_name,num_type_set[k]))
p = mp.Pool(CPU_CORE_NUM)
ret_dict = dict(p.map(build_diff,param_list))
p.close()
p.join()
for k in sorted(ret_dict.keys()):
v = ret_dict[k]
output_status = v[0]
norm_diff_mean = v[1]
set_name = irr_data_name[k]
num_type = num_type_set[k]
if output_status==-1:
zero_var_list.append(set_name)
#zero_var_flag=1
zero_var_val.append(norm_diff_mean)
print 'too small variance for float type or a single value for int type, added to zero var list'
else:
input_names.append(set_name)
try:
Xdiff.append(norm_diff_mean)
except:
import pdb;pdb.set_trace()
if num_type == FLOAT_TYPE:
FLOAT_type_list.append(set_name)
FLOAT_type_idx.append(len(Xdiff)-1)
elif num_type == INT_TYPE:
INT_type_list.append(set_name)
INT_type_idx.append(len(Xdiff)-1)
print '-' * 20
Xdiff = np.array(Xdiff).T
deleted_timeslot_idx = []
print '-' * 20
print 'removing time slots having no sample...'
inf_idx_set=[]
for col_vec in Xdiff.T:
inf_idx = np.nonzero(col_vec == np.infty)[0]
inf_idx_set=np.r_[inf_idx_set, inf_idx]
inf_col_idx = list(set(list(inf_idx_set)))
deleted_timeslot_idx = np.array([int(x) for x in inf_col_idx]).astype(int)
print 'time slots', deleted_timeslot_idx, ' removed...'
print '-' * 20
Xdiff = np.delete(Xdiff, deleted_timeslot_idx, axis=0)
new_time_slot = np.delete(time_slots, deleted_timeslot_idx)
# Checking whether it has any ill entry value
verify_data_mat(Xdiff)
print "*-" * 20
print "* deleted_timeslot_idx :", deleted_timeslot_idx
print "*-" * 20
return Xdiff,\
new_time_slot,\
input_names,\
zero_var_list,\
zero_var_val, \
INT_type_list,\
INT_type_idx,\
FLOAT_type_list,\
FLOAT_type_idx | gpl-2.0 |
metaperl/behave | behave4cmd0/pathutil.py | 12 | 3946 | # -*- coding -*-
"""
Provides some command utility functions.
TODO:
matcher that ignores empty lines and whitespace and has contains comparison
"""
from __future__ import absolute_import, print_function, unicode_literals
import os.path
import codecs
# -----------------------------------------------------------------------------
# CONSTANTS:
# -----------------------------------------------------------------------------
# HERE, WORKDIR: see "__setup.py"
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# -----------------------------------------------------------------------------
def realpath_with_context(path, context):
    """
    Resolve *path* against the context's working directory.

    Relative paths are joined onto :attr:`context.workdir`; absolute paths
    are returned unchanged.

    :param path: Filepath to convert (as string).
    :param context: Behave context object (with :attr:`context.workdir`)
    :return: Converted path.
    """
    if os.path.isabs(path):
        return path
    # XXX ensure_workdir_exists(context)
    assert context.workdir
    return os.path.join(context.workdir, os.path.normpath(path))
def posixpath_normpath(pathname):
    """
    Normalize *pathname* and return it in POSIX form (forward slashes only).

    :param pathname: Pathname (as string)
    :return: Normalized POSIX path.
    """
    normalized = os.path.normpath(pathname) or "."
    # replace() is a no-op when no backslash is present.
    return normalized.replace('\\', '/')
def read_file_contents(filename, context=None, encoding=None):
    """Read and return the textual contents of a file.

    Relative filenames are resolved against the context's workdir.

    :param filename: File to read (absolute, or relative to the workdir).
    :param context: Optional behave context (for workdir resolution).
    :param encoding: Text encoding to use; None keeps the platform default.
    """
    filename_ = realpath_with_context(filename, context)
    assert os.path.exists(filename_)
    # BUG FIX: the ``encoding`` parameter was silently ignored; honor it
    # (codecs.open with encoding=None behaves like a plain open).
    with codecs.open(filename_, "r", encoding=encoding) as file_:
        file_contents = file_.read()
    return file_contents
# def create_new_workdir(context):
# ensure_attribute_exists(context, "workdir", default=WORKDIR)
# if os.path.exists(context.workdir):
# shutil.rmtree(context.workdir, ignore_errors=True)
# ensure_workdir_exists(context)
def create_textfile_with_contents(filename, contents, encoding='utf-8'):
    """
    Creates a textual file with the provided contents in the workdir.
    Overwrites an existing file.

    A trailing newline is appended when the contents do not end with one.

    :param filename: Path of the file to (re)create.
    :param contents: Text to write.
    :param encoding: Text encoding to use (default: UTF-8).
    """
    ensure_directory_exists(os.path.dirname(filename))
    if os.path.exists(filename):
        os.remove(filename)
    # BUG FIX: use a context manager so the stream is closed (and flushed)
    # even when a write fails; the handle was previously leaked on error.
    with codecs.open(filename, "w", encoding) as outstream:
        outstream.write(contents)
        if contents and not contents.endswith("\n"):
            outstream.write("\n")
    assert os.path.exists(filename), "ENSURE file exists: %s" % filename
def ensure_file_exists(filename, context=None):
    """Guarantee that *filename* exists, creating it empty if necessary.

    :param filename: File path (resolved against the workdir when a context
        is supplied).
    :param context: Optional behave context used for path resolution.
    """
    real_filename = realpath_with_context(filename, context) if context else filename
    if not os.path.exists(real_filename):
        create_textfile_with_contents(real_filename, "")
    assert os.path.exists(real_filename), "ENSURE file exists: %s" % filename
def ensure_directory_exists(dirname, context=None):
    """Guarantee that *dirname* exists as a directory, creating it (and any
    missing parents) if necessary.

    :param dirname: Directory path (resolved against the workdir when a
        context is supplied).
    :param context: Optional behave context used for path resolution.
    """
    real_dirname = realpath_with_context(dirname, context) if context else dirname
    if not os.path.exists(real_dirname):
        os.makedirs(real_dirname)
    assert os.path.exists(real_dirname), "ENSURE dir exists: %s" % dirname
    assert os.path.isdir(real_dirname), "ENSURE isa dir: %s" % dirname
# def ensure_workdir_exists(context):
# """
# Ensures that the work directory exists.
# In addition, the location of the workdir is stored as attribute in
# the context object.
# """
# ensure_attribute_exists(context, "workdir", default=WORKDIR)
# # if not context.workdir:
# # context.workdir = os.path.abspath(WORKDIR)
# ensure_directory_exists(context.workdir)
| bsd-2-clause |
mattseymour/django | tests/messages_tests/test_session.py | 7 | 1761 | from django.contrib.messages import constants
from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.session import SessionStorage
from django.test import TestCase
from django.utils.safestring import SafeData, mark_safe
from .base import BaseTests
def set_session_data(storage, messages):
    """
    Sets the messages into the backend request's session and remove the
    backend's loaded data cache.
    """
    serialized = storage.serialize_messages(messages)
    storage.request.session[storage.session_key] = serialized
    # Drop the cache so the storage re-reads from the session.
    if hasattr(storage, '_loaded_data'):
        del storage._loaded_data
def stored_session_messages_count(storage):
    """Return how many messages are currently serialized in the session."""
    raw = storage.request.session.get(storage.session_key, [])
    return len(storage.deserialize_messages(raw))
class SessionTests(BaseTests, TestCase):
    """Message-storage tests exercised against the session backend."""
    storage_class = SessionStorage

    def get_request(self):
        # Replace the session with a plain dict the tests can inspect.
        self.session = {}
        req = super(SessionTests, self).get_request()
        req.session = self.session
        return req

    def stored_messages_count(self, storage, response):
        # The response object is irrelevant for the session backend.
        return stored_session_messages_count(storage)

    def test_get(self):
        storage = self.storage_class(self.get_request())
        expected = ['test', 'me']
        set_session_data(storage, expected)
        self.assertEqual(list(storage), expected)

    def test_safedata(self):
        """
        A message containing SafeData keeps its safe status when retrieved from
        the message storage.
        """
        storage = self.get_storage()
        message = Message(constants.DEBUG, mark_safe("<b>Hello Django!</b>"))
        set_session_data(storage, [message])
| bsd-3-clause |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/distutils/tests/setuptools_extension.py | 149 | 1592 | from distutils.core import Extension as _Extension
from distutils.core import Distribution as _Distribution
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
# Resolve the pristine distutils base classes, even if setuptools (or some
# other extension) already monkeypatched them.
_Distribution = _get_unpatched(_Distribution)
_Extension = _get_unpatched(_Extension)
try:
    # Probe for Pyrex, whose build_ext can compile .pyx sources directly.
    from Pyrex.Distutils.build_ext import build_ext
except ImportError:
    have_pyrex = False
else:
    have_pyrex = True
class Extension(_Extension):
    """Extension that uses '.c' files in place of '.pyx' files"""
    if not have_pyrex:
        # Without Pyrex the .pyx sources cannot be compiled, so substitute
        # the pre-generated .c files at construction time.
        def __init__(self, *args, **kw):
            _Extension.__init__(self, *args, **kw)
            self.sources = [
                source[:-3] + 'c' if source.endswith('.pyx') else source
                for source in self.sources
            ]
class Library(Extension):
    """Just like a regular Extension, but built as a library instead"""
    # Marker subclass: no additional behavior is defined here.
# Monkeypatch the distutils modules so code importing Extension from them
# (including an already-imported build_ext module) sees the patched class.
import sys, distutils.core, distutils.extension
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
| apache-2.0 |
akohlmey/lammps | lib/plumed/Install.py | 7 | 5784 | #!/usr/bin/env python
"""
Install.py tool to download, unpack, build, and link to the plumed2 library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, os, platform, subprocess, shutil
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import get_cpus, fullpath, geturl, checkmd5sum
parser = ArgumentParser(prog='Install.py',
                        description="LAMMPS library build wrapper script")
# settings
version = "2.7.1"  # default PLUMED version to download (override with -v)
mode = "static"    # default linkage mode (override with -m)
# help message
HELP = """
Syntax from src dir: make lib-plumed args="-b"
or: make lib-plumed args="-b -v 2.4.3"
or: make lib-plumed args="-p /usr/local/plumed2 -m shared"
Syntax from lib dir: python Install.py -b -v 2.4.3
or: python Install.py -b
or: python Install.py -p /usr/local/plumed2 -m shared
Example:
make lib-plumed args="-b" # download/build in lib/plumed/plumed2
make lib-plumed args="-p $HOME/plumed2 -m shared" # use existing Plumed2 installation in $HOME/plumed2
"""
# known checksums for different PLUMED versions. used to validate the download.
# (MD5 hex digests of the plumed-src-<version>.tgz release tarballs)
checksums = { \
        '2.4.2' : '88188743a6e03ef076e5377d03ebb0e7', \
        '2.4.3' : 'b1be7c48971627febc11c61b70767fc5', \
        '2.4.4' : '71ed465bdc7c2059e282dbda8d564e71', \
        '2.5.0' : '6224cd089493661e19ceacccd35cf911', \
        '2.5.1' : 'c2a7b519e32197a120cdf47e0f194f81', \
        '2.5.2' : 'bd2f18346c788eb54e1e52f4f6acf41a', \
        '2.5.3' : 'de30d6e7c2dcc0973298e24a6da24286', \
        '2.5.4' : 'f31b7d16a4be2e30aa7d5c19c3d37853', \
        '2.5.7' : '1ca36226fdb8110b1009aa61d615d4e5', \
        '2.6.0' : '204d2edae58d9b10ba3ad460cad64191', \
        '2.6.1' : '89a9a450fc6025299fe16af235957163', \
        '2.6.3' : 'a9f8028fd74528c2024781ea1fdefeee', \
        '2.7.0' : '95f29dd0c067577f11972ff90dfc7d12', \
        '2.7.1' : '4eac6a462ec84dfe0cec96c82421b8e8', \
        }
# parse and process arguments
pgroup = parser.add_mutually_exclusive_group()
pgroup.add_argument("-b", "--build", action="store_true",
                    help="download and build the plumed2 library")
pgroup.add_argument("-p", "--path",
                    help="specify folder of existing plumed2 installation")
parser.add_argument("-v", "--version", default=version, choices=checksums.keys(),
                    help="set version of plumed to download and build (default: %s)" % version)
parser.add_argument("-m", "--mode", default=mode, choices=['static', 'shared', 'runtime'],
                    help="set plumed linkage mode: static (default), shared, or runtime")
args = parser.parse_args()
# print help message and exit, if neither build nor path options are given
if not args.build and not args.path:
  parser.print_help()
  sys.exit(HELP)
buildflag = args.build
pathflag = args.path is not None
plumedpath = args.path
mode = args.mode
homepath = fullpath('.')
# Default install/link target; replaced below when -p points at an
# existing installation.
homedir = "%s/plumed2" % (homepath)
if pathflag:
  if not os.path.isdir(plumedpath):
    sys.exit("Plumed2 path %s does not exist" % plumedpath)
  homedir = fullpath(plumedpath)
  # Sanity-check that the folder really contains a PLUMED installation.
  if not os.path.isdir(os.path.join(homedir, 'include', 'plumed', 'core')):
    sys.exit("No Plumed2 installation found at %s" % plumedpath)
# download and unpack plumed2 tarball
if buildflag:
  url = "https://github.com/plumed/plumed2/releases/download/v%s/plumed-src-%s.tgz" % (version, version)
  filename = "plumed-src-%s.tar.gz" %version
  print("Downloading plumed ...")
  geturl(url, filename)
  # verify downloaded archive integrity via md5 checksum, if known.
  if version in checksums:
    if not checkmd5sum(checksums[version], filename):
      sys.exit("Checksum for plumed2 library does not match")
  print("Unpacking plumed2 source tarball ...")
  # Remove any leftovers from a previous build before extracting.
  if os.path.exists("%s/plumed-%s" % (homepath, version)):
    shutil.rmtree("%s/plumed-%s" % (homepath, version))
  if os.path.exists(homedir):
    shutil.rmtree(homedir)
  cmd = 'cd "%s"; tar -xzvf %s' % (homepath, filename)
  subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
  os.remove(os.path.join(homepath, filename))
  # build plumed
  print("Building plumed ...")
  n_cpus = get_cpus()
  # configure + parallel make + install into homedir.
  cmd = 'cd %s/plumed-%s; ./configure --prefix=%s --enable-modules=all --enable-static-patch ; make -j%d ; make install' % (homepath, version, homedir, n_cpus)
  try:
    txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    print(txt.decode('UTF-8'))
  except subprocess.CalledProcessError as e:
    print("Make failed with:\n %s" % e.output.decode('UTF-8'))
    sys.exit(1)
# create 2 links in lib/plumed to plumed2 installation dir
print("Creating links to plumed2 include and lib files")
if os.path.isfile("includelink") or os.path.islink("includelink"):
  os.remove("includelink")
if os.path.isfile("liblink") or os.path.islink("liblink"):
  os.remove("liblink")
os.symlink(os.path.join(homedir, 'include'), 'includelink')
# Prefer lib64 when the installation uses it, otherwise fall back to lib.
libpath = os.path.join(homedir, 'lib64')
if not os.path.exists(libpath):
  libpath = os.path.join(homedir, 'lib')
os.symlink(libpath, 'liblink')
if os.path.isfile("Makefile.lammps.%s" % mode):
  print("Creating Makefile.lammps")
  plumedinc = os.path.join('liblink', 'plumed', 'src', 'lib', 'Plumed.inc.' + mode)
  # BUG FIX: read/write the makefile fragments with context managers; the
  # original left three file handles open (open(...).readlines()) and did
  # not guarantee the output file was closed on error.
  with open(plumedinc, 'r') as f:
    lines1 = f.readlines()
  if (platform.system() == 'Darwin' and os.path.isfile("Makefile.lammps.%s.macosx" % mode)):
    with open("Makefile.lammps.%s.macosx" % mode, 'r') as f:
      lines2 = f.readlines()
  else:
    with open("Makefile.lammps.%s" % mode, 'r') as f:
      lines2 = f.readlines()
  with open("Makefile.lammps", 'w') as fp:
    fp.write("PLUMED_LIBDIR=" + os.path.join(homedir, "lib\n"))
    fp.writelines(lines1)
    fp.writelines(lines2)
| gpl-2.0 |
PistonDevelopers/freetype-sys | freetype2/builds/meson/extract_freetype_version.py | 12 | 2634 | #!/usr/bin/env python
"""Extract the FreeType version numbers from `<freetype/freetype.h>`.
This script parses the header to extract the version number defined there.
By default, the full dotted version number is printed, but `--major`,
`--minor` or `--patch` can be used to only print one of these values
instead.
"""
from __future__ import print_function
import argparse
import os
import re
import sys
# Expected input:
#
# ...
# #define FREETYPE_MAJOR  2
# #define FREETYPE_MINOR  10
# #define FREETYPE_PATCH  2
# ...

RE_MAJOR = re.compile(r"^ \#define \s+ FREETYPE_MAJOR \s+ (.*) $", re.X)
RE_MINOR = re.compile(r"^ \#define \s+ FREETYPE_MINOR \s+ (.*) $", re.X)
RE_PATCH = re.compile(r"^ \#define \s+ FREETYPE_PATCH \s+ (.*) $", re.X)


def parse_freetype_header(header):
    """Scan *header* text and return (major, minor, patch) as strings.

    Each FREETYPE_* macro must appear exactly once; an assertion fires on a
    duplicate definition or when any of the three is missing.
    """
    found = {"major": None, "minor": None, "patch": None}
    patterns = (("major", RE_MAJOR), ("minor", RE_MINOR), ("patch", RE_PATCH))
    for line in header.splitlines():
        line = line.rstrip()
        for name, pattern in patterns:
            match = pattern.match(line)
            if match:
                assert found[name] is None, (
                    "FREETYPE_%s appears more than once!" % name.upper())
                found[name] = match.group(1)
                break
    assert (
        found["major"] and found["minor"] and found["patch"]
    ), "This header is missing one of FREETYPE_MAJOR, FREETYPE_MINOR or FREETYPE_PATCH!"
    return (found["major"], found["minor"], found["patch"])
def main():
    """Command-line entry point: print the requested version component(s)."""
    parser = argparse.ArgumentParser(description=__doc__)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--major", action="store_true",
                       help="Only print the major version number.")
    group.add_argument("--minor", action="store_true",
                       help="Only print the minor version number.")
    group.add_argument("--patch", action="store_true",
                       help="Only print the patch version number.")
    parser.add_argument("input", metavar="FREETYPE_H",
                        help="The input freetype.h header to parse.")
    args = parser.parse_args()

    with open(args.input) as header_file:
        header = header_file.read()
    major, minor, patch = parse_freetype_header(header)

    if args.major:
        print(major)
    elif args.minor:
        print(minor)
    elif args.patch:
        print(patch)
    else:
        print("%s.%s.%s" % (major, minor, patch))
    return 0
if __name__ == "__main__":
sys.exit(main())
| mit |
tinloaf/home-assistant | homeassistant/components/cloud/auth_api.py | 1 | 4703 | """Package to communicate with the authentication API."""
class CloudError(Exception):
"""Base class for cloud related errors."""
class Unauthenticated(CloudError):
"""Raised when authentication failed."""
class UserNotFound(CloudError):
"""Raised when a user is not found."""
class UserNotConfirmed(CloudError):
"""Raised when a user has not confirmed email yet."""
class PasswordChangeRequired(CloudError):
"""Raised when a password change is required."""
# https://github.com/PyCQA/pylint/issues/1085
# pylint: disable=useless-super-delegation
def __init__(self, message='Password change required.'):
"""Initialize a password change required error."""
super().__init__(message)
class UnknownError(CloudError):
"""Raised when an unknown error occurs."""
AWS_EXCEPTIONS = {
'UserNotFoundException': UserNotFound,
'NotAuthorizedException': Unauthenticated,
'UserNotConfirmedException': UserNotConfirmed,
'PasswordResetRequiredException': PasswordChangeRequired,
}
def _map_aws_exception(err):
"""Map AWS exception to our exceptions."""
ex = AWS_EXCEPTIONS.get(err.response['Error']['Code'], UnknownError)
return ex(err.response['Error']['Message'])
def register(cloud, email, password):
    """Register a new account."""
    # botocore is imported lazily so the package can be loaded without it.
    from botocore.exceptions import ClientError
    cognito = _cognito(cloud)
    # Workaround for bug in Warrant. PR with fix:
    # https://github.com/capless/warrant/pull/82
    cognito.add_base_attributes()
    try:
        cognito.register(email, password)
    except ClientError as err:
        # Surface AWS failures as one of the CloudError subclasses above.
        raise _map_aws_exception(err)
def resend_email_confirm(cloud, email):
    """Resend email confirmation."""
    from botocore.exceptions import ClientError
    cognito = _cognito(cloud, username=email)
    # Call the low-level Cognito client directly; warrant exposes no
    # wrapper for resend_confirmation_code.
    try:
        cognito.client.resend_confirmation_code(
            Username=email,
            ClientId=cognito.client_id
        )
    except ClientError as err:
        raise _map_aws_exception(err)
def forgot_password(cloud, email):
    """Initialize forgotten password flow."""
    from botocore.exceptions import ClientError
    cognito = _cognito(cloud, username=email)
    try:
        cognito.initiate_forgot_password()
    except ClientError as err:
        raise _map_aws_exception(err)
def login(cloud, email, password):
    """Log user in and fetch certificate."""
    cognito = _authenticate(cloud, email, password)
    # Copy the session tokens onto the cloud object and persist them.
    cloud.id_token = cognito.id_token
    cloud.access_token = cognito.access_token
    cloud.refresh_token = cognito.refresh_token
    cloud.write_user_info()
def check_token(cloud):
    """Check that the token is valid and verify if needed."""
    from botocore.exceptions import ClientError
    cognito = _cognito(
        cloud,
        access_token=cloud.access_token,
        refresh_token=cloud.refresh_token)
    try:
        # check_token() returns truthy when it refreshed the session;
        # only then do the stored tokens need updating.
        if cognito.check_token():
            cloud.id_token = cognito.id_token
            cloud.access_token = cognito.access_token
            cloud.write_user_info()
    except ClientError as err:
        raise _map_aws_exception(err)
def renew_access_token(cloud):
    """Renew access token."""
    from botocore.exceptions import ClientError
    cognito = _cognito(
        cloud,
        access_token=cloud.access_token,
        refresh_token=cloud.refresh_token)
    try:
        # Unconditional renewal, unlike check_token() above.
        cognito.renew_access_token()
        cloud.id_token = cognito.id_token
        cloud.access_token = cognito.access_token
        cloud.write_user_info()
    except ClientError as err:
        raise _map_aws_exception(err)
def _authenticate(cloud, email, password):
    """Log in and return an authenticated Cognito instance."""
    from botocore.exceptions import ClientError
    from warrant.exceptions import ForceChangePasswordException
    # NOTE(review): assert is stripped under `python -O`; relies on callers
    # not invoking this while already logged in.
    assert not cloud.is_logged_in, 'Cannot login if already logged in.'
    cognito = _cognito(cloud, username=email)
    try:
        cognito.authenticate(password=password)
        return cognito
    except ForceChangePasswordException:
        raise PasswordChangeRequired
    except ClientError as err:
        raise _map_aws_exception(err)
def _cognito(cloud, **kwargs):
    """Get the client credentials."""
    import botocore
    import boto3
    from warrant import Cognito
    cognito = Cognito(
        user_pool_id=cloud.user_pool_id,
        client_id=cloud.cognito_client_id,
        user_pool_region=cloud.region,
        **kwargs
    )
    # Replace warrant's default client with an unsigned one: Cognito
    # user-pool calls are authorized by the user tokens themselves,
    # not by AWS credentials.
    cognito.client = boto3.client(
        'cognito-idp',
        region_name=cloud.region,
        config=botocore.config.Config(
            signature_version=botocore.UNSIGNED
        )
    )
    return cognito
| apache-2.0 |
2013Commons/HUE-SHARK | build/env/lib/python2.7/site-packages/Django-1.2.3-py2.7.egg/django/core/management/commands/testserver.py | 73 | 1408 | from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
    """Run a development server backed by a freshly created test database
    loaded with the given fixtures (Django 1.2-era `testserver` command)."""
    option_list = BaseCommand.option_list + (
        make_option('--addrport', action='store', dest='addrport',
            type='string', default='',
            help='port number or ipaddr:port to run the server on'),
    )
    help = 'Runs a development server with data from the given fixture(s).'
    args = '[fixture ...]'
    # Fixtures may reference models that would fail validation; skip it.
    requires_model_validation = False

    def handle(self, *fixture_labels, **options):
        """Create a test DB, load the fixtures, then start runserver."""
        from django.core.management import call_command
        from django.db import connection
        verbosity = int(options.get('verbosity', 1))
        addrport = options.get('addrport')
        # Create a test database.
        db_name = connection.creation.create_test_db(verbosity=verbosity)
        # Import the fixture data into the test database.
        call_command('loaddata', *fixture_labels, **{'verbosity': verbosity})
        # Run the development server. Turn off auto-reloading because it causes
        # a strange error -- it causes this handle() method to be called
        # multiple times.
        shutdown_message = '\nServer stopped.\nNote that the test database, %r, has not been deleted. You can explore it on your own.' % db_name
        call_command('runserver', addrport=addrport, shutdown_message=shutdown_message, use_reloader=False)
| apache-2.0 |
DavidAndreev/indico | indico/util/mimetypes.py | 2 | 3313 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals, absolute_import
import mimetypes
import re
#: Exact MIME-type -> icon-class lookups, consulted before the regex rules
#: in ``icon_from_mimetype`` below.
_exact_mapping = {
    'application/json': 'icon-file-css',
    'text/css': 'icon-file-css',
    'text/calendar': 'icon-calendar',
    # Word
    'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'icon-file-word',
    'application/msword': 'icon-file-word',
    # PDF
    'application/pdf': 'icon-file-pdf',
    # Excel
    'application/vnd.ms-excel': 'icon-file-excel',
    'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': 'icon-file-excel',
    # Powerpoint
    'application/vnd.ms-powerpoint': 'icon-file-presentation',
    'application/vnd.openxmlformats-officedocument.presentationml.presentation': 'icon-file-presentation',
    # Archive
    'application/x-7z-compressed': 'icon-file-zip',
    'application/x-ace-compressed': 'icon-file-zip',
    'application/x-rar-compressed': 'icon-file-zip',
    'application/x-tar': 'icon-file-zip',
    'application/zip': 'icon-file-zip',
    # Markup Languages
    'application/xml': 'icon-file-xml',
    'text/xml': 'icon-file-xml',
    'text/n3': 'icon-file-xml',
    'text/html': 'icon-file-xml',
    'text/sgml': 'icon-file-xml',
    # X-separated-values
    'text/csv': 'icon-file-spreadsheet',
    'text/tab-separated-values': 'icon-file-spreadsheet',
}
_regex_mapping = [
# Archive
('^application/x-bzip', 'icon-file-zip'), # matches bzip and bzip2
# Audio
('^audio/', 'icon-file-music'),
# Images
('^image/', 'icon-file-image'),
# Text
('^text/', 'icon-file-text'),
# Video
('^video/', 'icon-file-video'),
# OpenOffice
('application/vnd\.oasis\.opendocument\.', 'icon-file-openoffice'),
# XML
('.+/.+\+xml$', 'icon-file-xml'),
# JSON
('.+/.+\+json$', 'icon-file-css')
]
_regex_mapping = [(re.compile(regex), icon) for regex, icon in _regex_mapping]
def icon_from_mimetype(mimetype, default_icon='icon-file-filled'):
    """Gets the most suitable icon for a MIME type.

    Exact matches win; otherwise the regex rules are tried in order, and
    *default_icon* is returned when nothing matches.
    """
    mimetype = mimetype.lower()
    exact = _exact_mapping.get(mimetype)
    if exact is not None:
        return exact
    for pattern, icon in _regex_mapping:
        if pattern.search(mimetype):
            return icon
    return default_icon
def register_custom_mimetypes():
    """Registers additional extension/mimetype mappings.

    This is used for mimetypes/extensions that are not in the official
    mapping but useful, e.g. because indico has special handling for
    files with that type.
    """
    # Use text strings, not bytes: on Python 3, ``mimetypes.add_type``
    # with bytes keys is never matched by ``guess_type`` (which looks up
    # str extensions), silently breaking the mapping.
    mimetypes.add_type('text/markdown', '.md')
| gpl-3.0 |
MackZxh/OCA-Choice | knowledge/document_page_approval/document_page_approval.py | 11 | 9143 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp import models, fields, SUPERUSER_ID
class document_page_history_wkfl(models.Model):
    """Approval workflow state and helpers for document page revisions.

    Mixes the old OpenERP API (cr/uid methods used by the workflow) with
    new-API compute methods for the fields declared at the bottom.
    """
    _inherit = 'document.page.history'

    def page_approval_draft(self, cr, uid, ids, context=None):
        """Reset revisions to draft and email approvers when approval
        is required somewhere up the page hierarchy."""
        self.write(cr, uid, ids, {'state': 'draft'})
        template_id = self.pool.get('ir.model.data').get_object_reference(
            cr, uid,
            'document_page_approval',
            'email_template_new_draft_need_approval')[1]
        for page in self.browse(cr, uid, ids, context=context):
            if page.is_parent_approval_required:
                self.pool.get('email.template').send_mail(
                    cr,
                    uid,
                    template_id,
                    page.id,
                    force_send=True
                )
        return True

    def page_approval_approved(self, cr, uid, ids, context=None):
        """Mark revisions as approved (stamping date/user) and post a
        message so the page's followers are notified."""
        model_data_obj = self.pool.get('ir.model.data')
        message_obj = self.pool.get('mail.message')
        self.write(cr, uid, ids, {
            'state': 'approved',
            'approved_date': datetime.now().strftime(
                DEFAULT_SERVER_DATETIME_FORMAT),
            'approved_uid': uid
        }, context=context)
        # Notify followers a new version is available
        for page_history in self.browse(cr, uid, ids, context=context):
            subtype_id = model_data_obj.get_object_reference(
                cr, SUPERUSER_ID, 'mail', 'mt_comment')[1]
            message_obj.create(
                cr, uid,
                {'res_id': page_history.page_id.id,
                 'model': 'document.page',
                 'subtype_id': subtype_id,
                 'body': _('New version of the document %s'
                           ' approved.') % page_history.page_id.name
                 }
            )
        return True

    def _can_user_approve_page(self):
        """Compute whether the current user may approve each revision."""
        user = self.env.user
        for page in self:
            page.can_user_approve_page = page.can_user_approve_this_page(
                page.page_id,
                user
            )

    def can_user_approve_this_page(self, page, user):
        """Return True if *user* belongs to the approver group of *page*
        or of any of its ancestors (walks parent_id recursively)."""
        if page:
            res = page.approver_gid in user.groups_id
            res = res or self.can_user_approve_this_page(page.parent_id, user)
        else:
            res = False
        return res

    def get_approvers_guids(self):
        """Return {history_id: [approver group ids]} for each record."""
        res = {}
        for page in self:
            res[page.id] = self.get_approvers_guids_for_page(page.page_id)
        return res

    def get_approvers_guids_for_page(self, page):
        """Collect approver group ids of *page* and all of its ancestors."""
        if page:
            if page.approver_gid:
                res = [page.approver_gid.id]
            else:
                res = []
            res.extend(self.get_approvers_guids_for_page(page.parent_id))
        else:
            res = []
        return res

    def _get_approvers_email(self):
        """Compute a comma-separated list of approver email addresses,
        falling back to the matching employee's work email."""
        for page in self:
            emails = ''
            guids = self.get_approvers_guids()
            uids = [i.id for i in self.env['res.users'].search([
                ('groups_id', 'in', guids[page.id])
            ])]
            users = self.env['res.users'].browse(uids)
            for user in users:
                if user.email:
                    emails += user.email
                    emails += ','
                else:
                    empl = self.env['hr.employee'].search([
                        ('login', '=', user.login)
                    ])
                    if empl.work_email:
                        emails += empl.work_email
                        emails += ','
            # Strip the trailing comma left by the loop above.
            page.get_approvers_email = emails[:-1]

    def _get_page_url(self):
        """Compute a backend URL pointing at this revision's form view."""
        for page in self:
            base_url = self.env['ir.config_parameter'].get_param(
                'web.base.url',
                default='http://localhost:8069'
            )
            page.get_page_url = (
                '{}/web#db={}&id={}&view_type=form&'
                'model=document.page.history').format(
                base_url,
                self.env.cr.dbname,
                page.id
            )

    # Workflow state of the revision.
    state = fields.Selection(
        [('draft', 'Draft'), ('approved', 'Approved')],
        'Status',
        readonly=True
    )
    approved_date = fields.Datetime("Approved Date")
    approved_uid = fields.Many2one(
        'res.users',
        "Approved By"
    )
    # Mirrors the page's (inherited) approval requirement.
    is_parent_approval_required = fields.Boolean(
        related='page_id.is_parent_approval_required',
        string="parent approval",
        store=False
    )
    can_user_approve_page = fields.Boolean(
        compute=_can_user_approve_page,
        string="can user approve this page",
        store=False
    )
    get_approvers_email = fields.Text(
        compute=_get_approvers_email,
        string="get all approvers email",
        store=False
    )
    get_page_url = fields.Text(
        compute=_get_page_url,
        string="URL",
        store=False
    )
class document_page_approval(models.Model):
    """Adds an approval requirement to document pages: when approval is
    required (here or on an ancestor), readers see the latest *approved*
    revision instead of the raw page content."""
    _inherit = 'document.page'

    def _get_display_content(self):
        """Compute the content to display: the category index for
        categories, the newest approved revision when approval is
        required, and the raw content otherwise."""
        for page in self:
            content = ""
            if page.type == "category":
                content = self._get_page_index(page, link=False)
            else:
                history = self.env['document.page.history']
                if self.is_approval_required(page):
                    # Newest approved revision wins (create_date DESC).
                    history_ids = history.search(
                        [
                            ('page_id', '=', page.id),
                            ('state', '=', 'approved')
                        ],
                        limit=1,
                        order='create_date DESC'
                    )
                    content = history_ids.content
                else:
                    content = page.content
            page.display_content = content

    def _get_approved_date(self):
        """Compute the approval date of the newest approved revision
        (False when approval is not required)."""
        for page in self:
            approved_date = False
            if self.is_approval_required(page):
                history = self.env['document.page.history']
                history_ids = history.search(
                    [
                        ('page_id', '=', page.id),
                        ('state', '=', 'approved')
                    ],
                    limit=1,
                    order='create_date DESC'
                )
                approved_date = history_ids.approved_date
            page.approved_date = approved_date

    def _get_approved_uid(self):
        """Compute the approver of the newest approved revision
        (False when approval is not required)."""
        for page in self:
            approved_uid = False
            if self.is_approval_required(page):
                history = self.env['document.page.history']
                history_ids = history.search(
                    [
                        ('page_id', '=', page.id),
                        ('state', '=', 'approved')
                    ],
                    limit=1,
                    order='create_date DESC'
                )
                approved_uid = history_ids.approved_uid.id
            page.approved_uid = approved_uid

    def _is_parent_approval_required(self):
        """Compute whether this page or any ancestor requires approval."""
        for page in self:
            page.is_parent_approval_required = self.is_approval_required(page)

    def is_approval_required(self, page):
        """Return True if *page* or any of its ancestors has the
        'Require approval' flag set (walks parent_id recursively)."""
        if page:
            res = page.approval_required
            res = res or self.is_approval_required(page.parent_id)
        else:
            res = False
        return res

    display_content = fields.Text(
        compute=_get_display_content,
        string='Displayed Content'
    )
    approved_date = fields.Datetime(
        compute=_get_approved_date,
        string="Approved Date"
    )
    approved_uid = fields.Many2one(
        'res.users',
        compute=_get_approved_uid,
        string="Approved By",
    )
    approval_required = fields.Boolean("Require approval")
    is_parent_approval_required = fields.Boolean(
        compute=_is_parent_approval_required,
        string="parent approval"
    )
    approver_gid = fields.Many2one(
        "res.groups",
        "Approver group"
    )
| lgpl-3.0 |
rljacobson/FoxySheep | python_target/FoxySheep/Errors/ErrorHandler.py | 1 | 2893 | """
This module defines the mechanism by which errors are handled in FoxySheep.
"""
import sys
from antlr4.ParserRuleContext import ParserRuleContext
from AST import ASTNode
def _print_err(*args, flush=False):
sys.stderr.write(' '.join(map(str, args)) + '\n')
if flush:
sys.stderr.flush()
class ErrorBase:
    """Common behaviour for FoxySheep errors: holds the source locations
    (parse-tree and/or AST node) and renders a 'line:col:Name: message'
    report via to_string()."""

    def __init__(self, parse_tree_node: ParserRuleContext=None,
                 ast_node: ASTNode = None):
        """Record the source nodes and derive the error name from the
        subclass name, dropping a 'Base' suffix or the trailing
        underscore used to avoid clashing with builtins."""
        self.parse_tree_node = parse_tree_node
        self.ast_node = ast_node
        name = type(self).__name__
        if name.endswith('Base'):
            name = name[:-4]
        elif name.endswith('_'):
            name = name[:-1]
        self.name = name
        self.message = 'Unknown error.'

    def get_line_number(self):
        """
        Returns the line number in the source text where the error occurs, or -1 if unknown.
        :return: line number, integer.
        """
        node = self.parse_tree_node
        return node.start.getLine() if node else -1

    def get_char_position(self):
        """
        Returns the character position in the line in the source text where the error occurs, or -1 if unknown.
        :return: character position, integer.
        """
        node = self.parse_tree_node
        return node.start.getCharPositionInLine() if node else -1

    def to_string(self):
        """Render the error as '<line>:<col>:<Name>: <message>', omitting
        the location prefix when both coordinates are unknown."""
        line_no = self.get_line_number()
        col_no = self.get_char_position()
        location = ''
        if line_no > -1 or col_no > -1:
            location = '{0}:{1}:'.format(line_no, col_no)
        return '{0}{1}: {2}'.format(location, self.name, self.message)
class UnknownFunctionError(ErrorBase):
    """Raised for calls to functions that have no known definition."""

    def __init__(self, identifier='', **kwargs):
        super().__init__(**kwargs)
        self.message = ('The function {0} is being called but is not '
                        'yet defined.'.format(identifier))


class NotImplementedError_(ErrorBase):
    """Reports use of a feature FoxySheep does not implement yet."""

    def __init__(self, feature: str = '', **kwargs):
        super().__init__(**kwargs)
        self.message = '{0} has not yet been implemented.'.format(feature)


class ExternalExceptionError(ErrorBase):
    """
    An exception from an external library repackaged as an ErrorBase subclass.
    """

    def __init__(self, e: Exception, **kwargs):
        super().__init__(**kwargs)
        # Keep the original exception around for callers that need it.
        self._exception = e
        self.message = str(e)
def handle_error(error: ErrorBase):
    """Report *error* on stderr; the single funnel for FoxySheep errors."""
    _print_err(error.to_string())
| bsd-2-clause |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/idlelib/configHelpSourceEdit.py | 149 | 6680 | "Dialog to specify or edit the parameters for a user configured help source."
import os
import sys
from Tkinter import *
import tkMessageBox
import tkFileDialog
class GetHelpSourceDialog(Toplevel):
    """Modal dialog collecting a menu label and a URL/file path for an
    additional IDLE help source (Python 2 / Tkinter)."""

    def __init__(self, parent, title, menuItem='', filePath=''):
        """Get menu entry and url/ local file location for Additional Help
        User selects a name for the Help resource and provides a web url
        or a local file as its source. The user can enter a url or browse
        for the file.
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Cancel)
        self.parent = parent
        # Set on Ok to a (menu_item, path) tuple; None on cancel.
        self.result = None
        self.CreateWidgets()
        self.menu.set(menuItem)
        self.path.set(filePath)
        self.withdraw()  # hide while setting geometry
        # needs to be done here so that the winfo_reqwidth is valid
        self.update_idletasks()
        # centre dialog over parent:
        self.geometry("+%d+%d" %
                      ((parent.winfo_rootx() + ((parent.winfo_width()/2)
                        - (self.winfo_reqwidth()/2)),
                        parent.winfo_rooty() + ((parent.winfo_height()/2)
                        - (self.winfo_reqheight()/2)))))
        self.deiconify()  # geometry set, unhide
        self.bind('<Return>', self.Ok)
        # Block until the dialog is destroyed (modal behaviour).
        self.wait_window()

    def CreateWidgets(self):
        """Build the two entry fields, the Browse button and OK/Cancel."""
        self.menu = StringVar(self)
        self.path = StringVar(self)
        self.fontSize = StringVar(self)
        self.frameMain = Frame(self, borderwidth=2, relief=GROOVE)
        self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        labelMenu = Label(self.frameMain, anchor=W, justify=LEFT,
                          text='Menu Item:')
        self.entryMenu = Entry(self.frameMain, textvariable=self.menu,
                               width=30)
        self.entryMenu.focus_set()
        labelPath = Label(self.frameMain, anchor=W, justify=LEFT,
                          text='Help File Path: Enter URL or browse for file')
        self.entryPath = Entry(self.frameMain, textvariable=self.path,
                               width=40)
        self.entryMenu.focus_set()
        labelMenu.pack(anchor=W, padx=5, pady=3)
        self.entryMenu.pack(anchor=W, padx=5, pady=3)
        labelPath.pack(anchor=W, padx=5, pady=3)
        self.entryPath.pack(anchor=W, padx=5, pady=3)
        browseButton = Button(self.frameMain, text='Browse', width=8,
                              command=self.browseFile)
        browseButton.pack(pady=3)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        self.buttonOk = Button(frameButtons, text='OK',
                               width=8, default=ACTIVE, command=self.Ok)
        self.buttonOk.grid(row=0, column=0, padx=5, pady=5)
        self.buttonCancel = Button(frameButtons, text='Cancel',
                                   width=8, command=self.Cancel)
        self.buttonCancel.grid(row=0, column=1, padx=5, pady=5)

    def browseFile(self):
        """Open a file dialog seeded with the current path (or a sensible
        default directory) and store the selection in self.path."""
        filetypes = [
            ("HTML Files", "*.htm *.html", "TEXT"),
            ("PDF Files", "*.pdf", "TEXT"),
            ("Windows Help Files", "*.chm"),
            ("Text Files", "*.txt", "TEXT"),
            ("All Files", "*")]
        path = self.path.get()
        if path:
            dir, base = os.path.split(path)
        else:
            base = None
            # Default to the bundled Doc directory on Windows.
            if sys.platform[:3] == 'win':
                dir = os.path.join(os.path.dirname(sys.executable), 'Doc')
                if not os.path.isdir(dir):
                    dir = os.getcwd()
            else:
                dir = os.getcwd()
        opendialog = tkFileDialog.Open(parent=self, filetypes=filetypes)
        file = opendialog.show(initialdir=dir, initialfile=base)
        if file:
            self.path.set(file)

    def MenuOk(self):
        "Simple validity check for a sensible menu item name"
        menuOk = True
        menu = self.menu.get()
        # NOTE(review): strip() returns a new string and its result is
        # discarded here, so surrounding whitespace is NOT removed before
        # validation -- looks unintended; confirm against upstream.
        menu.strip()
        if not menu:
            tkMessageBox.showerror(title='Menu Item Error',
                                   message='No menu item specified',
                                   parent=self)
            self.entryMenu.focus_set()
            menuOk = False
        elif len(menu) > 30:
            tkMessageBox.showerror(title='Menu Item Error',
                                   message='Menu item too long:'
                                           '\nLimit 30 characters.',
                                   parent=self)
            self.entryMenu.focus_set()
            menuOk = False
        return menuOk

    def PathOk(self):
        "Simple validity check for menu file path"
        pathOk = True
        path = self.path.get()
        # NOTE(review): same discarded strip() result as in MenuOk.
        path.strip()
        if not path:  # no path specified
            tkMessageBox.showerror(title='File Path Error',
                                   message='No help file path specified.',
                                   parent=self)
            self.entryPath.focus_set()
            pathOk = False
        elif path.startswith(('www.', 'http')):
            # Web addresses are accepted as-is.
            pass
        else:
            if path[:5] == 'file:':
                path = path[5:]
            if not os.path.exists(path):
                tkMessageBox.showerror(title='File Path Error',
                                       message='Help file path does not exist.',
                                       parent=self)
                self.entryPath.focus_set()
                pathOk = False
        return pathOk

    def Ok(self, event=None):
        """Validate both fields, stash (menu, path) in self.result and
        close; local files get a file:// prefix on macOS."""
        if self.MenuOk() and self.PathOk():
            self.result = (self.menu.get().strip(),
                           self.path.get().strip())
            if sys.platform == 'darwin':
                path = self.result[1]
                if path.startswith(('www', 'file:', 'http:')):
                    pass
                else:
                    # Mac Safari insists on using the URI form for local files
                    self.result = list(self.result)
                    self.result[1] = "file://" + path
            self.destroy()

    def Cancel(self, event=None):
        """Discard any input and close the dialog."""
        self.result = None
        self.destroy()
# Manual smoke test: opens a window with a button that launches the dialog
# and prints its result (Python 2 print statement).
if __name__ == '__main__':
    # test the dialog
    root = Tk()

    def run():
        keySeq = ''
        dlg = GetHelpSourceDialog(root, 'Get Help Source')
        print dlg.result

    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
| gpl-2.0 |
kaedroho/django | tests/template_tests/test_context.py | 40 | 7972 | from unittest import mock
from django.http import HttpRequest
from django.template import (
Context, Engine, RequestContext, Template, Variable, VariableDoesNotExist,
)
from django.template.context import RenderContext
from django.test import RequestFactory, SimpleTestCase
class ContextTests(SimpleTestCase):
    """Behavioural tests for django.template.Context: stack layering,
    context-manager push/update, flattening, comparison and set_upward."""

    def test_context(self):
        c = Context({"a": 1, "b": "xyzzy"})
        self.assertEqual(c["a"], 1)
        self.assertEqual(c.push(), {})
        c["a"] = 2
        self.assertEqual(c["a"], 2)
        self.assertEqual(c.get("a"), 2)
        self.assertEqual(c.pop(), {"a": 2})
        self.assertEqual(c["a"], 1)
        self.assertEqual(c.get("foo", 42), 42)
        self.assertEqual(c, mock.ANY)

    def test_push_context_manager(self):
        # Changes made inside the pushed layer vanish on exit.
        c = Context({"a": 1})
        with c.push():
            c['a'] = 2
            self.assertEqual(c['a'], 2)
        self.assertEqual(c['a'], 1)
        with c.push(a=3):
            self.assertEqual(c['a'], 3)
        self.assertEqual(c['a'], 1)

    def test_update_context_manager(self):
        c = Context({"a": 1})
        with c.update({}):
            c['a'] = 2
            self.assertEqual(c['a'], 2)
        self.assertEqual(c['a'], 1)
        with c.update({'a': 3}):
            self.assertEqual(c['a'], 3)
        self.assertEqual(c['a'], 1)

    def test_push_context_manager_with_context_object(self):
        c = Context({'a': 1})
        with c.push(Context({'a': 3})):
            self.assertEqual(c['a'], 3)
        self.assertEqual(c['a'], 1)

    def test_update_context_manager_with_context_object(self):
        c = Context({'a': 1})
        with c.update(Context({'a': 3})):
            self.assertEqual(c['a'], 3)
        self.assertEqual(c['a'], 1)

    def test_push_proper_layering(self):
        # Each push()ed Context becomes its own dict layer, on top of the
        # builtins layer that every Context starts with.
        c = Context({'a': 1})
        c.push(Context({'b': 2}))
        c.push(Context({'c': 3, 'd': {'z': '26'}}))
        self.assertEqual(
            c.dicts,
            [
                {'False': False, 'None': None, 'True': True},
                {'a': 1},
                {'b': 2},
                {'c': 3, 'd': {'z': '26'}},
            ]
        )

    def test_update_proper_layering(self):
        c = Context({'a': 1})
        c.update(Context({'b': 2}))
        c.update(Context({'c': 3, 'd': {'z': '26'}}))
        self.assertEqual(
            c.dicts,
            [
                {'False': False, 'None': None, 'True': True},
                {'a': 1},
                {'b': 2},
                {'c': 3, 'd': {'z': '26'}},
            ]
        )

    def test_setdefault(self):
        c = Context()
        x = c.setdefault('x', 42)
        self.assertEqual(x, 42)
        self.assertEqual(c['x'], 42)
        # Second setdefault must keep the existing value.
        x = c.setdefault('x', 100)
        self.assertEqual(x, 42)
        self.assertEqual(c['x'], 42)

    def test_resolve_on_context_method(self):
        """
        #17778 -- Variable shouldn't resolve RequestContext methods
        """
        empty_context = Context()
        with self.assertRaises(VariableDoesNotExist):
            Variable('no_such_variable').resolve(empty_context)
        # 'new' is a Context method name; it must not be resolvable.
        with self.assertRaises(VariableDoesNotExist):
            Variable('new').resolve(empty_context)
        self.assertEqual(
            Variable('new').resolve(Context({'new': 'foo'})),
            'foo',
        )

    def test_render_context(self):
        test_context = RenderContext({'fruit': 'papaya'})
        # push() limits access to the topmost dict
        test_context.push()
        test_context['vegetable'] = 'artichoke'
        self.assertEqual(list(test_context), ['vegetable'])
        self.assertNotIn('fruit', test_context)
        with self.assertRaises(KeyError):
            test_context['fruit']
        self.assertIsNone(test_context.get('fruit'))

    def test_flatten_context(self):
        a = Context()
        a.update({'a': 2})
        a.update({'b': 4})
        a.update({'c': 8})
        # flatten() merges all layers, including the builtins.
        self.assertEqual(a.flatten(), {
            'False': False, 'None': None, 'True': True,
            'a': 2, 'b': 4, 'c': 8
        })

    def test_flatten_context_with_context(self):
        """
        Context.push() with a Context argument should work.
        """
        a = Context({'a': 2})
        a.push(Context({'z': '8'}))
        self.assertEqual(a.flatten(), {
            'False': False,
            'None': None,
            'True': True,
            'a': 2,
            'z': '8',
        })

    def test_context_comparable(self):
        """
        #21765 -- equality comparison should work
        """
        test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
        self.assertEqual(Context(test_data), Context(test_data))
        a = Context()
        b = Context()
        self.assertEqual(a, b)
        # update only a
        a.update({'a': 1})
        self.assertNotEqual(a, b)
        # update both to check regression
        a.update({'c': 3})
        b.update({'c': 3})
        self.assertNotEqual(a, b)
        # make contexts equals again
        b.update({'a': 1})
        self.assertEqual(a, b)

    def test_copy_request_context_twice(self):
        """
        #24273 -- Copy twice shouldn't raise an exception
        """
        RequestContext(HttpRequest()).new().new()

    def test_set_upward(self):
        c = Context({'a': 1})
        c.set_upward('a', 2)
        self.assertEqual(c.get('a'), 2)

    def test_set_upward_empty_context(self):
        empty_context = Context()
        empty_context.set_upward('a', 1)
        self.assertEqual(empty_context.get('a'), 1)

    def test_set_upward_with_push(self):
        """
        The highest context which has the given key is used.
        """
        c = Context({'a': 1})
        c.push({'a': 2})
        c.set_upward('a', 3)
        self.assertEqual(c.get('a'), 3)
        c.pop()
        self.assertEqual(c.get('a'), 1)

    def test_set_upward_with_push_no_match(self):
        """
        The highest context is used if the given key isn't found.
        """
        c = Context({'b': 1})
        c.push({'b': 2})
        c.set_upward('a', 2)
        self.assertEqual(len(c.dicts), 3)
        self.assertEqual(c.dicts[-1]['a'], 2)
class RequestContextTests(SimpleTestCase):
    """Tests specific to RequestContext: include interaction, stack size
    optimisation, comparison, and post-construction mutation."""

    request_factory = RequestFactory()

    def test_include_only(self):
        """
        #15721 -- ``{% include %}`` and ``RequestContext`` should work
        together.
        """
        engine = Engine(loaders=[
            ('django.template.loaders.locmem.Loader', {
                'child': '{{ var|default:"none" }}',
            }),
        ])
        request = self.request_factory.get('/')
        ctx = RequestContext(request, {'var': 'parent'})
        self.assertEqual(engine.from_string('{% include "child" %}').render(ctx), 'parent')
        # 'only' must hide the parent context from the included template.
        self.assertEqual(engine.from_string('{% include "child" only %}').render(ctx), 'none')

    def test_stack_size(self):
        """Optimized RequestContext construction (#7116)."""
        request = self.request_factory.get('/')
        ctx = RequestContext(request, {})
        # The stack contains 4 items:
        # [builtins, supplied context, context processor, empty dict]
        self.assertEqual(len(ctx.dicts), 4)

    def test_context_comparable(self):
        # Create an engine without any context processors.
        test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
        # test comparing RequestContext to prevent problems if somebody
        # adds __eq__ in the future
        request = self.request_factory.get('/')
        self.assertEqual(
            RequestContext(request, dict_=test_data),
            RequestContext(request, dict_=test_data),
        )

    def test_modify_context_and_render(self):
        template = Template('{{ foo }}')
        request = self.request_factory.get('/')
        context = RequestContext(request, {})
        # Assignment after construction must be visible to render().
        context['foo'] = 'foo'
        self.assertEqual(template.render(context), 'foo')
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.