repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
ncliam/serverpos | openerp/addons/hw_escpos/escpos/constants.py | 278 | 7471 | # -*- coding: utf-8 -*-
""" ESC/POS Commands (Constants) """
# Feed control sequences
CTL_LF = '\x0a' # Print and line feed
CTL_FF = '\x0c' # Form feed
CTL_CR = '\x0d' # Carriage return
CTL_HT = '\x09' # Horizontal tab
CTL_VT = '\x0b' # Vertical tab
# RT Status commands
DLE_EOT_PRINTER = '\x10\x04\x01' # Transmit printer status
DLE_EOT_OFFLINE = '\x10\x04\x02'
DLE_EOT_ERROR = '\x10\x04\x03'
DLE_EOT_PAPER = '\x10\x04\x04'
# Printer hardware
HW_INIT = '\x1b\x40' # Clear data in buffer and reset modes
HW_SELECT = '\x1b\x3d\x01' # Printer select
HW_RESET = '\x1b\x3f\x0a\x00' # Reset printer hardware
# Cash Drawer
CD_KICK_2 = '\x1b\x70\x00' # Sends a pulse to pin 2 []
CD_KICK_5 = '\x1b\x70\x01' # Sends a pulse to pin 5 []
# Paper
PAPER_FULL_CUT = '\x1d\x56\x00' # Full cut paper
PAPER_PART_CUT = '\x1d\x56\x01' # Partial cut paper
# Text format
TXT_NORMAL = '\x1b\x21\x00' # Normal text
TXT_2HEIGHT = '\x1b\x21\x10' # Double height text
TXT_2WIDTH = '\x1b\x21\x20' # Double width text
TXT_DOUBLE = '\x1b\x21\x30' # Double height & Width
TXT_UNDERL_OFF = '\x1b\x2d\x00' # Underline font OFF
TXT_UNDERL_ON = '\x1b\x2d\x01' # Underline font 1-dot ON
TXT_UNDERL2_ON = '\x1b\x2d\x02' # Underline font 2-dot ON
TXT_BOLD_OFF = '\x1b\x45\x00' # Bold font OFF
TXT_BOLD_ON = '\x1b\x45\x01' # Bold font ON
TXT_FONT_A = '\x1b\x4d\x00' # Font type A
TXT_FONT_B = '\x1b\x4d\x01' # Font type B
TXT_ALIGN_LT = '\x1b\x61\x00' # Left justification
TXT_ALIGN_CT = '\x1b\x61\x01' # Centering
TXT_ALIGN_RT = '\x1b\x61\x02' # Right justification
TXT_COLOR_BLACK = '\x1b\x72\x00' # Default Color
TXT_COLOR_RED = '\x1b\x72\x01' # Alternative Color ( Usually Red )
# Text Encoding
TXT_ENC_PC437 = '\x1b\x74\x00' # PC437 USA
TXT_ENC_KATAKANA= '\x1b\x74\x01' # KATAKANA (JAPAN)
TXT_ENC_PC850 = '\x1b\x74\x02' # PC850 Multilingual
TXT_ENC_PC860 = '\x1b\x74\x03' # PC860 Portuguese
TXT_ENC_PC863 = '\x1b\x74\x04' # PC863 Canadian-French
TXT_ENC_PC865 = '\x1b\x74\x05' # PC865 Nordic
TXT_ENC_KANJI6 = '\x1b\x74\x06' # One-pass Kanji, Hiragana
TXT_ENC_KANJI7 = '\x1b\x74\x07' # One-pass Kanji
TXT_ENC_KANJI8 = '\x1b\x74\x08' # One-pass Kanji
TXT_ENC_PC851 = '\x1b\x74\x0b' # PC851 Greek
TXT_ENC_PC853 = '\x1b\x74\x0c' # PC853 Turkish
TXT_ENC_PC857 = '\x1b\x74\x0d' # PC857 Turkish
TXT_ENC_PC737 = '\x1b\x74\x0e' # PC737 Greek
TXT_ENC_8859_7 = '\x1b\x74\x0f' # ISO8859-7 Greek
TXT_ENC_WPC1252 = '\x1b\x74\x10' # WPC1252
TXT_ENC_PC866 = '\x1b\x74\x11' # PC866 Cyrillic #2
TXT_ENC_PC852 = '\x1b\x74\x12' # PC852 Latin2
TXT_ENC_PC858 = '\x1b\x74\x13' # PC858 Euro
TXT_ENC_KU42 = '\x1b\x74\x14' # KU42 Thai
TXT_ENC_TIS11 = '\x1b\x74\x15' # TIS11 Thai
TXT_ENC_TIS18 = '\x1b\x74\x1a' # TIS18 Thai
TXT_ENC_TCVN3 = '\x1b\x74\x1e' # TCVN3 Vietnamese
TXT_ENC_TCVN3B = '\x1b\x74\x1f' # TCVN3 Vietnamese
TXT_ENC_PC720 = '\x1b\x74\x20' # PC720 Arabic
TXT_ENC_WPC775 = '\x1b\x74\x21' # WPC775 Baltic Rim
TXT_ENC_PC855 = '\x1b\x74\x22' # PC855 Cyrillic
TXT_ENC_PC861 = '\x1b\x74\x23' # PC861 Icelandic
TXT_ENC_PC862 = '\x1b\x74\x24' # PC862 Hebrew
TXT_ENC_PC864 = '\x1b\x74\x25' # PC864 Arabic
TXT_ENC_PC869 = '\x1b\x74\x26' # PC869 Greek
TXT_ENC_PC936 = '\x1C\x21\x00' # PC936 GBK(Guobiao Kuozhan)
TXT_ENC_8859_2 = '\x1b\x74\x27' # ISO8859-2 Latin2
TXT_ENC_8859_9 = '\x1b\x74\x28' # ISO8859-15 Latin9
TXT_ENC_PC1098 = '\x1b\x74\x29' # PC1098 Farsi
TXT_ENC_PC1118 = '\x1b\x74\x2a' # PC1118 Lithuanian
TXT_ENC_PC1119 = '\x1b\x74\x2b' # PC1119 Lithuanian
TXT_ENC_PC1125 = '\x1b\x74\x2c' # PC1125 Ukrainian
TXT_ENC_WPC1250 = '\x1b\x74\x2d' # WPC1250 Latin2
TXT_ENC_WPC1251 = '\x1b\x74\x2e' # WPC1251 Cyrillic
TXT_ENC_WPC1253 = '\x1b\x74\x2f' # WPC1253 Greek
TXT_ENC_WPC1254 = '\x1b\x74\x30' # WPC1254 Turkish
TXT_ENC_WPC1255 = '\x1b\x74\x31' # WPC1255 Hebrew
TXT_ENC_WPC1256 = '\x1b\x74\x32' # WPC1256 Arabic
TXT_ENC_WPC1257 = '\x1b\x74\x33' # WPC1257 Baltic Rim
TXT_ENC_WPC1258 = '\x1b\x74\x34' # WPC1258 Vietnamese
TXT_ENC_KZ1048 = '\x1b\x74\x35' # KZ-1048 Kazakhstan
TXT_ENC_KATAKANA_MAP = {
# Maps UTF-8 Katakana symbols to KATAKANA Page Codes
# Half-Width Katakanas
'\xef\xbd\xa1':'\xa1', # 。
'\xef\xbd\xa2':'\xa2', # 「
'\xef\xbd\xa3':'\xa3', # 」
'\xef\xbd\xa4':'\xa4', # 、
'\xef\xbd\xa5':'\xa5', # ・
'\xef\xbd\xa6':'\xa6', # ヲ
'\xef\xbd\xa7':'\xa7', # ァ
'\xef\xbd\xa8':'\xa8', # ィ
'\xef\xbd\xa9':'\xa9', # ゥ
'\xef\xbd\xaa':'\xaa', # ェ
'\xef\xbd\xab':'\xab', # ォ
'\xef\xbd\xac':'\xac', # ャ
'\xef\xbd\xad':'\xad', # ュ
'\xef\xbd\xae':'\xae', # ョ
'\xef\xbd\xaf':'\xaf', # ッ
'\xef\xbd\xb0':'\xb0', # ー
'\xef\xbd\xb1':'\xb1', # ア
'\xef\xbd\xb2':'\xb2', # イ
'\xef\xbd\xb3':'\xb3', # ウ
'\xef\xbd\xb4':'\xb4', # エ
'\xef\xbd\xb5':'\xb5', # オ
'\xef\xbd\xb6':'\xb6', # カ
'\xef\xbd\xb7':'\xb7', # キ
'\xef\xbd\xb8':'\xb8', # ク
'\xef\xbd\xb9':'\xb9', # ケ
'\xef\xbd\xba':'\xba', # コ
'\xef\xbd\xbb':'\xbb', # サ
'\xef\xbd\xbc':'\xbc', # シ
'\xef\xbd\xbd':'\xbd', # ス
'\xef\xbd\xbe':'\xbe', # セ
'\xef\xbd\xbf':'\xbf', # ソ
'\xef\xbe\x80':'\xc0', # タ
'\xef\xbe\x81':'\xc1', # チ
'\xef\xbe\x82':'\xc2', # ツ
'\xef\xbe\x83':'\xc3', # テ
'\xef\xbe\x84':'\xc4', # ト
'\xef\xbe\x85':'\xc5', # ナ
'\xef\xbe\x86':'\xc6', # ニ
'\xef\xbe\x87':'\xc7', # ヌ
'\xef\xbe\x88':'\xc8', # ネ
'\xef\xbe\x89':'\xc9', # ノ
'\xef\xbe\x8a':'\xca', # ハ
'\xef\xbe\x8b':'\xcb', # ヒ
'\xef\xbe\x8c':'\xcc', # フ
'\xef\xbe\x8d':'\xcd', # ヘ
'\xef\xbe\x8e':'\xce', # ホ
'\xef\xbe\x8f':'\xcf', # マ
'\xef\xbe\x90':'\xd0', # ミ
'\xef\xbe\x91':'\xd1', # ム
'\xef\xbe\x92':'\xd2', # メ
'\xef\xbe\x93':'\xd3', # モ
'\xef\xbe\x94':'\xd4', # ヤ
'\xef\xbe\x95':'\xd5', # ユ
'\xef\xbe\x96':'\xd6', # ヨ
'\xef\xbe\x97':'\xd7', # ラ
'\xef\xbe\x98':'\xd8', # リ
'\xef\xbe\x99':'\xd9', # ル
'\xef\xbe\x9a':'\xda', # レ
'\xef\xbe\x9b':'\xdb', # ロ
'\xef\xbe\x9c':'\xdc', # ワ
'\xef\xbe\x9d':'\xdd', # ン
'\xef\xbe\x9e':'\xde', # ゙
'\xef\xbe\x9f':'\xdf', # ゚
}
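# --- Editor's note: illustrative sketch, not part of the original file. ---
# One way this map is typically applied: replace each UTF-8 half-width
# katakana byte sequence in a string with its single-byte KATAKANA page
# code before sending the text to the printer. The helper name
# `encode_katakana` is hypothetical.
def encode_katakana(text):
    """Swap UTF-8 katakana sequences for KATAKANA code-page bytes."""
    for utf8_seq, page_code in TXT_ENC_KATAKANA_MAP.items():
        text = text.replace(utf8_seq, page_code)
    return text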
# Barcode format
BARCODE_TXT_OFF = '\x1d\x48\x00' # HRI barcode chars OFF
BARCODE_TXT_ABV = '\x1d\x48\x01' # HRI barcode chars above
BARCODE_TXT_BLW = '\x1d\x48\x02' # HRI barcode chars below
BARCODE_TXT_BTH = '\x1d\x48\x03' # HRI barcode chars both above and below
BARCODE_FONT_A = '\x1d\x66\x00' # Font type A for HRI barcode chars
BARCODE_FONT_B = '\x1d\x66\x01' # Font type B for HRI barcode chars
BARCODE_HEIGHT = '\x1d\x68\x64' # Barcode Height [1-255]
BARCODE_WIDTH = '\x1d\x77\x03' # Barcode Width [2-6]
BARCODE_UPC_A = '\x1d\x6b\x00' # Barcode type UPC-A
BARCODE_UPC_E = '\x1d\x6b\x01' # Barcode type UPC-E
BARCODE_EAN13 = '\x1d\x6b\x02' # Barcode type EAN13
BARCODE_EAN8 = '\x1d\x6b\x03' # Barcode type EAN8
BARCODE_CODE39 = '\x1d\x6b\x04' # Barcode type CODE39
BARCODE_ITF = '\x1d\x6b\x05' # Barcode type ITF
BARCODE_NW7 = '\x1d\x6b\x06' # Barcode type NW7
# Image format
S_RASTER_N = '\x1d\x76\x30\x00' # Set raster image normal size
S_RASTER_2W = '\x1d\x76\x30\x01' # Set raster image double width
S_RASTER_2H = '\x1d\x76\x30\x02' # Set raster image double height
S_RASTER_Q = '\x1d\x76\x30\x03' # Set raster image quadruple
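# --- Editor's note: illustrative sketch, not part of the original file. ---
# These constants are plain byte strings meant to be concatenated into the
# raw stream sent to the printer. A minimal (hypothetical) receipt could be
# assembled like this; how the bytes reach the device (USB, serial, network)
# is up to the driver that imports this module.
def _example_receipt():
    data = HW_INIT                                        # reset the printer
    data += TXT_ALIGN_CT + TXT_2HEIGHT + 'RECEIPT' + CTL_LF
    data += TXT_NORMAL + TXT_ALIGN_LT                     # normal, left-aligned
    data += 'item A .......... 1.00' + CTL_LF
    data += PAPER_PART_CUT                                # partial cut at the end
    return data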
| agpl-3.0 |
pongem/python-bot-project | appengine/standard/botapp/lib/django/db/migrations/operations/base.py | 127 | 4888 | from __future__ import unicode_literals
from django.db import router
class Operation(object):
"""
Base class for migration operations.
It's responsible both for mutating the in-memory model state
(see db/migrations/state.py) to represent what it performs, and
for actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
optionally specified by the user (e.g. custom Python code snippets).
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
# Should this operation be considered safe to elide and optimize across?
elidable = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
def describe(self):
"""
Outputs a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
Returns whether we're allowed to migrate the model.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def reduce(self, operation, in_between, app_label=None):
"""
Return either a list of operations the actual operation should be
replaced with or a boolean that indicates whether or not the specified
operation can be optimized across.
"""
if self.elidable:
return [operation]
elif operation.elidable:
return [self]
return False
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
)
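# --- Editor's note: illustrative sketch, not part of Django's source. ---
# A minimal custom Operation following the contract documented above: mutate
# in-memory state in state_forwards() and touch the live database in
# database_forwards()/database_backwards(). The class itself is hypothetical.
class ExampleNoopOperation(Operation):
    reversible = True
    reduces_to_sql = False
    elidable = True  # safe for the optimizer to drop entirely

    def state_forwards(self, app_label, state):
        pass  # changes no in-memory model state

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        pass  # changes nothing on the live database

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass  # nothing to undo

    def describe(self):
        return "Example no-op operation"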
| apache-2.0 |
Intel-Corporation/tensorflow | tensorflow/python/data/experimental/kernel_tests/matching_files_test.py | 14 | 5065 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `MatchingFilesDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class MatchingFilesDatasetTest(test_base.DatasetTestBase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _touchTempFiles(self, filenames):
for filename in filenames:
open(os.path.join(self.tmp_dir, filename), 'a').close()
def testNonExistingDirectory(self):
"""Test the MatchingFiles dataset with a non-existing directory."""
self.tmp_dir = os.path.join(self.tmp_dir, 'nonexistingdir')
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset, expected_error=(errors.NotFoundError, ''))
def testEmptyDirectory(self):
"""Test the MatchingFiles dataset with an empty directory."""
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset, expected_error=(errors.NotFoundError, ''))
def testSimpleDirectory(self):
"""Test the MatchingFiles dataset with a simple directory."""
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames
],
assert_items_equal=True)
def testFileSuffixes(self):
"""Test the MatchingFiles dataset using the suffixes of filename."""
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*.py'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames[1:-1]
],
assert_items_equal=True)
def testFileMiddles(self):
"""Test the MatchingFiles dataset using the middles of filename."""
filenames = ['aa.txt', 'bb.py', 'bbc.pyc', 'cc.pyc']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, 'b*.py*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames[1:3]
],
assert_items_equal=True)
def testNestedDirectories(self):
"""Test the MatchingFiles dataset with nested directories."""
filenames = []
width = 8
depth = 4
for i in range(width):
for j in range(depth):
new_base = os.path.join(self.tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
filenames.append(filename)
open(filename, 'w').close()
patterns = [
os.path.join(self.tmp_dir, os.path.join(*['**' for _ in range(depth)]),
suffix) for suffix in ['*.txt', '*.log']
]
dataset = matching_files.MatchingFilesDataset(patterns)
next_element = self.getNext(dataset)
expected_filenames = [
compat.as_bytes(filename)
for filename in filenames
if filename.endswith('.txt') or filename.endswith('.log')
]
actual_filenames = []
while True:
try:
actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))
except errors.OutOfRangeError:
break
self.assertItemsEqual(expected_filenames, actual_filenames)
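# --- Editor's note: illustrative sketch, not part of the original tests. ---
# Outside the test harness, the dataset under test is consumed like any other
# tf.data source; the pattern and helper below are hypothetical and assume
# eager execution.
def _example_usage(pattern='/tmp/logs/*.txt'):
    dataset = matching_files.MatchingFilesDataset(pattern)
    return [filename.numpy() for filename in dataset]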
if __name__ == '__main__':
test.main()
| apache-2.0 |
Sabayon/anaconda | pyanaconda/ui/gui/spokes/__init__.py | 1 | 4575 | # Base classes for Spokes
#
# Copyright (C) 2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
# Martin Sivak <msivak@redhat.com>
from pyanaconda.ui import common
from pyanaconda.ui.common import collect
from pyanaconda.ui.gui import GUIObject
import os.path
__all__ = ["StandaloneSpoke", "NormalSpoke", "PersonalizationSpoke",
"collect_spokes"]
class Spoke(GUIObject):
def __init__(self, data):
GUIObject.__init__(self, data)
def apply(self):
"""Apply the selections made on this Spoke to the object's preset
data object. This method must be provided by every subclass.
"""
raise NotImplementedError
@property
def completed(self):
"""Has this spoke been visited and completed? If not, a special warning
icon will be shown on the Hub beside the spoke, and a highlighted
message will be shown at the bottom of the Hub. Installation will not
be allowed to proceed until all spokes are complete.
"""
return False
def execute(self):
"""Cause the data object to take effect on the target system. This will
usually be as simple as calling one or more of the execute methods on
the data object. This method does not need to be provided by all
subclasses.
This method will be called in two different places: (1) Immediately
after initialize on kickstart installs. (2) Immediately after apply
in all cases.
"""
pass
class StandaloneSpoke(Spoke, common.StandaloneSpoke):
def __init__(self, data, storage, payload, instclass):
Spoke.__init__(self, data)
common.StandaloneSpoke.__init__(self, data, storage, payload, instclass)
def _on_continue_clicked(self, cb):
self.apply()
cb()
def register_event_cb(self, event, cb):
if event == "continue":
self.window.connect("continue-clicked", lambda *args: self._on_continue_clicked(cb))
elif event == "quit":
self.window.connect("quit-clicked", lambda *args: cb())
class NormalSpoke(Spoke, common.NormalSpoke):
def __init__(self, data, storage, payload, instclass):
Spoke.__init__(self, data)
common.NormalSpoke.__init__(self, data, storage, payload, instclass)
def on_back_clicked(self, window):
from gi.repository import Gtk
# Look for failed checks
failed_check = next(self.failed_checks, None)
if failed_check:
# Set the focus to the first failed check and stay in the spoke
failed_check.editable.grab_focus()
return
self.window.hide()
Gtk.main_quit()
class PersonalizationSpoke(Spoke, common.PersonalizationSpoke):
def __init__(self, data, storage, payload, instclass):
Spoke.__init__(self, data)
common.PersonalizationSpoke.__init__(self, data, storage, payload, instclass)
def collect_spokes(mask_paths, category):
"""Return a list of all spoke subclasses that should appear for a given
category. Look for them in files imported as module_path % basename(f)
:param mask_paths: list of mask, path tuples to search for classes
:type mask_paths: list of (mask, path)
:return: list of Spoke classes belonging to category
:rtype: list of Spoke classes
"""
spokes = []
for mask, path in mask_paths:
spokes.extend(collect(mask, path, lambda obj: hasattr(obj, "category") and obj.category != None and obj.category.__name__ == category))
return spokes
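# --- Editor's note: illustrative sketch, not part of the original file. ---
# A hypothetical call, following the (mask, path) contract documented above;
# the mask is an import-path template and the path is searched for modules:
#
# spokes = collect_spokes(
#     mask_paths=[("pyanaconda.ui.gui.spokes.%s", "/path/to/spokes")],
#     category="SoftwareCategory")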
| gpl-2.0 |
javiergarridomellado/Empresa_django | devcodela/lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/operations.py | 98 | 12894 | """
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from decimal import Decimal
from django.db.backends.oracle.base import DatabaseOperations
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.util import SpatialFunction
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.utils import six
class SDOOperation(SpatialFunction):
"Base class for SDO* Oracle operations."
sql_template = "%(function)s(%(geo_col)s, %(geometry)s) %(operator)s '%(result)s'"
def __init__(self, func, **kwargs):
kwargs.setdefault('operator', '=')
kwargs.setdefault('result', 'TRUE')
super(SDOOperation, self).__init__(func, **kwargs)
class SDODistance(SpatialFunction):
"Class for Distance queries."
sql_template = ('%(function)s(%(geo_col)s, %(geometry)s, %(tolerance)s) '
'%(operator)s %(result)s')
dist_func = 'SDO_GEOM.SDO_DISTANCE'
def __init__(self, op, tolerance=0.05):
super(SDODistance, self).__init__(self.dist_func,
tolerance=tolerance,
operator=op, result='%s')
class SDODWithin(SpatialFunction):
dwithin_func = 'SDO_WITHIN_DISTANCE'
sql_template = "%(function)s(%(geo_col)s, %(geometry)s, %%s) = 'TRUE'"
def __init__(self):
super(SDODWithin, self).__init__(self.dwithin_func)
class SDOGeomRelate(SpatialFunction):
"Class for using SDO_GEOM.RELATE."
relate_func = 'SDO_GEOM.RELATE'
sql_template = ("%(function)s(%(geo_col)s, '%(mask)s', %(geometry)s, "
"%(tolerance)s) %(operator)s '%(mask)s'")
def __init__(self, mask, tolerance=0.05):
# SDO_GEOM.RELATE(...) has a peculiar argument order: column, mask, geom, tolerance.
# Moreover, the function result is the mask (e.g., 'DISJOINT' instead of 'TRUE').
super(SDOGeomRelate, self).__init__(self.relate_func, operator='=',
mask=mask, tolerance=tolerance)
class SDORelate(SpatialFunction):
"Class for using SDO_RELATE."
masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
sql_template = "%(function)s(%(geo_col)s, %(geometry)s, 'mask=%(mask)s') = 'TRUE'"
relate_func = 'SDO_RELATE'
def __init__(self, mask):
if not self.mask_regex.match(mask):
raise ValueError('Invalid %s mask: "%s"' % (self.relate_func, mask))
super(SDORelate, self).__init__(self.relate_func, mask=mask)
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float) + six.integer_types
class OracleOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = "django.contrib.gis.db.backends.oracle.compiler"
name = 'oracle'
oracle = True
valid_aggregates = dict([(a, None) for a in ('Union', 'Extent')])
Adapter = OracleSpatialAdapter
Adaptor = Adapter # Backwards-compatibility alias.
area = 'SDO_GEOM.SDO_AREA'
gml = 'SDO_UTIL.TO_GMLGEOMETRY'
centroid = 'SDO_GEOM.SDO_CENTROID'
difference = 'SDO_GEOM.SDO_DIFFERENCE'
distance = 'SDO_GEOM.SDO_DISTANCE'
extent = 'SDO_AGGR_MBR'
intersection = 'SDO_GEOM.SDO_INTERSECTION'
length = 'SDO_GEOM.SDO_LENGTH'
num_geom = 'SDO_UTIL.GETNUMELEM'
num_points = 'SDO_UTIL.GETNUMVERTICES'
perimeter = length
point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
reverse = 'SDO_UTIL.REVERSE_LINESTRING'
sym_difference = 'SDO_GEOM.SDO_XOR'
transform = 'SDO_CS.TRANSFORM'
union = 'SDO_GEOM.SDO_UNION'
unionagg = 'SDO_AGGR_UNION'
# We want to get SDO Geometries as WKT because it is much easier to
# instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
# However, this adversely affects performance (i.e., Java is called
# to convert to WKT on every query). If someone wishes to write a
# SDO_GEOMETRY(...) parser in Python, let me know =)
select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'
distance_functions = {
'distance_gt' : (SDODistance('>'), dtypes),
'distance_gte' : (SDODistance('>='), dtypes),
'distance_lt' : (SDODistance('<'), dtypes),
'distance_lte' : (SDODistance('<='), dtypes),
'dwithin' : (SDODWithin(), dtypes),
}
geometry_functions = {
'contains' : SDOOperation('SDO_CONTAINS'),
'coveredby' : SDOOperation('SDO_COVEREDBY'),
'covers' : SDOOperation('SDO_COVERS'),
'disjoint' : SDOGeomRelate('DISJOINT'),
'intersects' : SDOOperation('SDO_OVERLAPBDYINTERSECT'), # TODO: Is this really the same as ST_Intersects()?
'equals' : SDOOperation('SDO_EQUAL'),
'exact' : SDOOperation('SDO_EQUAL'),
'overlaps' : SDOOperation('SDO_OVERLAPS'),
'same_as' : SDOOperation('SDO_EQUAL'),
'relate' : (SDORelate, six.string_types), # Oracle uses a different syntax, e.g., 'mask=inside+touch'
'touches' : SDOOperation('SDO_TOUCH'),
'within' : SDOOperation('SDO_INSIDE'),
}
geometry_functions.update(distance_functions)
gis_terms = ['isnull']
gis_terms += list(geometry_functions)
gis_terms = dict([(term, None) for term in gis_terms])
truncate_params = {'relate' : None}
def convert_extent(self, clob):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = Geometry(clob.read())
gtype = str(ext_geom.geom_type)
if gtype == 'Polygon':
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == 'Point':
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def convert_geom(self, clob, geo_field):
if clob:
return Geometry(clob.read(), geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Returns the geometry database type for Oracle. Unlike other spatial
backends, no stored procedure is necessary and it's the same for all
geometry types.
"""
return 'MDSYS.SDO_GEOMETRY'
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
# dwithin lookups on oracle require a special string parameter
# that starts with "distance=".
if lookup_type == 'dwithin':
dist_param = 'distance=%s' % dist_param
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
SDO_CS.TRANSFORM() function call.
"""
if value is None:
return 'NULL'
def transform_value(val, srid):
return val.srid != srid
if hasattr(value, 'expression'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
return placeholder % self.get_expression_column(value)
else:
if transform_value(value, f.srid):
return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
else:
return 'SDO_GEOMETRY(%%s, %s)' % f.srid
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"Returns the SQL WHERE clause for use in Oracle spatial SQL construction."
alias, col, db_type = lvalue
# Getting the quoted table name as `geo_col`.
geo_col = '%s.%s' % (qn(alias), qn(col))
# See if a Oracle Geometry function matches the lookup type next
lookup_info = self.geometry_functions.get(lookup_type, False)
if lookup_info:
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# 'dwithin' lookup types.
if isinstance(lookup_info, tuple):
# First element of tuple is lookup type, second element is the type
# of the expected argument (e.g., str, float)
sdo_op, arg_type = lookup_info
geom = value[0]
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, tuple):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
if len(value) != 2:
raise ValueError('2-element tuple required for %s lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
if lookup_type == 'relate':
# The SDORelate class handles construction for these queries,
# and verifies the mask argument.
return sdo_op(value[1]).as_sql(geo_col, self.get_geom_placeholder(field, geom))
else:
# Otherwise, just call the `as_sql` method on the SDOOperation instance.
return sdo_op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
else:
# Lookup info is a SDOOperation instance, whose `as_sql` method returns
# the SQL necessary for the geometry function call. For example:
# SDO_CONTAINS("geoapp_country"."poly", SDO_GEOMETRY('POINT(5 23)', 4326)) = 'TRUE'
return lookup_info.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__.lower()
if agg_name == 'union': agg_name += 'agg'
if agg.is_extent:
sql_template = '%(function)s(%(field)s)'
else:
sql_template = '%(function)s(SDOAGGRTYPE(%(field)s,%(tolerance)s))'
sql_function = getattr(self, agg_name)
return self.select % sql_template, sql_function
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.oracle.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
return SpatialRefSys
def modify_insert_params(self, placeholders, params):
"""Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
backend due to #10888
"""
# This code doesn't work for bulk insert cases.
assert len(placeholders) == 1
return [[param for pholder,param
in six.moves.zip(placeholders[0], params[0]) if pholder != 'NULL'], ]
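# --- Editor's note: clarifying sketch, not part of Django's source. ---
# Given the sql_template defined on SDOOperation above, a lookup such as
# SDOOperation('SDO_CONTAINS') is expected to render SQL along these lines
# (column and SRID are hypothetical), with %s later bound to the geometry:
#
#     SDO_CONTAINS("geoapp_country"."poly", SDO_GEOMETRY(%s, 4326)) = 'TRUE'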
| gpl-2.0 |
hall-lab/svtools | scripts/filter_del.py | 1 | 4831 | #!/usr/bin/env python
from __future__ import division
import argparse
import sys
from svtools.vcf.file import Vcf
from svtools.vcf.variant import Variant
import svtools.utils as su
class VCFReader(object):
def __init__(self, stream):
self.vcf_obj = Vcf()
self.stream = stream
header = list()
for line in stream:
if line[0] != '#':
raise RuntimeError('Error parsing VCF header. Line is not a header line. {}'.format(line))
header.append(line)
if line.startswith('#CHROM\t'):
# end of header
break
self.vcf_obj.add_header(header)
def __iter__(self):
for line in self.stream:
yield Variant(line.rstrip().split('\t'), self.vcf_obj)
def load_deletion_sizes(stream):
minimum_del_size = dict()
for line in stream:
if line.startswith('Sample\t'):
continue
sample, size, overlap = line.rstrip().split('\t')
if sample not in minimum_del_size:
minimum_del_size[sample] = abs(int(size))
else:
raise RuntimeError('Size for {0} already set. Does your file of sizes include multiple lines with the same sample name?'.format(sample))
return minimum_del_size, max(minimum_del_size.values())
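# --- Editor's note: illustrative sketch, not part of the original script. ---
# The thresholds file parsed above is tab-separated with an optional header
# line starting with 'Sample'. A hypothetical example (sample names invented):
#
#     Sample      Size    Overlap
#     NA12878     418     0.9
#     NA12891     500     0.9
#
# For that input, load_deletion_sizes() returns
# ({'NA12878': 418, 'NA12891': 500}, 500).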
def set_missing(input_stream, deletion_sizes, output_stream, max_del_size, sr_cutoff):
valid_types = set(('DEL', 'MEI'))
for variant in input_stream:
if variant.get_info('SVTYPE') in valid_types:
# NOTE this will raise an exception if SVLEN is null
length = abs(int(variant.get_info('SVLEN')))
if length < max_del_size:
split_read_support = 0
total_depth = 0
for s in variant.sample_list:
g = variant.genotype(s)
if g.get_format('GT') not in ('./.', '0/0'):
split_read_support += int(g.get_format('AS'))
total_depth += int(g.get_format('DP'))
if total_depth > 0 and (split_read_support / total_depth) < sr_cutoff:
# Only set to null if PE support is our only source of
# information. Not counting soft-clips here.
# This may be a bad idea as even with SR support the
# lack of power to detect PE reads could skew us away from
# the correct genotype.
# A better method might be to regenotype using only
# split-read support if the SV is too small.
logged = False
for sample in variant.sample_list:
if sample in deletion_sizes and length < deletion_sizes[sample]:
gt = variant.genotype(sample)
gt.set_format('GT', './.')
if not logged:
sys.stderr.write('Applying small deletion filter to {0}\n'.format(variant.var_id))
logged = True
output_stream.write(variant.get_var_string())
output_stream.write('\n')
def description():
return 'set genotypes of deletions smaller than a per-sample cutoff to missing if there is no split-read support in the sample'
def add_arguments_to_parser(parser):
parser.add_argument("-i", "--input", required=True, dest="input", metavar='<VCF>', help="VCF file containing variants to be output")
parser.add_argument("-t", "--thresholds", required=True, dest="threshold_file", metavar='<TXT>', type=argparse.FileType('r'), help="Tab-separated file of sample name and minimum deletion size used to determine if site should be output")
parser.add_argument("-s", "--split-read-fraction", required=True, dest="sr_cutoff", metavar='<FLOAT>', type=float, help="Minimum fraction of split read support for the site to be excluded from filtering.")
parser.add_argument('-o', '--output', type=argparse.FileType('w'), metavar='<VCF>', default=sys.stdout, help='output VCF to write (default: stdout)')
parser.set_defaults(entry_point=run_from_args)
def command_parser():
parser = argparse.ArgumentParser(description=description())
add_arguments_to_parser(parser)
return parser
def run_from_args(args):
deletion_size_map, max_size = load_deletion_sizes(args.threshold_file)
with su.InputStream(args.input) as stream:
variant_stream = VCFReader(stream)
args.output.write(variant_stream.vcf_obj.get_header())
args.output.write('\n')
return set_missing(variant_stream, deletion_size_map, args.output, max_size, args.sr_cutoff)
if __name__ == '__main__':
parser = command_parser()
args = parser.parse_args()
sys.exit(args.entry_point(args))
| mit |
kmoocdev2/edx-platform | pavelib/utils/passthrough_opts.py | 24 | 4649 | """
Provides:
PassthroughOptionParser:
A subclass of :class:`optparse.OptionParser` that captures unknown options
into its ``passthrough_options`` attribute.
PassthroughTask:
A subclass of :class:`paver.tasks.Task` that supplies unknown options
as the `passthrough_options` argument to the decorated function
"""
from optparse import BadOptionError, OptionParser
import paver.tasks
from mock import patch
class PassthroughOptionParser(OptionParser):
"""
An :class:`optparse.OptionParser` which captures any unknown options into
the ``passthrough_options`` attribute. Handles both "--long-options" and
"-s" short options.
"""
def __init__(self, *args, **kwargs):
self.passthrough_options = []
# N.B. OptionParser is an old-style class, which is why
# this isn't using super()
OptionParser.__init__(self, *args, **kwargs)
def _process_long_opt(self, rargs, values):
# This is a copy of the OptionParser._process_long_opt method,
# modified to capture arguments that aren't understood
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
try:
opt = self._match_long_opt(opt)
except BadOptionError:
self.passthrough_options.append(arg)
if had_explicit_value:
rargs.pop(0)
return
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error("%s option requires an argument" % opt)
else:
self.error("%s option requires %d arguments"
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error("%s option does not take a value" % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
passthrough_opts = []
for char in arg[1:]:
opt = "-" + char
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
passthrough_opts.append(char)
continue
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error("%s option requires an argument" % opt)
else:
self.error("%s option requires %d arguments"
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
if passthrough_opts:
self.passthrough_options.append('-{}'.format("".join(passthrough_opts)))
class PassthroughTask(paver.tasks.Task):
"""
A :class:`paver.tasks.Task` subclass that supplies any options that it doesn't
understand to the task function as the ``passthrough_options`` argument.
"""
@property
def parser(self):
with patch.object(paver.tasks.optparse, 'OptionParser', PassthroughOptionParser):
return super(PassthroughTask, self).parser
def __call__(self, *args, **kwargs):
paver.tasks.environment.passthrough_options = self._parser.passthrough_options # pylint: disable=no-member
try:
return super(PassthroughTask, self).__call__(*args, **kwargs)
finally:
del paver.tasks.environment.passthrough_options
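# --- Editor's note: illustrative sketch, not part of the original module. ---
# The parser can be exercised on its own; the option names below are
# hypothetical. Recognized options parse normally, unknown ones are captured
# instead of raising BadOptionError.
def _example_passthrough():
    parser = PassthroughOptionParser()
    parser.add_option('--known', action='store_true')
    options, args = parser.parse_args(['--known', '--unknown=1', '-x'])
    # options.known is True; unknown options were captured:
    # parser.passthrough_options == ['--unknown=1', '-x']
    return parser.passthrough_options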
| agpl-3.0 |
cimarron-pistoncloud/molecule | test/unit/command/test_destroy.py | 2 | 2936 | # Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import subprocess
import pytest
from molecule.command import destroy
def test_execute_deletes_instances(
patched_driver_destroy, patched_print_info, patched_remove_templates,
patched_remove_inventory, molecule_instance):
d = destroy.Destroy({}, {}, molecule_instance)
result = d.execute()
msg = 'Destroying instances...'
patched_print_info.assert_called_once_with(msg)
patched_driver_destroy.assert_called_once_with()
assert not molecule_instance.state.created
assert not molecule_instance.state.converged
(None, None) == result
patched_remove_templates.assert_called_once_with()
patched_remove_inventory.assert_called_once_with()
def test_execute_raises_on_exit(patched_driver_destroy, patched_print_info,
patched_print_error, patched_remove_templates,
patched_remove_inventory, molecule_instance):
patched_driver_destroy.side_effect = subprocess.CalledProcessError(1, None,
None)
d = destroy.Destroy({}, {}, molecule_instance)
with pytest.raises(SystemExit):
d.execute()
msg = "Command 'None' returned non-zero exit status 1"
patched_print_error.assert_called_with(msg)
assert not patched_remove_templates.called
assert not patched_remove_inventory.called
def test_execute_does_not_raise_on_exit(patched_driver_destroy,
patched_print_info, molecule_instance):
patched_driver_destroy.side_effect = subprocess.CalledProcessError(1, None,
None)
d = destroy.Destroy({}, {}, molecule_instance)
result = d.execute(exit=False)
assert (1, '') == result
| mit |
NCI-Cloud/horizon | openstack_dashboard/local/dashboards/admin_nci/pupha/views.py | 1 | 8252 | # openstack_dashboard.local.dashboards.admin_nci.pupha.views
#
# Copyright (c) 2016, NCI, Australian National University.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from openstack_dashboard import api
from openstack_dashboard.openstack.common import log as logging
from horizon import tabs
from horizon import messages
from .tabs import TabGroup, DictObject
from . import constants
from .constants import short_name # separate import because it feels weird having short_name live in constants, so that may change..
# surely there is a cleaner way to do this...?
from novaclient.exceptions import NotFound as NotFoundNova
from keystoneclient.openstack.common.apiclient.exceptions import NotFound as NotFoundKeystone
from django.conf import settings
LOG = logging.getLogger(__name__)
def get_overcommit_ratios():
"""Return {cpu,ram,disk}_allocation_ratio values from django settings.
Return 1.0 for any missing allocation ratios.
"""
setting = 'NCI_NOVA_COMMIT_RATIOS'
resources = ['cpu', 'ram', 'disk'] # hard-coded strings to match names in nova.conf
ratios = getattr(settings, setting, {})
for r in resources:
if r not in ratios:
LOG.debug('Missing {} overcommit ratio in {}; assuming value of 1.'.format(r, setting))
ratios[r] = 1.
return ratios
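# --- Editor's note: illustrative sketch, not part of the original file. ---
# The Django setting read above would look something like this (values are
# hypothetical); any key left out falls back to 1.0:
#
# NCI_NOVA_COMMIT_RATIOS = {
#     'cpu': 4.0,    # matches cpu_allocation_ratio in nova.conf
#     'ram': 1.5,    # matches ram_allocation_ratio
#     'disk': 1.0,   # matches disk_allocation_ratio
# }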
class HostAggregate(object):
"""
Has attributes:
aggregate -- object from api.nova.aggregate_details_list
overcommit -- dict with keys matching "{}_allocation_ratio" in nova.conf
(see comment in get_overcommit_ratios)
hypervisors -- list of objects with attributes including
instances -- list of objects with attributes including
project
flavor
"""
def __init__(self, aggregate, hypervisors=None):
self.aggregate = aggregate
self.hypervisors = [] if hypervisors == None else hypervisors
class IndexView(tabs.TabbedTableView):
tab_group_class = TabGroup
template_name = constants.TEMPLATE_NAME
page_title = constants.TITLE
def get_tabs(self, request, **kwargs):
"""
Pass host aggregate data to the TabGroup on construction, as an
attribute "host_aggregates" in kwargs, which is a list of HostAggregate
objects.
This is useful because it avoids re-fetching the same data for each Tab
in the TabGroup (which would take some time -- there's no caching).
This is a slightly hacky solution, because if the way that TabView
instantiates its TabGroup changes such that it's no longer done in
get_tabs, this code will need to be updated accordingly. This seemed
like the least hacky way of doing it, though.
(TabView.get_tabs performs the initialisation of the TabGroup.)
"""
aggregates = api.nova.aggregate_details_list(request)
hypervisors = api.nova.hypervisor_list(request)
instances, _ = api.nova.server_list(request, all_tenants=True)
projects, _ = api.keystone.tenant_list(request)
flavors = api.nova.flavor_list(request)
# define these dicts to make it easier to look up objects
flavor_d = {f.id : f for f in flavors}
project_d = {p.id : p for p in projects}
hypervisor_d = {short_name(getattr(h, h.NAME_ATTR)) : h for h in hypervisors}
# (only) this list ends up being shared with the TabGroup
host_aggregates = [HostAggregate(aggregate=a) for a in aggregates]
# if there are no aggregates, invent a HostAggregate to hold everything
# (this is hacky but that's okay because nobody should actually want to
# use this panel if running a cloud with no host aggregates.. this code
# exists just so the dashboard doesn't break in that odd non-use case.)
if not host_aggregates:
host_aggregates = [HostAggregate(aggregate=DictObject(
id = 0,
name = '(none)',
hosts = [h.service['host'] for h in hypervisors],
metadata = {}
))]
# check if any instances are missing necessary data, and if so, skip them
hypervisor_instances = {} # otherwise, add them to this (short_name => [instance])
for i in instances:
# make sure we can tell which hypervisor is running this instance; if not, ignore it
try:
host = short_name(i.host_server)
if host not in hypervisor_d:
messages.error(request, 'Instance {} has unknown host, so was ignored.'.format(i.id))
continue
except AttributeError:
messages.error(request, 'Instance {} is missing host, so was ignored.'.format(i.id))
continue
# api.nova.flavor_list (which wraps novaclient.flavors.list) does not get all flavors,
# so if we have a reference to one that hasn't been retrieved, try looking it up specifically
# (wrap this rather trivially in a try block to make the error less cryptic)
if i.flavor['id'] not in flavor_d:
try:
LOG.debug('Extra lookup for flavor "{}"'.format(i.flavor['id']))
flavor_d[i.flavor['id']] = api.nova.flavor_get(request, i.flavor['id'])
except NotFoundNova:
messages.error(request, 'Instance {} has unknown flavor, so was ignored.'.format(i.id))
continue
# maybe the same thing could happen for projects (haven't actually experienced this one though)
if i.tenant_id not in project_d:
try:
LOG.debug('Extra lookup for project "{}"'.format(i.tenant_id))
project_d[i.tenant_id] = api.keystone.tenant_get(request, i.tenant_id)
except NotFoundKeystone:
messages.error(request, 'Instance {} has unknown project, so was ignored.'.format(i.id))
continue
# expose related objects, so that no further lookups are required
i.flavor = flavor_d[i.flavor['id']]
i.project = project_d[i.tenant_id]
# all the necessary information is present, so populate the dict
if host not in hypervisor_instances:
hypervisor_instances[host] = []
hypervisor_instances[host].append(i)
# assign hypervisors to host aggregates
for h in hypervisors:
h.instances = hypervisor_instances.get(short_name(getattr(h, h.NAME_ATTR)), [])
for ha in host_aggregates:
if h.service['host'] in ha.aggregate.hosts:
ha.hypervisors.append(h)
# get overcommit values and allocated/available resource counts
oc = get_overcommit_ratios()
p = re.compile(r'^(?P<resource>cpu|ram|disk)_allocation_ratio$')
for h in host_aggregates:
h.overcommit = {k:oc[k] for k in oc} # copy default overcommit values
for k in h.aggregate.metadata:
m = p.match(k)
if m:
try:
h.overcommit[m.group('resource')] = float(h.aggregate.metadata[k])
except ValueError:
LOG.debug('Could not parse host aggregate "{key}" metadata value "{value}" as float.'.format(key=k, value=h.aggregate.metadata[k]))
continue
return super(IndexView, self).get_tabs(request, host_aggregates=host_aggregates, **kwargs)
| apache-2.0 |
kalvdans/scipy | scipy/stats/_distn_infrastructure.py | 3 | 119483 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_, PY3
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state, _lazywhere, _lazyselect
from scipy._lib._util import _valarray as valarray
from scipy.special import (comb, chndtr, entr, rel_entr, kl_div, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, shape, ndarray,
product, reshape, zeros, floor, logical_and, log, sqrt, exp)
from numpy import (place, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
if PY3:
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
else:
instancemethod = types.MethodType
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(k, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(k, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n.
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
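# A quick sanity sketch (an added note, not part of the computation above):
# for a standard normal distribution mu=0, mu2=1, g1=0, g2=0, so the n=4
# branch gives mu4 = (g2 + 3)*mu2**2 = 3, matching E[Z**4] = 3; e.g.
# _moment_from_stats(4, 0.0, 1.0, 0.0, 0.0, None, ()) == 3.0 (moment_func is
# never consulted when all four stats are supplied and n <= 4).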
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor
# of exp(-xs*ns) into the ive function to improve numerical stability
# at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
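# Sanity note: _ncx2_pdf is exp(_ncx2_log_pdf). Because ive(v, z) equals
# iv(v, z)*exp(-abs(z)), the expression above reproduces the textbook density
# 0.5*exp(-(x+nc)/2) * (x/nc)**(df2/2) * iv(df2, sqrt(nc*x))
# while avoiding the overflow of iv() at large sqrt(nc*x).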
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = _getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __getstate__(self):
return self._updated_ctor_param(), self._random_state
def __setstate__(self, state):
ctor_param, r = state
self.__init__(**ctor_param)
self._random_state = r
return self
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
It is supposed to be called from the __init__ of each distribution class.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getargspec(meth) # NB: does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, and may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
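Examples
--------
A minimal illustration (any distribution instance works the same way):

>>> from scipy.stats import norm
>>> rv = norm(loc=2., scale=3.)
>>> float(rv.mean()), float(rv.std())
(2.0, 3.0)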
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Central moments
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters.")
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
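# Worked illustration of the rule above (a sketch): if the parameters
# broadcast to shape (3,) then size=(2, 3) is accepted (each parameter
# dimension is 1 or equals the matching size dimension) and the output has
# shape (2, 3), while size=(4,) raises ValueError.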
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _support_mask(self, x):
return (self.a <= x) & (x <= self.b)
def _open_support_mask(self, x):
return (self.a < x) & (x < self.b)
def _rvs(self, *args):
# This method must handle self._size being a tuple, and it must
# properly broadcast *args and self._size. self._size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
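For example (a minimal sketch; shape parameters, when a distribution has
them, are passed first):

>>> from scipy.stats import norm
>>> r = norm.rvs(loc=0, scale=1, size=3, random_state=123)
>>> r.shape
(3,)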
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
# `size` should just be an argument to _rvs(), but for, um,
# historical reasons, it is made an attribute that is read
# by _rvs().
self._size = size
vals = self._rvs(*args)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
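For example, for a normal distribution the mean shifts with ``loc`` and the
variance scales with ``scale**2`` (a minimal check):

>>> from scipy.stats import norm
>>> m, v = norm.stats(loc=3., scale=4., moments='mv')
>>> float(m), float(v)
(3.0, 16.0)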
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of the distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
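For example, the second non-central moment of the standard normal
distribution equals its variance (a minimal check):

>>> from scipy.stats import norm
>>> norm.moment(2)
1.0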
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
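For example (a minimal check against the familiar normal quantiles):

>>> import numpy as np
>>> from scipy.stats import norm
>>> np.allclose(norm.interval(0.95), (-1.96, 1.96), atol=1e-2)
True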
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in the result arrays that indicates a value for which
some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can override::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in all
cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters, returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
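For example, the median of the standard normal distribution is 0
(a minimal check):

>>> from scipy.stats import norm
>>> norm.ppf(0.5)
0.0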
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
return loc, scale, args
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
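For example (a minimal check), for the standard normal distribution this
is the summed negative log-density of the data:

>>> import numpy as np
>>> from scipy.stats import norm
>>> x = np.array([-1., 0., 1.])
>>> np.allclose(norm.nnlf((0., 1.), x), -norm.logpdf(x).sum())
True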
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x)
n_bad = sum(cond0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
''' Return penalized negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
# First of all, convert fshapes params to fnum: eg for stats.beta,
# shapes='a, b'. To fix `a`, can specify either `f0` or `fa`.
# Convert the latter into the former.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)
if val is not None:
key = 'f%d' % j
if key in kwds:
raise ValueError("Duplicate entry for %s." % key)
else:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape (if applicable), location, and scale
parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
them.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``
and the starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
mle_tuple : tuple of floats
MLEs for any shape parameters (if applicable), followed by those
for location and scale. For most random variables, estimates of the shape
parameters will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of the range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE, it
may only be locally optimal, or the optimization may fail altogether.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
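Examples
--------
A minimal sketch of the moment matching performed here:

>>> import numpy as np
>>> from scipy.stats import uniform
>>> data = np.array([0., 1., 2., 3.])
>>> Lhat, Shat = uniform.fit_loc_scale(data)
>>> mu, mu2 = uniform.stats(moments='mv')
>>> np.allclose([Lhat, Shat],
... [data.mean() - Shat*mu, np.sqrt(data.var() / mu2)])
True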
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::

          ub
E[f(x)] = Integral(f(x) * dist.pdf(x))
          lb
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
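        Examples
        --------
        Illustrative sketch (added for exposition; values rounded for
        stability): the second moment of a standard normal is its variance,
        and restricting to ``x >= 0`` with ``conditional=True`` yields the
        half-normal mean, ``sqrt(2/pi)``.

        >>> from scipy import stats
        >>> round(stats.norm.expect(lambda x: x**2), 6)   # E[X^2] == Var[X]
        1.0
        >>> round(stats.norm.expect(lambda x: x, lb=0, conditional=True), 6)
        0.797885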
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc)
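    # Illustrative note (comment added for exposition): for n=1 the helper
    # above sums k * pmf(k) over the support, i.e. the mean. The summation
    # starts at the median, self.ppf(0.5, *args), and _expect walks outwards
    # from there in steps of self.inc until the partial sums converge.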
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
# python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
# python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
# python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
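    Examples
    --------
    Illustrative sketch (added for exposition): a fair coin carries one bit
    of entropy in base 2, and the relative entropy of a distribution with
    respect to itself is zero.

    >>> entropy([0.5, 0.5], base=2)
    1.0
    >>> entropy([0.5, 0.5], qk=[0.5, 0.5])
    0.0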
"""
pk = asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / np.sum(qk, axis=0)
vec = rel_entr(pk, qk)
S = np.sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
# Must override one of _pmf or _cdf, or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers with non-zero
probabilities ``pk`` with ``sum(pk) = 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
        The value in the result arrays that indicates a value for which
        some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
        The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its
        methods. If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`, the main differences being:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
self._construct_docstrings(name, longname, extradoc)
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
@property
    @np.deprecate(message="`return_integers` attribute is not used anywhere any"
" longer and is deprecated in scipy 0.18.")
def return_integers(self):
return 1
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
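        Examples
        --------
        Illustrative sketch (added for exposition; exact array formatting
        depends on the numpy version): a Bernoulli(0.3) variable puts mass
        0.7 on 0 and 0.3 on 1.

        >>> from scipy import stats
        >>> stats.bernoulli.pmf([0, 1], 0.3)
        array([ 0.7,  0.3])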
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
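        Examples
        --------
        Illustrative sketch (added for exposition; exact array formatting
        depends on the numpy version): ``ppf`` returns the smallest integer
        k with ``cdf(k) >= q``, inverting the step-function `cdf`.

        >>> from scipy import stats
        >>> stats.poisson.ppf([0.1, 0.5, 0.9], 2)
        array([ 0.,  2.,  4.])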
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
return _expect(lambda x: entr(self.pmf(x, *args)),
self.a, self.b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
            support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
Iterate over the support of a distributions in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
        For heavy-tailed distributions, the expected value may or may not exist,
        depending on the function, `func`. If it does exist, but the sum converges
        slowly, the accuracy of the result may be rather low. For instance, for
        ``zipf(4)``, the accuracy of the mean and variance estimates is only
        about 1e-5. Increasing `maxcount` and/or `chunksize` may improve the
        result, but may also make zipf very slow.
The function is not vectorized.
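        Examples
        --------
        Illustrative sketch (added for exposition; value rounded for
        stability): with the default identity function the expectation is
        the distribution's mean, e.g. 2.0 for a Poisson(2) variable.

        >>> from scipy import stats
        >>> round(stats.poisson.expect(args=(2.0,)), 6)
        2.0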
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
        # Historical note: pmf was used here because _pmf does not check the
        # support in randint, and there were concerns about self.a / self.b
        # being correct at this stage; this now appears to work with _pmf.
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = self.a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = self.b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
    The constructor ignores most of its arguments; only the `values` argument
    is needed.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if len(xk) != len(pk):
raise ValueError("xk and pk need to have the same length.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._construct_docstrings(name, longname, extradoc)
@property
@np.deprecate(message="`return_integers` attribute is not used anywhere any"
" longer and is deprecated in scipy 0.18.")
def return_integers(self):
return 0
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
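    # Illustrative note (comment added for exposition): _cdf and _ppf above
    # are vectorized table lookups. For values=([1, 3], [0.25, 0.75]) the
    # cumulative table is qvals == [0.25, 1.0]; _cdf(2.0) locates the last
    # support point <= 2.0 (namely 1) and returns 0.25, while _ppf(0.5)
    # returns the first xk whose cumulative probability reaches 0.5, i.e. 3.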
def _rvs(self):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = self._random_state.random_sample(self._size)
if self._size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
@np.deprecate(message="moment_gen method is not used anywhere any more "
"and is deprecated in scipy 0.18.")
def moment_gen(self, t):
t = asarray(t)
return np.sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
@property
@np.deprecate(message="F attribute is not used anywhere any longer and "
"is deprecated in scipy 0.18.")
def F(self):
return dict(zip(self.xk, self.qvals))
@property
@np.deprecate(message="Finv attribute is not used anywhere any longer and "
"is deprecated in scipy 0.18.")
def Finv(self):
decreasing_keys = sorted(self.F.keys(), reverse=True)
return dict((self.F[k], k) for k in decreasing_keys)
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
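    Examples
    --------
    Illustrative sketch (added for exposition; ``demo`` is a hypothetical
    name): instances of `rv_base_class` are collected as distributions,
    classes whose names end in ``_gen`` as generators, and underscored
    names are skipped.

    >>> demo = rv_discrete(name='demo', values=([0, 1], [0.5, 0.5]))
    >>> pairs = [('demo', demo), ('rv_discrete', rv_discrete), ('_hidden', 3)]
    >>> get_distribution_names(pairs, rv_discrete)
    (['demo'], [])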
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| bsd-3-clause |
OptimusGitEtna/RestSymf | Python-3.4.2/Lib/test/test_file_eintr.py | 122 | 10331 | # Written to test interrupted system calls interfering with our many buffered
# IO implementations. http://bugs.python.org/issue12268
#
# It was suggested that this code could be merged into test_io and the tests
# made to work using the same method as the existing signal tests in test_io.
# I was unable to get single process tests using alarm or setitimer that way
# to reproduce the EINTR problems. This process based test suite reproduces
# the problems prior to the issue12268 patch reliably on Linux and OSX.
# - gregory.p.smith
import os
import select
import signal
import subprocess
import sys
from test.support import run_unittest
import time
import unittest
# Test import all of the things we're about to try testing up front.
from _io import FileIO
@unittest.skipUnless(os.name == 'posix', 'tests requires a posix system.')
class TestFileIOSignalInterrupt(unittest.TestCase):
def setUp(self):
self._process = None
def tearDown(self):
if self._process and self._process.poll() is None:
try:
self._process.kill()
except OSError:
pass
def _generate_infile_setup_code(self):
"""Returns the infile = ... line of code for the reader process.
        Subclasses should override this to test different IO objects.
"""
return ('import _io ;'
'infile = _io.FileIO(sys.stdin.fileno(), "rb")')
def fail_with_process_info(self, why, stdout=b'', stderr=b'',
communicate=True):
"""A common way to cleanup and fail with useful debug output.
Kills the process if it is still running, collects remaining output
and fails the test with an error message including the output.
Args:
why: Text to go after "Error from IO process" in the message.
stdout, stderr: standard output and error from the process so
far to include in the error message.
communicate: bool, when True we call communicate() on the process
after killing it to gather additional output.
"""
if self._process.poll() is None:
time.sleep(0.1) # give it time to finish printing the error.
try:
self._process.terminate() # Ensure it dies.
except OSError:
pass
if communicate:
stdout_end, stderr_end = self._process.communicate()
stdout += stdout_end
stderr += stderr_end
self.fail('Error from IO process %s:\nSTDOUT:\n%sSTDERR:\n%s\n' %
(why, stdout.decode(), stderr.decode()))
def _test_reading(self, data_to_write, read_and_verify_code):
"""Generic buffered read method test harness to validate EINTR behavior.
Also validates that Python signal handlers are run during the read.
Args:
data_to_write: String to write to the child process for reading
before sending it a signal, confirming the signal was handled,
writing a final newline and closing the infile pipe.
read_and_verify_code: Single "line" of code to read from a file
object named 'infile' and validate the result. This will be
executed as part of a python subprocess fed data_to_write.
"""
infile_setup_code = self._generate_infile_setup_code()
# Total pipe IO in this function is smaller than the minimum posix OS
# pipe buffer size of 512 bytes. No writer should block.
assert len(data_to_write) < 512, 'data_to_write must fit in pipe buf.'
# Start a subprocess to call our read method while handling a signal.
self._process = subprocess.Popen(
[sys.executable, '-u', '-c',
'import signal, sys ;'
'signal.signal(signal.SIGINT, '
'lambda s, f: sys.stderr.write("$\\n")) ;'
+ infile_setup_code + ' ;' +
'sys.stderr.write("Worm Sign!\\n") ;'
+ read_and_verify_code + ' ;' +
'infile.close()'
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the signal handler to be installed.
worm_sign = self._process.stderr.read(len(b'Worm Sign!\n'))
if worm_sign != b'Worm Sign!\n': # See also, Dune by Frank Herbert.
self.fail_with_process_info('while awaiting a sign',
stderr=worm_sign)
self._process.stdin.write(data_to_write)
signals_sent = 0
rlist = []
# We don't know when the read_and_verify_code in our child is actually
# executing within the read system call we want to interrupt. This
# loop waits for a bit before sending the first signal to increase
# the likelihood of that. Implementations without correct EINTR
# and signal handling usually fail this test.
while not rlist:
rlist, _, _ = select.select([self._process.stderr], (), (), 0.05)
self._process.send_signal(signal.SIGINT)
signals_sent += 1
if signals_sent > 200:
self._process.kill()
self.fail('reader process failed to handle our signals.')
# This assumes anything unexpected that writes to stderr will also
# write a newline. That is true of the traceback printing code.
signal_line = self._process.stderr.readline()
if signal_line != b'$\n':
self.fail_with_process_info('while awaiting signal',
stderr=signal_line)
# We append a newline to our input so that a readline call can
# end on its own before the EOF is seen and so that we're testing
# the read call that was interrupted by a signal before the end of
# the data stream has been reached.
stdout, stderr = self._process.communicate(input=b'\n')
if self._process.returncode:
self.fail_with_process_info(
'exited rc=%d' % self._process.returncode,
stdout, stderr, communicate=False)
# PASS!
# String format for the read_and_verify_code used by read methods.
_READING_CODE_TEMPLATE = (
'got = infile.{read_method_name}() ;'
'expected = {expected!r} ;'
'assert got == expected, ('
'"{read_method_name} returned wrong data.\\n"'
'"got data %r\\nexpected %r" % (got, expected))'
)
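    # Illustrative note (comment added for exposition): formatted with, e.g.,
    # read_method_name='readline' and expected=b'hi\n', the template above
    # expands to a single line of code roughly like
    #   got = infile.readline() ;expected = b'hi\n' ;assert got == expected, ...
    # which _test_reading splices into the 'python -c' command of the child.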
def test_readline(self):
"""readline() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello, world!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readline',
expected=b'hello, world!\n'))
def test_readlines(self):
"""readlines() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readlines',
expected=[b'hello\n', b'world!\n']))
def test_readall(self):
"""readall() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readall',
expected=b'hello\nworld!\n'))
# read() is the same thing as readall().
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='read',
expected=b'hello\nworld!\n'))
class TestBufferedIOSignalInterrupt(TestFileIOSignalInterrupt):
def _generate_infile_setup_code(self):
"""Returns the infile = ... line of code to make a BufferedReader."""
return ('infile = open(sys.stdin.fileno(), "rb") ;'
'import _io ;assert isinstance(infile, _io.BufferedReader)')
def test_readall(self):
"""BufferedReader.read() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='read',
expected=b'hello\nworld!\n'))
class TestTextIOSignalInterrupt(TestFileIOSignalInterrupt):
def _generate_infile_setup_code(self):
"""Returns the infile = ... line of code to make a TextIOWrapper."""
return ('infile = open(sys.stdin.fileno(), "rt", newline=None) ;'
'import _io ;assert isinstance(infile, _io.TextIOWrapper)')
def test_readline(self):
"""readline() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello, world!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readline',
expected='hello, world!\n'))
def test_readlines(self):
"""readlines() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\r\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='readlines',
expected=['hello\n', 'world!\n']))
def test_readall(self):
"""read() must handle signals and not lose data."""
self._test_reading(
data_to_write=b'hello\nworld!',
read_and_verify_code=self._READING_CODE_TEMPLATE.format(
read_method_name='read',
expected="hello\nworld!\n"))
def test_main():
test_cases = [
tc for tc in globals().values()
if isinstance(tc, type) and issubclass(tc, unittest.TestCase)]
run_unittest(*test_cases)
if __name__ == '__main__':
test_main()
| mit |
ormandj/stalker | stalkerweb/setup.py | 1 | 1247 | #!/usr/bin/env python
""" setuptools for stalkerweb """
from setuptools import setup, find_packages
from stalkerweb import __version__ as version
setup(
name='stalkerweb',
version=version,
author="Florian Hines",
author_email="syn@ronin.io",
description="Simple Monitoring System",
url="http://github.com/pandemicsyn/stalker",
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Environment :: No Input/Output (Daemon)',
],
install_requires=[
'stalkerutils==2.0.2',
'eventlet==0.17.4',
'flask==0.10.1',
'redis==2.10.3',
'pymongo==3.0.3',
'mmh3==2.3.1',
'flask-rethinkdb==0.2',
'rethinkdb==2.1.0.post2',
'flask-bcrypt==0.7.1',
'flask-wtf==0.12',
],
include_package_data=True,
zip_safe=False,
scripts=['bin/stalker-web',],
data_files=[('share/doc/stalkerweb',
['README.md', 'INSTALL',
'etc/stalker-web.conf',
'etc/init.d/stalker-web',
])]
)
| apache-2.0 |
yephper/django | tests/template_tests/syntax_tests/test_spaceless.py | 1 | 1804 | from django.test import SimpleTestCase
from ..utils import setup
class SpacelessTagTests(SimpleTestCase):
@setup({'spaceless01': "{% spaceless %} <b> <i> text </i> </b> {% endspaceless %}"})
def test_spaceless01(self):
output = self.engine.render_to_string('spaceless01')
self.assertEqual(output, "<b><i> text </i></b>")
@setup({'spaceless02': "{% spaceless %} <b> \n <i> text </i> \n </b> {% endspaceless %}"})
def test_spaceless02(self):
output = self.engine.render_to_string('spaceless02')
self.assertEqual(output, "<b><i> text </i></b>")
@setup({'spaceless03': "{% spaceless %}<b><i>text</i></b>{% endspaceless %}"})
def test_spaceless03(self):
output = self.engine.render_to_string('spaceless03')
self.assertEqual(output, "<b><i>text</i></b>")
@setup({'spaceless04': "{% spaceless %}<b> <i>{{ text }}</i> </b>{% endspaceless %}"})
def test_spaceless04(self):
output = self.engine.render_to_string('spaceless04', {'text': 'This & that'})
        self.assertEqual(output, "<b><i>This &amp; that</i></b>")
@setup({'spaceless05': "{% autoescape off %}{% spaceless %}"
"<b> <i>{{ text }}</i> </b>{% endspaceless %}"
"{% endautoescape %}"})
def test_spaceless05(self):
output = self.engine.render_to_string('spaceless05', {'text': 'This & that'})
self.assertEqual(output, "<b><i>This & that</i></b>")
@setup({'spaceless06': "{% spaceless %}<b> <i>{{ text|safe }}</i> </b>{% endspaceless %}"})
def test_spaceless06(self):
output = self.engine.render_to_string('spaceless06', {'text': 'This & that'})
self.assertEqual(output, "<b><i>This & that</i></b>")
| bsd-3-clause |
mxOBS/deb-pkg_trusty_chromium-browser | tools/perf/page_sets/key_silk_cases.py | 9 | 20248 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class KeySilkCasesPage(page_module.Page):
def __init__(self, url, page_set):
super(KeySilkCasesPage, self).__init__(
        url=url, page_set=page_set, credentials_path='data/credentials.json')
self.user_agent_type = 'mobile'
self.archive_data_file = 'data/key_silk_cases.json'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(2)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class Page1(KeySilkCasesPage):
""" Why: Infinite scroll. Brings out all of our perf issues. """
def __init__(self, page_set):
super(Page1, self).__init__(
url='http://groupcloned.com/test/plain/list-recycle-transform.html',
page_set=page_set)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(selector='#scrollable')
interaction.End()
class Page2(KeySilkCasesPage):
""" Why: Brings out layer management bottlenecks. """
def __init__(self, page_set):
super(Page2, self).__init__(
url='http://groupcloned.com/test/plain/list-animation-simple.html',
page_set=page_set)
def RunPageInteractions(self, action_runner):
action_runner.Wait(2)
class Page3(KeySilkCasesPage):
"""
Why: Best-known method for fake sticky. Janks sometimes. Interacts badly with
compositor scrolls.
"""
def __init__(self, page_set):
super(Page3, self).__init__(
# pylint: disable=C0301
url='http://groupcloned.com/test/plain/sticky-using-webkit-backface-visibility.html',
page_set=page_set)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(selector='#container')
interaction.End()
class Page4(KeySilkCasesPage):
"""
Why: Card expansion: only the card should repaint, but in reality lots of
storms happen.
"""
def __init__(self, page_set):
super(Page4, self).__init__(
url='http://jsfiddle.net/3yDKh/15/show/',
page_set=page_set)
def RunPageInteractions(self, action_runner):
action_runner.Wait(3)
class Page5(KeySilkCasesPage):
"""
Why: Card expansion with animated contents, using will-change on the card
"""
def __init__(self, page_set):
super(Page5, self).__init__(
url='http://jsfiddle.net/jx5De/14/show/',
page_set=page_set)
self.gpu_raster = True
def RunPageInteractions(self, action_runner):
action_runner.Wait(4)
class Page6(KeySilkCasesPage):
"""
Why: Card fly-in: It should be fast to animate in a bunch of cards using
margin-top and letting layout do the rest.
"""
def __init__(self, page_set):
super(Page6, self).__init__(
url='http://jsfiddle.net/3yDKh/16/show/',
page_set=page_set)
def RunPageInteractions(self, action_runner):
action_runner.Wait(3)
class Page7(KeySilkCasesPage):
"""
Why: Image search expands a spacer div when you click an image to accomplish
a zoomin effect. Each image has a layer. Even so, this triggers a lot of
unnecessary repainting.
"""
def __init__(self, page_set):
super(Page7, self).__init__(
url='http://jsfiddle.net/R8DX9/4/show/',
page_set=page_set)
def RunPageInteractions(self, action_runner):
action_runner.Wait(3)
class Page8(KeySilkCasesPage):
"""
Why: Swipe to dismiss of an element that has a fixed-position child that is
its pseudo-sticky header. Brings out issues with layer creation and
repainting.
"""
def __init__(self, page_set):
super(Page8, self).__init__(
url='http://jsfiddle.net/rF9Gh/7/show/',
page_set=page_set)
def RunPageInteractions(self, action_runner):
action_runner.Wait(3)
class Page9(KeySilkCasesPage):
"""
Why: Horizontal and vertical expansion of a card that is cheap to layout but
costly to rasterize.
"""
def __init__(self, page_set):
super(Page9, self).__init__(
url='http://jsfiddle.net/TLXLu/3/show/',
page_set=page_set)
self.gpu_raster = True
def RunPageInteractions(self, action_runner):
action_runner.Wait(4)
class Page10(KeySilkCasesPage):
"""
Why: Vertical Expansion of a card that is cheap to layout but costly to
rasterize.
"""
def __init__(self, page_set):
super(Page10, self).__init__(
url='http://jsfiddle.net/cKB9D/7/show/',
page_set=page_set)
self.gpu_raster = True
def RunPageInteractions(self, action_runner):
action_runner.Wait(4)
class Page11(KeySilkCasesPage):
"""
Why: Parallax effect is common on photo-viewer-like applications, overloading
software rasterization
"""
def __init__(self, page_set):
super(Page11, self).__init__(
url='http://jsfiddle.net/vBQHH/11/show/',
page_set=page_set)
self.gpu_raster = True
def RunPageInteractions(self, action_runner):
action_runner.Wait(4)
class Page12(KeySilkCasesPage):
""" Why: Addressing paint storms during coordinated animations. """
def __init__(self, page_set):
super(Page12, self).__init__(
url='http://jsfiddle.net/ugkd4/10/show/',
page_set=page_set)
def RunPageInteractions(self, action_runner):
action_runner.Wait(5)
class Page13(KeySilkCasesPage):
""" Why: Mask transitions are common mobile use cases. """
def __init__(self, page_set):
super(Page13, self).__init__(
url='http://jsfiddle.net/xLuvC/1/show/',
page_set=page_set)
self.gpu_raster = True
def RunPageInteractions(self, action_runner):
action_runner.Wait(4)
class Page14(KeySilkCasesPage):
""" Why: Card expansions with images and text are pretty and common. """
def __init__(self, page_set):
super(Page14, self).__init__(
url='http://jsfiddle.net/bNp2h/3/show/',
page_set=page_set)
self.gpu_raster = True
def RunPageInteractions(self, action_runner):
action_runner.Wait(4)
class Page15(KeySilkCasesPage):
""" Why: Coordinated animations for expanding elements. """
def __init__(self, page_set):
super(Page15, self).__init__(
url='file://key_silk_cases/font_wipe.html',
page_set=page_set)
def RunPageInteractions(self, action_runner):
action_runner.Wait(5)
class Page16(KeySilkCasesPage):
def __init__(self, page_set):
super(Page16, self).__init__(
url='file://key_silk_cases/inbox_app.html?swipe_to_dismiss',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(2)
def SwipeToDismiss(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'SwipeAction', is_smooth=True)
action_runner.SwipeElement(
left_start_ratio=0.8, top_start_ratio=0.2,
direction='left', distance=400, speed_in_pixels_per_second=5000,
element_function='document.getElementsByClassName("message")[2]')
interaction.End()
def RunPageInteractions(self, action_runner):
self.SwipeToDismiss(action_runner)
class Page17(KeySilkCasesPage):
def __init__(self, page_set):
super(Page17, self).__init__(
url='file://key_silk_cases/inbox_app.html?stress_hidey_bars',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(2)
def RunPageInteractions(self, action_runner):
self.StressHideyBars(action_runner)
def StressHideyBars(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
selector='#messages', direction='down', speed_in_pixels_per_second=200)
interaction.End()
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
selector='#messages', direction='up', speed_in_pixels_per_second=200)
interaction.End()
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
selector='#messages', direction='down', speed_in_pixels_per_second=200)
interaction.End()
class Page18(KeySilkCasesPage):
def __init__(self, page_set):
super(Page18, self).__init__(
url='file://key_silk_cases/inbox_app.html?toggle_drawer',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(2)
def RunPageInteractions(self, action_runner):
for _ in xrange(6):
self.ToggleDrawer(action_runner)
def ToggleDrawer(self, action_runner):
interaction = action_runner.BeginInteraction(
'Action_TapAction', is_smooth=True)
action_runner.TapElement('#menu-button')
action_runner.Wait(1)
interaction.End()
class Page19(KeySilkCasesPage):
def __init__(self, page_set):
super(Page19, self).__init__(
url='file://key_silk_cases/inbox_app.html?slide_drawer',
page_set=page_set)
def ToggleDrawer(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'TapAction', is_smooth=True)
action_runner.TapElement('#menu-button')
interaction.End()
interaction = action_runner.BeginInteraction('Wait', is_smooth=True)
action_runner.WaitForJavaScriptCondition('''
document.getElementById("nav-drawer").active &&
document.getElementById("nav-drawer").children[0]
.getBoundingClientRect().left == 0''')
interaction.End()
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(2)
self.ToggleDrawer(action_runner)
def RunPageInteractions(self, action_runner):
self.SlideDrawer(action_runner)
def SlideDrawer(self, action_runner):
interaction = action_runner.BeginInteraction(
'Action_SwipeAction', is_smooth=True)
action_runner.SwipeElement(
left_start_ratio=0.8, top_start_ratio=0.2,
direction='left', distance=200,
element_function='document.getElementById("nav-drawer").children[0]')
action_runner.WaitForJavaScriptCondition(
'!document.getElementById("nav-drawer").active')
interaction.End()
class Page20(KeySilkCasesPage):
""" Why: Shadow DOM infinite scrolling. """
def __init__(self, page_set):
super(Page20, self).__init__(
url='file://key_silk_cases/infinite_scrolling.html',
page_set=page_set)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
selector='#container', speed_in_pixels_per_second=5000)
interaction.End()
class GwsExpansionPage(KeySilkCasesPage):
"""Abstract base class for pages that expand Google knowledge panels."""
def NavigateWait(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(3)
def ExpandKnowledgeCard(self, action_runner):
# expand card
interaction = action_runner.BeginInteraction(
'Action_TapAction', is_smooth=True)
action_runner.TapElement(
element_function='document.getElementsByClassName("vk_arc")[0]')
action_runner.Wait(2)
interaction.End()
def ScrollKnowledgeCardToTop(self, action_runner, card_id):
# scroll until the knowledge card is at the top
action_runner.ExecuteJavaScript(
"document.getElementById('%s').scrollIntoView()" % card_id)
def RunPageInteractions(self, action_runner):
self.ExpandKnowledgeCard(action_runner)
class GwsGoogleExpansion(GwsExpansionPage):
""" Why: Animating height of a complex content card is common. """
def __init__(self, page_set):
super(GwsGoogleExpansion, self).__init__(
url='http://www.google.com/#q=google',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
self.NavigateWait(action_runner)
self.ScrollKnowledgeCardToTop(action_runner, 'kno-result')
class GwsBoogieExpansion(GwsExpansionPage):
""" Why: Same case as Google expansion but text-heavy rather than image. """
def __init__(self, page_set):
super(GwsBoogieExpansion, self).__init__(
url='https://www.google.com/search?hl=en&q=define%3Aboogie',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
self.NavigateWait(action_runner)
self.ScrollKnowledgeCardToTop(action_runner, 'rso')
class Page22(KeySilkCasesPage):
def __init__(self, page_set):
super(Page22, self).__init__(
url='http://plus.google.com/app/basic/stream',
page_set=page_set)
self.credentials = 'google'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementsByClassName("fHa").length > 0')
action_runner.Wait(2)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(selector='#mainContent')
interaction.End()
class Page23(KeySilkCasesPage):
"""
Why: Physical simulation demo that does a lot of element.style mutation
triggering JS and recalc slowness
"""
def __init__(self, page_set):
super(Page23, self).__init__(
url='http://jsbin.com/UVIgUTa/38/quiet',
page_set=page_set)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage(
distance_expr='window.innerHeight / 2',
direction='down',
use_touch=True)
interaction.End()
interaction = action_runner.BeginInteraction('Wait', is_smooth=True)
action_runner.Wait(1)
interaction.End()
class Page24(KeySilkCasesPage):
"""
Why: Google News: this iOS version is slower than accelerated scrolling
"""
def __init__(self, page_set):
super(Page24, self).__init__(
url='http://mobile-news.sandbox.google.com/news/pt0?scroll',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementById(":h") != null')
action_runner.Wait(1)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
element_function='document.getElementById(":5")',
distance=2500,
use_touch=True)
interaction.End()
class Page25(KeySilkCasesPage):
def __init__(self, page_set):
super(Page25, self).__init__(
url='http://mobile-news.sandbox.google.com/news/pt0?swipe',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementById(":h") != null')
action_runner.Wait(1)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'SwipeAction', is_smooth=True)
action_runner.SwipeElement(
direction='left', distance=100,
element_function='document.getElementById(":f")')
interaction.End()
interaction = action_runner.BeginInteraction('Wait', is_smooth=True)
action_runner.Wait(1)
interaction.End()
class Page26(KeySilkCasesPage):
""" Why: famo.us twitter demo """
def __init__(self, page_set):
super(Page26, self).__init__(
url='http://s.codepen.io/befamous/fullpage/pFsqb?scroll',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'document.getElementsByClassName("tweet").length > 0')
action_runner.Wait(1)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage(distance=5000)
interaction.End()
class SVGIconRaster(KeySilkCasesPage):
""" Why: Mutating SVG icons; these paint storm and paint slowly. """
def __init__(self, page_set):
super(SVGIconRaster, self).__init__(
url='http://wiltzius.github.io/shape-shifter/',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'loaded = true')
action_runner.Wait(1)
def RunPageInteractions(self, action_runner):
for i in xrange(9):
button_func = ('document.getElementById("demo").$.'
'buttons.children[%d]') % i
interaction = action_runner.BeginInteraction(
'Action_TapAction', is_smooth=True)
action_runner.TapElement(element_function=button_func)
action_runner.Wait(1)
interaction.End()
class UpdateHistoryState(KeySilkCasesPage):
""" Why: Modern apps often update history state, which currently is janky."""
def __init__(self, page_set):
super(UpdateHistoryState, self).__init__(
url='file://key_silk_cases/pushState.html',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.ExecuteJavaScript('''
window.requestAnimationFrame(function() {
window.__history_state_loaded = true;
});
''')
action_runner.WaitForJavaScriptCondition(
'window.__history_state_loaded == true;')
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginInteraction('animation_interaction',
is_smooth=True)
action_runner.Wait(5) # JS runs the animation continuously on the page
interaction.End()
class SilkFinance(KeySilkCasesPage):
""" Why: Some effects repaint the page, possibly including plenty of text. """
def __init__(self, page_set):
super(SilkFinance, self).__init__(
url='file://key_silk_cases/silk_finance.html',
page_set=page_set)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginInteraction('animation_interaction',
is_smooth=True)
action_runner.Wait(10) # animation runs automatically
interaction.End()
class KeySilkCasesPageSet(page_set_module.PageSet):
""" Pages hand-picked for project Silk. """
def __init__(self):
super(KeySilkCasesPageSet, self).__init__(
user_agent_type='mobile',
archive_data_file='data/key_silk_cases.json',
bucket=page_set_module.PARTNER_BUCKET)
self.AddUserStory(Page1(self))
self.AddUserStory(Page2(self))
self.AddUserStory(Page3(self))
self.AddUserStory(Page4(self))
self.AddUserStory(Page5(self))
self.AddUserStory(Page6(self))
self.AddUserStory(Page7(self))
self.AddUserStory(Page8(self))
self.AddUserStory(Page9(self))
self.AddUserStory(Page10(self))
self.AddUserStory(Page11(self))
self.AddUserStory(Page12(self))
self.AddUserStory(Page13(self))
self.AddUserStory(Page14(self))
self.AddUserStory(Page15(self))
self.AddUserStory(Page16(self))
self.AddUserStory(Page17(self))
self.AddUserStory(Page18(self))
# Missing frames during tap interaction; crbug.com/446332
# self.AddUserStory(Page19(self))
self.AddUserStory(Page20(self))
self.AddUserStory(GwsGoogleExpansion(self))
self.AddUserStory(GwsBoogieExpansion(self))
# Times out on Windows; crbug.com/338838
# self.AddUserStory(Page22(self))
self.AddUserStory(Page23(self))
self.AddUserStory(Page24(self))
self.AddUserStory(Page25(self))
self.AddUserStory(Page26(self))
self.AddUserStory(SVGIconRaster(self))
self.AddUserStory(UpdateHistoryState(self))
self.AddUserStory(SilkFinance(self))
| bsd-3-clause |
jallohm/django | django/core/serializers/python.py | 140 | 7685 | """
A Python "serializer". Doesn't do much serializing per se -- just converts to
and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for
other serializers.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils import six
from django.utils.encoding import force_text, is_protected_type
class Serializer(base.Serializer):
"""
Serializes a QuerySet to basic Python objects.
"""
internal_use_only = True
def start_serialization(self):
self._current = None
self.objects = []
def end_serialization(self):
pass
def start_object(self, obj):
self._current = OrderedDict()
def end_object(self, obj):
self.objects.append(self.get_dump_object(obj))
self._current = None
def get_dump_object(self, obj):
model = obj._meta.proxy_for_model if obj._deferred else obj.__class__
data = OrderedDict([('model', force_text(model._meta))])
if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
data["pk"] = force_text(obj._get_pk_val(), strings_only=True)
data['fields'] = self._current
return data
def handle_field(self, obj, field):
value = field.value_from_object(obj)
# Protected types (i.e., primitives like None, numbers, dates,
# and Decimals) are passed through as is. All other values are
# converted to string first.
if is_protected_type(value):
self._current[field.name] = value
else:
self._current[field.name] = field.value_to_string(obj)
def handle_fk_field(self, obj, field):
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
related = getattr(obj, field.name)
if related:
value = related.natural_key()
else:
value = None
else:
value = getattr(obj, field.get_attname())
if not is_protected_type(value):
value = field.value_to_string(obj)
self._current[field.name] = value
def handle_m2m_field(self, obj, field):
if field.remote_field.through._meta.auto_created:
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
m2m_value = lambda value: value.natural_key()
else:
m2m_value = lambda value: force_text(value._get_pk_val(), strings_only=True)
self._current[field.name] = [m2m_value(related)
for related in getattr(obj, field.name).iterator()]
def getvalue(self):
return self.objects
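# A minimal round-trip sketch (hedged; ``Author`` and ``myapp`` below are
# hypothetical names, not part of this module):
#
#     s = Serializer()
#     data = s.serialize(Author.objects.all())
#     # -> [{'model': 'myapp.author', 'pk': 1, 'fields': {'name': 'Ada'}}]
#     for deserialized in Deserializer(data):
#         deserialized.save()  # rebuilds and stores the ORM instances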
def Deserializer(object_list, **options):
"""
Deserialize simple Python objects back into Django ORM instances.
It's expected that you pass the Python objects themselves (instead of a
stream or a string) to the constructor.
"""
db = options.pop('using', DEFAULT_DB_ALIAS)
ignore = options.pop('ignorenonexistent', False)
for d in object_list:
        # Look up the model and start building a dict of data for it.
try:
Model = _get_model(d["model"])
except base.DeserializationError:
if ignore:
continue
else:
raise
data = {}
if 'pk' in d:
try:
data[Model._meta.pk.attname] = Model._meta.pk.to_python(d.get('pk'))
except Exception as e:
raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), None)
m2m_data = {}
field_names = {f.name for f in Model._meta.get_fields()}
# Handle each field
for (field_name, field_value) in six.iteritems(d["fields"]):
if ignore and field_name not in field_names:
# skip fields no longer on model
continue
if isinstance(field_value, str):
field_value = force_text(
field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True
)
field = Model._meta.get_field(field_name)
# Handle M2M relations
if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
if hasattr(field.remote_field.model._default_manager, 'get_by_natural_key'):
def m2m_convert(value):
if hasattr(value, '__iter__') and not isinstance(value, six.text_type):
return field.remote_field.model._default_manager.db_manager(db).get_by_natural_key(*value).pk
else:
return force_text(field.remote_field.model._meta.pk.to_python(value), strings_only=True)
else:
m2m_convert = lambda v: force_text(field.remote_field.model._meta.pk.to_python(v), strings_only=True)
try:
m2m_data[field.name] = []
for pk in field_value:
m2m_data[field.name].append(m2m_convert(pk))
except Exception as e:
raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), pk)
# Handle FK fields
elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
if field_value is not None:
try:
if hasattr(field.remote_field.model._default_manager, 'get_by_natural_key'):
if hasattr(field_value, '__iter__') and not isinstance(field_value, six.text_type):
obj = field.remote_field.model._default_manager.db_manager(db).get_by_natural_key(*field_value)
value = getattr(obj, field.remote_field.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.remote_field.model._meta.pk.remote_field:
value = value.pk
else:
value = field.remote_field.model._meta.get_field(field.remote_field.field_name).to_python(field_value)
data[field.attname] = value
else:
data[field.attname] = field.remote_field.model._meta.get_field(field.remote_field.field_name).to_python(field_value)
except Exception as e:
raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value)
else:
data[field.attname] = None
# Handle all other fields
else:
try:
data[field.name] = field.to_python(field_value)
except Exception as e:
raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value)
obj = base.build_instance(Model, data, db)
yield base.DeserializedObject(obj, m2m_data)
def _get_model(model_identifier):
"""
Helper to look up a model from an "app_label.model_name" string.
"""
try:
return apps.get_model(model_identifier)
except (LookupError, TypeError):
raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
| bsd-3-clause |
osrf/opensplice | src/api/dcps/python/test/testArrayOfStruct.py | 2 | 2303 | #
# Vortex OpenSplice
#
# This software and documentation are Copyright 2006 to TO_YEAR ADLINK
# Technology Limited, its affiliated companies and licensors. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Created on Dec 7, 2017
@author: prismtech
'''
import unittest
import struct
import countTest
if countTest.count_test == False:
import ArrayOfStruct.basic.module_ArrayOfStruct
class TestArrayOfStruct(unittest.TestCase):
def testCopyInCopyOut(self):
data = ArrayOfStruct.basic.module_ArrayOfStruct.ArrayOfStruct_struct(
long1 = 12,
array1=[ArrayOfStruct.basic.module_ArrayOfStruct.Inner(long1=21,double1=2.5),
ArrayOfStruct.basic.module_ArrayOfStruct.Inner(long1=31,double1=3.5)],
mylong1 = 42)
print('data: ' + str(data))
print('data._get_packing_fmt(): ', data._get_packing_fmt())
print('data._get_packing_args(): ', data._get_packing_args())
buffer = data._serialize()
print('buffer: ', buffer)
values = struct.unpack(data._get_packing_fmt(), buffer)
data1 = ArrayOfStruct.basic.module_ArrayOfStruct.ArrayOfStruct_struct()
data1._deserialize(list(values))
self.assertEqual(data.long1, data1.long1)
self.assertEqual(data.array1[0].long1, data1.array1[0].long1)
self.assertEqual(data.array1[0].double1, data1.array1[0].double1)
self.assertEqual(data.array1[1].long1, data1.array1[1].long1)
self.assertEqual(data.array1[1].double1, data1.array1[1].double1)
self.assertEqual(data.mylong1, data1.mylong1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testCopyInCopyOut']
unittest.main() | apache-2.0 |
pshowalter/solutions-geoprocessing-toolbox | utils/test/clearing_operations_tests/ClearingOperationsNumberFeaturesTestCase.py | 1 | 4144 | #------------------------------------------------------------------------------
# Copyright 2017 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
import logging
import arcpy
from arcpy.sa import *
import sys
import traceback
import datetime
import os
# Add parent folder to python path if running test case standalone
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
import Configuration
import UnitTestUtilities
import DataDownload
class ClearingOperationsNumberFeaturesTestCase(unittest.TestCase):
toolboxUnderTest = None # Set to Pro or ArcMap toolbox at runtime
inputArea = None
pointFeatures = None
output = None
scratchGDB = None
def setUp(self):
if Configuration.DEBUG == True: print(" ClearingOperationsNumberFeaturesTestCase.setUp")
''' Initialization needed if running Test Case standalone '''
Configuration.GetLogger()
Configuration.GetPlatform()
''' End standalone initialization '''
self.toolboxUnderTest = Configuration.clearingOperationsToolboxPath + \
Configuration.GetToolboxSuffix()
UnitTestUtilities.checkArcPy()
DataDownload.runDataDownload(Configuration.clearingOperationsPath, \
Configuration.clearingOperationsInputGDB, Configuration.clearingOperationsURL)
if (self.scratchGDB == None) or (not arcpy.Exists(self.scratchGDB)):
self.scratchGDB = UnitTestUtilities.createScratch(Configuration.clearingOperationsPath)
# set up inputs
self.inputArea = os.path.join(Configuration.clearingOperationsInputGDB, r"AO")
self.pointFeatures = os.path.join(Configuration.clearingOperationsInputGDB, r"Structures")
UnitTestUtilities.checkFilePaths([Configuration.clearingOperationsPath])
UnitTestUtilities.checkGeoObjects([Configuration.clearingOperationsInputGDB, self.toolboxUnderTest, self.scratchGDB, self.inputArea, self.pointFeatures])
def tearDown(self):
if Configuration.DEBUG == True: print(" ClearingOperationsNumberFeaturesTestCase.tearDown")
UnitTestUtilities.deleteScratch(self.scratchGDB)
def testClearingOperationsNumberFeatures(self):
        if Configuration.DEBUG == True: print(".....ClearingOperationsNumberFeaturesTestCase.testClearingOperationsNumberFeatures")
print("Importing toolbox...")
arcpy.ImportToolbox(self.toolboxUnderTest)
arcpy.env.overwriteOutput = True
#inputs
fieldToNumber = "number"
output = os.path.join(self.scratchGDB, "numFields")
#Testing
runToolMsg="Running tool (Number Features)"
arcpy.AddMessage(runToolMsg)
Configuration.Logger.info(runToolMsg)
try:
#Calling the NumberFeatures_ClearingOperations Script Tool
arcpy.NumberFeatures_ClearingOperations(self.inputArea, self.pointFeatures, fieldToNumber, output)
except arcpy.ExecuteError:
UnitTestUtilities.handleArcPyError()
except:
UnitTestUtilities.handleGeneralError()
result = arcpy.GetCount_management(output)
count = int(result.getOutput(0))
cursor = arcpy.SearchCursor(output)
row = cursor.next()
val = row.getValue(fieldToNumber)
print("Field number first row: " + str(val) + " should not be null")
self.assertIsNotNone(val)
print("number features: " + str(count))
self.assertEqual(count, 90)
if __name__ == "__main__":
unittest.main() | apache-2.0 |
hbuyse/VBTournaments | core/models.py | 1 | 11860 | #! /usr/bin/env python
__author__ = "Henri Buyse"
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
import datetime
import logging
logging.basicConfig()
logger = logging.getLogger()
class Event(models.Model):
"""Event : details of the tournament(s)
    One event corresponds to MULTIPLE tournaments
"""
SURFACE_CHOICES = (('sand', 'Sable'), ('grass', 'Herbe'), ('indoor', 'Intérieur'))
_vbuserprofile = models.ForeignKey('accounts.VBUserProfile', db_column='vbuserprofile', related_name='events')
_name = models.CharField(db_column='name', max_length=100, blank=False)
_nb_terrains = models.IntegerField(db_column='nb_terrains', blank=False)
_nb_gymnasiums = models.IntegerField(db_column='nb_gymnasiums', blank=False)
_nb_teams = models.SmallIntegerField(db_column='nb_teams', blank=False)
_night = models.BooleanField(db_column='night', default=False)
_surface = models.CharField(db_column='surface',
max_length=10,
blank=False,
choices=SURFACE_CHOICES)
_name_gymnasium = models.CharField(db_column='name_gymnasium', max_length=255, blank=True)
_nb_in_street = models.CharField(db_column='nb_in_street', max_length=10, blank=True)
_street = models.CharField(db_column='street', max_length=255, blank=False)
_city = models.CharField(db_column='city', max_length=255, blank=False)
_zip_code = models.CharField(db_column='zip_code', max_length=16, blank=True)
_region = models.CharField(db_column='region', max_length=100, blank=True)
_country = models.CharField(db_column='country', max_length=50, blank=False)
_latitude = models.FloatField(db_column='latitude', blank=True, default=0)
_longitude = models.FloatField(db_column='longitude', blank=True, default=0)
# poster = models.ImageField()
_description = models.TextField(db_column='description', blank=False)
_website = models.URLField(db_column='website', max_length=100, blank=True)
_full = models.BooleanField(db_column='full', default=False)
def __str__(self):
return u"{0}".format(self.name)
def get_all_tournaments_related(self):
"""
        :return: A queryset of all the tournaments related to the event, ordered by date
"""
return self.tournaments.order_by('_date').all()
# TODO: Try to get this method (get_all_events) out of the class
def get_all_events(self):
"""
        :return: a queryset of all the events
"""
return Event.objects.all()
def get_address(self):
"""
        Based on the address of the event given by the user, build two
        formatted address strings: one for HTML display ("for_html") and one
        used by easy_maps to show the place on a map ("for_maps").
        :return: A dict with "for_html" and "for_maps" address strings
"""
# address_1 = [self.country, self.region, self.city, self.street, str(self.nb_in_street), self.name_gymnasium]
address_1 = [self.country, self.region, self.city, self.street, str(self.nb_in_street)]
address_2 = ", ".join([part_address for part_address in address_1 if part_address])
address = "{0}\n".format(self.name_gymnasium) if self.name_gymnasium else ""
address += "{0}, {1}\n".format(self.nb_in_street, self.street)
address += "{0} {1}\n".format(self.zip_code, self.city)
address += "{0}\n".format(self.region) if self.region else str()
address += "{0}".format(self.country)
return {
"for_html": address,
"for_maps": address_2,
}
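    # Illustrative (hedged) example of the returned mapping for a gymnasium
    # "Halle A" at 12, Rue de la Paix, 75002 Paris, France (no region set):
    #   {
    #       "for_html": "Halle A\n12, Rue de la Paix\n75002 Paris\nFrance",
    #       "for_maps": "France, Paris, Rue de la Paix, 12",
    #   }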
@property
def vbuserprofile(self):
"""
        :return: The event organizer's VBUserProfile.
"""
return self._vbuserprofile
@vbuserprofile.setter
def vbuserprofile(self, val):
self._vbuserprofile = val
@property
def name(self):
return self._name
@name.setter
def name(self, val):
self._name = val
@property
def nb_terrains(self):
return self._nb_terrains
@nb_terrains.setter
def nb_terrains(self, val):
self._nb_terrains = val
@property
def nb_gymnasiums(self):
return self._nb_gymnasiums
@nb_gymnasiums.setter
def nb_gymnasiums(self, val):
self._nb_gymnasiums = val
@property
def nb_teams(self):
return self._nb_teams
@nb_teams.setter
def nb_teams(self, val):
self._nb_teams = val
@property
def night(self):
return self._night
@night.setter
def night(self, val):
self._night = val
@property
def surface(self):
return self._surface
@surface.setter
def surface(self, val):
self._surface = val
@property
def name_gymnasium(self):
return self._name_gymnasium
@name_gymnasium.setter
def name_gymnasium(self, val):
self._name_gymnasium = val
@property
def nb_in_street(self):
return self._nb_in_street
@nb_in_street.setter
def nb_in_street(self, val):
self._nb_in_street = val
@property
def street(self):
return self._street
@street.setter
def street(self, val):
self._street = val
@property
def city(self):
return self._city
@city.setter
def city(self, val):
self._city = val
@property
def zip_code(self):
return self._zip_code
@zip_code.setter
def zip_code(self, val):
self._zip_code = val
@property
def region(self):
return self._region
@region.setter
def region(self, val):
self._region = val
@property
def country(self):
return self._country
@country.setter
def country(self, val):
self._country = val
@property
def get_country_iso(self):
return self._country[:2].upper()
@property
def latitude(self):
return self._latitude
@latitude.setter
def latitude(self, val):
self._latitude = val
@property
def longitude(self):
return self._longitude
@longitude.setter
def longitude(self, val):
self._longitude = val
@property
def description(self):
return self._description
@description.setter
def description(self, val):
self._description = val
@property
def website(self):
return self._website
@website.setter
def website(self, val):
self._website = val
@property
def full(self):
return self._full
@full.setter
def full(self, val):
self._full = val
class Tournament(models.Model):
"""Create a Tournament
    An event can host multiple tournament formats during the same day.
    One tournament takes place at ONE place and belongs to ONE VBUserProfile
"""
TWO_VS_TWO = 2
THREE_VS_THREE = 3
FOUR_VS_FOUR = 4
SIX_VS_SIX = 6
_event = models.ForeignKey('core.Event', db_column='event', related_name='tournaments')
_date = models.DateField(db_column='date', auto_now=False)
_starting_time = models.TimeField(db_column='time', auto_now=False, default='09:00')
_nb_players = models.PositiveSmallIntegerField(db_column='nb_players',
choices=[
(TWO_VS_TWO, '2x2'),
(THREE_VS_THREE, '3x3'),
(FOUR_VS_FOUR, '4x4'),
(SIX_VS_SIX, '6x6')
])
_sx_players = models.CharField(db_column='sx_players',
max_length=8,
choices=[
('male', 'Masculin'),
('female', 'Féminin'),
('mixed', 'Mixte')
])
_price = models.FloatField(db_column='price', blank=False, default=0)
# Different levels
_hobby = models.BooleanField(db_column='hobby', default=False)
_departmental = models.BooleanField(db_column='departmental', default=False)
_regional = models.BooleanField(db_column='regional', default=False)
_national = models.BooleanField(db_column='national', default=False)
_professional = models.BooleanField(db_column='professional', default=False)
_kids = models.BooleanField(db_column='kids', default=False)
def __str__(self):
return u"{0} | {1} | {2} | {3}".format(self.event.name, self.date, self.nb_players, self.sx_players)
def at_least_one_level(self):
"""
        :return: True if at least one level is selected
        """
        levels = [self.hobby, self.departmental, self.regional,
                  self.national, self.professional, self.kids]
        return True in levels
def get_event_name(self):
return self._event.name
def get_event_id(self):
return self._event.id
def get_day(self):
return self._date.strftime("%d")
def get_month(self):
return self._date.strftime("%b")
@property
def event(self):
return self._event
@event.setter
def event(self, val):
self._event = val
@property
def date(self):
return self._date
@date.setter
def date(self, val):
"""
        Validate that the date of the tournament is not in the past
        before storing it.
        """
        if val < datetime.date.today():
            raise ValidationError("The date of the tournament {} cannot be in the past!".format(self.event.name))
        self._date = val
@property
def starting_time(self):
return self._starting_time
@starting_time.setter
def starting_time(self, val):
self._starting_time = val
@property
def nb_players(self):
return self._nb_players
@nb_players.setter
def nb_players(self, val):
self._nb_players = val
@property
def sx_players(self):
return self._sx_players
@sx_players.setter
def sx_players(self, val):
self._sx_players = val
@property
def price(self):
return self._price
@price.setter
def price(self, val):
self._price = val
@property
def hobby(self):
return self._hobby
@hobby.setter
def hobby(self, val):
self._hobby = val
@property
def departmental(self):
return self._departmental
@departmental.setter
def departmental(self, val):
self._departmental = val
@property
def regional(self):
return self._regional
@regional.setter
def regional(self, val):
self._regional = val
@property
def national(self):
return self._national
@national.setter
def national(self, val):
self._national = val
@property
def professional(self):
return self._professional
@professional.setter
def professional(self, val):
self._professional = val
@property
def kids(self):
return self._kids
@kids.setter
def kids(self, val):
self._kids = val
def get_list_levels(self):
l = list()
if self.hobby:
l.append("Loisirs")
if self.departmental:
l.append("Départemental")
if self.regional:
l.append("Régional")
if self.national:
l.append("National")
if self.professional:
l.append("Professionel")
if self.kids:
l.append("Enfant")
return l
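# Hedged usage sketch (``profile`` and every field value below are
# illustrative; fields are passed by their underscored attribute names,
# which bypasses the validating property setters):
#
#     event = Event(_vbuserprofile=profile, _name='Beach Open',
#                   _nb_terrains=4, _nb_gymnasiums=1, _nb_teams=16,
#                   _surface='sand', _street='Grande Rue', _city='Paris',
#                   _country='France', _description='Annual 2x2 tournament')
#     event.save()
#     t = Tournament(_event=event, _date=datetime.date.today(),
#                    _nb_players=Tournament.TWO_VS_TWO,
#                    _sx_players='mixed', _hobby=True)
#     t.save()
#     assert t.at_least_one_level()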
| mit |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/duplicity/backends/gdocsbackend.py | 4 | 12305 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2011 Carlos Abalde <carlos.abalde@gmail.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import string
import urllib
import duplicity.backend
from duplicity.backend import retry
from duplicity import log
from duplicity.errors import * #@UnusedWildImport
class GDocsBackend(duplicity.backend.Backend):
"""Connect to remote store using Google Google Documents List API"""
ROOT_FOLDER_ID = 'folder%3Aroot'
BACKUP_DOCUMENT_TYPE = 'application/binary'
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# Import Google Data APIs libraries.
try:
global atom
global gdata
import atom.data
import gdata.client
import gdata.docs.client
import gdata.docs.data
except ImportError:
raise BackendException('Google Docs backend requires Google Data APIs Python '
'Client Library (see http://code.google.com/p/gdata-python-client/).')
# Setup client instance.
self.client = gdata.docs.client.DocsClient(source='duplicity $version')
self.client.ssl = True
self.client.http_client.debug = False
self.__authorize(parsed_url.username + '@' + parsed_url.hostname, self.get_password())
        # Fetch destination folder entry (and create hierarchy if required).
folder_names = string.split(parsed_url.path[1:], '/')
parent_folder = None
parent_folder_id = GDocsBackend.ROOT_FOLDER_ID
for folder_name in folder_names:
entries = self.__fetch_entries(parent_folder_id, 'folder', folder_name)
if entries is not None:
if len(entries) == 1:
parent_folder = entries[0]
elif len(entries) == 0:
parent_folder = self.client.create(gdata.docs.data.FOLDER_LABEL, folder_name, parent_folder)
else:
parent_folder = None
if parent_folder:
parent_folder_id = parent_folder.resource_id.text
else:
raise BackendException("Error while creating destination folder '%s'." % folder_name)
else:
raise BackendException("Error while fetching destination folder '%s'." % folder_name)
self.folder = parent_folder
@retry
def put(self, source_path, remote_filename=None, raise_errors = False):
"""Transfer source_path to remote_filename"""
# Default remote file name.
if not remote_filename:
remote_filename = source_path.get_filename()
# Upload!
try:
# If remote file already exists in destination folder, remove it.
entries = self.__fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
remote_filename)
for entry in entries:
self.client.delete(entry.get_edit_link().href + '?delete=true', force=True)
# Set uploader instance. Note that resumable uploads are required in order to
# enable uploads for all file types.
# (see http://googleappsdeveloper.blogspot.com/2011/05/upload-all-file-types-to-any-google.html)
file = source_path.open()
uploader = gdata.client.ResumableUploader(
self.client, file, GDocsBackend.BACKUP_DOCUMENT_TYPE, os.path.getsize(file.name),
chunk_size=gdata.client.ResumableUploader.DEFAULT_CHUNK_SIZE,
desired_class=gdata.docs.data.DocsEntry)
if uploader:
# Chunked upload.
entry = gdata.docs.data.DocsEntry(title = atom.data.Title(text = remote_filename))
uri = '/feeds/upload/create-session/default/private/full?convert=false'
entry = uploader.UploadFile(uri, entry = entry)
if entry:
# Move to destination folder.
# TODO: any ideas on how to avoid this step?
if self.client.Move(entry, self.folder):
assert not file.close()
return
else:
self.__handle_error("Failed to move uploaded file '%s' to destination remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text), raise_errors)
else:
self.__handle_error("Failed to upload file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text), raise_errors)
else:
self.__handle_error("Failed to initialize upload of file '%s' to remote folder '%s'"
% (source_path.get_filename(), self.folder.title.text), raise_errors)
assert not file.close()
except Exception, e:
self.__handle_error("Failed to upload file '%s' to remote folder '%s': %s"
% (source_path.get_filename(), self.folder.title.text, str(e)), raise_errors)
@retry
def get(self, remote_filename, local_path, raise_errors = False):
"""Get remote filename, saving it to local_path"""
try:
entries = self.__fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
remote_filename)
if len(entries) == 1:
entry = entries[0]
self.client.Download(entry, local_path.name)
local_path.setdata()
return
else:
self.__handle_error("Failed to find file '%s' in remote folder '%s'"
% (remote_filename, self.folder.title.text), raise_errors)
except Exception, e:
self.__handle_error("Failed to download file '%s' in remote folder '%s': %s"
% (remote_filename, self.folder.title.text, str(e)), raise_errors)
@retry
def list(self, raise_errors = False):
"""List files in folder"""
try:
entries = self.__fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE)
return [entry.title.text for entry in entries]
except Exception, e:
self.__handle_error("Failed to fetch list of files in remote folder '%s': %s"
% (self.folder.title.text, str(e)), raise_errors)
@retry
def delete(self, filename_list, raise_errors = False):
"""Delete files in filename_list"""
for filename in filename_list:
try:
entries = self.__fetch_entries(self.folder.resource_id.text,
GDocsBackend.BACKUP_DOCUMENT_TYPE,
filename)
if len(entries) > 0:
success = True
for entry in entries:
if not self.client.delete(entry.get_edit_link().href + '?delete=true', force = True):
success = False
if not success:
self.__handle_error("Failed to remove file '%s' in remote folder '%s'"
% (filename, self.folder.title.text), raise_errors)
else:
log.Warn("Failed to fetch file '%s' in remote folder '%s'"
% (filename, self.folder.title.text))
except Exception, e:
self.__handle_error("Failed to remove file '%s' in remote folder '%s': %s"
% (filename, self.folder.title.text, str(e)), raise_errors)
def __handle_error(self, message, raise_errors = True):
if raise_errors:
raise BackendException(message)
else:
log.FatalError(message, log.ErrorCode.backend_error)
def __authorize(self, email, password, captcha_token = None, captcha_response = None):
try:
self.client.client_login(email,
password,
source = 'duplicity $version',
service = 'writely',
captcha_token = captcha_token,
captcha_response = captcha_response)
except gdata.client.CaptchaChallenge, challenge:
            print('A captcha challenge is required. Please visit ' + challenge.captcha_url)
answer = None
while not answer:
answer = raw_input('Answer to the challenge? ')
self.__authorize(email, password, challenge.captcha_token, answer)
except gdata.client.BadAuthentication:
self.__handle_error('Invalid user credentials given. Be aware that accounts '
'that use 2-step verification require creating an application specific '
                                'access code for using this Duplicity backend. Follow the instructions in '
'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 '
'and create your application-specific password to run duplicity backups.')
except Exception, e:
self.__handle_error('Error while authenticating client: %s.' % str(e))
def __fetch_entries(self, folder_id, type, title = None):
# Build URI.
uri = '/feeds/default/private/full/%s/contents' % folder_id
if type == 'folder':
uri += '/-/folder?showfolders=true'
elif type == GDocsBackend.BACKUP_DOCUMENT_TYPE:
uri += '?showfolders=false'
else:
uri += '?showfolders=true'
if title:
uri += '&title=' + urllib.quote(title) + '&title-exact=true'
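        # Hedged example of a built URI for a title lookup in the root
        # folder (the file name is illustrative):
        #   /feeds/default/private/full/folder%3Aroot/contents
        #   ?showfolders=false&title=duplicity-full.manifest&title-exact=true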
try:
# Fetch entries
entries = self.client.get_everything(uri = uri)
            # When filtering by entry title, the API returns (for unknown
            # reasons) documents in other folders (apart from folder_id)
            # matching the title, so some extra filtering is required.
if title:
result = []
for entry in entries:
if (not type) or (entry.get_document_type() == type):
if folder_id != GDocsBackend.ROOT_FOLDER_ID:
for link in entry.in_folders():
folder_entry = self.client.get_entry(link.href, None, None,
desired_class=gdata.docs.data.DocsEntry)
if folder_entry and (folder_entry.resource_id.text == folder_id):
result.append(entry)
elif len(entry.in_folders()) == 0:
result.append(entry)
else:
result = entries
# Done!
return result
except Exception, e:
self.__handle_error('Error while fetching remote entries: %s.' % str(e))
duplicity.backend.register_backend('gdocs', GDocsBackend)
| gpl-3.0 |
Serag8/Bachelor | google_appengine/google/appengine/tools/devappserver2/start_response_utils_test.py | 8 | 2160 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.start_response_utils."""
import unittest
from google.appengine.tools.devappserver2 import start_response_utils
class TestCapturingStartResponse(unittest.TestCase):
"""Tests for start_response_util.CapturingStartResponse."""
def test_success(self):
start_response = start_response_utils.CapturingStartResponse()
stream = start_response('200 OK', [('header1', 'value1')])
stream.write('Hello World!')
self.assertEqual('200 OK', start_response.status)
self.assertEqual(None, start_response.exc_info)
self.assertEqual([('header1', 'value1')], start_response.response_headers)
self.assertEqual('Hello World!', start_response.response_stream.getvalue())
def test_exception(self):
exc_info = (object(), object(), object())
start_response = start_response_utils.CapturingStartResponse()
start_response('200 OK', [('header1', 'value1')])
start_response('500 Internal Server Error', [], exc_info)
self.assertEqual('500 Internal Server Error', start_response.status)
self.assertEqual(exc_info, start_response.exc_info)
self.assertEqual([], start_response.response_headers)
def test_merged_response(self):
start_response = start_response_utils.CapturingStartResponse()
stream = start_response('200 OK', [('header1', 'value1')])
stream.write('Hello World!')
self.assertEqual('Hello World! Goodbye World!',
start_response.merged_response([' Goodbye ', 'World!']))
if __name__ == '__main__':
unittest.main()
| mit |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/lib/pan-python/bin/panxapi.py | 2 | 31601 | #!/usr/bin/env python
#
# Copyright (c) 2013-2015 Kevin Steves <kevin.steves@pobox.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from __future__ import print_function
from datetime import datetime
import sys
import os
import getopt
import re
import json
import pprint
import logging
import ssl
import signal
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(libpath, os.pardir, 'lib')]
import pan.xapi
import pan.commit
import pan.config
debug = 0
def main():
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
# Windows
pass
set_encoding()
options = parse_opts()
if options['debug']:
logger = logging.getLogger()
if options['debug'] == 3:
logger.setLevel(pan.xapi.DEBUG3)
elif options['debug'] == 2:
logger.setLevel(pan.xapi.DEBUG2)
elif options['debug'] == 1:
logger.setLevel(pan.xapi.DEBUG1)
# log_format = '%(levelname)s %(name)s %(message)s'
log_format = '%(message)s'
handler = logging.StreamHandler()
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
if options['cafile'] or options['capath']:
ssl_context = create_ssl_context(options['cafile'],
options['capath'])
else:
ssl_context = None
try:
xapi = pan.xapi.PanXapi(timeout=options['timeout'],
tag=None if not options['tag'] else options['tag'],
use_http=options['use_http'],
use_get=options['use_get'],
api_username=options['api_username'],
api_password=options['api_password'],
api_key=options['api_key'],
hostname=options['hostname'],
port=options['port'],
serial=options['serial'],
ssl_context=ssl_context)
except pan.xapi.PanXapiError as msg:
print('pan.xapi.PanXapi:', msg, file=sys.stderr)
sys.exit(1)
if options['debug'] > 2:
print('xapi.__str__()===>\n', xapi, '\n<===',
sep='', file=sys.stderr)
extra_qs_used = False
try:
if options['keygen']:
action = 'keygen'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.keygen(extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if (options['api_username'] and options['api_password'] and
options['hostname'] and options['tag'] is not None):
# .panrc
d = datetime.now()
x = ''
if options['tag']:
x = '%%%s' % options['tag']
print('# %s generated: %s' % (os.path.basename(sys.argv[0]),
d.strftime('%Y/%m/%d %H:%M:%S')))
print('hostname%s=%s' % (x, options['hostname']))
print('api_key%s=%s' % (x, xapi.api_key))
else:
print('API key: "%s"' % xapi.api_key)
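            # When -l, -h and -t are all given, the .panrc entry printed
            # above looks like this (hedged; host and key are illustrative):
            #   # panxapi.py generated: 2015/06/01 10:30:00
            #   hostname%mytag=192.0.2.1
            #   api_key%mytag=LUFRPT1rcUdVL2JUSEVsZz0...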
if options['show']:
action = 'show'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.show(xpath=options['xpath'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['get']:
action = 'get'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.get(xpath=options['xpath'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['delete']:
action = 'delete'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.delete(xpath=options['xpath'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['edit']:
action = 'edit'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.edit(xpath=options['xpath'],
element=options['element'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['set']:
action = 'set'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.set(xpath=options['xpath'],
element=options['element'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['dynamic-update']:
action = 'dynamic-update'
kwargs = {
'cmd': options['cmd'],
}
if options['ad_hoc'] is not None:
extra_qs_used = True
kwargs['extra_qs'] = options['ad_hoc']
if len(options['vsys']):
kwargs['vsys'] = options['vsys'][0]
xapi.user_id(**kwargs)
print_status(xapi, action)
print_response(xapi, options)
if options['move'] is not None:
action = 'move'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.move(xpath=options['xpath'],
where=options['move'],
dst=options['dst'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['rename']:
action = 'rename'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.rename(xpath=options['xpath'],
newname=options['dst'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['clone']:
action = 'clone'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.clone(xpath=options['xpath'],
xpath_from=options['src'],
newname=options['dst'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['override']:
action = 'override'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.override(xpath=options['xpath'],
element=options['element'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['export'] is not None:
action = 'export'
if options['ad_hoc'] is not None:
extra_qs_used = True
if options['pcapid'] is not None:
xapi.export(category=options['export'],
pcapid=options['pcapid'],
search_time=options['stime'],
serialno=options['serial'],
extra_qs=options['ad_hoc'])
else:
xapi.export(category=options['export'],
from_name=options['src'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['pcap_listing']:
pcap_listing(xapi, options['export'])
save_attachment(xapi, options)
if options['log'] is not None:
action = 'log'
if options['ad_hoc'] is not None:
extra_qs_used = True
xapi.log(log_type=options['log'],
nlogs=options['nlogs'],
skip=options['skip'],
filter=options['filter'],
interval=options['interval'],
timeout=options['job_timeout'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['report'] is not None:
action = 'report'
if options['ad_hoc'] is not None:
extra_qs_used = True
vsys = options['vsys'][0] if len(options['vsys']) else None
xapi.report(reporttype=options['report'],
reportname=options['name'],
vsys=vsys,
interval=options['interval'],
timeout=options['job_timeout'],
extra_qs=options['ad_hoc'])
print_status(xapi, action)
print_response(xapi, options)
if options['op'] is not None:
action = 'op'
kwargs = {
'cmd': options['op'],
'cmd_xml': options['cmd_xml'],
}
if options['ad_hoc'] is not None:
extra_qs_used = True
kwargs['extra_qs'] = options['ad_hoc']
if len(options['vsys']):
kwargs['vsys'] = options['vsys'][0]
xapi.op(**kwargs)
print_status(xapi, action)
print_response(xapi, options)
if (options['commit'] or options['commit_all']):
if options['cmd']:
cmd = options['cmd']
if options['cmd_xml']:
cmd = xapi.cmd_xml(cmd)
else:
c = pan.commit.PanCommit(validate=options['validate'],
force=options['force'],
commit_all=options['commit_all'],
merge_with_candidate=
options['merge'])
for part in options['partial']:
if part == 'device-and-network-excluded':
c.device_and_network_excluded()
elif part == 'policy-and-objects-excluded':
c.policy_and_objects_excluded()
elif part == 'shared-object-excluded':
c.shared_object_excluded()
elif part == 'no-vsys':
c.no_vsys()
elif part == 'vsys':
c.vsys(options['vsys'])
if options['serial'] is not None:
c.device(options['serial'])
if options['group'] is not None:
c.device_group(options['group'])
if options['commit_all'] and options['vsys']:
c.vsys(options['vsys'][0])
cmd = c.cmd()
kwargs = {
'cmd': cmd,
'sync': options['sync'],
'interval': options['interval'],
'timeout': options['job_timeout'],
}
if options['ad_hoc'] is not None:
extra_qs_used = True
kwargs['extra_qs'] = options['ad_hoc']
if options['commit_all']:
kwargs['action'] = 'all'
action = 'commit'
xapi.commit(**kwargs)
print_status(xapi, action)
print_response(xapi, options)
if not extra_qs_used and options['ad_hoc'] is not None:
action = 'ad_hoc'
xapi.ad_hoc(qs=options['ad_hoc'],
xpath=options['xpath'],
modify_qs=options['modify'])
print_status(xapi, action)
print_response(xapi, options)
except pan.xapi.PanXapiError as msg:
print_status(xapi, action, str(msg))
print_response(xapi, options)
sys.exit(1)
sys.exit(0)
def passwd_prompt():
import getpass
try:
x = getpass.getpass('Password: ')
except EOFError:
return None
except KeyboardInterrupt:
sys.exit(0)
return x
def parse_opts():
options = {
'delete': False,
'edit': False,
'get': False,
'keygen': False,
'show': False,
'set': False,
'dynamic-update': False,
'commit': False,
'validate': False,
'force': False,
'partial': [],
'sync': False,
'vsys': [],
'commit_all': False,
'ad_hoc': None,
'modify': False,
'op': None,
'export': None,
'log': None,
'report': None,
'name': None,
'src': None,
'dst': None,
'move': None,
'rename': False,
'clone': False,
'override': False,
'api_username': None,
'api_password': None,
'hostname': None,
'port': None,
'serial': None,
'group': None,
'merge': False,
'nlogs': None,
'skip': None,
'filter': None,
'interval': None,
'job_timeout': None,
'stime': None,
'pcapid': None,
'api_key': None,
'cafile': None,
'capath': None,
'print_xml': False,
'print_result': False,
'print_python': False,
'print_json': False,
'print_text': False,
'cmd_xml': False,
'pcap_listing': False,
'recursive': False,
'use_http': False,
'use_get': False,
'debug': 0,
'tag': None,
'xpath': None,
'element': None,
'cmd': None,
'timeout': None,
}
valid_where = ['after', 'before', 'top', 'bottom']
short_options = 'de:gksS:U:C:A:o:l:h:P:K:xpjrXHGDt:T:'
long_options = ['version', 'help',
'ad-hoc=', 'modify', 'validate', 'force', 'partial=',
'sync', 'vsys=', 'src=', 'dst=', 'move=', 'rename',
'clone', 'override=', 'export=', 'log=', 'recursive',
'cafile=', 'capath=', 'ls', 'serial=',
'group=', 'merge', 'nlogs=', 'skip=', 'filter=',
'interval=', 'timeout=',
'stime=', 'pcapid=', 'text',
'report=', 'name=',
]
try:
opts, args = getopt.getopt(sys.argv[1:],
short_options,
long_options)
except getopt.GetoptError as error:
print(error, file=sys.stderr)
sys.exit(1)
for opt, arg in opts:
if opt == '-d':
options['delete'] = True
elif opt == '-e':
options['edit'] = True
options['element'] = get_element(arg)
elif opt == '-g':
options['get'] = True
elif opt == '-k':
options['keygen'] = True
elif opt == '-s':
options['show'] = True
elif opt == '-S':
options['set'] = True
options['element'] = get_element(arg)
elif opt == '-U':
options['dynamic-update'] = True
options['cmd'] = get_element(arg)
elif opt == '-C':
options['commit'] = True
options['cmd'] = get_element(arg)
elif opt == '--validate':
options['validate'] = True
elif opt == '--force':
options['force'] = True
elif opt == '--partial':
if arg:
l = get_parts(arg)
[options['partial'].append(s) for s in l]
elif opt == '--sync':
options['sync'] = True
elif opt == '--vsys':
if arg:
l = get_vsys(arg)
[options['vsys'].append(s) for s in l]
elif opt == '-A':
options['commit_all'] = True
options['cmd'] = get_element(arg)
elif opt == '--ad-hoc':
options['ad_hoc'] = arg
elif opt == '--modify':
options['modify'] = True
elif opt == '-o':
options['op'] = get_element(arg)
elif opt == '--export':
options['export'] = arg
elif opt == '--log':
options['log'] = arg
elif opt == '--report':
options['report'] = arg
elif opt == '--name':
options['name'] = arg
elif opt == '--src':
options['src'] = arg
elif opt == '--dst':
options['dst'] = arg
elif opt == '--move':
if arg not in valid_where:
print('Invalid where: "%s"' % arg, file=sys.stderr)
sys.exit(1)
options['move'] = arg
elif opt == '--rename':
options['rename'] = True
elif opt == '--clone':
options['clone'] = True
elif opt == '--override':
options['override'] = True
options['element'] = get_element(arg)
elif opt == '-l':
try:
(options['api_username'],
options['api_password']) = arg.split(':', 1)
except ValueError:
options['api_username'] = arg
options['api_password'] = passwd_prompt()
elif opt == '-P':
options['port'] = arg
elif opt == '--serial':
options['serial'] = arg
elif opt == '--group':
options['group'] = arg
elif opt == '--merge':
options['merge'] = True
elif opt == '--nlogs':
options['nlogs'] = arg
elif opt == '--skip':
options['skip'] = arg
elif opt == '--filter':
options['filter'] = arg
elif opt == '--interval':
options['interval'] = arg
elif opt == '--timeout':
options['job_timeout'] = arg
elif opt == '--stime':
options['stime'] = arg
elif opt == '--pcapid':
options['pcapid'] = arg
elif opt == '-h':
options['hostname'] = arg
elif opt == '-K':
options['api_key'] = arg
elif opt == '--cafile':
options['cafile'] = arg
elif opt == '--capath':
options['capath'] = arg
elif opt == '-x':
options['print_xml'] = True
elif opt == '-p':
options['print_python'] = True
elif opt == '-j':
options['print_json'] = True
elif opt == '-r':
options['print_result'] = True
elif opt == '--text':
options['print_text'] = True
elif opt == '-X':
options['cmd_xml'] = True
elif opt == '--ls':
options['pcap_listing'] = True
elif opt == '--recursive':
options['recursive'] = True
elif opt == '-H':
options['use_http'] = True
elif opt == '-G':
options['use_get'] = True
elif opt == '-D':
if not options['debug'] < 3:
print('Maximum debug level is 3', file=sys.stderr)
sys.exit(1)
global debug
debug += 1
options['debug'] = debug
elif opt == '-t':
# allow '' to create tagname-less .panrc
options['tag'] = arg
elif opt == '-T':
options['timeout'] = arg
elif opt == '--version':
print('pan-python', pan.xapi.__version__)
sys.exit(0)
elif opt == '--help':
usage()
sys.exit(0)
else:
assert False, 'unhandled option %s' % opt
if len(args) > 0:
s = get_element(args.pop(0))
options['xpath'] = s.rstrip('\r\n')
if len(args) > 0:
print('Extra options after xpath:', args, file=sys.stderr)
if options['debug'] > 2:
s = pprint.pformat(options, indent=4)
print(s, file=sys.stderr)
if options['print_result'] and not (options['print_xml'] or
options['print_json'] or
options['print_python']):
options['print_xml'] = True
return options
def create_ssl_context(cafile, capath):
if (sys.version_info.major == 2 and sys.hexversion >= 0x02070900 or
sys.version_info.major == 3 and sys.hexversion >= 0x03020000):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.verify_mode = ssl.CERT_REQUIRED
# added 3.4
if hasattr(context, 'check_hostname'):
context.check_hostname = True
try:
context.load_verify_locations(cafile=cafile, capath=capath)
except Exception as e:
print('cafile or capath invalid: %s' % e, file=sys.stderr)
sys.exit(1)
return context
print('Warning: Python %d.%d: cafile and capath ignored' %
(sys.version_info.major, sys.version_info.minor),
file=sys.stderr)
return None
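# Hedged usage sketch (the CA bundle path below is illustrative):
#
#     ctx = create_ssl_context(cafile='/etc/ssl/certs/ca-bundle.pem',
#                              capath=None)
#     # On Python >= 2.7.9 / 3.2 this returns a verifying SSLContext with
#     # SSLv2/SSLv3 disabled; on older interpreters it warns and returns
#     # None.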
def get_vsys(s):
list = []
vsys = s.split(',')
for v in vsys:
if v:
if v.isdigit():
list.append('vsys' + v)
else:
list.append(v)
return list
def get_parts(s):
list = []
parts = s.split(',')
for part in parts:
if part:
if not pan.commit.valid_part(part):
print('Invalid part: "%s"' % part, file=sys.stderr)
sys.exit(1)
list.append(part)
return list
def get_element(s):
stdin_char = '-'
if s == stdin_char:
element = sys.stdin.readlines()
elif os.path.isfile(s):
try:
f = open(s)
except IOError as msg:
print('open %s: %s' % (s, msg), file=sys.stderr)
sys.exit(1)
element = f.readlines()
f.close()
else:
element = s
element = ''.join(element)
if debug > 1:
print('element: \"%s\"' % element, file=sys.stderr)
return element
def print_status(xapi, action, exception_msg=None):
print(action, end='', file=sys.stderr)
if xapi.status_code is not None:
code = ' [code=\"%s\"]' % xapi.status_code
else:
code = ''
if xapi.status is not None:
print(': %s%s' % (xapi.status, code), end='', file=sys.stderr)
if exception_msg is not None and exception_msg:
print(': "%s"' % exception_msg.rstrip(), end='', file=sys.stderr)
elif xapi.status_detail is not None:
print(': "%s"' % xapi.status_detail.rstrip(), end='', file=sys.stderr)
print(file=sys.stderr)
def xml_python(xapi, result=False):
xpath = None
if result:
if (xapi.element_result is None or
not len(xapi.element_result)):
return None
elem = xapi.element_result
# select all child elements
xpath = '*'
else:
if xapi.element_root is None:
return None
elem = xapi.element_root
try:
conf = pan.config.PanConfig(config=elem)
except pan.config.PanConfigError as msg:
print('pan.config.PanConfigError:', msg, file=sys.stderr)
sys.exit(1)
d = conf.python(xpath)
return d
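# Hedged example: for an API response such as
#   <response status="success"><result><system><hostname>fw1</hostname>
#   </system></result></response>
# xml_python(xapi, result=True) returns a dict built by pan.config from the
# children of <result>, e.g. roughly {'system': {'hostname': 'fw1'}} (the
# exact shape depends on pan.config's mapping rules).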
def print_response(xapi, options):
if options['print_xml']:
if options['print_result']:
s = xapi.xml_result()
else:
s = xapi.xml_root()
if s is not None:
print(s.lstrip('\r\n').rstrip())
if options['print_python'] or options['print_json']:
d = xml_python(xapi, options['print_result'])
if d:
if options['print_python']:
print('var1 =', pprint.pformat(d))
if options['print_json']:
print(json.dumps(d, sort_keys=True,
separators=(',', ': '), indent=2))
if options['print_text'] and xapi.text_document is not None:
print(xapi.text_document, end='')
def save_attachment(xapi, options):
if xapi.export_result is None:
return
if options['src'] is not None:
# pcap
src_dir, src_file = os.path.split(options['src'])
else:
# 6.0 threat-pcap
# device-state
src_dir = None
src_file = xapi.export_result['file']
path = ''
path_done = False
if options['dst'] is not None:
path = options['dst']
if not os.path.isdir(path):
path_done = True
if not path_done:
if (options['recursive'] and src_dir and
re.search(r'^\d{8,8}$', src_dir)):
path = os.path.join(path, src_dir)
if not os.path.isdir(path):
try:
os.mkdir(path)
except OSError as msg:
print('mkdir %s: %s' % (path, msg),
file=sys.stderr)
# fallthrough, return on open fail
path = os.path.join(path, src_file)
try:
f = open(path, 'wb')
except IOError as msg:
print('open %s: %s' % (path, msg), file=sys.stderr)
return
try:
f.write(xapi.export_result['content'])
except IOError as msg:
print('write %s: %s' % (path, msg), file=sys.stderr)
f.close()
return
f.close()
print('exported %s: %s' % (xapi.export_result['category'], path),
file=sys.stderr)
def pcap_listing(xapi, category):
d = xml_python(xapi, result=True)
if d and 'dir-listing' in d:
pcap_listing = d['dir-listing']
if pcap_listing is None:
print('No %s directories' % category)
elif 'file' in pcap_listing:
file = pcap_listing['file']
if isinstance(file, str):
file = [file]
size = len(file)
print('%d %s files:' % (size, category))
for item in sorted(file):
print(' %s' % item)
elif 'dir' in pcap_listing:
dir = pcap_listing['dir']
if isinstance(dir, str):
dir = [dir]
size = len(dir)
print('%d %s directories:' % (size, category))
for item in sorted(dir):
print(' %s/' % item)
def set_encoding():
#
# XXX UTF-8 won't encode to latin-1/ISO8859-1:
# UnicodeEncodeError: 'latin-1' codec can't encode character '\u2019'
#
# do PYTHONIOENCODING=utf8 equivalent
#
encoding = 'utf-8'
if hasattr(sys.stdin, 'detach'):
# >= 3.1
import io
for s in ('stdin', 'stdout', 'stderr'):
line_buffering = getattr(sys, s).line_buffering
# print(s, line_buffering, file=sys.stderr)
setattr(sys, s, io.TextIOWrapper(getattr(sys, s).detach(),
encoding=encoding,
line_buffering=line_buffering))
else:
import codecs
sys.stdin = codecs.getreader(encoding)(sys.stdin)
sys.stdout = codecs.getwriter(encoding)(sys.stdout)
sys.stderr = codecs.getwriter(encoding)(sys.stderr)
def usage():
usage = '''%s [options] [xpath]
-d delete object at xpath
-e element edit XML element at xpath
-g get candidate config at xpath
-k generate API key
-s show active config at xpath
-S element set XML element at xpath
-U cmd execute dynamic update command
-C cmd commit candidate configuration
--validate validate candidate configuration
--force force commit when conflict
--partial part commit specified part
--sync synchronous commit
-A cmd commit-all (Panorama)
--ad-hoc query perform ad hoc request
--modify insert known fields in ad hoc query
-o cmd execute operational command
--export category export files
--log log-type retrieve log files
--report report-type retrieve reports (dynamic|predefined|custom)
--name report-name report name
--src src clone source node xpath
export source file/path/directory
--dst dst move/clone destination node name
rename new name
export destination file/path/directory
--move where move after, before, bottom or top
--rename rename object at xpath to dst
--clone clone object at xpath, src xpath
--override element override template object at xpath
--vsys vsys VSYS for dynamic update/partial commit/
operational command/report
-l api_username[:api_password]
-h hostname
-P port URL port number
--serial number serial number for Panorama redirection/
commit-all/threat-pcap
--group name device group for commit-all
--merge merge with candidate for commit-all
--nlogs num retrieve num logs
--skip num skip num logs
--filter filter log selection filter
--interval seconds log/commit/report job query interval
--timeout seconds log/commit/report job query timeout
--stime time search time for threat-pcap
--pcapid id threat-pcap ID
-K api_key
-x print XML response to stdout
-p print XML response in Python to stdout
-j print XML response in JSON to stdout
-r print result content when printing response
--text print text response to stdout
-X convert text command to XML
--ls print formatted PCAP listing to stdout
--recursive recursive export
-H use http URL scheme (default https)
-G use HTTP GET method (default POST)
-D enable debug (multiple up to -DDD)
-t tag .panrc tagname
-T seconds urlopen() timeout
--cafile path file containing CA certificates
--capath path directory of hashed certificate files
--version display version
--help display usage
'''
print(usage % os.path.basename(sys.argv[0]), end='')
if __name__ == '__main__':
main()
| isc |
fabian4/ceilometer | ceilometer/compute/pollsters/instance.py | 9 | 1129 | #
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer import sample
class InstancePollster(pollsters.BaseComputePollster):
@staticmethod
def get_samples(manager, cache, resources):
for instance in resources:
yield util.make_sample_from_instance(
instance,
name='instance',
type=sample.TYPE_GAUGE,
unit='instance',
volume=1,
)
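# Hedged note: `manager` and `cache` are accepted for the pollster interface
# but unused here; each polled instance yields exactly one gauge sample named
# 'instance' with volume 1, built by util.make_sample_from_instance.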
| apache-2.0 |
hainm/scipy | scipy/weave/tests/test_blitz_tools.py | 91 | 7141 | from __future__ import absolute_import, print_function
import time
import parser
import warnings
from numpy import (float32, float64, complex64, complex128,
zeros, random, array)
from numpy.testing import (TestCase, assert_equal,
assert_allclose, run_module_suite)
from scipy.weave import blitz_tools, blitz, BlitzWarning
from scipy.weave.ast_tools import harvest_variables
from weave_test_utils import remove_whitespace, debug_print, TempdirBlitz, dec
class TestAstToBlitzExpr(TestCase):
def generic_check(self,expr,desired):
ast = parser.suite(expr)
ast_list = ast.tolist()
actual = blitz_tools.ast_to_blitz_expr(ast_list)
actual = remove_whitespace(actual)
desired = remove_whitespace(desired)
assert_equal(actual,desired,expr)
def test_simple_expr(self):
# convert simple expr to blitz
expr = "a[:1:2] = b[:1+i+2:]"
desired = "a(blitz::Range(_beg,1-1,2))="\
"b(blitz::Range(_beg,1+i+2-1));"
self.generic_check(expr,desired)
def test_fdtd_expr(self):
# Convert fdtd equation to blitz.
# Note: This really should have "\" at the end of each line to
# indicate continuation.
expr = "ex[:,1:,1:] = ca_x[:,1:,1:] * ex[:,1:,1:]" \
"+ cb_y_x[:,1:,1:] * (hz[:,1:,1:] - hz[:,:-1,:])"\
"- cb_z_x[:,1:,1:] * (hy[:,1:,1:] - hy[:,1:,:-1])"
desired = 'ex(_all,blitz::Range(1,_end),blitz::Range(1,_end))='\
' ca_x(_all,blitz::Range(1,_end),blitz::Range(1,_end))'\
' *ex(_all,blitz::Range(1,_end),blitz::Range(1,_end))'\
'+cb_y_x(_all,blitz::Range(1,_end),blitz::Range(1,_end))'\
'*(hz(_all,blitz::Range(1,_end),blitz::Range(1,_end))'\
' -hz(_all,blitz::Range(_beg,Nhz(1)-1-1),_all))'\
' -cb_z_x(_all,blitz::Range(1,_end),blitz::Range(1,_end))'\
'*(hy(_all,blitz::Range(1,_end),blitz::Range(1,_end))'\
'-hy(_all,blitz::Range(1,_end),blitz::Range(_beg,Nhy(2)-1-1)));'
self.generic_check(expr,desired)
class TestBlitz(TestCase):
"""These are long running tests...
Would be useful to benchmark these things somehow.
"""
def generic_check(self, expr, arg_dict, type, size):
clean_result = array(arg_dict['result'],copy=1)
t1 = time.time()
exec(expr, globals(),arg_dict)
t2 = time.time()
standard = t2 - t1
desired = arg_dict['result']
arg_dict['result'] = clean_result
t1 = time.time()
blitz_tools.blitz(expr,arg_dict,{},verbose=0)
t2 = time.time()
compiled = t2 - t1
actual = arg_dict['result']
# TODO: this isn't very stringent. Need to tighten this up and
# learn where failures are occurring.
assert_allclose(abs(actual.ravel()), abs(desired.ravel()),
rtol=1e-4, atol=1e-6)
return standard, compiled
def generic_2d(self,expr,typ):
# The complex testing is pretty lame...
ast = parser.suite(expr)
arg_list = harvest_variables(ast.tolist())
all_sizes = [(10,10), (50,50), (100,100), (500,500), (1000,1000)]
debug_print('\nExpression:', expr)
with TempdirBlitz():
for size in all_sizes:
arg_dict = {}
for arg in arg_list:
arg_dict[arg] = random.normal(0,1,size).astype(typ)
# set imag part of complex values to non-zero value
try:
arg_dict[arg].imag = arg_dict[arg].real
except (TypeError, AttributeError):
# Real-valued arrays have no settable imaginary part.
pass
debug_print('Run:', size,typ)
standard,compiled = self.generic_check(expr,arg_dict,typ,size)
try:
speed_up = standard/compiled
except ZeroDivisionError:
speed_up = -1.
debug_print("1st run(numpy,compiled,speed up): %3.4f, %3.4f, "
"%3.4f" % (standard,compiled,speed_up))
standard,compiled = self.generic_check(expr,arg_dict,typ,size)
try:
speed_up = standard/compiled
except ZeroDivisionError:
speed_up = -1.
debug_print("2nd run(numpy,compiled,speed up): %3.4f, %3.4f, "
"%3.4f" % (standard,compiled,speed_up))
@dec.slow
def test_5point_avg_2d_float(self):
expr = "result[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1]" \
"+ b[1:-1,2:] + b[1:-1,:-2]) / 5."
self.generic_2d(expr,float32)
@dec.slow
def test_5point_avg_2d_double(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=BlitzWarning)
expr = "result[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1]" \
"+ b[1:-1,2:] + b[1:-1,:-2]) / 5."
self.generic_2d(expr,float64)
@dec.slow
def _check_5point_avg_2d_complex_float(self):
""" Note: THIS TEST is KNOWN TO FAIL ON GCC 3.x.
It will not adversely affect 99.99 percent of weave
result[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1]
+ b[1:-1,2:] + b[1:-1,:-2]) / 5.
Note: THIS TEST is KNOWN TO FAIL ON GCC 3.x. The reason is that
5. is a double and b is a complex32. blitz doesn't know
how to handle complex32/double. See:
http://www.oonumerics.org/MailArchives/blitz-support/msg00541.php
Unfortunately, the fix isn't trivial. Instead of fixing it, I
prefer to wait until we replace blitz++ with Pat Miller's code
that doesn't rely on blitz..
"""
expr = "result[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1]" \
"+ b[1:-1,2:] + b[1:-1,:-2]) / 5."
self.generic_2d(expr,complex64)
@dec.slow
def test_5point_avg_2d_complex_double(self):
expr = "result[1:-1,1:-1] = (b[1:-1,1:-1] + b[2:,1:-1] + b[:-2,1:-1]" \
"+ b[1:-1,2:] + b[1:-1,:-2]) / 5."
self.generic_2d(expr,complex128)
@dec.slow
def test_blitz_bug():
# Assignment to arr[i:] used to fail inside blitz expressions.
with TempdirBlitz():
N = 4
expr_buggy = 'arr_blitz_buggy[{0}:] = arr[{0}:]'
expr_not_buggy = 'arr_blitz_not_buggy[{0}:{1}] = arr[{0}:]'
random.seed(7)
arr = random.randn(N)
sh = arr.shape[0]
for lim in [0, 1, 2]:
arr_blitz_buggy = zeros(N)
arr_blitz_not_buggy = zeros(N)
arr_np = zeros(N)
blitz(expr_buggy.format(lim))
blitz(expr_not_buggy.format(lim, 'sh'))
arr_np[lim:] = arr[lim:]
assert_allclose(arr_blitz_buggy, arr_np)
assert_allclose(arr_blitz_not_buggy, arr_np)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
bowang/tensorflow | tensorflow/python/debug/cli/analyzer_cli_test.py | 5 | 76835 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the Analyzer CLI Backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def no_rewrite_session_config():
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
constant_folding=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
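# Turning off model pruning and constant folding keeps the executed graph
# structurally identical to the Python-constructed one, so the node names
# asserted throughout these tests are guaranteed to appear in the partition
# graphs.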
def line_number_above():
return tf_inspect.stack()[1][2] - 1
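# tf_inspect.stack()[1][2] is the caller's current line number; subtracting 1
# therefore yields the line directly above the call site. The test classes
# below use this to record the source line on which each op is constructed.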
def parse_op_and_node(line):
"""Parse a line containing an op node followed by a node name.
For example, if the line is
" [Variable] hidden/weights",
this function will return ("Variable", "hidden/weights")
Args:
line: The line to be parsed, as a str.
Returns:
A tuple (op_type, node_name): the parsed op type and node name, both as
strs.
"""
op_type = line.strip().split(" ")[0].replace("[", "").replace("]", "")
# Not using [-1], to tolerate any other items that might be present after
# the node name.
node_name = line.strip().split(" ")[1]
return op_type, node_name
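# Illustrative example: parse_op_and_node("  [Variable] hidden/weights")
# returns ("Variable", "hidden/weights"); any extra tokens after the node
# name are ignored because token [1] is used rather than [-1].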
def assert_column_header_command_shortcut(tst,
command,
reverse,
node_name_regex,
op_type_regex,
tensor_filter_name):
tst.assertFalse(reverse and "-r" in command)
tst.assertFalse(not op_type_regex and ("-t %s" % op_type_regex) in command)
tst.assertFalse(
not node_name_regex and ("-n %s" % node_name_regex) in command)
tst.assertFalse(
not tensor_filter_name and ("-f %s" % tensor_filter_name) in command)
def assert_listed_tensors(tst,
out,
expected_tensor_names,
expected_op_types,
node_name_regex=None,
op_type_regex=None,
tensor_filter_name=None,
sort_by="timestamp",
reverse=False):
"""Check RichTextLines output for list_tensors commands.
Args:
tst: A test_util.TensorFlowTestCase instance.
out: The RichTextLines object to be checked.
expected_tensor_names: (list of str) Expected tensor names in the list.
expected_op_types: (list of str) Expected op types of the tensors, in the
same order as the expected_tensor_names.
node_name_regex: Optional: node name regex filter.
op_type_regex: Optional: op type regex filter.
tensor_filter_name: Optional: name of the tensor filter.
sort_by: (str) (timestamp | op_type | tensor_name) the field by which the
tensors in the list are sorted.
reverse: (bool) whether the sorting is in reverse (i.e., descending) order.
"""
line_iter = iter(out.lines)
attr_segs = out.font_attr_segs
line_counter = 0
num_tensors = len(expected_tensor_names)
if tensor_filter_name is None:
tst.assertEqual("%d dumped tensor(s):" % num_tensors, next(line_iter))
else:
tst.assertEqual("%d dumped tensor(s) passing filter \"%s\":" %
(num_tensors, tensor_filter_name), next(line_iter))
line_counter += 1
if op_type_regex is not None:
tst.assertEqual("Op type regex filter: \"%s\"" % op_type_regex,
next(line_iter))
line_counter += 1
if node_name_regex is not None:
tst.assertEqual("Node name regex filter: \"%s\"" % node_name_regex,
next(line_iter))
line_counter += 1
tst.assertEqual("", next(line_iter))
line_counter += 1
# Verify the column heads "t (ms)", "Op type" and "Tensor name" are present.
line = next(line_iter)
tst.assertIn("t (ms)", line)
tst.assertIn("Op type", line)
tst.assertIn("Tensor name", line)
# Verify the command shortcuts in the top row.
attr_segs = out.font_attr_segs[line_counter]
attr_seg = attr_segs[0]
tst.assertEqual(0, attr_seg[0])
tst.assertEqual(len("t (ms)"), attr_seg[1])
command = attr_seg[2][0].content
tst.assertIn("-s timestamp", command)
assert_column_header_command_shortcut(
tst, command, reverse, node_name_regex, op_type_regex,
tensor_filter_name)
tst.assertEqual("bold", attr_seg[2][1])
idx0 = line.index("Size")
attr_seg = attr_segs[1]
tst.assertEqual(idx0, attr_seg[0])
tst.assertEqual(idx0 + len("Size (B)"), attr_seg[1])
command = attr_seg[2][0].content
tst.assertIn("-s dump_size", command)
assert_column_header_command_shortcut(tst, command, reverse, node_name_regex,
op_type_regex, tensor_filter_name)
tst.assertEqual("bold", attr_seg[2][1])
idx0 = line.index("Op type")
attr_seg = attr_segs[2]
tst.assertEqual(idx0, attr_seg[0])
tst.assertEqual(idx0 + len("Op type"), attr_seg[1])
command = attr_seg[2][0].content
tst.assertIn("-s op_type", command)
assert_column_header_command_shortcut(
tst, command, reverse, node_name_regex, op_type_regex,
tensor_filter_name)
tst.assertEqual("bold", attr_seg[2][1])
idx0 = line.index("Tensor name")
attr_seg = attr_segs[3]
tst.assertEqual(idx0, attr_seg[0])
tst.assertEqual(idx0 + len("Tensor name"), attr_seg[1])
command = attr_seg[2][0].content
tst.assertIn("-s tensor_name", command)
assert_column_header_command_shortcut(
tst, command, reverse, node_name_regex, op_type_regex,
tensor_filter_name)
tst.assertEqual("bold", attr_seg[2][1])
# Verify the listed tensors and their timestamps.
tensor_timestamps = []
dump_sizes_bytes = []
op_types = []
tensor_names = []
for line in line_iter:
items = line.split(" ")
items = [item for item in items if item]
rel_time = float(items[0][1:-1])
tst.assertGreaterEqual(rel_time, 0.0)
tensor_timestamps.append(rel_time)
dump_sizes_bytes.append(command_parser.parse_readable_size_str(items[1]))
op_types.append(items[2])
tensor_names.append(items[3])
# Verify that the tensors should be listed in ascending order of their
# timestamps.
if sort_by == "timestamp":
sorted_timestamps = sorted(tensor_timestamps)
if reverse:
sorted_timestamps.reverse()
tst.assertEqual(sorted_timestamps, tensor_timestamps)
elif sort_by == "dump_size":
sorted_dump_sizes_bytes = sorted(dump_sizes_bytes)
if reverse:
sorted_dump_sizes_bytes.reverse()
tst.assertEqual(sorted_dump_sizes_bytes, dump_sizes_bytes)
elif sort_by == "op_type":
sorted_op_types = sorted(op_types)
if reverse:
sorted_op_types.reverse()
tst.assertEqual(sorted_op_types, op_types)
elif sort_by == "tensor_name":
sorted_tensor_names = sorted(tensor_names)
if reverse:
sorted_tensor_names.reverse()
tst.assertEqual(sorted_tensor_names, tensor_names)
else:
tst.fail("Invalid value in sort_by: %s" % sort_by)
# Verify that the tensors are all listed.
for tensor_name, op_type in zip(expected_tensor_names, expected_op_types):
tst.assertIn(tensor_name, tensor_names)
index = tensor_names.index(tensor_name)
tst.assertEqual(op_type, op_types[index])
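# The checker above doubles as a line-by-line spec of `lt` output: a
# "%d dumped tensor(s):" header, optional filter lines, a blank line, a
# clickable column-header row ("t (ms)", "Size (B)", "Op type",
# "Tensor name"), then one row per dumped tensor in the requested sort order.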
def assert_node_attribute_lines(tst,
out,
node_name,
op_type,
device,
input_op_type_node_name_pairs,
ctrl_input_op_type_node_name_pairs,
recipient_op_type_node_name_pairs,
ctrl_recipient_op_type_node_name_pairs,
attr_key_val_pairs=None,
num_dumped_tensors=None,
show_stack_trace=False,
stack_trace_available=False):
"""Check RichTextLines output for node_info commands.
Args:
tst: A test_util.TensorFlowTestCase instance.
out: The RichTextLines object to be checked.
node_name: Name of the node.
op_type: Op type of the node, as a str.
device: Name of the device on which the node resides.
input_op_type_node_name_pairs: A list of 2-tuples of op type and node name,
for the (non-control) inputs to the node.
ctrl_input_op_type_node_name_pairs: A list of 2-tuples of op type and node
name, for the control inputs to the node.
recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node
name, for the (non-control) output recipients to the node.
ctrl_recipient_op_type_node_name_pairs: A list of 2-tuples of op type and
node name, for the control output recipients to the node.
attr_key_val_pairs: Optional: attribute key-value pairs of the node, as a
list of 2-tuples.
num_dumped_tensors: Optional: number of tensor dumps from the node.
show_stack_trace: (bool) whether the stack trace of the node's
construction is asserted to be present.
stack_trace_available: (bool) whether Python stack trace is available.
"""
line_iter = iter(out.lines)
tst.assertEqual("Node %s" % node_name, next(line_iter))
tst.assertEqual("", next(line_iter))
tst.assertEqual(" Op: %s" % op_type, next(line_iter))
tst.assertEqual(" Device: %s" % device, next(line_iter))
tst.assertEqual("", next(line_iter))
tst.assertEqual(" %d input(s) + %d control input(s):" %
(len(input_op_type_node_name_pairs),
len(ctrl_input_op_type_node_name_pairs)), next(line_iter))
# Check inputs.
tst.assertEqual(" %d input(s):" % len(input_op_type_node_name_pairs),
next(line_iter))
for op_type, node_name in input_op_type_node_name_pairs:
tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter))
tst.assertEqual("", next(line_iter))
# Check control inputs.
if ctrl_input_op_type_node_name_pairs:
tst.assertEqual(" %d control input(s):" %
len(ctrl_input_op_type_node_name_pairs), next(line_iter))
for op_type, node_name in ctrl_input_op_type_node_name_pairs:
tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter))
tst.assertEqual("", next(line_iter))
tst.assertEqual(" %d recipient(s) + %d control recipient(s):" %
(len(recipient_op_type_node_name_pairs),
len(ctrl_recipient_op_type_node_name_pairs)),
next(line_iter))
# Check recipients, the order of which is not deterministic.
tst.assertEqual(" %d recipient(s):" %
len(recipient_op_type_node_name_pairs), next(line_iter))
t_recs = []
for _ in recipient_op_type_node_name_pairs:
line = next(line_iter)
op_type, node_name = parse_op_and_node(line)
t_recs.append((op_type, node_name))
tst.assertItemsEqual(recipient_op_type_node_name_pairs, t_recs)
# Check control recipients, the order of which is not deterministic.
if ctrl_recipient_op_type_node_name_pairs:
tst.assertEqual("", next(line_iter))
tst.assertEqual(" %d control recipient(s):" %
len(ctrl_recipient_op_type_node_name_pairs),
next(line_iter))
t_ctrl_recs = []
for _ in ctrl_recipient_op_type_node_name_pairs:
line = next(line_iter)
op_type, node_name = parse_op_and_node(line)
t_ctrl_recs.append((op_type, node_name))
tst.assertItemsEqual(ctrl_recipient_op_type_node_name_pairs, t_ctrl_recs)
# The order of multiple attributes can be non-deterministic.
if attr_key_val_pairs:
tst.assertEqual("", next(line_iter))
tst.assertEqual("Node attributes:", next(line_iter))
kv_pairs = []
for _ in attr_key_val_pairs:
key = next(line_iter).strip().replace(":", "")
val = next(line_iter).strip()
kv_pairs.append((key, val))
tst.assertEqual("", next(line_iter))
tst.assertItemsEqual(attr_key_val_pairs, kv_pairs)
if num_dumped_tensors is not None:
tst.assertEqual("%d dumped tensor(s):" % num_dumped_tensors,
next(line_iter))
tst.assertEqual("", next(line_iter))
dump_timestamps_ms = []
for _ in xrange(num_dumped_tensors):
line = next(line_iter)
tst.assertStartsWith(line.strip(), "Slot 0 @ DebugIdentity @")
tst.assertTrue(line.strip().endswith(" ms"))
dump_timestamp_ms = float(line.strip().split(" @ ")[-1].replace("ms", ""))
tst.assertGreaterEqual(dump_timestamp_ms, 0.0)
dump_timestamps_ms.append(dump_timestamp_ms)
tst.assertEqual(sorted(dump_timestamps_ms), dump_timestamps_ms)
if show_stack_trace:
tst.assertEqual("", next(line_iter))
tst.assertEqual("", next(line_iter))
tst.assertEqual("Traceback of node construction:", next(line_iter))
if stack_trace_available:
try:
depth_counter = 0
while True:
for i in range(5):
line = next(line_iter)
if i == 0:
tst.assertEqual(depth_counter, int(line.split(":")[0]))
elif i == 1:
tst.assertStartsWith(line, " Line:")
elif i == 2:
tst.assertStartsWith(line, " Function:")
elif i == 3:
tst.assertStartsWith(line, " Text:")
elif i == 4:
tst.assertEqual("", line)
depth_counter += 1
except StopIteration:
tst.assertEqual(0, i)
else:
tst.assertEqual("(Unavailable because no Python graph has been loaded)",
next(line_iter))
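# Like assert_listed_tensors() above, this consumes out.lines through a single
# iterator, so the assertion sequence is effectively a line-by-line spec of
# the `node_info` output: inputs, control inputs, recipients, attributes,
# dumps and, optionally, the construction stack trace.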
def check_syntax_error_output(tst, out, command_prefix):
"""Check RichTextLines output for valid command prefix but invalid syntax."""
tst.assertEqual([
"Syntax error for command: %s" % command_prefix,
"For help, do \"help %s\"" % command_prefix
], out.lines)
def check_error_output(tst, out, command_prefix, args):
"""Check RichTextLines output from invalid/erroneous commands.
Args:
tst: A test_util.TensorFlowTestCase instance.
out: The RichTextLines object to be checked.
command_prefix: The command prefix of the command that caused the error.
args: The arguments (excluding prefix) of the command that caused the error.
"""
tst.assertGreater(len(out.lines), 2)
tst.assertStartsWith(out.lines[0],
"Error occurred during handling of command: %s %s" %
(command_prefix, " ".join(args)))
def check_main_menu(tst,
out,
list_tensors_enabled=False,
node_info_node_name=None,
print_tensor_node_name=None,
list_inputs_node_name=None,
list_outputs_node_name=None):
"""Check the main menu annotation of an output."""
tst.assertIn(debugger_cli_common.MAIN_MENU_KEY, out.annotations)
menu = out.annotations[debugger_cli_common.MAIN_MENU_KEY]
tst.assertEqual(list_tensors_enabled,
menu.caption_to_item("list_tensors").is_enabled())
menu_item = menu.caption_to_item("node_info")
if node_info_node_name:
tst.assertTrue(menu_item.is_enabled())
tst.assertTrue(menu_item.content.endswith(node_info_node_name))
else:
tst.assertFalse(menu_item.is_enabled())
menu_item = menu.caption_to_item("print_tensor")
if print_tensor_node_name:
tst.assertTrue(menu_item.is_enabled())
tst.assertTrue(menu_item.content.endswith(print_tensor_node_name))
else:
tst.assertFalse(menu_item.is_enabled())
menu_item = menu.caption_to_item("list_inputs")
if list_inputs_node_name:
tst.assertTrue(menu_item.is_enabled())
tst.assertTrue(menu_item.content.endswith(list_inputs_node_name))
else:
tst.assertFalse(menu_item.is_enabled())
menu_item = menu.caption_to_item("list_outputs")
if list_outputs_node_name:
tst.assertTrue(menu_item.is_enabled())
tst.assertTrue(menu_item.content.endswith(list_outputs_node_name))
else:
tst.assertFalse(menu_item.is_enabled())
tst.assertTrue(menu.caption_to_item("run_info").is_enabled())
tst.assertTrue(menu.caption_to_item("help").is_enabled())
def check_menu_item(tst, out, line_index, expected_begin, expected_end,
expected_command):
attr_segs = out.font_attr_segs[line_index]
found_menu_item = False
for begin, end, attribute in attr_segs:
attributes = [attribute] if not isinstance(attribute, list) else attribute
menu_item = [attribute for attribute in attributes if
isinstance(attribute, debugger_cli_common.MenuItem)]
if menu_item:
tst.assertEqual(expected_begin, begin)
tst.assertEqual(expected_end, end)
tst.assertEqual(expected_command, menu_item[0].content)
found_menu_item = True
break
tst.assertTrue(found_menu_item)
def create_analyzer_cli(dump):
"""Create an analyzer CLI.
Args:
dump: A `DebugDumpDir` object to base the analyzer CLI on.
Returns:
1) A `DebugAnalyzer` object created based on `dump`.
2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object
and has the common tfdbg commands, e.g., lt, ni, li, lo, registered.
"""
# Construct the analyzer.
analyzer = analyzer_cli.DebugAnalyzer(dump)
# Construct the handler registry.
registry = debugger_cli_common.CommandHandlerRegistry()
# Register command handlers.
registry.register_command_handler(
"list_tensors",
analyzer.list_tensors,
analyzer.get_help("list_tensors"),
prefix_aliases=["lt"])
registry.register_command_handler(
"node_info",
analyzer.node_info,
analyzer.get_help("node_info"),
prefix_aliases=["ni"])
registry.register_command_handler(
"list_inputs",
analyzer.list_inputs,
analyzer.get_help("list_inputs"),
prefix_aliases=["li"])
registry.register_command_handler(
"list_outputs",
analyzer.list_outputs,
analyzer.get_help("list_outputs"),
prefix_aliases=["lo"])
registry.register_command_handler(
"print_tensor",
analyzer.print_tensor,
analyzer.get_help("print_tensor"),
prefix_aliases=["pt"])
registry.register_command_handler(
"print_source",
analyzer.print_source,
analyzer.get_help("print_source"),
prefix_aliases=["ps"])
registry.register_command_handler(
"list_source",
analyzer.list_source,
analyzer.get_help("list_source"),
prefix_aliases=["ls"])
registry.register_command_handler(
"eval",
analyzer.evaluate_expression,
analyzer.get_help("eval"),
prefix_aliases=["ev"])
return analyzer, registry
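# Typical use in the test classes below (a sketch of the pattern they follow):
#   analyzer, registry = create_analyzer_cli(debug_dump)
#   out = registry.dispatch_command("lt", ["-s", "op_type"])
# where `out` is a debugger_cli_common.RichTextLines whose .lines and
# .font_attr_segs fields are then asserted against.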
class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
cls._dump_root_for_unique = tempfile.mkdtemp()
cls._is_gpu_available = test.is_gpu_available()
if cls._is_gpu_available:
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
cls._curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
cls._sess = session.Session(config=no_rewrite_session_config())
with cls._sess as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
u_name = "simple_mul_add/u"
v_name = "simple_mul_add/v"
u_init = constant_op.constant(u_init_val, shape=[2, 2], name="u_init")
u = variables.Variable(u_init, name=u_name)
cls._u_line_number = line_number_above()
v_init = constant_op.constant(v_init_val, shape=[2, 1], name="v_init")
v = variables.Variable(v_init, name=v_name)
cls._v_line_number = line_number_above()
w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
cls._w_line_number = line_number_above()
x = math_ops.add(w, w, name="simple_mul_add/add")
cls._x_line_number = line_number_above()
a = variables.Variable([1, 3, 3, 7], name="a")
u.initializer.run()
v.initializer.run()
a.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run([x], options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
shutil.rmtree(cls._dump_root_for_unique)
def testMeasureTensorListColumnWidthsGivesRightAnswerForEmptyData(self):
timestamp_col_width, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([]))
self.assertEqual(len("t (ms)") + 1, timestamp_col_width)
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
self.assertEqual(len("Op type") + 1, op_type_col_width)
def testMeasureTensorListColumnWidthsGivesRightAnswerForData(self):
dump = self._debug_dump.dumped_tensor_data[0]
self.assertLess(dump.dump_size_bytes, 1000)
self.assertEqual(
"VariableV2", self._debug_dump.node_op_type(dump.node_name))
_, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([dump]))
# The length of str(dump.dump_size_bytes) is less than the length of
# "Size (B)" (8). So the column width should be determined by the length of
# "Size (B)".
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
# The length of "VariableV2" is greater than the length of "Op type". So the
# column width should be determined by the length of "VariableV2".
self.assertEqual(len("VariableV2") + 1, op_type_col_width)
def testListTensors(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", [])
assert_listed_tensors(self, out, [
"simple_mul_add/u:0", "simple_mul_add/v:0", "simple_mul_add/u/read:0",
"simple_mul_add/v/read:0", "simple_mul_add/matmul:0",
"simple_mul_add/add:0"
], ["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"])
# Check the main menu.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTimeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "timestamp", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="timestamp",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="dump_size")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="dump_size",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsWithInvalidSortByFieldGivesError(self):
out = self._registry.dispatch_command("lt", ["-s", "foobar"])
self.assertIn("ValueError: Unsupported key to sort tensors by: foobar",
out.lines)
def testListTensorsInOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="op_type",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="op_type",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="tensor_name",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="tensor_name",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterByNodeNameRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--node_name_filter", ".*read.*"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
node_name_regex=".*read.*")
out = self._registry.dispatch_command("list_tensors", ["-n", "^read"])
assert_listed_tensors(self, out, [], [], node_name_regex="^read")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByOpTypeRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--op_type_filter", "Identity"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
op_type_regex="Identity")
out = self._registry.dispatch_command("list_tensors",
["-t", "(Add|MatMul)"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0", "simple_mul_add/matmul:0"],
["Add", "MatMul"],
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByNodeNameRegexAndOpTypeRegex(self):
out = self._registry.dispatch_command(
"list_tensors", ["-t", "(Add|MatMul)", "-n", ".*add$"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0"], ["Add"],
node_name_regex=".*add$",
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterNanOrInf(self):
"""Test register and invoke a tensor filter."""
# First, register the filter.
self._analyzer.add_tensor_filter("has_inf_or_nan",
debug_data.has_inf_or_nan)
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-f", "has_inf_or_nan"])
# This TF graph run did not generate any bad numerical values.
assert_listed_tensors(
self, out, [], [], tensor_filter_name="has_inf_or_nan")
# TODO(cais): A test with some actual bad numerical values.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorNonexistentFilter(self):
"""Test attempt to use a nonexistent tensor filter."""
out = self._registry.dispatch_command("lt", ["-f", "foo_filter"])
self.assertEqual(["ERROR: There is no tensor filter named \"foo_filter\"."],
out.lines)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInvalidOptions(self):
out = self._registry.dispatch_command("list_tensors", ["--bar"])
check_syntax_error_output(self, out, "list_tensors")
def testNodeInfoByNodeName(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", [node_name])
recipients = [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")]
assert_node_attribute_lines(self, out, node_name, "MatMul",
self._main_device,
[("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
recipients, [])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
# Verify that the node name is bold in the first line.
self.assertEqual(
[(len(out.lines[0]) - len(node_name), len(out.lines[0]), "bold")],
out.font_attr_segs[0])
def testNodeInfoShowAttributes(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-a", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
attr_key_val_pairs=[("transpose_a", "b: false"),
("transpose_b", "b: false"),
("T", "type: DT_DOUBLE")])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoShowDumps(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-d", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
num_dumped_tensors=1)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 16,
len(out.lines[16]) - len(out.lines[16].strip()),
len(out.lines[16]), "pt %s:0 -n 0" % node_name)
def testNodeInfoShowStackTraceUnavailableIsIndicated(self):
self._debug_dump.set_python_graph(None)
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-t", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
show_stack_trace=True, stack_trace_available=False)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoShowStackTraceAvailableWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-t", node_name])
assert_node_attribute_lines(
self,
out,
node_name,
"MatMul",
self._main_device, [("Identity", "simple_mul_add/u/read"),
("Identity", "simple_mul_add/v/read")], [],
[("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
show_stack_trace=True, stack_trace_available=True)
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoByTensorName(self):
node_name = "simple_mul_add/u/read"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command("node_info", [tensor_name])
assert_node_attribute_lines(self, out, node_name, "Identity",
self._main_device,
[("VariableV2", "simple_mul_add/u")], [],
[("MatMul", "simple_mul_add/matmul")], [])
check_main_menu(
self,
out,
list_tensors_enabled=True,
list_inputs_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testNodeInfoNonexistentNodeName(self):
out = self._registry.dispatch_command("node_info", ["bar"])
self.assertEqual(
["ERROR: There is no node named \"bar\" in the partition graphs"],
out.lines)
# Check color indicating error.
self.assertEqual({0: [(0, 59, cli_shared.COLOR_RED)]}, out.font_attr_segs)
check_main_menu(self, out, list_tensors_enabled=True)
def testPrintTensor(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorHighlightingRanges(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
self.assertEqual([(8, 11, "bold")], out.font_attr_segs[5])
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[[-inf, -5.5], [5.5, inf]]"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([[-inf, -5.5], [5.5, inf]]): "
"1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
self.assertEqual([(9, 11, "bold")], out.font_attr_segs[4])
self.assertNotIn(5, out.font_attr_segs)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorHighlightingRangesAndIncludingNumericSummary(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]", "-s"],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\": " % tensor_name +
"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
" dtype: float64",
" shape: (2, 1)",
"",
"Numeric summary:",
"| - + | total |",
"| 1 1 | 2 |",
"| min max mean std |",
"| -2.0 7.0 2.5 4.5 |",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(10, out.annotations)
self.assertIn(11, out.annotations)
self.assertEqual([(8, 11, "bold")], out.font_attr_segs[11])
def testPrintTensorWithSlicing(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name + "[1, :]"], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity[1, :]\":" % tensor_name, " dtype: float64",
" shape: (1,)", "", "array([-2.])"
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorInvalidSlicingString(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name + "[1, foo()]"], screen_info={"cols": 80})
self.assertEqual("Error occurred during handling of command: print_tensor "
+ tensor_name + "[1, foo()]:", out.lines[0])
self.assertEqual("ValueError: Invalid tensor-slicing string.",
out.lines[-2])
def testPrintTensorValidExplicitNumber(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-n", "0"], screen_info={"cols": 80})
self.assertEqual([
"Tensor \"%s:DebugIdentity\":" % tensor_name,
" dtype: float64",
" shape: (2, 1)",
"",
"array([[ 7.],",
" [-2.]])",
], out.lines)
self.assertIn("tensor_metadata", out.annotations)
self.assertIn(4, out.annotations)
self.assertIn(5, out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorInvalidExplicitNumber(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"print_tensor", [tensor_name, "-n", "1"], screen_info={"cols": 80})
self.assertEqual([
"ERROR: Invalid number (1) for tensor simple_mul_add/matmul:0, "
"which generated one dump."
], out.lines)
self.assertNotIn("tensor_metadata", out.annotations)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorMissingOutputSlotLeadsToOnlyDumpedTensorPrinted(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("print_tensor", [node_name])
self.assertEqual([
"Tensor \"%s:0:DebugIdentity\":" % node_name, " dtype: float64",
" shape: (2, 1)", "", "array([[ 7.],", " [-2.]])"
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
list_inputs_node_name=node_name,
list_outputs_node_name=node_name)
def testPrintTensorNonexistentNodeName(self):
out = self._registry.dispatch_command(
"print_tensor", ["simple_mul_add/matmul/foo:0"])
self.assertEqual([
"ERROR: Node \"simple_mul_add/matmul/foo\" does not exist in partition "
"graphs"
], out.lines)
check_main_menu(self, out, list_tensors_enabled=True)
def testEvalExpression(self):
node_name = "simple_mul_add/matmul"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command(
"eval", ["np.matmul(`%s`, `%s`.T)" % (tensor_name, tensor_name)],
screen_info={"cols": 80})
self.assertEqual([
"Tensor \"from eval of expression "
"'np.matmul(`simple_mul_add/matmul:0`, "
"`simple_mul_add/matmul:0`.T)'\":",
" dtype: float64",
" shape: (2, 2)",
"",
"Numeric summary:",
"| - + | total |",
"| 2 2 | 4 |",
"| min max mean std |",
"| -14.0 49.0 6.25 25.7524270701 |",
"",
"array([[ 49., -14.],",
" [-14., 4.]])"], out.lines)
def testAddGetTensorFilterLambda(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
analyzer.add_tensor_filter("foo_filter", lambda x, y: True)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddGetTensorFilterNestedFunction(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
def foo_filter(unused_arg_0, unused_arg_1):
return True
analyzer.add_tensor_filter("foo_filter", foo_filter)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddTensorFilterEmptyName(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
with self.assertRaisesRegexp(ValueError,
"Input argument filter_name cannot be empty."):
analyzer.add_tensor_filter("", lambda datum, tensor: True)
def testAddTensorFilterNonStrName(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
with self.assertRaisesRegexp(
TypeError,
"Input argument filter_name is expected to be str, ""but is not"):
analyzer.add_tensor_filter(1, lambda datum, tensor: True)
def testAddGetTensorFilterNonCallable(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
with self.assertRaisesRegexp(
TypeError, "Input argument filter_callable is expected to be callable, "
"but is not."):
analyzer.add_tensor_filter("foo_filter", "bar")
def testGetNonexistentTensorFilter(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
analyzer.add_tensor_filter("foo_filter", lambda datum, tensor: True)
with self.assertRaisesRegexp(ValueError,
"There is no tensor filter named \"bar\""):
analyzer.get_tensor_filter("bar")
def _findSourceLine(self, annotated_source, line_number):
"""Find line of given line number in annotated source.
Args:
annotated_source: (debugger_cli_common.RichTextLines) the annotated source
line_number: (int) 1-based line number
Returns:
(int) If line_number is found, 0-based line index in
annotated_source.lines. Otherwise, None.
"""
index = None
for i, line in enumerate(annotated_source.lines):
if line.startswith("L%d " % line_number):
index = i
break
return index
def testPrintSourceForOpNamesWholeFileWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source", [self._curr_file_path], screen_info={"cols": 80})
# Verify the annotation of the line that creates u.
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.Variable(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" simple_mul_add/u/Assign",
" simple_mul_add/u/read"],
out.lines[index : index + 4])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
# simple_mul_add/u/Assign is not used in this run because the Variable has
# already been initialized.
self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
self.assertEqual("pt simple_mul_add/u/read",
out.font_attr_segs[index + 3][0][2].content)
# Verify the annotation of the line that creates v.
index = self._findSourceLine(out, self._v_line_number)
self.assertEqual(
["L%d v = variables.Variable(v_init, name=v_name)" %
self._v_line_number,
" simple_mul_add/v"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/v",
out.font_attr_segs[index + 1][0][2].content)
# Verify the annotation of the line that creates w.
index = self._findSourceLine(out, self._w_line_number)
self.assertEqual(
["L%d " % self._w_line_number +
"w = math_ops.matmul(u, v, name=\"simple_mul_add/matmul\")",
" simple_mul_add/matmul"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/matmul",
out.font_attr_segs[index + 1][0][2].content)
# Verify the annotation of the line that creates x.
index = self._findSourceLine(out, self._x_line_number)
self.assertEqual(
["L%d " % self._x_line_number +
"x = math_ops.add(w, w, name=\"simple_mul_add/add\")",
" simple_mul_add/add"],
out.lines[index : index + 2])
self.assertEqual("pt simple_mul_add/add",
out.font_attr_segs[index + 1][0][2].content)
def testPrintSourceForTensorNamesWholeFileWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "--tensors"],
screen_info={"cols": 80})
# Verify the annotation of the line that creates u.
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.Variable(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u/read:0",
" simple_mul_add/u:0"],
out.lines[index : index + 3])
self.assertEqual("pt simple_mul_add/u/read:0",
out.font_attr_segs[index + 1][0][2].content)
self.assertEqual("pt simple_mul_add/u:0",
out.font_attr_segs[index + 2][0][2].content)
def testPrintSourceForOpNamesStartingAtSpecifiedLineWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "-b", "3"],
screen_info={"cols": 80})
self.assertEqual(
2, out.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY])
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.Variable(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" simple_mul_add/u/Assign",
" simple_mul_add/u/read"],
out.lines[index : index + 4])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
# simple_mul_add/u/Assign is not used in this run because the Variable has
# already been initialized.
self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
self.assertEqual("pt simple_mul_add/u/read",
out.font_attr_segs[index + 3][0][2].content)
def testPrintSourceForOpNameSettingMaximumElementCountWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"print_source",
[self._curr_file_path, "-m", "1"],
screen_info={"cols": 80})
index = self._findSourceLine(out, self._u_line_number)
self.assertEqual(
["L%d u = variables.Variable(u_init, name=u_name)" %
self._u_line_number,
" simple_mul_add/u",
" (... Omitted 2 of 3 op(s) ...) +5"],
out.lines[index : index + 3])
self.assertEqual("pt simple_mul_add/u",
out.font_attr_segs[index + 1][0][2].content)
more_elements_command = out.font_attr_segs[index + 2][-1][2].content
self.assertStartsWith(more_elements_command,
"ps %s " % self._curr_file_path)
self.assertIn(" -m 6", more_elements_command)
def testListSourceWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", [])
non_tf_lib_files_start = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("Source file path")][0] + 1
non_tf_lib_files_end = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1
non_tf_lib_files = [
line.split(" ")[0] for line
in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
self.assertIn(self._curr_file_path, non_tf_lib_files)
# Check that the TF library files are marked with special color attribute.
for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):
if not out.lines[i]:
continue
for attr_seg in out.font_attr_segs[i]:
self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
attr_seg[2] == cli_shared.COLOR_GRAY)
def testListSourceWithNodeNameFilterWithMatchesWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", ["-n", ".*/read"])
self.assertStartsWith(out.lines[1], "Node name regex filter: \".*/read\"")
non_tf_lib_files_start = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("Source file path")][0] + 1
non_tf_lib_files_end = [
i for i in xrange(len(out.lines))
if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1
non_tf_lib_files = [
line.split(" ")[0] for line
in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
self.assertIn(self._curr_file_path, non_tf_lib_files)
# Check that the TF library files are marked with special color attribute.
for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):
if not out.lines[i]:
continue
for attr_seg in out.font_attr_segs[i]:
self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
attr_seg[2] == cli_shared.COLOR_GRAY)
def testListSourceWithNodeNameFilterWithNoMatchesWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command("list_source", ["-n", "^$"])
self.assertEqual([
"List of source files that created nodes in this run",
"Node name regex filter: \"^$\"", "",
"[No source file information.]"], out.lines)
def testListSourceWithPathAndNodeNameFiltersWorks(self):
self._debug_dump.set_python_graph(self._sess.graph)
out = self._registry.dispatch_command(
"list_source", ["-p", self._curr_file_path, "-n", ".*read"])
self.assertEqual([
"List of source files that created nodes in this run",
"File path regex filter: \"%s\"" % self._curr_file_path,
"Node name regex filter: \".*read\"", ""], out.lines[:4])
def testListSourceWithCompiledPythonSourceWorks(self):
def fake_list_source_files_against_dump(dump,
path_regex_whitelist=None,
node_name_regex_whitelist=None):
del dump, path_regex_whitelist, node_name_regex_whitelist
return [("compiled_1.pyc", False, 10, 20, 30, 4),
("compiled_2.pyo", False, 10, 20, 30, 5),
("uncompiled.py", False, 10, 20, 30, 6)]
with test.mock.patch.object(
source_utils, "list_source_files_against_dump",
side_effect=fake_list_source_files_against_dump):
out = self._registry.dispatch_command("list_source", [])
self.assertStartsWith(out.lines[4], "compiled_1.pyc")
self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
out.font_attr_segs[4][0])
self.assertStartsWith(out.lines[5], "compiled_2.pyo")
self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
out.font_attr_segs[5][0])
self.assertStartsWith(out.lines[6], "uncompiled.py")
self.assertEqual(0, out.font_attr_segs[6][0][0])
self.assertEqual(13, out.font_attr_segs[6][0][1])
self.assertEqual(cli_shared.COLOR_WHITE, out.font_attr_segs[6][0][2][0])
self.assertEqual("ps uncompiled.py -b 6",
out.font_attr_segs[6][0][2][1].content)
def testListInputInvolvingNodesWithMultipleOutputs(self):
"""List an input tree containing tensors from non-:0 output slot."""
with session.Session(config=no_rewrite_session_config()) as sess:
x = variables.Variable([1, 3, 3, 7], name="x")
_, idx = array_ops.unique(x, name="x_unique")
idx_times_two = math_ops.multiply(idx, 2, name="idx_times_two")
sess.run(x.initializer)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % self._dump_root_for_unique)
run_metadata = config_pb2.RunMetadata()
self.assertAllEqual(
[0, 2, 2, 4],
sess.run(idx_times_two,
options=run_options,
run_metadata=run_metadata))
debug_dump = debug_data.DebugDumpDir(
self._dump_root_for_unique,
partition_graphs=run_metadata.partition_graphs)
_, registry = create_analyzer_cli(debug_dump)
out = registry.dispatch_command("li", ["idx_times_two"])
self.assertEqual(
["Inputs to node \"idx_times_two\" (Depth limit = 1):",
"|- (1) x_unique:1"], out.lines[:2])
class AnalyzerCLIPrintLargeTensorTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
with session.Session(config=no_rewrite_session_config()) as sess:
# 2400 elements should exceed the default threshold (2000).
x = constant_op.constant(np.zeros([300, 8]), name="large_tensors/x")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run(x, options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
# Construct the analyzer and command registry.
cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
def testPrintLargeTensorWithoutAllOption(self):
out = self._registry.dispatch_command(
"print_tensor", ["large_tensors/x:0"], screen_info={"cols": 80})
# Assert that ellipses are present in the tensor value printout.
self.assertIn("...,", out.lines[4])
# 2100 still exceeds 2000.
out = self._registry.dispatch_command(
"print_tensor", ["large_tensors/x:0[:, 0:7]"],
screen_info={"cols": 80})
self.assertIn("...,", out.lines[4])
def testPrintLargeTensorWithAllOption(self):
out = self._registry.dispatch_command(
"print_tensor", ["large_tensors/x:0", "-a"],
screen_info={"cols": 80})
# Assert that ellipses are not present in the tensor value printout.
self.assertNotIn("...,", out.lines[4])
out = self._registry.dispatch_command(
"print_tensor", ["large_tensors/x:0[:, 0:7]", "--all"],
screen_info={"cols": 80})
self.assertNotIn("...,", out.lines[4])
class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
cls._is_gpu_available = test.is_gpu_available()
if cls._is_gpu_available:
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
with session.Session(config=no_rewrite_session_config()) as sess:
x_init_val = np.array([5.0, 3.0])
x_init = constant_op.constant(x_init_val, shape=[2])
x = variables.Variable(x_init, name="control_deps/x")
y = math_ops.add(x, x, name="control_deps/y")
y = control_flow_ops.with_dependencies(
[x], y, name="control_deps/ctrl_dep_y")
z = math_ops.multiply(x, y, name="control_deps/z")
z = control_flow_ops.with_dependencies(
[x, y], z, name="control_deps/ctrl_dep_z")
x.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run(z, options=run_options, run_metadata=run_metadata)
debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
# Construct the analyzer and command handler registry.
_, cls._registry = create_analyzer_cli(debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
def testNodeInfoWithControlDependencies(self):
# Call node_info on a node with control inputs.
out = self._registry.dispatch_command("node_info",
["control_deps/ctrl_dep_y"])
assert_node_attribute_lines(
self, out, "control_deps/ctrl_dep_y", "Identity",
self._main_device, [("Add", "control_deps/y")],
[("VariableV2", "control_deps/x")],
[("Mul", "control_deps/z")],
[("Identity", "control_deps/ctrl_dep_z")])
# Call node info on a node with control recipients.
out = self._registry.dispatch_command("ni", ["control_deps/x"])
assert_node_attribute_lines(self, out, "control_deps/x", "VariableV2",
self._main_device, [], [],
[("Identity", "control_deps/x/read")],
[("Identity", "control_deps/ctrl_dep_y"),
("Identity", "control_deps/ctrl_dep_z")])
# Verify the menu items (command shortcuts) in the output.
check_menu_item(self, out, 10,
len(out.lines[10]) - len("control_deps/x/read"),
len(out.lines[10]), "ni -a -d -t control_deps/x/read")
if out.lines[13].endswith("control_deps/ctrl_dep_y"):
y_line = 13
z_line = 14
else:
y_line = 14
z_line = 13
check_menu_item(self, out, y_line,
len(out.lines[y_line]) - len("control_deps/ctrl_dep_y"),
len(out.lines[y_line]),
"ni -a -d -t control_deps/ctrl_dep_y")
check_menu_item(self, out, z_line,
len(out.lines[z_line]) - len("control_deps/ctrl_dep_z"),
len(out.lines[z_line]),
"ni -a -d -t control_deps/ctrl_dep_z")
def testListInputsNonRecursiveNoControl(self):
"""List inputs non-recursively, without any control inputs."""
# Do not include node op types.
node_name = "control_deps/z"
out = self._registry.dispatch_command("list_inputs", [node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1):" % node_name,
"|- (1) control_deps/x/read", "| |- ...",
"|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
" (d): recursion depth = d."
], out.lines)
# Include node op types.
out = self._registry.dispatch_command("li", ["-t", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1):" % node_name,
"|- (1) [Identity] control_deps/x/read", "| |- ...",
"|- (1) [Identity] control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
" (d): recursion depth = d.", " [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
# Verify that the node name has bold attribute.
self.assertEqual([(16, 16 + len(node_name), "bold")], out.font_attr_segs[0])
# Verify the menu items (command shortcuts) in the output.
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/x/read"),
len(out.lines[1]), "li -c -r control_deps/x/read")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
def testListInputsNonRecursiveNoControlUsingTensorName(self):
"""List inputs using the name of an output tensor of the node."""
# Do not include node op types.
node_name = "control_deps/z"
tensor_name = node_name + ":0"
out = self._registry.dispatch_command("list_inputs", [tensor_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1):" % node_name,
"|- (1) control_deps/x/read", "| |- ...",
"|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
" (d): recursion depth = d."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/x/read"),
len(out.lines[1]), "li -c -r control_deps/x/read")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
def testListInputsNonRecursiveWithControls(self):
"""List inputs non-recursively, with control inputs."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command("li", ["-t", node_name, "-c"])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 1, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z", "| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y", "| |- ...",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 5,
len(out.lines[5]) - len("control_deps/x"),
len(out.lines[5]), "li -c -r control_deps/x")
def testListInputsRecursiveWithControls(self):
"""List inputs recursively, with control inputs."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 20, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z",
"| |- (2) [Identity] control_deps/x/read",
"| | |- (3) [VariableV2] control_deps/x",
"| |- (2) [Identity] control_deps/ctrl_dep_y",
"| |- (3) [Add] control_deps/y",
"| | |- (4) [Identity] control_deps/x/read",
"| | | |- (5) [VariableV2] control_deps/x",
"| | |- (4) [Identity] control_deps/x/read",
"| | |- (5) [VariableV2] control_deps/x",
"| |- (3) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- (2) [Add] control_deps/y",
"| | |- (3) [Identity] control_deps/x/read",
"| | | |- (4) [VariableV2] control_deps/x",
"| | |- (3) [Identity] control_deps/x/read",
"| | |- (4) [VariableV2] control_deps/x",
"| |- (2) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 11,
len(out.lines[11]) - len("control_deps/ctrl_dep_y"),
len(out.lines[11]), "li -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 18,
len(out.lines[18]) - len("control_deps/x"),
len(out.lines[18]), "li -c -r control_deps/x")
def testListInputsRecursiveWithControlsWithDepthLimit(self):
"""List inputs recursively, with control inputs and a depth limit."""
node_name = "control_deps/ctrl_dep_z"
out = self._registry.dispatch_command(
"li", ["-c", "-r", "-t", "-d", "2", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 2, " % node_name +
"control inputs included):", "|- (1) [Mul] control_deps/z",
"| |- (2) [Identity] control_deps/x/read", "| | |- ...",
"| |- (2) [Identity] control_deps/ctrl_dep_y", "| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- (2) [Add] control_deps/y", "| | |- ...",
"| |- (2) (Ctrl) [VariableV2] control_deps/x",
"|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/z"),
len(out.lines[1]), "li -c -r control_deps/z")
check_menu_item(self, out, 10,
len(out.lines[10]) - len("control_deps/x"),
len(out.lines[10]), "li -c -r control_deps/x")
def testListInputsNodeWithoutInputs(self):
"""List the inputs to a node without any input."""
node_name = "control_deps/x"
out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
self.assertEqual([
"Inputs to node \"%s\" (Depth limit = 20, control " % node_name +
"inputs included):", " [None]", "", "Legend:",
" (d): recursion depth = d.", " (Ctrl): Control input.",
" [Op]: Input node has op type Op."
], out.lines)
check_main_menu(
self,
out,
list_tensors_enabled=True,
node_info_node_name=node_name,
print_tensor_node_name=node_name,
list_outputs_node_name=node_name)
def testListInputsNonexistentNode(self):
out = self._registry.dispatch_command(
"list_inputs", ["control_deps/z/foo"])
self.assertEqual([
"ERROR: There is no node named \"control_deps/z/foo\" in the "
"partition graphs"], out.lines)
def testListRecipientsRecursiveWithControlsWithDepthLimit(self):
"""List recipients recursively, with control inputs and a depth limit."""
out = self._registry.dispatch_command(
"lo", ["-c", "-r", "-t", "-d", "1", "control_deps/x"])
self.assertEqual([
"Recipients of node \"control_deps/x\" (Depth limit = 1, control "
"recipients included):",
"|- (1) [Identity] control_deps/x/read",
"| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
"| |- ...",
"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_z",
"", "Legend:", " (d): recursion depth = d.",
" (Ctrl): Control input.",
" [Op]: Input node has op type Op."], out.lines)
check_menu_item(self, out, 1,
len(out.lines[1]) - len("control_deps/x/read"),
len(out.lines[1]), "lo -c -r control_deps/x/read")
check_menu_item(self, out, 3,
len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
len(out.lines[3]), "lo -c -r control_deps/ctrl_dep_y")
check_menu_item(self, out, 5,
len(out.lines[5]) - len("control_deps/ctrl_dep_z"),
len(out.lines[5]), "lo -c -r control_deps/ctrl_dep_z")
# Verify the bold attribute of the node name.
self.assertEqual([(20, 20 + len("control_deps/x"), "bold")],
out.font_attr_segs[0])
class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
with session.Session(config=no_rewrite_session_config()) as sess:
loop_var = constant_op.constant(0, name="while_loop_test/loop_var")
cond = lambda loop_var: math_ops.less(loop_var, 10)
body = lambda loop_var: math_ops.add(loop_var, 1)
while_loop = control_flow_ops.while_loop(
cond, body, [loop_var], parallel_iterations=1)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_url = "file://%s" % cls._dump_root
watch_opts = run_options.debug_options.debug_tensor_watch_opts
# Add debug tensor watch for "while/Identity".
watch = watch_opts.add()
watch.node_name = "while/Identity"
watch.output_slot = 0
watch.debug_ops.append("DebugIdentity")
watch.debug_urls.append(debug_url)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run(while_loop, options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
def testMultipleDumpsPrintTensorNoNumber(self):
output = self._registry.dispatch_command("pt", ["while/Identity:0"])
self.assertEqual("Tensor \"while/Identity:0\" generated 10 dumps:",
output.lines[0])
for i in xrange(10):
self.assertTrue(output.lines[i + 1].startswith("#%d" % i))
self.assertTrue(output.lines[i + 1].endswith(
" ms] while/Identity:0:DebugIdentity"))
self.assertEqual(
"You can use the -n (--number) flag to specify which dump to print.",
output.lines[-3])
self.assertEqual("For example:", output.lines[-2])
self.assertEqual(" print_tensor while/Identity:0 -n 0", output.lines[-1])
def testMultipleDumpsPrintTensorWithNumber(self):
for i in xrange(5):
output = self._registry.dispatch_command(
"pt", ["while/Identity:0", "-n", "%d" % i])
self.assertEqual("Tensor \"while/Identity:0:DebugIdentity (dump #%d)\":" %
i, output.lines[0])
self.assertEqual(" dtype: int32", output.lines[1])
self.assertEqual(" shape: ()", output.lines[2])
self.assertEqual("", output.lines[3])
self.assertTrue(output.lines[4].startswith("array(%d" % i))
self.assertTrue(output.lines[4].endswith(")"))
def testMultipleDumpsPrintTensorInvalidNumber(self):
output = self._registry.dispatch_command("pt",
["while/Identity:0", "-n", "10"])
self.assertEqual([
"ERROR: Specified number (10) exceeds the number of available dumps "
"(10) for tensor while/Identity:0"
], output.lines)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
soldag/home-assistant | tests/components/vera/common.py | 7 | 5752 | """Common code for tests."""
from enum import Enum
from typing import Callable, Dict, NamedTuple, Tuple
import pyvera as pv
from homeassistant import config_entries
from homeassistant.components.vera.const import (
CONF_CONTROLLER,
CONF_LEGACY_UNIQUE_ID,
DOMAIN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock
from tests.common import MockConfigEntry
SetupCallback = Callable[[pv.VeraController], None]
class ControllerData(NamedTuple):
"""Test data about a specific vera controller."""
controller: pv.VeraController
update_callback: Callable
class ComponentData(NamedTuple):
"""Test data about the vera component."""
controller_data: Tuple[ControllerData]
class ConfigSource(Enum):
"""Source of configuration."""
FILE = "file"
CONFIG_FLOW = "config_flow"
CONFIG_ENTRY = "config_entry"
class ControllerConfig(NamedTuple):
"""Test config for mocking a vera controller."""
config: Dict
options: Dict
config_source: ConfigSource
serial_number: str
devices: Tuple[pv.VeraDevice, ...]
scenes: Tuple[pv.VeraScene, ...]
setup_callback: SetupCallback
legacy_entity_unique_id: bool
def new_simple_controller_config(
config: dict = None,
options: dict = None,
config_source=ConfigSource.CONFIG_FLOW,
serial_number="1111",
devices: Tuple[pv.VeraDevice, ...] = (),
scenes: Tuple[pv.VeraScene, ...] = (),
setup_callback: SetupCallback = None,
legacy_entity_unique_id=False,
) -> ControllerConfig:
"""Create simple contorller config."""
return ControllerConfig(
config=config or {CONF_CONTROLLER: "http://127.0.0.1:123"},
options=options,
config_source=config_source,
serial_number=serial_number,
devices=devices,
scenes=scenes,
setup_callback=setup_callback,
legacy_entity_unique_id=legacy_entity_unique_id,
)
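# A minimal usage sketch (hypothetical test code, not part of this module;
# ``device`` and ``component_factory`` are assumed fixtures):
#     config = new_simple_controller_config(
#         devices=(device,), config_source=ConfigSource.CONFIG_ENTRY)
#     component_data = await component_factory.configure_component(
#         hass, controller_config=config)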
class ComponentFactory:
"""Factory class."""
def __init__(self, vera_controller_class_mock):
"""Initialize the factory."""
self.vera_controller_class_mock = vera_controller_class_mock
async def configure_component(
self,
hass: HomeAssistant,
controller_config: ControllerConfig = None,
controller_configs: Tuple[ControllerConfig] = (),
) -> ComponentData:
"""Configure the component with multiple specific mock data."""
configs = list(controller_configs)
if controller_config:
configs.append(controller_config)
return ComponentData(
controller_data=tuple(
[
await self._configure_component(hass, controller_config)
for controller_config in configs
]
)
)
async def _configure_component(
self, hass: HomeAssistant, controller_config: ControllerConfig
) -> ControllerData:
"""Configure the component with specific mock data."""
component_config = {
**(controller_config.config or {}),
**(controller_config.options or {}),
}
if controller_config.legacy_entity_unique_id:
component_config[CONF_LEGACY_UNIQUE_ID] = True
controller = MagicMock(spec=pv.VeraController) # type: pv.VeraController
controller.base_url = component_config.get(CONF_CONTROLLER)
controller.register = MagicMock()
controller.start = MagicMock()
controller.stop = MagicMock()
controller.refresh_data = MagicMock()
controller.temperature_units = "C"
controller.serial_number = controller_config.serial_number
controller.get_devices = MagicMock(return_value=controller_config.devices)
controller.get_scenes = MagicMock(return_value=controller_config.scenes)
for vera_obj in controller.get_devices() + controller.get_scenes():
vera_obj.vera_controller = controller
controller.get_devices.reset_mock()
controller.get_scenes.reset_mock()
if controller_config.setup_callback:
controller_config.setup_callback(controller)
self.vera_controller_class_mock.return_value = controller
hass_config = {}
# Setup component through config file import.
if controller_config.config_source == ConfigSource.FILE:
hass_config[DOMAIN] = component_config
# Setup Home Assistant.
assert await async_setup_component(hass, DOMAIN, hass_config)
await hass.async_block_till_done()
# Setup component through config flow.
if controller_config.config_source == ConfigSource.CONFIG_FLOW:
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
data=component_config,
)
await hass.async_block_till_done()
# Setup component directly from config entry.
if controller_config.config_source == ConfigSource.CONFIG_ENTRY:
entry = MockConfigEntry(
domain=DOMAIN,
data=controller_config.config,
options=controller_config.options,
unique_id="12345",
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
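        # controller.register is a MagicMock; recover the callback from the
        # first register(...) call (its second positional argument) so tests
        # can simulate device updates later.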
update_callback = (
controller.register.call_args_list[0][0][1]
if controller.register.call_args_list
else None
)
return ControllerData(controller=controller, update_callback=update_callback)
| apache-2.0 |
mikedingjan/wagtail | wagtail/images/views/images.py | 1 | 10523 | import os
from django.core.paginator import Paginator
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from wagtail.admin import messages
from wagtail.admin.auth import PermissionPolicyChecker, permission_denied
from wagtail.admin.forms.search import SearchForm
from wagtail.admin.models import popular_tags_for_model
from wagtail.core.models import Collection, Site
from wagtail.images import get_image_model
from wagtail.images.exceptions import InvalidFilterSpecError
from wagtail.images.forms import URLGeneratorForm, get_image_form
from wagtail.images.models import Filter, SourceImageIOError
from wagtail.images.permissions import permission_policy
from wagtail.images.views.serve import generate_signature
from wagtail.search import index as search_index
permission_checker = PermissionPolicyChecker(permission_policy)
@permission_checker.require_any('add', 'change', 'delete')
@vary_on_headers('X-Requested-With')
def index(request):
Image = get_image_model()
# Get images (filtered by user permission)
images = permission_policy.instances_user_has_any_permission_for(
request.user, ['change', 'delete']
).order_by('-created_at')
# Search
query_string = None
if 'q' in request.GET:
form = SearchForm(request.GET, placeholder=_("Search images"))
if form.is_valid():
query_string = form.cleaned_data['q']
images = images.search(query_string)
else:
form = SearchForm(placeholder=_("Search images"))
# Filter by collection
current_collection = None
collection_id = request.GET.get('collection_id')
if collection_id:
try:
current_collection = Collection.objects.get(id=collection_id)
images = images.filter(collection=current_collection)
except (ValueError, Collection.DoesNotExist):
pass
paginator = Paginator(images, per_page=20)
images = paginator.get_page(request.GET.get('p'))
collections = permission_policy.collections_user_has_any_permission_for(
request.user, ['add', 'change']
)
if len(collections) < 2:
collections = None
else:
collections = Collection.order_for_display(collections)
# Create response
if request.is_ajax():
return render(request, 'wagtailimages/images/results.html', {
'images': images,
'query_string': query_string,
'is_searching': bool(query_string),
})
else:
return render(request, 'wagtailimages/images/index.html', {
'images': images,
'query_string': query_string,
'is_searching': bool(query_string),
'search_form': form,
'popular_tags': popular_tags_for_model(Image),
'collections': collections,
'current_collection': current_collection,
'user_can_add': permission_policy.user_has_permission(request.user, 'add'),
})
@permission_checker.require('change')
def edit(request, image_id):
Image = get_image_model()
ImageForm = get_image_form(Image)
image = get_object_or_404(Image, id=image_id)
if not permission_policy.user_has_permission_for_instance(request.user, 'change', image):
return permission_denied(request)
if request.method == 'POST':
original_file = image.file
form = ImageForm(request.POST, request.FILES, instance=image, user=request.user)
if form.is_valid():
if 'file' in form.changed_data:
# Set new image file size
image.file_size = image.file.size
# Set new image file hash
image.file.seek(0)
image._set_file_hash(image.file.read())
image.file.seek(0)
form.save()
if 'file' in form.changed_data:
# if providing a new image file, delete the old one and all renditions.
# NB Doing this via original_file.delete() clears the file field,
# which definitely isn't what we want...
original_file.storage.delete(original_file.name)
image.renditions.all().delete()
# Reindex the image to make sure all tags are indexed
search_index.insert_or_update_object(image)
messages.success(request, _("Image '{0}' updated.").format(image.title), buttons=[
messages.button(reverse('wagtailimages:edit', args=(image.id,)), _('Edit again'))
])
return redirect('wagtailimages:index')
else:
messages.error(request, _("The image could not be saved due to errors."))
else:
form = ImageForm(instance=image, user=request.user)
# Check if we should enable the frontend url generator
try:
reverse('wagtailimages_serve', args=('foo', '1', 'bar'))
url_generator_enabled = True
except NoReverseMatch:
url_generator_enabled = False
if image.is_stored_locally():
# Give error if image file doesn't exist
if not os.path.isfile(image.file.path):
messages.error(request, _(
"The source image file could not be found. Please change the source or delete the image."
            ), buttons=[
messages.button(reverse('wagtailimages:delete', args=(image.id,)), _('Delete'))
])
try:
filesize = image.get_file_size()
except SourceImageIOError:
filesize = None
return render(request, "wagtailimages/images/edit.html", {
'image': image,
'form': form,
'url_generator_enabled': url_generator_enabled,
'filesize': filesize,
'user_can_delete': permission_policy.user_has_permission_for_instance(
request.user, 'delete', image
),
})
def url_generator(request, image_id):
image = get_object_or_404(get_image_model(), id=image_id)
if not permission_policy.user_has_permission_for_instance(request.user, 'change', image):
return permission_denied(request)
form = URLGeneratorForm(initial={
'filter_method': 'original',
'width': image.width,
'height': image.height,
})
return render(request, "wagtailimages/images/url_generator.html", {
'image': image,
'form': form,
})
def generate_url(request, image_id, filter_spec):
# Get the image
Image = get_image_model()
try:
image = Image.objects.get(id=image_id)
except Image.DoesNotExist:
return JsonResponse({
'error': "Cannot find image."
}, status=404)
# Check if this user has edit permission on this image
if not permission_policy.user_has_permission_for_instance(request.user, 'change', image):
return JsonResponse({
'error': "You do not have permission to generate a URL for this image."
}, status=403)
    # Parse the filter spec to make sure it's valid
try:
Filter(spec=filter_spec).operations
except InvalidFilterSpecError:
return JsonResponse({
'error': "Invalid filter spec."
}, status=400)
# Generate url
signature = generate_signature(image_id, filter_spec)
url = reverse('wagtailimages_serve', args=(signature, image_id, filter_spec))
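    # The generated path takes the form <prefix>/<signature>/<image_id>/
    # <filter_spec>/, where <prefix> depends on where ``wagtailimages_serve``
    # is mounted in the project's URL configuration.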
# Get site root url
try:
site_root_url = Site.objects.get(is_default_site=True).root_url
except Site.DoesNotExist:
site_root_url = Site.objects.first().root_url
# Generate preview url
preview_url = reverse('wagtailimages:preview', args=(image_id, filter_spec))
return JsonResponse({'url': site_root_url + url, 'preview_url': preview_url}, status=200)
def preview(request, image_id, filter_spec):
image = get_object_or_404(get_image_model(), id=image_id)
try:
response = HttpResponse()
image = Filter(spec=filter_spec).run(image, response)
response['Content-Type'] = 'image/' + image.format_name
return response
except InvalidFilterSpecError:
return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
@permission_checker.require('delete')
def delete(request, image_id):
image = get_object_or_404(get_image_model(), id=image_id)
if not permission_policy.user_has_permission_for_instance(request.user, 'delete', image):
return permission_denied(request)
if request.method == 'POST':
image.delete()
messages.success(request, _("Image '{0}' deleted.").format(image.title))
return redirect('wagtailimages:index')
return render(request, "wagtailimages/images/confirm_delete.html", {
'image': image,
})
@permission_checker.require('add')
def add(request):
ImageModel = get_image_model()
ImageForm = get_image_form(ImageModel)
if request.method == 'POST':
image = ImageModel(uploaded_by_user=request.user)
form = ImageForm(request.POST, request.FILES, instance=image, user=request.user)
if form.is_valid():
# Set image file size
image.file_size = image.file.size
# Set image file hash
image.file.seek(0)
image._set_file_hash(image.file.read())
image.file.seek(0)
form.save()
# Reindex the image to make sure all tags are indexed
search_index.insert_or_update_object(image)
messages.success(request, _("Image '{0}' added.").format(image.title), buttons=[
messages.button(reverse('wagtailimages:edit', args=(image.id,)), _('Edit'))
])
return redirect('wagtailimages:index')
else:
messages.error(request, _("The image could not be created due to errors."))
else:
form = ImageForm(user=request.user)
return render(request, "wagtailimages/images/add.html", {
'form': form,
})
def usage(request, image_id):
image = get_object_or_404(get_image_model(), id=image_id)
paginator = Paginator(image.get_usage(), per_page=20)
used_by = paginator.get_page(request.GET.get('p'))
return render(request, "wagtailimages/images/usage.html", {
'image': image,
'used_by': used_by
})
| bsd-3-clause |
hopeall/odoo | addons/subscription/subscription.py | 337 | 8906 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# TODO:
# Error treatment: exception, request, ... -> send request to user_id
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
class subscription_document(osv.osv):
_name = "subscription.document"
_description = "Subscription Document"
_columns = {
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the subscription document without removing it."),
'model': fields.many2one('ir.model', 'Object', required=True),
'field_ids': fields.one2many('subscription.document.fields', 'document_id', 'Fields', copy=True)
}
_defaults = {
'active' : lambda *a: True,
}
class subscription_document_fields(osv.osv):
_name = "subscription.document.fields"
_description = "Subscription Document Fields"
_rec_name = 'field'
_columns = {
'field': fields.many2one('ir.model.fields', 'Field', domain="[('model_id', '=', parent.model)]", required=True),
        'value': fields.selection([('false','False'),('date','Current Date')], 'Default Value', size=40, help="Default value applied to the field when a new document is generated."),
'document_id': fields.many2one('subscription.document', 'Subscription Document', ondelete='cascade'),
}
_defaults = {}
def _get_document_types(self, cr, uid, context=None):
cr.execute('select m.model, s.name from subscription_document s, ir_model m WHERE s.model = m.id order by s.name')
return cr.fetchall()
class subscription_subscription(osv.osv):
_name = "subscription.subscription"
_description = "Subscription"
_columns = {
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the subscription without removing it."),
'partner_id': fields.many2one('res.partner', 'Partner'),
'notes': fields.text('Internal Notes'),
'user_id': fields.many2one('res.users', 'User', required=True),
'interval_number': fields.integer('Interval Qty'),
'interval_type': fields.selection([('days', 'Days'), ('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
'exec_init': fields.integer('Number of documents'),
'date_init': fields.datetime('First Date'),
'state': fields.selection([('draft','Draft'),('running','Running'),('done','Done')], 'Status', copy=False),
        'doc_source': fields.reference('Source Document', required=True, selection=_get_document_types, size=128, help="The user can choose the source document from which new documents are created"),
'doc_lines': fields.one2many('subscription.subscription.history', 'subscription_id', 'Documents created', readonly=True),
'cron_id': fields.many2one('ir.cron', 'Cron Job', help="Scheduler which runs on subscription", states={'running':[('readonly',True)], 'done':[('readonly',True)]}),
'note': fields.text('Notes', help="Description or Summary of Subscription"),
}
_defaults = {
'date_init': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'user_id': lambda obj,cr,uid,context: uid,
'active': lambda *a: True,
'interval_number': lambda *a: 1,
'interval_type': lambda *a: 'months',
'doc_source': lambda *a: False,
'state': lambda *a: 'draft'
}
def _auto_end(self, cr, context=None):
super(subscription_subscription, self)._auto_end(cr, context=context)
# drop the FK from subscription to ir.cron, as it would cause deadlocks
# during cron job execution. When model_copy() tries to write() on the subscription,
# it has to wait for an ExclusiveLock on the cron job record, but the latter
# is locked by the cron system for the duration of the job!
# FIXME: the subscription module should be reviewed to simplify the scheduling process
# and to use a unique cron job for all subscriptions, so that it never needs to
# be updated during its execution.
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (self._table, '%s_cron_id_fkey' % self._table))
def set_process(self, cr, uid, ids, context=None):
for row in self.read(cr, uid, ids, context=context):
mapping = {'name':'name','interval_number':'interval_number','interval_type':'interval_type','exec_init':'numbercall','date_init':'nextcall'}
res = {'model':'subscription.subscription', 'args': repr([[row['id']]]), 'function':'model_copy', 'priority':6, 'user_id':row['user_id'] and row['user_id'][0]}
for key,value in mapping.items():
res[value] = row[key]
id = self.pool.get('ir.cron').create(cr, uid, res)
self.write(cr, uid, [row['id']], {'cron_id':id, 'state':'running'})
return True
def model_copy(self, cr, uid, ids, context=None):
for row in self.read(cr, uid, ids, context=context):
if not row.get('cron_id',False):
continue
cron_ids = [row['cron_id'][0]]
remaining = self.pool.get('ir.cron').read(cr, uid, cron_ids, ['numbercall'])[0]['numbercall']
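            # doc_source is a reference field, stored as a "model.name,id"
            # string (see the field definition above); split it to resolve
            # the record that will be copied.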
try:
(model_name, id) = row['doc_source'].split(',')
id = int(id)
model = self.pool[model_name]
            except Exception:
raise osv.except_osv(_('Wrong Source Document!'), _('Please provide another source document.\nThis one does not exist!'))
default = {'state':'draft'}
doc_obj = self.pool.get('subscription.document')
document_ids = doc_obj.search(cr, uid, [('model.model','=',model_name)])
doc = doc_obj.browse(cr, uid, document_ids)[0]
for f in doc.field_ids:
if f.value=='date':
value = time.strftime('%Y-%m-%d')
else:
value = False
default[f.field.name] = value
state = 'running'
            # If there was only one remaining document to generate,
            # the subscription is over and we mark it as done.
if remaining == 1:
state = 'done'
id = self.pool[model_name].copy(cr, uid, id, default, context)
self.pool.get('subscription.subscription.history').create(cr, uid, {'subscription_id': row['id'], 'date':time.strftime('%Y-%m-%d %H:%M:%S'), 'document_id': model_name+','+str(id)})
self.write(cr, uid, [row['id']], {'state':state})
return True
def unlink(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context or {}):
if record.state=="running":
raise osv.except_osv(_('Error!'),_('You cannot delete an active subscription!'))
return super(subscription_subscription, self).unlink(cr, uid, ids, context)
def set_done(self, cr, uid, ids, context=None):
res = self.read(cr,uid, ids, ['cron_id'])
ids2 = [x['cron_id'][0] for x in res if x['id']]
self.pool.get('ir.cron').write(cr, uid, ids2, {'active':False})
self.write(cr, uid, ids, {'state':'done'})
return True
def set_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'draft'})
return True
class subscription_subscription_history(osv.osv):
_name = "subscription.subscription.history"
_description = "Subscription history"
_rec_name = 'date'
_columns = {
'date': fields.datetime('Date'),
'subscription_id': fields.many2one('subscription.subscription', 'Subscription', ondelete='cascade'),
'document_id': fields.reference('Source Document', required=True, selection=_get_document_types, size=128),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sgraham/nope | third_party/closure_linter/closure_linter/requireprovidesorter.py | 84 | 11383 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logic for sorting goog.provide and goog.require statements.
Closurized JavaScript files use goog.provide and goog.require statements at the
top of the file to manage dependencies. These statements should be sorted
alphabetically, however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.
RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class RequireProvideSorter(object):
"""Checks for and fixes alphabetization of provide and require statements.
When alphabetizing, comments on the same line or comments directly above a
goog.provide or goog.require statement are associated with that statement and
stay with the statement as it gets sorted.
"""
def CheckProvides(self, token):
"""Checks alphabetization of goog.provide statements.
Iterates over tokens in given token stream, identifies goog.provide tokens,
and checks that they occur in alphabetical order by the object being
provided.
Args:
token: A token in the token stream before any goog.provide tokens.
Returns:
The first provide token in the token stream.
None is returned if all goog.provide statements are already sorted.
"""
provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
sorted_provide_strings = sorted(provide_strings)
if provide_strings != sorted_provide_strings:
return provide_tokens[0]
return None
def CheckRequires(self, token):
"""Checks alphabetization of goog.require statements.
Iterates over tokens in given token stream, identifies goog.require tokens,
and checks that they occur in alphabetical order by the dependency being
required.
Args:
token: A token in the token stream before any goog.require tokens.
Returns:
The first require token in the token stream.
None is returned if all goog.require statements are already sorted.
"""
require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
sorted_require_strings = sorted(require_strings)
if require_strings != sorted_require_strings:
return require_tokens[0]
return None
def FixProvides(self, token):
"""Sorts goog.provide statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.provide'))
def FixRequires(self, token):
"""Sorts goog.require statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.require'))
def _FixProvidesOrRequires(self, tokens):
"""Sorts goog.provide or goog.require statements.
Args:
tokens: A list of goog.provide or goog.require tokens in the order they
appear in the token stream. i.e. the first token in this list must
be the first goog.provide or goog.require token.
"""
strings = self._GetRequireOrProvideTokenStrings(tokens)
sorted_strings = sorted(strings)
# Make a separate pass to remove any blank lines between goog.require/
# goog.provide tokens.
first_token = tokens[0]
last_token = tokens[-1]
i = last_token
while i != first_token and i is not None:
if i.type is Type.BLANK_LINE:
tokenutil.DeleteToken(i)
i = i.previous
# A map from required/provided object name to tokens that make up the line
# it was on, including any comments immediately before it or after it on the
# same line.
tokens_map = self._GetTokensMap(tokens)
# Iterate over the map removing all tokens.
for name in tokens_map:
tokens_to_delete = tokens_map[name]
for i in tokens_to_delete:
tokenutil.DeleteToken(i)
# Save token to rest of file. Sorted token will be inserted before this.
rest_of_file = tokens_map[strings[-1]][-1].next
# Re-add all tokens in the map in alphabetical order.
insert_after = tokens[0].previous
for string in sorted_strings:
for i in tokens_map[string]:
if rest_of_file:
tokenutil.InsertTokenBefore(i, rest_of_file)
else:
tokenutil.InsertTokenAfter(i, insert_after)
insert_after = i
def _GetRequireOrProvideTokens(self, token, token_string):
"""Gets all goog.provide or goog.require tokens in the given token stream.
Args:
token: The first token in the token stream.
token_string: One of 'goog.provide' or 'goog.require' to indicate which
tokens to find.
Returns:
A list of goog.provide or goog.require tokens in the order they appear in
the token stream.
"""
tokens = []
while token:
if token.type == Type.IDENTIFIER:
if token.string == token_string:
tokens.append(token)
elif token.string not in [
'goog.provide', 'goog.require', 'goog.setTestOnly']:
# These 3 identifiers are at the top of the file. So if any other
# identifier is encountered, return.
break
token = token.next
return tokens
def _GetRequireOrProvideTokenStrings(self, tokens):
"""Gets a list of strings corresponding to the given list of tokens.
The string will be the next string in the token stream after each token in
tokens. This is used to find the object being provided/required by a given
goog.provide or goog.require token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A list of object names that are being provided or required by the given
list of tokens. For example:
['object.a', 'object.c', 'object.b']
"""
token_strings = []
for token in tokens:
if not token.is_deleted:
name = tokenutil.GetStringAfterToken(token)
token_strings.append(name)
return token_strings
def _GetTokensMap(self, tokens):
"""Gets a map from object name to tokens associated with that object.
Starting from the goog.provide/goog.require token, searches backwards in the
token stream for any lines that start with a comment. These lines are
associated with the goog.provide/goog.require token. Also associates any
tokens on the same line as the goog.provide/goog.require token with that
token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A dictionary that maps object names to the tokens associated with the
goog.provide or goog.require of that object name. For example:
{
'object.a': [JavaScriptToken, JavaScriptToken, ...],
'object.b': [...]
}
The list of tokens includes any comment lines above the goog.provide or
goog.require statement and everything after the statement on the same
line. For example, all of the following would be associated with
'object.a':
/** @suppress {extraRequire} */
goog.require('object.a'); // Some comment.
"""
tokens_map = {}
for token in tokens:
object_name = tokenutil.GetStringAfterToken(token)
# If the previous line starts with a comment, presume that the comment
# relates to the goog.require or goog.provide and keep them together when
# sorting.
first_token = token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
while (previous_first_token and
previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
first_token = previous_first_token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
first_token)
# Find the last token on the line.
last_token = tokenutil.GetLastTokenInSameLine(token)
all_tokens = self._GetTokenList(first_token, last_token)
tokens_map[object_name] = all_tokens
return tokens_map
def _GetTokenList(self, first_token, last_token):
"""Gets a list of all tokens from first_token to last_token, inclusive.
Args:
first_token: The first token to get.
last_token: The last token to get.
Returns:
A list of all tokens between first_token and last_token, including both
first_token and last_token.
Raises:
Exception: If the token stream ends before last_token is reached.
"""
token_list = []
token = first_token
while token != last_token:
if not token:
raise Exception('ran out of tokens')
token_list.append(token)
token = token.next
token_list.append(last_token)
return token_list
def GetFixedRequireString(self, token):
"""Get fixed/sorted order of goog.require statements.
Args:
token: The first token in the token stream.
Returns:
A string for correct sorted order of goog.require.
"""
return self._GetFixedRequireOrProvideString(
self._GetRequireOrProvideTokens(token, 'goog.require'))
def GetFixedProvideString(self, token):
"""Get fixed/sorted order of goog.provide statements.
Args:
token: The first token in the token stream.
Returns:
A string for correct sorted order of goog.provide.
"""
return self._GetFixedRequireOrProvideString(
self._GetRequireOrProvideTokens(token, 'goog.provide'))
def _GetFixedRequireOrProvideString(self, tokens):
"""Sorts goog.provide or goog.require statements.
Args:
tokens: A list of goog.provide or goog.require tokens in the order they
appear in the token stream. i.e. the first token in this list must
be the first goog.provide or goog.require token.
Returns:
A string for sorted goog.require or goog.provide statements
"""
# A map from required/provided object name to tokens that make up the line
# it was on, including any comments immediately before it or after it on the
# same line.
tokens_map = self._GetTokensMap(tokens)
sorted_strings = sorted(tokens_map.keys())
new_order = ''
for string in sorted_strings:
for i in tokens_map[string]:
new_order += i.string
if i.IsLastInLine():
new_order += '\n'
return new_order
| bsd-3-clause |
mvais/hockeyscripts | HockeyNameGenerator/download_data.py | 1 | 2491 | '''
download_data.py
-------------------
This module is used to download the data necessary to generate random
hockey players.
'''
from urllib.request import urlopen
from collections import defaultdict
import json
import codecs
import os
URL = 'http://api.eliteprospects.com:80/beta/players'
def download_players():
'''
    Download player information from the eliteprospects API.
'''
number_of_players = get_data(URL)['metadata']['totalCount']
names, countries = {}, defaultdict(int)
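    # Page through the API 1,000 players at a time (the limit used in the
    # query string below).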
for offset in range(0, number_of_players, 1000):
players = get_data(URL + "?limit=1000" + "&offset=" + str(offset))
        for player in players['data']:
country = get_country(player)
if not country:
continue
if country not in names:
names[country] = dict([("fname", defaultdict(int)),
("lname", defaultdict(int))])
names[country]["fname"][player["firstName"]] += 1
names[country]["lname"][player["lastName"]] += 1
countries[country] += 1
save_dictionary("player_names.json", names)
save_dictionary("countries.json", countries)
def download_draft():
'''
This is used to download information related to NHL Drafts. This will fetch
past NHL drafts and information of the players drafted.
'''
pass
def get_data(url):
'''
Makes the request to the site to fetch the data.
'''
try:
reader = codecs.getreader("utf-8")
http = urlopen(url)
return json.load(reader(http))
    except Exception as error:
        print("Error fetching data:", error)
        return None
def get_country(player):
'''
Given a player JSON Object fetches the player home country.
'''
if "country" in player:
return player["country"]["name"]
if "birthPlace" in player:
if "country" in player["birthPlace"]:
return player["birthPlace"]["country"]["name"]
return player["birthPlace"]["parentLocality"]["country"]["name"]
return None
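# Illustrative payload shapes handled by get_country() (field names inferred
# from the lookups above; real API responses may carry additional data):
#     {"country": {"name": "Canada"}, ...}
#     {"birthPlace": {"country": {"name": "Sweden"}}, ...}
#     {"birthPlace": {"parentLocality": {"country": {"name": "Finland"}}}, ...}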
def save_dictionary(filename, dictionary):
'''
Converts a python dictionary into a json string and saves it with the given
filename.
'''
if not os.path.isdir("data"):
os.mkdir("data")
json_data = json.dumps(dictionary)
with open(os.path.join('data', filename), 'w') as output:
output.write(json_data)
if __name__ == '__main__':
download_players()
| mit |
pacoqueen/ginn | ginn/formularios/resultados_espesor.py | 1 | 13402 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## resultados_espesor.py - Thickness under a 2 kp load
###################################################################
## NOTES:
##
###################################################################
## Changelog:
## 26 April 2006 -> Started
##
###################################################################
## PLAN: It wouldn't hurt to show statistical values such as the
## mean and standard deviation of the tests.
###################################################################
from ventana import Ventana
from formularios import utils
import pygtk
pygtk.require('2.0')
import gtk, time
from framework import pclases
from utils import _float as float
# XXX
# Since all the tests on rolls are similar, tweaking these global variables
# yields the 5 windows.
# Why not make a single .py and switch these variables depending on the test
# to be inserted? Quite simple: because of CWT. In the future the data type
# of a result (or something similar) may change, and a brand-new window would
# have to be created from scratch.
puntoglade = 'resultados_espesor.glade' # Glade file
claseprueba = pclases.PruebaEspesor # pclases class.
nombrecampo = 'pruebasEspesor' # Field on the "partida".
nombreprueba = 'espesor' # Name of the test (mean of the results) on the partida.
titulo = 'Resultados de espesor bajo 2Kp'
# XXX
class ResultadosEspesor(Ventana):
def __init__(self, objeto = None, usuario = None):
"""
Constructor. objeto puede ser un objeto de pclases con el que
comenzar la ventana (en lugar del primero de la tabla, que es
el que se muestra por defecto).
"""
Ventana.__init__(self, puntoglade, objeto, usuario = usuario)
connections = {'b_salir/clicked': self.salir,
'b_lote/clicked': self.set_partida,
'b_fecha/clicked': self.fecha,
'b_add/clicked': self.add,
'b_drop/clicked': self.drop
}
self.add_connections(connections)
self.activar_widgets(False)
self.inicializar_ventana()
self.partida = None
gtk.main()
    # --------------- Helper functions -----------------------------------
def activar_widgets(self, valor):
self.ws = ('e_numpartida',
'e_nombre',
'e_longitudinal',
'e_transversal',
'e_compresion',
'e_perforacion',
'e_permeabilidad',
'e_fecha',
# XXX
'e_poros',
'e_espesor',
'tv_lotes',
# XXX
'e_resultado',
'tv_pruebas',
'b_add',
'b_drop',
'b_fecha')
for i in self.ws:
self.wids[i].set_sensitive(valor)
def crear_listview(self, tv):
cols = (('Fecha', 'gobject.TYPE_STRING', True, True, True, self.cambiar_fecha),
('Resultado', 'gobject.TYPE_FLOAT', True, True, False, self.cambiar_resultado),
('ID', 'gobject.TYPE_INT64', False, False, False, None))
utils.preparar_listview(tv, cols)
tv.get_column(1).get_cell_renderers()[0].set_property('xalign', 0.1)
def inicializar_ventana(self):
"""
Inicializa los widgets de la ventana.
"""
self.wids['ventana'].set_title(titulo)
self.crear_listview(self.wids['tv_pruebas'])
# XXX
self.crear_treeview(self.wids['tv_lotes'])
def crear_treeview(self, tv):
cols = (('Lote y materia prima consumida', 'gobject.TYPE_STRING', False, True, True, None),
('ID', 'gobject.TYPE_INT64', False, False, False, None))
utils.preparar_treeview(tv, cols)
def rellenar_lotes(self):
model = self.wids['tv_lotes'].get_model()
model.clear()
partida = self.partida
lotes = []
prods = []
for b in partida.balas:
if b.lote not in lotes:
lotes.append(b.lote)
itr = model.append(None, (b.lote.numlote, b.lote.id))
for a in b.articulos:
for c in a.parteDeProduccion.consumos:
if c.productoCompra not in prods:
prods.append(c.productoCompra)
model.append(itr, (c.productoCompra.descripcion, c.id))
# XXX
def rellenar_pruebas(self):
"""
Introduce en el treeview las pruebas del partida seleccionado y
recalcula la característica del partida.
"""
model = self.wids['tv_pruebas'].get_model()
model.clear()
self.calcular_caracteristicas()
pruebas = claseprueba.select(claseprueba.q.partidaID == self.partida.id)
for prueba in pruebas:
model.append((utils.str_fecha(prueba.fecha), prueba.resultado, prueba.id))
def calcular_caracteristicas(self):
"""
Calcula la media de los valores de las pruebas y actualiza la partida.
"""
partida = self.partida
media = 0.0
for p in getattr(partida, nombrecampo):
media += p.resultado
try:
media /= len(getattr(partida, nombrecampo))
except ZeroDivisionError:
media = 0
eval("partida.set(%s = %f)" % (nombreprueba, media))
self.rellenar_info_partida()
def actualizar_ventana(self):
"""
Método que sobreescribe el "actualizar_ventana" que hereda de la clase ventana.
PRECONDICION: self.partida no puede ser None
"""
try:
self.partida.sync()
self.rellenar_widgets()
except pclases.SQLObjectNotFound:
utils.dialogo_info(titulo = 'REGISTRO ELIMINADO', texto = 'El registro ha sido borrado desde otro puesto.')
self.partida = None
self.activar_widgets(self.partida!=None)
    # --------------- Event handlers --------------------------------------
def add(self, w):
if self.partida != None:
fecha = self.wids['e_fecha'].get_text()
if fecha == '':
utils.dialogo_info(titulo = 'SIN FECHA',
texto = 'Debe introducir la fecha del resultado de la prueba.')
return
resultado = self.wids['e_resultado'].get_text()
if resultado == '':
utils.dialogo_info(titulo = 'SIN RESULTADO',
texto = 'Debe introducir el resultado de la prueba.')
return
try:
prueba = claseprueba(fecha = utils.parse_fecha(fecha), # @UnusedVariable
resultado = resultado,
partida = self.partida)
except:
utils.dialogo_info(titulo = 'ERROR',
texto = 'Verifique que ha introducido los datos correctamente.')
self.wids['e_fecha'].set_text(utils.str_fecha(time.localtime()))
self.wids['e_resultado'].set_text('')
self.rellenar_pruebas()
else:
print "WARNING: Se ha intentano añadir una prueba con partida = None"
def drop(self, w):
model, itr = self.wids['tv_pruebas'].get_selection().get_selected()
if itr != None and utils.dialogo(titulo = 'BORRAR PRUEBA', texto = '¿Está seguro?'):
ide = model[itr][-1]
prueba = claseprueba.get(ide)
prueba.destroy(ventana = __file__)
self.rellenar_pruebas()
def set_partida(self, w):
numpartida = utils.dialogo_entrada(titulo = 'Nº PARTIDA',
texto = 'Introduzca número de partida:')
if numpartida != None:
partidas = pclases.Partida.select(pclases.Partida.q.numpartida.contains(numpartida))
if partidas.count() == 0:
utils.dialogo_info(titulo = 'PARTIDA NO ENCONTRADA',
texto = 'No se encontró ninguna partida %s.' % numpartida)
return
elif partidas.count() > 1:
filas = [(l.id, l.numpartida, l.codigo, l.longitudinal, l.transversal, l.compresion, l.perforacion, l.permeabilidad, l.poros, l.espesor) for l in partidas]
idpartida = utils.dialogo_resultado(filas,
titulo = 'SELECCIONE PARTIDA',
cabeceras = ('ID', 'Número', 'Código', 'Longitudinal', 'Transversal', 'CBR', 'Perforación', 'Permeabilidad', 'Poros', 'Espesor'))
if idpartida < 0:
return
partida = pclases.Partida.get(idpartida)
else:
partida = partidas[0]
if len(partida.rollos) == 0:
utils.dialogo_info(titulo = 'PARTIDA VACÍA',
texto = 'La partida no contiene rollos, no puede\nrealizar pruebas sobre una partida vacía.')
self.partida = None
return
self.partida = partida
self.actualizar_ventana()
def rellenar_widgets(self):
self.activar_widgets(self.partida != None)
if self.partida != None:
self.rellenar_info_partida()
self.rellenar_pruebas()
self.wids['e_fecha'].set_text(utils.str_fecha(time.localtime()))
self.wids['e_resultado'].set_text('')
def rellenar_info_partida(self):
"""
        PRECONDITION: self.partida != None and len(self.partida.rollos) > 0
"""
partida = self.partida
self.wids['e_numpartida'].set_text("%d (%s)" % (partida.numpartida, partida.codigo))
self.wids['e_nombre'].set_text(partida.rollos[0].articulos[0].productoVenta.nombre)
self.wids['e_longitudinal'].set_text("%.2f" % partida.longitudinal)
self.wids['e_transversal'].set_text("%.2f" % partida.transversal)
self.wids['e_compresion'].set_text("%.2f" % partida.compresion)
self.wids['e_perforacion'].set_text("%.2f" % partida.perforacion)
self.wids['e_permeabilidad'].set_text("%.2f" % partida.permeabilidad)
# XXX
self.wids['e_poros'].set_text("%.2f" % partida.poros)
self.wids['e_espesor'].set_text("%.2f" % partida.espesor)
self.rellenar_lotes()
# XXX
def fecha(self, w):
self.wids['e_fecha'].set_text(utils.str_fecha(utils.mostrar_calendario(fecha_defecto = self.objeto and self.objeto.fecha or None, padre = self.wids['ventana'])))
def cambiar_fecha(self, cell, path, texto):
model = self.wids['tv_pruebas'].get_model()
prueba = claseprueba.get(model[path][-1])
try:
prueba.fecha = utils.parse_fecha(texto)
except:
utils.dialogo_info('FECHA INCORRECTA',
'La fecha introducida (%s) no es correcta.' % texto)
self.rellenar_pruebas()
def cambiar_resultado(self, tv, path, texto):
model = self.wids['tv_pruebas'].get_model()
prueba = claseprueba.get(model[path][-1])
try:
prueba.resultado = float(texto)
except:
utils.dialogo_info('RESULTADO INCORRECTO',
'El número tecleado (%s) no es correcto.' % texto)
self.rellenar_pruebas()
if __name__=='__main__':
a = ResultadosEspesor()
| gpl-2.0 |
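The fragment above follows a validate-then-construct pattern: parse the user's input inside a try, build the record, and fall back to an error dialog when parsing fails. A minimal standalone sketch of that pattern, assuming hypothetical stand-ins for `utils.parse_fecha` and the `claseprueba` record class:

# Sketch only -- parse_fecha and Prueba are hypothetical stand-ins,
# not the real helpers used in the file above.
import time

def parse_fecha(texto):
    # Accepts dd/mm/yyyy; raises ValueError on bad input.
    return time.strptime(texto, "%d/%m/%Y")

class Prueba(object):
    def __init__(self, fecha, resultado, partida):
        self.fecha, self.resultado, self.partida = fecha, resultado, partida

def add_prueba(fecha_txt, resultado_txt, partida):
    if partida is None:
        print("WARNING: tried to add a prueba with partida = None")
        return None
    try:
        prueba = Prueba(parse_fecha(fecha_txt), float(resultado_txt), partida)
    except (ValueError, TypeError):
        print("Verify that the data was entered correctly.")  # dialogo_info() in the GUI
        return None
    return prueba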
robbiet480/home-assistant | homeassistant/components/tile/device_tracker.py | 1 | 3623 | """Support for Tile device trackers."""
import logging
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.components.device_tracker.const import SOURCE_TYPE_GPS
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from . import DATA_COORDINATOR, DOMAIN, TileEntity
_LOGGER = logging.getLogger(__name__)
ATTR_ALTITUDE = "altitude"
ATTR_CONNECTION_STATE = "connection_state"
ATTR_IS_DEAD = "is_dead"
ATTR_IS_LOST = "is_lost"
ATTR_RING_STATE = "ring_state"
ATTR_VOIP_STATE = "voip_state"
ATTR_TILE_NAME = "tile_name"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Tile device trackers."""
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id]
async_add_entities(
[
TileDeviceTracker(coordinator, tile_uuid, tile)
for tile_uuid, tile in coordinator.data.items()
],
True,
)
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Detect a legacy configuration and import it."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: config[CONF_USERNAME],
CONF_PASSWORD: config[CONF_PASSWORD],
},
)
)
_LOGGER.info(
"Your Tile configuration has been imported into the UI; "
"please remove it from configuration.yaml"
)
return True
class TileDeviceTracker(TileEntity, TrackerEntity):
"""Representation of a network infrastructure device."""
def __init__(self, coordinator, tile_uuid, tile):
"""Initialize."""
super().__init__(coordinator)
self._name = tile["name"]
self._tile = tile
self._tile_uuid = tile_uuid
self._unique_id = f"tile_{tile_uuid}"
@property
def available(self):
"""Return if entity is available."""
return self.coordinator.last_update_success and not self._tile["is_dead"]
@property
def battery_level(self):
"""Return the battery level of the device.
Percentage from 0-100.
"""
return None
@property
def location_accuracy(self):
"""Return the location accuracy of the device.
Value in meters.
"""
return round(
(
self._tile["last_tile_state"]["h_accuracy"]
+ self._tile["last_tile_state"]["v_accuracy"]
)
/ 2
)
@property
def latitude(self) -> float:
"""Return latitude value of the device."""
return self._tile["last_tile_state"]["latitude"]
@property
def longitude(self) -> float:
"""Return longitude value of the device."""
return self._tile["last_tile_state"]["longitude"]
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_GPS
@callback
def _update_from_latest_data(self):
"""Update the entity from the latest data."""
self._tile = self.coordinator.data[self._tile_uuid]
self._attrs[ATTR_ALTITUDE] = self._tile["last_tile_state"]["altitude"]
self._attrs[ATTR_IS_LOST] = self._tile["last_tile_state"]["is_lost"]
self._attrs[ATTR_RING_STATE] = self._tile["last_tile_state"]["ring_state"]
self._attrs[ATTR_VOIP_STATE] = self._tile["last_tile_state"]["voip_state"]
| apache-2.0 |
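`location_accuracy` above is the rounded mean of the horizontal and vertical accuracy fields in the Tile state payload. A quick illustration on a hand-built payload (the field names follow the code above; the values are invented):

last_tile_state = {"h_accuracy": 12.0, "v_accuracy": 20.0}
accuracy = round((last_tile_state["h_accuracy"]
                  + last_tile_state["v_accuracy"]) / 2)
print(accuracy)  # 16.0 under Python 2, 16 under Python 3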
aron-bordin/kivy-designer | designer/uix/code_find.py | 4 | 1554 | from kivy.properties import BooleanProperty, ObjectProperty, StringProperty
from kivy.uix.boxlayout import BoxLayout
class CodeInputFind(BoxLayout):
'''Widget responsible for searches in the Python Code Input
'''
query = StringProperty('')
'''Search query
:data:`query` is a :class:`~kivy.properties.StringProperty`
'''
txt_query = ObjectProperty(None)
'''Search query TextInput
:data:`txt_query` is a :class:`~kivy.properties.ObjectProperty`
'''
use_regex = BooleanProperty(False)
'''Filter search with regex
:data:`use_regex` is a :class:`~kivy.properties.BooleanProperty`
'''
case_sensitive = BooleanProperty(False)
'''Filter search with case sensitive text
:data:`case_sensitive` is a :class:`~kivy.properties.BooleanProperty`
'''
__events__ = ('on_close', 'on_next', 'on_prev', )
def on_touch_down(self, touch):
'''Consume touch events that fall inside the widget
'''
if self.collide_point(*touch.pos):
super(CodeInputFind, self).on_touch_down(touch)
return True
def find_next(self, *args):
'''Search in the opened source code for the search string and updates
the cursor if text is found
'''
pass
def find_prev(self, *args):
'''Search in the opened source code for the search string and updates
the cursor if text is found
'''
pass
def on_close(self, *args):
pass
def on_next(self, *args):
pass
def on_prev(self, *args):
pass
| mit |
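`find_next`/`find_prev` above are stubs. One plausible way to implement a wrapping forward search over a plain text buffer, honoring the `use_regex` and `case_sensitive` properties declared above (a sketch, not the widget's actual implementation):

import re

def find_next(text, query, start=0, use_regex=False, case_sensitive=False):
    # Returns the (begin, end) span of the next match, wrapping around,
    # or None when the query does not occur at all.
    if not query:
        return None
    pattern = re.compile(query if use_regex else re.escape(query),
                         0 if case_sensitive else re.IGNORECASE)
    match = pattern.search(text, start) or pattern.search(text, 0)
    return match.span() if match else None

print(find_next("def foo():\n    return foo", "FOO", start=7))  # (22, 25)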
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.2/django/contrib/gis/utils/layermapping.py | 157 | 27065 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
http://geodjango.org/docs/layermapping.html
"""
import sys
from datetime import date, datetime
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.db import connections, DEFAULT_DB_ALIAS
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import CoordTransform, DataSource, \
OGRException, OGRGeometry, OGRGeomType, SpatialReference
from django.contrib.gis.gdal.field import \
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime
from django.db import models, transaction
from django.contrib.localflavor.us.models import USStateField
# LayerMapping exceptions.
class LayerMapError(Exception): pass
class InvalidString(LayerMapError): pass
class InvalidDecimal(LayerMapError): pass
class InvalidInteger(LayerMapError): pass
class MissingForeignKey(LayerMapError): pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1 : OGRGeomType('MultiPoint'),
2 : OGRGeomType('MultiLineString'),
3 : OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num : OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num : OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num : OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField : OFTInteger,
models.IntegerField : (OFTInteger, OFTReal, OFTString),
models.FloatField : (OFTInteger, OFTReal),
models.DateField : OFTDate,
models.DateTimeField : OFTDateTime,
models.EmailField : OFTString,
models.TimeField : OFTTime,
models.DecimalField : (OFTInteger, OFTReal),
models.CharField : OFTString,
models.SlugField : OFTString,
models.TextField : OFTString,
models.URLField : OFTString,
USStateField : OFTString,
models.XMLField : OFTString,
models.SmallIntegerField : (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField : (OFTInteger, OFTReal, OFTString),
}
# The acceptable transaction modes.
TRANSACTION_MODES = {'autocommit' : transaction.autocommit,
'commit_on_success' : transaction.commit_on_success,
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding=None,
transaction_mode='commit_on_success',
transform=True, unique=None, using=DEFAULT_DB_ALIAS):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, basestring):
self.ds = DataSource(data)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using
self.spatial_backend = connections[using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
# things don't check out beforehand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if self.spatial_backend.mysql:
transform = False
else:
self.geo_field = self.geometry_field()
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
if transaction_mode in self.TRANSACTION_MODES:
self.transaction_decorator = self.TRANSACTION_MODES[transaction_mode]
self.transaction_mode = transaction_mode
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
if using is None:
pass
#### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except OGRException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, (coord_dim == 3 and '(dim=3)') or '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.rel.to
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_field = rel_model._meta.get_field(rel_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if not model_field.__class__ in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, basestring)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if not attr in self.mapping: raise ValueError
elif isinstance(unique, basestring):
# Only a single field passed in.
if unique not in self.mapping: raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
#### Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
val = self.verify_geom(feat.geom, model_field)
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, basestring):
return {self.unique : kwargs[self.unique]}
else:
return dict((fld, kwargs[fld]) for fld in self.unique)
#### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = unicode(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
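# Worked example (illustrative): Decimal('123.45').as_tuple() gives
# digits == (1, 2, 3, 4, 5) and exponent == -2, so d_idx == -2 and
# n_prec == len(digits[:-2]) == 3 digits left of the decimal point.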
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal('A DecimalField with max_digits %d, decimal_places %d must round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec))
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey('No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, fk_kwargs))
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform: g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
#### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception, msg:
raise LayerMapError('Could not translate between the data source and model geometry: %s' % msg)
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use the `get_field_by_name` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
fld, model, direct, m2m = opts.get_field_by_name(self.geom_field)
return fld
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
progress information will be printed every 1000 features processed;
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
# Defining the 'real' save method, utilizing the transaction
# decorator created during initialization.
@self.transaction_decorator
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError, msg:
# Something borked the validation
if strict: raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new: geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
except SystemExit:
raise
except Exception, msg:
if self.transaction_mode == 'autocommit':
# Rolling back the transaction so that other model saves
# will work.
transaction.rollback_unless_managed()
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
# special (e.g, [100:] instead of [90:100]).
if i+1 == n_i: step_slice = slice(beg, None)
else: step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except:
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
| mit |
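A typical use of the class above, following the GeoDjango docs it links to. The model, shapefile path, and mapping dictionary here are hypothetical; only the `LayerMapping(...)` and `save(...)` signatures come from the code above:

from django.contrib.gis.utils import LayerMapping
from myapp.models import WorldBorder  # hypothetical model with a MultiPolygonField

world_mapping = {
    'name': 'NAME',            # model field -> OGR attribute field
    'mpoly': 'MULTIPOLYGON',   # geometry field -> OGR geometry type
}

lm = LayerMapping(WorldBorder, 'data/world_borders.shp', world_mapping,
                  transform=False, encoding='iso-8859-1')
lm.save(strict=True, verbose=True)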
msegado/edx-platform | common/lib/i18n/tests/test_extract_and_generate.py | 121 | 4581 | """
This test tests that i18n extraction (`paver i18n_extract -v`) works properly.
"""
from datetime import datetime, timedelta
import os
import random
import re
import sys
import string
import subprocess
from unittest import TestCase
from mock import patch
from polib import pofile
from pytz import UTC
from i18n import extract
from i18n import generate
from i18n import dummy
from i18n.config import CONFIGURATION
class TestGenerate(TestCase):
"""
Tests functionality of i18n/generate.py
"""
generated_files = ('django-partial.po', 'djangojs-partial.po', 'mako.po')
@classmethod
def setUpClass(cls):
sys.stderr.write(
"\nThis test tests that i18n extraction (`paver i18n_extract`) works properly. "
"If you experience failures, please check that all instances of `gettext` and "
"`ngettext` are used correctly. You can also try running `paver i18n_extract -v` "
"locally for more detail.\n"
)
sys.stderr.write(
"\nExtracting i18n strings and generating dummy translations; "
"this may take a few minutes\n"
)
sys.stderr.flush()
extract.main(verbosity=0)
dummy.main(verbosity=0)
@classmethod
def tearDownClass(cls):
# Clear the Esperanto & RTL directories of any test artifacts
cmd = "git checkout conf/locale/eo conf/locale/rtl"
sys.stderr.write("Cleaning up dummy language directories: " + cmd)
sys.stderr.flush()
returncode = subprocess.call(cmd, shell=True)
assert returncode == 0
super(TestGenerate, cls).tearDownClass()
def setUp(self):
# Subtract 1 second to help comparisons with file-modify time succeed,
# since os.path.getmtime() is not millisecond-accurate
self.start_time = datetime.now(UTC) - timedelta(seconds=1)
def test_merge(self):
"""
Tests merge script on English source files.
"""
filename = os.path.join(CONFIGURATION.source_messages_dir, random_name())
generate.merge(CONFIGURATION.source_locale, target=filename)
self.assertTrue(os.path.exists(filename))
os.remove(filename)
# Patch dummy_locales to not have esperanto present
@patch.object(CONFIGURATION, 'dummy_locales', ['fake2'])
def test_main(self):
"""
Runs generate.main() which should merge source files,
then compile all sources in all configured languages.
Validates output by checking all .mo files in all configured languages.
.mo files should exist, and be recently created (modified
after start of test suite)
"""
generate.main(verbosity=0, strict=False)
for locale in CONFIGURATION.translated_locales:
for filename in ('django', 'djangojs'):
mofile = filename + '.mo'
path = os.path.join(CONFIGURATION.get_messages_dir(locale), mofile)
exists = os.path.exists(path)
self.assertTrue(exists, msg='Missing file in locale %s: %s' % (locale, mofile))
self.assertTrue(
datetime.fromtimestamp(os.path.getmtime(path), UTC) >= self.start_time,
msg='File not recently modified: %s' % path
)
# Segmenting means that the merge headers don't work they way they
# used to, so don't make this check for now. I'm not sure if we'll
# get the merge header back eventually, or delete this code eventually.
# self.assert_merge_headers(locale)
def assert_merge_headers(self, locale):
"""
This is invoked by test_main to ensure that it runs after
calling generate.main().
There should be exactly three merge comment headers
in our merged .po file. This counts them to be sure.
A merge comment looks like this:
# #-#-#-#-# django-partial.po (0.1a) #-#-#-#-#
"""
path = os.path.join(CONFIGURATION.get_messages_dir(locale), 'django.po')
pof = pofile(path)
pattern = re.compile('^#-#-#-#-#', re.M)
match = pattern.findall(pof.header)
self.assertEqual(
len(match),
3,
msg="Found %s (should be 3) merge comments in the header for %s" % (len(match), path)
)
def random_name(size=6):
"""Returns random filename as string, like test-4BZ81W"""
chars = string.ascii_uppercase + string.digits
return 'test-' + ''.join(random.choice(chars) for x in range(size))
| agpl-3.0 |
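The disabled `assert_merge_headers` check above counts header lines beginning with `#-#-#-#-#`. A quick demonstration of that same regex against a fabricated .po header:

import re

header = ("Project-Id-Version: 0.1a\n"
          "#-#-#-#-#  django-partial.po (0.1a)  #-#-#-#-#\n"
          "#-#-#-#-#  djangojs-partial.po (0.1a)  #-#-#-#-#\n"
          "#-#-#-#-#  mako.po (0.1a)  #-#-#-#-#\n")
print(len(re.compile('^#-#-#-#-#', re.M).findall(header)))  # 3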
akosyakov/intellij-community | python/lib/Lib/sets.py | 132 | 19624 | """Classes to represent arbitrary sets (including sets of sets).
This module implements sets using dictionaries whose values are
ignored. The usual operations (union, intersection, deletion, etc.)
are provided as both methods and operators.
Important: sets are not sequences! While they support 'x in s',
'len(s)', and 'for x in s', none of those operations are unique for
sequences; for example, mappings support all three as well. The
characteristic operation for sequences is subscripting with small
integers: s[i], for i in range(len(s)). Sets don't support
subscripting at all. Also, sequences allow multiple occurrences and
their elements have a definite order; sets on the other hand don't
record multiple occurrences and don't remember the order of element
insertion (which is why they don't support s[i]).
The following classes are provided:
BaseSet -- All the operations common to both mutable and immutable
sets. This is an abstract class, not meant to be directly
instantiated.
Set -- Mutable sets, subclass of BaseSet; not hashable.
ImmutableSet -- Immutable sets, subclass of BaseSet; hashable.
An iterable argument is mandatory to create an ImmutableSet.
_TemporarilyImmutableSet -- A wrapper around a Set, hashable,
giving the same hash value as the immutable set equivalent
would have. Do not use this class directly.
Only hashable objects can be added to a Set. In particular, you cannot
really add a Set as an element to another Set; if you try, what is
actually added is an ImmutableSet built from it (it compares equal to
the one you tried adding).
When you ask if `x in y' where x is a Set and y is a Set or
ImmutableSet, x is wrapped into a _TemporarilyImmutableSet z, and
what's tested is actually `z in y'.
"""
# Code history:
#
# - Greg V. Wilson wrote the first version, using a different approach
# to the mutable/immutable problem, and inheriting from dict.
#
# - Alex Martelli modified Greg's version to implement the current
# Set/ImmutableSet approach, and make the data an attribute.
#
# - Guido van Rossum rewrote much of the code, made some API changes,
# and cleaned up the docstrings.
#
# - Raymond Hettinger added a number of speedups and other
# improvements.
from __future__ import generators
try:
from itertools import ifilter, ifilterfalse
except ImportError:
# Code to make the module run under Py2.2
def ifilter(predicate, iterable):
if predicate is None:
def predicate(x):
return x
for x in iterable:
if predicate(x):
yield x
def ifilterfalse(predicate, iterable):
if predicate is None:
def predicate(x):
return x
for x in iterable:
if not predicate(x):
yield x
try:
True, False
except NameError:
True, False = (0==0, 0!=0)
__all__ = ['BaseSet', 'Set', 'ImmutableSet']
class BaseSet(object):
"""Common base class for mutable and immutable sets."""
__slots__ = ['_data']
# Constructor
def __init__(self):
"""This is an abstract class."""
# Don't call this from a concrete subclass!
if self.__class__ is BaseSet:
raise TypeError, ("BaseSet is an abstract class. "
"Use Set or ImmutableSet.")
# Standard protocols: __len__, __repr__, __str__, __iter__
def __len__(self):
"""Return the number of elements of a set."""
return len(self._data)
def __repr__(self):
"""Return string representation of a set.
This looks like 'Set([<list of elements>])'.
"""
return self._repr()
# __str__ is the same as __repr__
__str__ = __repr__
def _repr(self, sorted=False):
elements = self._data.keys()
if sorted:
elements.sort()
return '%s(%r)' % (self.__class__.__name__, elements)
def __iter__(self):
"""Return an iterator over the elements or a set.
This is the keys iterator for the underlying dict.
"""
return self._data.iterkeys()
# Three-way comparison is not supported. However, because __eq__ is
# tried before __cmp__, if Set x == Set y, x.__eq__(y) returns True and
# then cmp(x, y) returns 0 (Python doesn't actually call __cmp__ in this
# case).
def __cmp__(self, other):
raise TypeError, "can't compare sets using cmp()"
# Equality comparisons using the underlying dicts. Mixed-type comparisons
# are allowed here, where Set == z for non-Set z always returns False,
# and Set != z always True. This allows expressions like "x in y" to
# give the expected result when y is a sequence of mixed types, not
# raising a pointless TypeError just because y contains a Set, or x is
# a Set and y contain's a non-set ("in" invokes only __eq__).
# Subtle: it would be nicer if __eq__ and __ne__ could return
# NotImplemented instead of True or False. Then the other comparand
# would get a chance to determine the result, and if the other comparand
# also returned NotImplemented then it would fall back to object address
# comparison (which would always return False for __eq__ and always
# True for __ne__). However, that doesn't work, because this type
# *also* implements __cmp__: if, e.g., __eq__ returns NotImplemented,
# Python tries __cmp__ next, and the __cmp__ here then raises TypeError.
def __eq__(self, other):
if isinstance(other, BaseSet):
return self._data == other._data
else:
return False
def __ne__(self, other):
if isinstance(other, BaseSet):
return self._data != other._data
else:
return True
# Copying operations
def copy(self):
"""Return a shallow copy of a set."""
result = self.__class__()
result._data.update(self._data)
return result
__copy__ = copy # For the copy module
def __deepcopy__(self, memo):
"""Return a deep copy of a set; used by copy module."""
# This pre-creates the result and inserts it in the memo
# early, in case the deep copy recurses into another reference
# to this same set. A set can't be an element of itself, but
# it can certainly contain an object that has a reference to
# itself.
from copy import deepcopy
result = self.__class__()
memo[id(self)] = result
data = result._data
value = True
for elt in self:
data[deepcopy(elt, memo)] = value
return result
# Standard set operations: union, intersection, both differences.
# Each has an operator version (e.g. __or__, invoked with |) and a
# method version (e.g. union).
# Subtle: Each pair requires distinct code so that the outcome is
# correct when the type of other isn't suitable. For example, if
# we did "union = __or__" instead, then Set().union(3) would return
# NotImplemented instead of raising TypeError (albeit that *why* it
# raises TypeError as-is is also a bit subtle).
def __or__(self, other):
"""Return the union of two sets as a new set.
(I.e. all elements that are in either set.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.union(other)
def union(self, other):
"""Return the union of two sets as a new set.
(I.e. all elements that are in either set.)
"""
result = self.__class__(self)
result._update(other)
return result
def __and__(self, other):
"""Return the intersection of two sets as a new set.
(I.e. all elements that are in both sets.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.intersection(other)
def intersection(self, other):
"""Return the intersection of two sets as a new set.
(I.e. all elements that are in both sets.)
"""
if not isinstance(other, BaseSet):
other = Set(other)
if len(self) <= len(other):
little, big = self, other
else:
little, big = other, self
common = ifilter(big._data.has_key, little)
return self.__class__(common)
def __xor__(self, other):
"""Return the symmetric difference of two sets as a new set.
(I.e. all elements that are in exactly one of the sets.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
(I.e. all elements that are in exactly one of the sets.)
"""
result = self.__class__()
data = result._data
value = True
selfdata = self._data
try:
otherdata = other._data
except AttributeError:
otherdata = Set(other)._data
for elt in ifilterfalse(otherdata.has_key, selfdata):
data[elt] = value
for elt in ifilterfalse(selfdata.has_key, otherdata):
data[elt] = value
return result
def __sub__(self, other):
"""Return the difference of two sets as a new Set.
(I.e. all elements that are in this set and not in the other.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
return self.difference(other)
def difference(self, other):
"""Return the difference of two sets as a new Set.
(I.e. all elements that are in this set and not in the other.)
"""
result = self.__class__()
data = result._data
try:
otherdata = other._data
except AttributeError:
otherdata = Set(other)._data
value = True
for elt in ifilterfalse(otherdata.has_key, self):
data[elt] = value
return result
# Membership test
def __contains__(self, element):
"""Report whether an element is a member of a set.
(Called in response to the expression `element in self'.)
"""
try:
return element in self._data
except TypeError:
transform = getattr(element, "__as_temporarily_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
return transform() in self._data
# Subset and superset test
def issubset(self, other):
"""Report whether another set contains this set."""
self._binary_sanity_check(other)
if len(self) > len(other): # Fast check for obvious cases
return False
for elt in ifilterfalse(other._data.has_key, self):
return False
return True
def issuperset(self, other):
"""Report whether this set contains another set."""
self._binary_sanity_check(other)
if len(self) < len(other): # Fast check for obvious cases
return False
for elt in ifilterfalse(self._data.has_key, other):
return False
return True
# Inequality comparisons using the is-subset relation.
__le__ = issubset
__ge__ = issuperset
def __lt__(self, other):
self._binary_sanity_check(other)
return len(self) < len(other) and self.issubset(other)
def __gt__(self, other):
self._binary_sanity_check(other)
return len(self) > len(other) and self.issuperset(other)
# Assorted helpers
def _binary_sanity_check(self, other):
# Check that the other argument to a binary operation is also
# a set, raising a TypeError otherwise.
if not isinstance(other, BaseSet):
raise TypeError, "Binary operation only permitted between sets"
def _compute_hash(self):
# Calculate hash code for a set by xor'ing the hash codes of
# the elements. This ensures that the hash code does not depend
# on the order in which elements are added to the set. This is
# not called __hash__ because a BaseSet should not be hashable;
# only an ImmutableSet is hashable.
result = 0
for elt in self:
result ^= hash(elt)
return result
def _update(self, iterable):
# The main loop for update() and the subclass __init__() methods.
data = self._data
# Use the fast update() method when a dictionary is available.
if isinstance(iterable, BaseSet):
data.update(iterable._data)
return
value = True
if type(iterable) in (list, tuple, xrange):
# Optimized: we know that __iter__() and next() can't
# raise TypeError, so we can move 'try:' out of the loop.
it = iter(iterable)
while True:
try:
for element in it:
data[element] = value
return
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
data[transform()] = value
else:
# Safe: only catch TypeError where intended
for element in iterable:
try:
data[element] = value
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
data[transform()] = value
class ImmutableSet(BaseSet):
"""Immutable set class."""
__slots__ = ['_hashcode']
# BaseSet + hashing
def __init__(self, iterable=None):
"""Construct an immutable set from an optional iterable."""
self._hashcode = None
self._data = {}
if iterable is not None:
self._update(iterable)
def __hash__(self):
if self._hashcode is None:
self._hashcode = self._compute_hash()
return self._hashcode
def __getstate__(self):
return self._data, self._hashcode
def __setstate__(self, state):
self._data, self._hashcode = state
class Set(BaseSet):
""" Mutable set class."""
__slots__ = []
# BaseSet + operations requiring mutability; no hashing
def __init__(self, iterable=None):
"""Construct a set from an optional iterable."""
self._data = {}
if iterable is not None:
self._update(iterable)
def __getstate__(self):
# getstate's results are ignored if it is not
return self._data,
def __setstate__(self, data):
self._data, = data
def __hash__(self):
"""A Set cannot be hashed."""
# We inherit object.__hash__, so we must deny this explicitly
raise TypeError, "Can't hash a Set, only an ImmutableSet."
# In-place union, intersection, differences.
# Subtle: The xyz_update() functions deliberately return None,
# as do all mutating operations on built-in container types.
# The __xyz__ spellings have to return self, though.
def __ior__(self, other):
"""Update a set with the union of itself and another."""
self._binary_sanity_check(other)
self._data.update(other._data)
return self
def union_update(self, other):
"""Update a set with the union of itself and another."""
self._update(other)
def __iand__(self, other):
"""Update a set with the intersection of itself and another."""
self._binary_sanity_check(other)
self._data = (self & other)._data
return self
def intersection_update(self, other):
"""Update a set with the intersection of itself and another."""
if isinstance(other, BaseSet):
self &= other
else:
self._data = (self.intersection(other))._data
def __ixor__(self, other):
"""Update a set with the symmetric difference of itself and another."""
self._binary_sanity_check(other)
self.symmetric_difference_update(other)
return self
def symmetric_difference_update(self, other):
"""Update a set with the symmetric difference of itself and another."""
data = self._data
value = True
if not isinstance(other, BaseSet):
other = Set(other)
if self is other:
self.clear()
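# (When `other is self` the symmetric difference is empty by definition;
# clearing up front also avoids mutating self._data while iterating below.)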
for elt in other:
if elt in data:
del data[elt]
else:
data[elt] = value
def __isub__(self, other):
"""Remove all elements of another set from this set."""
self._binary_sanity_check(other)
self.difference_update(other)
return self
def difference_update(self, other):
"""Remove all elements of another set from this set."""
data = self._data
if not isinstance(other, BaseSet):
other = Set(other)
if self is other:
self.clear()
for elt in ifilter(data.has_key, other):
del data[elt]
# Python dict-like mass mutations: update, clear
def update(self, iterable):
"""Add all values from an iterable (such as a list or file)."""
self._update(iterable)
def clear(self):
"""Remove all elements from this set."""
self._data.clear()
# Single-element mutations: add, remove, discard
def add(self, element):
"""Add an element to a set.
This has no effect if the element is already present.
"""
try:
self._data[element] = True
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
self._data[transform()] = True
def remove(self, element):
"""Remove an element from a set; it must be a member.
If the element is not a member, raise a KeyError.
"""
try:
del self._data[element]
except TypeError:
transform = getattr(element, "__as_temporarily_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
del self._data[transform()]
def discard(self, element):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
"""
try:
self.remove(element)
except KeyError:
pass
def pop(self):
"""Remove and return an arbitrary set element."""
return self._data.popitem()[0]
def __as_immutable__(self):
# Return a copy of self as an immutable set
return ImmutableSet(self)
def __as_temporarily_immutable__(self):
# Return self wrapped in a temporarily immutable set
return _TemporarilyImmutableSet(self)
class _TemporarilyImmutableSet(BaseSet):
# Wrap a mutable set as if it was temporarily immutable.
# This only supplies hashing and equality comparisons.
def __init__(self, set):
self._set = set
self._data = set._data # Needed by ImmutableSet.__eq__()
def __hash__(self):
return self._set._compute_hash()
| apache-2.0 |
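A quick tour of the API defined above: `Set` is mutable and unhashable, `ImmutableSet` is hashable and usable as a dict key, and binary operations return new sets (element order in the reprs is arbitrary):

engineers = Set(['jane', 'john'])
managers = ImmutableSet(['jane'])

print(engineers | managers)           # union, e.g. Set(['jane', 'john'])
print(engineers & managers)           # intersection: Set(['jane'])
print(managers.issubset(engineers))   # True

engineers.add('ruth')                 # mutation is fine on a Set
groups = {managers: 'mgmt'}           # ImmutableSet works as a dict key
try:
    hash(engineers)                   # a Set refuses to hash
except TypeError as e:
    print(e)                          # Can't hash a Set, only an ImmutableSet.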
ronakkhunt/kuma | vendor/packages/pygments/lexers/_scilab_builtins.py | 43 | 52405 | # -*- coding: utf-8 -*-
"""
pygments.lexers._scilab_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the ScilabLexer.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated
commands_kw = (
'abort',
'apropos',
'break',
'case',
'catch',
'continue',
'do',
'else',
'elseif',
'end',
'endfunction',
'for',
'function',
'help',
'if',
'pause',
'quit',
'select',
'then',
'try',
'while',
)
functions_kw = (
'!!_invoke_',
'%H5Object_e',
'%H5Object_fieldnames',
'%H5Object_p',
'%XMLAttr_6',
'%XMLAttr_e',
'%XMLAttr_i_XMLElem',
'%XMLAttr_length',
'%XMLAttr_p',
'%XMLAttr_size',
'%XMLDoc_6',
'%XMLDoc_e',
'%XMLDoc_i_XMLList',
'%XMLDoc_p',
'%XMLElem_6',
'%XMLElem_e',
'%XMLElem_i_XMLDoc',
'%XMLElem_i_XMLElem',
'%XMLElem_i_XMLList',
'%XMLElem_p',
'%XMLList_6',
'%XMLList_e',
'%XMLList_i_XMLElem',
'%XMLList_i_XMLList',
'%XMLList_length',
'%XMLList_p',
'%XMLList_size',
'%XMLNs_6',
'%XMLNs_e',
'%XMLNs_i_XMLElem',
'%XMLNs_p',
'%XMLSet_6',
'%XMLSet_e',
'%XMLSet_length',
'%XMLSet_p',
'%XMLSet_size',
'%XMLValid_p',
'%_EClass_6',
'%_EClass_e',
'%_EClass_p',
'%_EObj_0',
'%_EObj_1__EObj',
'%_EObj_1_b',
'%_EObj_1_c',
'%_EObj_1_i',
'%_EObj_1_s',
'%_EObj_2__EObj',
'%_EObj_2_b',
'%_EObj_2_c',
'%_EObj_2_i',
'%_EObj_2_s',
'%_EObj_3__EObj',
'%_EObj_3_b',
'%_EObj_3_c',
'%_EObj_3_i',
'%_EObj_3_s',
'%_EObj_4__EObj',
'%_EObj_4_b',
'%_EObj_4_c',
'%_EObj_4_i',
'%_EObj_4_s',
'%_EObj_5',
'%_EObj_6',
'%_EObj_a__EObj',
'%_EObj_a_b',
'%_EObj_a_c',
'%_EObj_a_i',
'%_EObj_a_s',
'%_EObj_d__EObj',
'%_EObj_d_b',
'%_EObj_d_c',
'%_EObj_d_i',
'%_EObj_d_s',
'%_EObj_disp',
'%_EObj_e',
'%_EObj_g__EObj',
'%_EObj_g_b',
'%_EObj_g_c',
'%_EObj_g_i',
'%_EObj_g_s',
'%_EObj_h__EObj',
'%_EObj_h_b',
'%_EObj_h_c',
'%_EObj_h_i',
'%_EObj_h_s',
'%_EObj_i__EObj',
'%_EObj_j__EObj',
'%_EObj_j_b',
'%_EObj_j_c',
'%_EObj_j_i',
'%_EObj_j_s',
'%_EObj_k__EObj',
'%_EObj_k_b',
'%_EObj_k_c',
'%_EObj_k_i',
'%_EObj_k_s',
'%_EObj_l__EObj',
'%_EObj_l_b',
'%_EObj_l_c',
'%_EObj_l_i',
'%_EObj_l_s',
'%_EObj_m__EObj',
'%_EObj_m_b',
'%_EObj_m_c',
'%_EObj_m_i',
'%_EObj_m_s',
'%_EObj_n__EObj',
'%_EObj_n_b',
'%_EObj_n_c',
'%_EObj_n_i',
'%_EObj_n_s',
'%_EObj_o__EObj',
'%_EObj_o_b',
'%_EObj_o_c',
'%_EObj_o_i',
'%_EObj_o_s',
'%_EObj_p',
'%_EObj_p__EObj',
'%_EObj_p_b',
'%_EObj_p_c',
'%_EObj_p_i',
'%_EObj_p_s',
'%_EObj_q__EObj',
'%_EObj_q_b',
'%_EObj_q_c',
'%_EObj_q_i',
'%_EObj_q_s',
'%_EObj_r__EObj',
'%_EObj_r_b',
'%_EObj_r_c',
'%_EObj_r_i',
'%_EObj_r_s',
'%_EObj_s__EObj',
'%_EObj_s_b',
'%_EObj_s_c',
'%_EObj_s_i',
'%_EObj_s_s',
'%_EObj_t',
'%_EObj_x__EObj',
'%_EObj_x_b',
'%_EObj_x_c',
'%_EObj_x_i',
'%_EObj_x_s',
'%_EObj_y__EObj',
'%_EObj_y_b',
'%_EObj_y_c',
'%_EObj_y_i',
'%_EObj_y_s',
'%_EObj_z__EObj',
'%_EObj_z_b',
'%_EObj_z_c',
'%_EObj_z_i',
'%_EObj_z_s',
'%_eigs',
'%_load',
'%b_1__EObj',
'%b_2__EObj',
'%b_3__EObj',
'%b_4__EObj',
'%b_a__EObj',
'%b_d__EObj',
'%b_g__EObj',
'%b_h__EObj',
'%b_i_XMLList',
'%b_i__EObj',
'%b_j__EObj',
'%b_k__EObj',
'%b_l__EObj',
'%b_m__EObj',
'%b_n__EObj',
'%b_o__EObj',
'%b_p__EObj',
'%b_q__EObj',
'%b_r__EObj',
'%b_s__EObj',
'%b_x__EObj',
'%b_y__EObj',
'%b_z__EObj',
'%c_1__EObj',
'%c_2__EObj',
'%c_3__EObj',
'%c_4__EObj',
'%c_a__EObj',
'%c_d__EObj',
'%c_g__EObj',
'%c_h__EObj',
'%c_i_XMLAttr',
'%c_i_XMLDoc',
'%c_i_XMLElem',
'%c_i_XMLList',
'%c_i__EObj',
'%c_j__EObj',
'%c_k__EObj',
'%c_l__EObj',
'%c_m__EObj',
'%c_n__EObj',
'%c_o__EObj',
'%c_p__EObj',
'%c_q__EObj',
'%c_r__EObj',
'%c_s__EObj',
'%c_x__EObj',
'%c_y__EObj',
'%c_z__EObj',
'%ce_i_XMLList',
'%fptr_i_XMLList',
'%h_i_XMLList',
'%hm_i_XMLList',
'%i_1__EObj',
'%i_2__EObj',
'%i_3__EObj',
'%i_4__EObj',
'%i_a__EObj',
'%i_abs',
'%i_cumprod',
'%i_cumsum',
'%i_d__EObj',
'%i_diag',
'%i_g__EObj',
'%i_h__EObj',
'%i_i_XMLList',
'%i_i__EObj',
'%i_j__EObj',
'%i_k__EObj',
'%i_l__EObj',
'%i_m__EObj',
'%i_matrix',
'%i_max',
'%i_maxi',
'%i_min',
'%i_mini',
'%i_mput',
'%i_n__EObj',
'%i_o__EObj',
'%i_p',
'%i_p__EObj',
'%i_prod',
'%i_q__EObj',
'%i_r__EObj',
'%i_s__EObj',
'%i_sum',
'%i_tril',
'%i_triu',
'%i_x__EObj',
'%i_y__EObj',
'%i_z__EObj',
'%ip_i_XMLList',
'%l_i_XMLList',
'%l_i__EObj',
'%lss_i_XMLList',
'%mc_i_XMLList',
'%msp_full',
'%msp_i_XMLList',
'%msp_spget',
'%p_i_XMLList',
'%ptr_i_XMLList',
'%r_i_XMLList',
'%s_1__EObj',
'%s_2__EObj',
'%s_3__EObj',
'%s_4__EObj',
'%s_a__EObj',
'%s_d__EObj',
'%s_g__EObj',
'%s_h__EObj',
'%s_i_XMLList',
'%s_i__EObj',
'%s_j__EObj',
'%s_k__EObj',
'%s_l__EObj',
'%s_m__EObj',
'%s_n__EObj',
'%s_o__EObj',
'%s_p__EObj',
'%s_q__EObj',
'%s_r__EObj',
'%s_s__EObj',
'%s_x__EObj',
'%s_y__EObj',
'%s_z__EObj',
'%sp_i_XMLList',
'%spb_i_XMLList',
'%st_i_XMLList',
'Calendar',
'ClipBoard',
'Matplot',
'Matplot1',
'PlaySound',
'TCL_DeleteInterp',
'TCL_DoOneEvent',
'TCL_EvalFile',
'TCL_EvalStr',
'TCL_ExistArray',
'TCL_ExistInterp',
'TCL_ExistVar',
'TCL_GetVar',
'TCL_GetVersion',
'TCL_SetVar',
'TCL_UnsetVar',
'TCL_UpVar',
'_',
'_code2str',
'_d',
'_str2code',
'about',
'abs',
'acos',
'addModulePreferences',
'addcolor',
'addf',
'addhistory',
'addinter',
'addlocalizationdomain',
'amell',
'and',
'argn',
'arl2_ius',
'ascii',
'asin',
'atan',
'backslash',
'balanc',
'banner',
'base2dec',
'basename',
'bdiag',
'beep',
'besselh',
'besseli',
'besselj',
'besselk',
'bessely',
'beta',
'bezout',
'bfinit',
'blkfc1i',
'blkslvi',
'bool2s',
'browsehistory',
'browsevar',
'bsplin3val',
'buildDoc',
'buildouttb',
'bvode',
'c_link',
'call',
'callblk',
'captions',
'cd',
'cdfbet',
'cdfbin',
'cdfchi',
'cdfchn',
'cdff',
'cdffnc',
'cdfgam',
'cdfnbn',
'cdfnor',
'cdfpoi',
'cdft',
'ceil',
'champ',
'champ1',
'chdir',
'chol',
'clc',
'clean',
'clear',
'clearfun',
'clearglobal',
'closeEditor',
'closeEditvar',
'closeXcos',
'code2str',
'coeff',
'color',
'comp',
'completion',
'conj',
'contour2di',
'contr',
'conv2',
'convstr',
'copy',
'copyfile',
'corr',
'cos',
'coserror',
'createdir',
'cshep2d',
'csvDefault',
'csvIsnum',
'csvRead',
'csvStringToDouble',
'csvTextScan',
'csvWrite',
'ctree2',
'ctree3',
'ctree4',
'cumprod',
'cumsum',
'curblock',
'curblockc',
'daskr',
'dasrt',
'dassl',
'data2sig',
'datatipCreate',
'datatipManagerMode',
'datatipMove',
'datatipRemove',
'datatipSetDisplay',
'datatipSetInterp',
'datatipSetOrientation',
'datatipSetStyle',
'datatipToggle',
'dawson',
'dct',
'debug',
'dec2base',
'deff',
'definedfields',
'degree',
'delbpt',
'delete',
'deletefile',
'delip',
'delmenu',
'det',
'dgettext',
'dhinf',
'diag',
'diary',
'diffobjs',
'disp',
'dispbpt',
'displayhistory',
'disposefftwlibrary',
'dlgamma',
'dnaupd',
'dneupd',
'double',
'drawaxis',
'drawlater',
'drawnow',
'driver',
'dsaupd',
'dsearch',
'dseupd',
'dst',
'duplicate',
'editvar',
'emptystr',
'end_scicosim',
'ereduc',
'erf',
'erfc',
'erfcx',
'erfi',
'errcatch',
'errclear',
'error',
'eval_cshep2d',
'exec',
'execstr',
'exists',
'exit',
'exp',
'expm',
'exportUI',
'export_to_hdf5',
'eye',
'fadj2sp',
'fec',
'feval',
'fft',
'fftw',
'fftw_flags',
'fftw_forget_wisdom',
'fftwlibraryisloaded',
'figure',
'file',
'filebrowser',
'fileext',
'fileinfo',
'fileparts',
'filesep',
'find',
'findBD',
'findfiles',
'fire_closing_finished',
'floor',
'format',
'fort',
'fprintfMat',
'freq',
'frexp',
'fromc',
'fromjava',
'fscanfMat',
'fsolve',
'fstair',
'full',
'fullpath',
'funcprot',
'funptr',
'gamma',
'gammaln',
'geom3d',
'get',
'getURL',
'get_absolute_file_path',
'get_fftw_wisdom',
'getblocklabel',
'getcallbackobject',
'getdate',
'getdebuginfo',
'getdefaultlanguage',
'getdrives',
'getdynlibext',
'getenv',
'getfield',
'gethistory',
'gethistoryfile',
'getinstalledlookandfeels',
'getio',
'getlanguage',
'getlongpathname',
'getlookandfeel',
'getmd5',
'getmemory',
'getmodules',
'getos',
'getpid',
'getrelativefilename',
'getscicosvars',
'getscilabmode',
'getshortpathname',
'gettext',
'getvariablesonstack',
'getversion',
'glist',
'global',
'glue',
'grand',
'graphicfunction',
'grayplot',
'grep',
'gsort',
'gstacksize',
'h5attr',
'h5close',
'h5cp',
'h5dataset',
'h5dump',
'h5exists',
'h5flush',
'h5get',
'h5group',
'h5isArray',
'h5isAttr',
'h5isCompound',
'h5isFile',
'h5isGroup',
'h5isList',
'h5isRef',
'h5isSet',
'h5isSpace',
'h5isType',
'h5isVlen',
'h5label',
'h5ln',
'h5ls',
'h5mount',
'h5mv',
'h5open',
'h5read',
'h5readattr',
'h5rm',
'h5umount',
'h5write',
'h5writeattr',
'havewindow',
'helpbrowser',
'hess',
'hinf',
'historymanager',
'historysize',
'host',
'htmlDump',
'htmlRead',
'htmlReadStr',
'htmlWrite',
'iconvert',
'ieee',
'ilib_verbose',
'imag',
'impl',
'import_from_hdf5',
'imult',
'inpnvi',
'int',
'int16',
'int2d',
'int32',
'int3d',
'int8',
'interp',
'interp2d',
'interp3d',
'intg',
'intppty',
'inttype',
'inv',
'invoke_lu',
'is_handle_valid',
'is_hdf5_file',
'isalphanum',
'isascii',
'isdef',
'isdigit',
'isdir',
'isequal',
'isequalbitwise',
'iserror',
'isfile',
'isglobal',
'isletter',
'isnum',
'isreal',
'iswaitingforinput',
'jallowClassReloading',
'jarray',
'jautoTranspose',
'jautoUnwrap',
'javaclasspath',
'javalibrarypath',
'jcast',
'jcompile',
'jconvMatrixMethod',
'jcreatejar',
'jdeff',
'jdisableTrace',
'jenableTrace',
'jexists',
'jgetclassname',
'jgetfield',
'jgetfields',
'jgetinfo',
'jgetmethods',
'jimport',
'jinvoke',
'jinvoke_db',
'jnewInstance',
'jremove',
'jsetfield',
'junwrap',
'junwraprem',
'jwrap',
'jwrapinfloat',
'kron',
'lasterror',
'ldiv',
'ldivf',
'legendre',
'length',
'lib',
'librarieslist',
'libraryinfo',
'light',
'linear_interpn',
'lines',
'link',
'linmeq',
'list',
'listvar_in_hdf5',
'load',
'loadGui',
'loadScicos',
'loadXcos',
'loadfftwlibrary',
'loadhistory',
'log',
'log1p',
'lsq',
'lsq_splin',
'lsqrsolve',
'lsslist',
'lstcat',
'lstsize',
'ltitr',
'lu',
'ludel',
'lufact',
'luget',
'lusolve',
'macr2lst',
'macr2tree',
'matfile_close',
'matfile_listvar',
'matfile_open',
'matfile_varreadnext',
'matfile_varwrite',
'matrix',
'max',
'maxfiles',
'mclearerr',
'mclose',
'meof',
'merror',
'messagebox',
'mfprintf',
'mfscanf',
'mget',
'mgeti',
'mgetl',
'mgetstr',
'min',
'mlist',
'mode',
'model2blk',
'mopen',
'move',
'movefile',
'mprintf',
'mput',
'mputl',
'mputstr',
'mscanf',
'mseek',
'msprintf',
'msscanf',
'mtell',
'mtlb_mode',
'mtlb_sparse',
'mucomp',
'mulf',
'name2rgb',
'nearfloat',
'newaxes',
'newest',
'newfun',
'nnz',
'norm',
'notify',
'number_properties',
'ode',
'odedc',
'ones',
'openged',
'opentk',
'optim',
'or',
'ordmmd',
'parallel_concurrency',
'parallel_run',
'param3d',
'param3d1',
'part',
'pathconvert',
'pathsep',
'phase_simulation',
'plot2d',
'plot2d1',
'plot2d2',
'plot2d3',
'plot2d4',
'plot3d',
'plot3d1',
'plotbrowser',
'pointer_xproperty',
'poly',
'ppol',
'pppdiv',
'predef',
'preferences',
'print',
'printf',
'printfigure',
'printsetupbox',
'prod',
'progressionbar',
'prompt',
'pwd',
'qld',
'qp_solve',
'qr',
'raise_window',
'rand',
'rankqr',
'rat',
'rcond',
'rdivf',
'read',
'read4b',
'read_csv',
'readb',
'readgateway',
'readmps',
'real',
'realtime',
'realtimeinit',
'regexp',
'relocate_handle',
'remez',
'removeModulePreferences',
'removedir',
'removelinehistory',
'res_with_prec',
'resethistory',
'residu',
'resume',
'return',
'ricc',
'rlist',
'roots',
'rotate_axes',
'round',
'rpem',
'rtitr',
'rubberbox',
'save',
'saveGui',
'saveafterncommands',
'saveconsecutivecommands',
'savehistory',
'schur',
'sci_haltscicos',
'sci_tree2',
'sci_tree3',
'sci_tree4',
'sciargs',
'scicos_debug',
'scicos_debug_count',
'scicos_time',
'scicosim',
'scinotes',
'sctree',
'semidef',
'set',
'set_blockerror',
'set_fftw_wisdom',
'set_xproperty',
'setbpt',
'setdefaultlanguage',
'setenv',
'setfield',
'sethistoryfile',
'setlanguage',
'setlookandfeel',
'setmenu',
'sfact',
'sfinit',
'show_window',
'sident',
'sig2data',
'sign',
'simp',
'simp_mode',
'sin',
'size',
'slash',
'sleep',
'sorder',
'sparse',
'spchol',
'spcompack',
'spec',
'spget',
'splin',
'splin2d',
'splin3d',
'splitURL',
'spones',
'sprintf',
'sqrt',
'stacksize',
'str2code',
'strcat',
'strchr',
'strcmp',
'strcspn',
'strindex',
'string',
'stringbox',
'stripblanks',
'strncpy',
'strrchr',
'strrev',
'strsplit',
'strspn',
'strstr',
'strsubst',
'strtod',
'strtok',
'subf',
'sum',
'svd',
'swap_handles',
'symfcti',
'syredi',
'system_getproperty',
'system_setproperty',
'ta2lpd',
'tan',
'taucs_chdel',
'taucs_chfact',
'taucs_chget',
'taucs_chinfo',
'taucs_chsolve',
'tempname',
'testmatrix',
'timer',
'tlist',
'tohome',
'tokens',
'toolbar',
'toprint',
'tr_zer',
'tril',
'triu',
'type',
'typename',
'uiDisplayTree',
'uicontextmenu',
'uicontrol',
'uigetcolor',
'uigetdir',
'uigetfile',
'uigetfont',
'uimenu',
'uint16',
'uint32',
'uint8',
'uipopup',
'uiputfile',
'uiwait',
'ulink',
'umf_ludel',
'umf_lufact',
'umf_luget',
'umf_luinfo',
'umf_lusolve',
'umfpack',
'unglue',
'unix',
'unsetmenu',
'unzoom',
'updatebrowsevar',
'usecanvas',
'useeditor',
'user',
'var2vec',
'varn',
'vec2var',
'waitbar',
'warnBlockByUID',
'warning',
'what',
'where',
'whereis',
'who',
'winsid',
'with_module',
'writb',
'write',
'write4b',
'write_csv',
'x_choose',
'x_choose_modeless',
'x_dialog',
'x_mdialog',
'xarc',
'xarcs',
'xarrows',
'xchange',
'xchoicesi',
'xclick',
'xcos',
'xcosAddToolsMenu',
'xcosConfigureXmlFile',
'xcosDiagramToScilab',
'xcosPalCategoryAdd',
'xcosPalDelete',
'xcosPalDisable',
'xcosPalEnable',
'xcosPalGenerateIcon',
'xcosPalGet',
'xcosPalLoad',
'xcosPalMove',
'xcosSimulationStarted',
'xcosUpdateBlock',
'xdel',
'xend',
'xfarc',
'xfarcs',
'xfpoly',
'xfpolys',
'xfrect',
'xget',
'xgetmouse',
'xgraduate',
'xgrid',
'xinit',
'xlfont',
'xls_open',
'xls_read',
'xmlAddNs',
'xmlAppend',
'xmlAsNumber',
'xmlAsText',
'xmlDTD',
'xmlDelete',
'xmlDocument',
'xmlDump',
'xmlElement',
'xmlFormat',
'xmlGetNsByHref',
'xmlGetNsByPrefix',
'xmlGetOpenDocs',
'xmlIsValidObject',
'xmlName',
'xmlNs',
'xmlRead',
'xmlReadStr',
'xmlRelaxNG',
'xmlRemove',
'xmlSchema',
'xmlSetAttributes',
'xmlValidate',
'xmlWrite',
'xmlXPath',
'xname',
'xpause',
'xpoly',
'xpolys',
'xrect',
'xrects',
'xs2bmp',
'xs2emf',
'xs2eps',
'xs2gif',
'xs2jpg',
'xs2pdf',
'xs2png',
'xs2ppm',
'xs2ps',
'xs2svg',
'xsegs',
'xset',
'xstring',
'xstringb',
'xtitle',
'zeros',
'znaupd',
'zneupd',
'zoom_rect',
)
macros_kw = (
'!_deff_wrapper',
'%0_i_st',
'%3d_i_h',
'%Block_xcosUpdateBlock',
'%TNELDER_p',
'%TNELDER_string',
'%TNMPLOT_p',
'%TNMPLOT_string',
'%TOPTIM_p',
'%TOPTIM_string',
'%TSIMPLEX_p',
'%TSIMPLEX_string',
'%_EVoid_p',
'%_gsort',
'%_listvarinfile',
'%_rlist',
'%_save',
'%_sodload',
'%_strsplit',
'%_unwrap',
'%ar_p',
'%asn',
'%b_a_b',
'%b_a_s',
'%b_c_s',
'%b_c_spb',
'%b_cumprod',
'%b_cumsum',
'%b_d_s',
'%b_diag',
'%b_e',
'%b_f_s',
'%b_f_spb',
'%b_g_s',
'%b_g_spb',
'%b_grand',
'%b_h_s',
'%b_h_spb',
'%b_i_b',
'%b_i_ce',
'%b_i_h',
'%b_i_hm',
'%b_i_s',
'%b_i_sp',
'%b_i_spb',
'%b_i_st',
'%b_iconvert',
'%b_l_b',
'%b_l_s',
'%b_m_b',
'%b_m_s',
'%b_matrix',
'%b_n_hm',
'%b_o_hm',
'%b_p_s',
'%b_prod',
'%b_r_b',
'%b_r_s',
'%b_s_b',
'%b_s_s',
'%b_string',
'%b_sum',
'%b_tril',
'%b_triu',
'%b_x_b',
'%b_x_s',
'%bicg',
'%bicgstab',
'%c_a_c',
'%c_b_c',
'%c_b_s',
'%c_diag',
'%c_dsearch',
'%c_e',
'%c_eye',
'%c_f_s',
'%c_grand',
'%c_i_c',
'%c_i_ce',
'%c_i_h',
'%c_i_hm',
'%c_i_lss',
'%c_i_r',
'%c_i_s',
'%c_i_st',
'%c_matrix',
'%c_n_l',
'%c_n_st',
'%c_o_l',
'%c_o_st',
'%c_ones',
'%c_rand',
'%c_tril',
'%c_triu',
'%cblock_c_cblock',
'%cblock_c_s',
'%cblock_e',
'%cblock_f_cblock',
'%cblock_p',
'%cblock_size',
'%ce_6',
'%ce_c_ce',
'%ce_e',
'%ce_f_ce',
'%ce_i_ce',
'%ce_i_s',
'%ce_i_st',
'%ce_matrix',
'%ce_p',
'%ce_size',
'%ce_string',
'%ce_t',
'%cgs',
'%champdat_i_h',
'%choose',
'%diagram_xcos',
'%dir_p',
'%fptr_i_st',
'%grand_perm',
'%grayplot_i_h',
'%h_i_st',
'%hmS_k_hmS_generic',
'%hm_1_hm',
'%hm_1_s',
'%hm_2_hm',
'%hm_2_s',
'%hm_3_hm',
'%hm_3_s',
'%hm_4_hm',
'%hm_4_s',
'%hm_5',
'%hm_a_hm',
'%hm_a_r',
'%hm_a_s',
'%hm_abs',
'%hm_and',
'%hm_bool2s',
'%hm_c_hm',
'%hm_ceil',
'%hm_conj',
'%hm_cos',
'%hm_cumprod',
'%hm_cumsum',
'%hm_d_hm',
'%hm_d_s',
'%hm_degree',
'%hm_dsearch',
'%hm_e',
'%hm_exp',
'%hm_eye',
'%hm_f_hm',
'%hm_find',
'%hm_floor',
'%hm_g_hm',
'%hm_grand',
'%hm_gsort',
'%hm_h_hm',
'%hm_i_b',
'%hm_i_ce',
'%hm_i_h',
'%hm_i_hm',
'%hm_i_i',
'%hm_i_p',
'%hm_i_r',
'%hm_i_s',
'%hm_i_st',
'%hm_iconvert',
'%hm_imag',
'%hm_int',
'%hm_isnan',
'%hm_isreal',
'%hm_j_hm',
'%hm_j_s',
'%hm_k_hm',
'%hm_k_s',
'%hm_log',
'%hm_m_p',
'%hm_m_r',
'%hm_m_s',
'%hm_matrix',
'%hm_max',
'%hm_mean',
'%hm_median',
'%hm_min',
'%hm_n_b',
'%hm_n_c',
'%hm_n_hm',
'%hm_n_i',
'%hm_n_p',
'%hm_n_s',
'%hm_o_b',
'%hm_o_c',
'%hm_o_hm',
'%hm_o_i',
'%hm_o_p',
'%hm_o_s',
'%hm_ones',
'%hm_or',
'%hm_p',
'%hm_prod',
'%hm_q_hm',
'%hm_r_s',
'%hm_rand',
'%hm_real',
'%hm_round',
'%hm_s',
'%hm_s_hm',
'%hm_s_r',
'%hm_s_s',
'%hm_sign',
'%hm_sin',
'%hm_size',
'%hm_sqrt',
'%hm_stdev',
'%hm_string',
'%hm_sum',
'%hm_x_hm',
'%hm_x_p',
'%hm_x_s',
'%hm_zeros',
'%i_1_s',
'%i_2_s',
'%i_3_s',
'%i_4_s',
'%i_Matplot',
'%i_a_i',
'%i_a_s',
'%i_and',
'%i_ascii',
'%i_b_s',
'%i_bezout',
'%i_champ',
'%i_champ1',
'%i_contour',
'%i_contour2d',
'%i_d_i',
'%i_d_s',
'%i_dsearch',
'%i_e',
'%i_fft',
'%i_g_i',
'%i_gcd',
'%i_grand',
'%i_h_i',
'%i_i_ce',
'%i_i_h',
'%i_i_hm',
'%i_i_i',
'%i_i_s',
'%i_i_st',
'%i_j_i',
'%i_j_s',
'%i_l_s',
'%i_lcm',
'%i_length',
'%i_m_i',
'%i_m_s',
'%i_mfprintf',
'%i_mprintf',
'%i_msprintf',
'%i_n_s',
'%i_o_s',
'%i_or',
'%i_p_i',
'%i_p_s',
'%i_plot2d',
'%i_plot2d1',
'%i_plot2d2',
'%i_q_s',
'%i_r_i',
'%i_r_s',
'%i_round',
'%i_s_i',
'%i_s_s',
'%i_sign',
'%i_string',
'%i_x_i',
'%i_x_s',
'%ip_a_s',
'%ip_i_st',
'%ip_m_s',
'%ip_n_ip',
'%ip_o_ip',
'%ip_p',
'%ip_part',
'%ip_s_s',
'%ip_string',
'%k',
'%l_i_h',
'%l_i_s',
'%l_i_st',
'%l_isequal',
'%l_n_c',
'%l_n_l',
'%l_n_m',
'%l_n_p',
'%l_n_s',
'%l_n_st',
'%l_o_c',
'%l_o_l',
'%l_o_m',
'%l_o_p',
'%l_o_s',
'%l_o_st',
'%lss_a_lss',
'%lss_a_p',
'%lss_a_r',
'%lss_a_s',
'%lss_c_lss',
'%lss_c_p',
'%lss_c_r',
'%lss_c_s',
'%lss_e',
'%lss_eye',
'%lss_f_lss',
'%lss_f_p',
'%lss_f_r',
'%lss_f_s',
'%lss_i_ce',
'%lss_i_lss',
'%lss_i_p',
'%lss_i_r',
'%lss_i_s',
'%lss_i_st',
'%lss_inv',
'%lss_l_lss',
'%lss_l_p',
'%lss_l_r',
'%lss_l_s',
'%lss_m_lss',
'%lss_m_p',
'%lss_m_r',
'%lss_m_s',
'%lss_n_lss',
'%lss_n_p',
'%lss_n_r',
'%lss_n_s',
'%lss_norm',
'%lss_o_lss',
'%lss_o_p',
'%lss_o_r',
'%lss_o_s',
'%lss_ones',
'%lss_r_lss',
'%lss_r_p',
'%lss_r_r',
'%lss_r_s',
'%lss_rand',
'%lss_s',
'%lss_s_lss',
'%lss_s_p',
'%lss_s_r',
'%lss_s_s',
'%lss_size',
'%lss_t',
'%lss_v_lss',
'%lss_v_p',
'%lss_v_r',
'%lss_v_s',
'%lt_i_s',
'%m_n_l',
'%m_o_l',
'%mc_i_h',
'%mc_i_s',
'%mc_i_st',
'%mc_n_st',
'%mc_o_st',
'%mc_string',
'%mps_p',
'%mps_string',
'%msp_a_s',
'%msp_abs',
'%msp_e',
'%msp_find',
'%msp_i_s',
'%msp_i_st',
'%msp_length',
'%msp_m_s',
'%msp_maxi',
'%msp_n_msp',
'%msp_nnz',
'%msp_o_msp',
'%msp_p',
'%msp_sparse',
'%msp_spones',
'%msp_t',
'%p_a_lss',
'%p_a_r',
'%p_c_lss',
'%p_c_r',
'%p_cumprod',
'%p_cumsum',
'%p_d_p',
'%p_d_r',
'%p_d_s',
'%p_det',
'%p_e',
'%p_f_lss',
'%p_f_r',
'%p_grand',
'%p_i_ce',
'%p_i_h',
'%p_i_hm',
'%p_i_lss',
'%p_i_p',
'%p_i_r',
'%p_i_s',
'%p_i_st',
'%p_inv',
'%p_j_s',
'%p_k_p',
'%p_k_r',
'%p_k_s',
'%p_l_lss',
'%p_l_p',
'%p_l_r',
'%p_l_s',
'%p_m_hm',
'%p_m_lss',
'%p_m_r',
'%p_matrix',
'%p_n_l',
'%p_n_lss',
'%p_n_r',
'%p_o_l',
'%p_o_lss',
'%p_o_r',
'%p_o_sp',
'%p_p_s',
'%p_part',
'%p_prod',
'%p_q_p',
'%p_q_r',
'%p_q_s',
'%p_r_lss',
'%p_r_p',
'%p_r_r',
'%p_r_s',
'%p_s_lss',
'%p_s_r',
'%p_simp',
'%p_string',
'%p_sum',
'%p_v_lss',
'%p_v_p',
'%p_v_r',
'%p_v_s',
'%p_x_hm',
'%p_x_r',
'%p_y_p',
'%p_y_r',
'%p_y_s',
'%p_z_p',
'%p_z_r',
'%p_z_s',
'%pcg',
'%plist_p',
'%plist_string',
'%r_0',
'%r_a_hm',
'%r_a_lss',
'%r_a_p',
'%r_a_r',
'%r_a_s',
'%r_c_lss',
'%r_c_p',
'%r_c_r',
'%r_c_s',
'%r_clean',
'%r_cumprod',
'%r_cumsum',
'%r_d_p',
'%r_d_r',
'%r_d_s',
'%r_det',
'%r_diag',
'%r_e',
'%r_eye',
'%r_f_lss',
'%r_f_p',
'%r_f_r',
'%r_f_s',
'%r_i_ce',
'%r_i_hm',
'%r_i_lss',
'%r_i_p',
'%r_i_r',
'%r_i_s',
'%r_i_st',
'%r_inv',
'%r_j_s',
'%r_k_p',
'%r_k_r',
'%r_k_s',
'%r_l_lss',
'%r_l_p',
'%r_l_r',
'%r_l_s',
'%r_m_hm',
'%r_m_lss',
'%r_m_p',
'%r_m_r',
'%r_m_s',
'%r_matrix',
'%r_n_lss',
'%r_n_p',
'%r_n_r',
'%r_n_s',
'%r_norm',
'%r_o_lss',
'%r_o_p',
'%r_o_r',
'%r_o_s',
'%r_ones',
'%r_p',
'%r_p_s',
'%r_prod',
'%r_q_p',
'%r_q_r',
'%r_q_s',
'%r_r_lss',
'%r_r_p',
'%r_r_r',
'%r_r_s',
'%r_rand',
'%r_s',
'%r_s_hm',
'%r_s_lss',
'%r_s_p',
'%r_s_r',
'%r_s_s',
'%r_simp',
'%r_size',
'%r_string',
'%r_sum',
'%r_t',
'%r_tril',
'%r_triu',
'%r_v_lss',
'%r_v_p',
'%r_v_r',
'%r_v_s',
'%r_varn',
'%r_x_p',
'%r_x_r',
'%r_x_s',
'%r_y_p',
'%r_y_r',
'%r_y_s',
'%r_z_p',
'%r_z_r',
'%r_z_s',
'%s_1_hm',
'%s_1_i',
'%s_2_hm',
'%s_2_i',
'%s_3_hm',
'%s_3_i',
'%s_4_hm',
'%s_4_i',
'%s_5',
'%s_a_b',
'%s_a_hm',
'%s_a_i',
'%s_a_ip',
'%s_a_lss',
'%s_a_msp',
'%s_a_r',
'%s_a_sp',
'%s_and',
'%s_b_i',
'%s_b_s',
'%s_bezout',
'%s_c_b',
'%s_c_cblock',
'%s_c_lss',
'%s_c_r',
'%s_c_sp',
'%s_d_b',
'%s_d_i',
'%s_d_p',
'%s_d_r',
'%s_d_sp',
'%s_e',
'%s_f_b',
'%s_f_cblock',
'%s_f_lss',
'%s_f_r',
'%s_f_sp',
'%s_g_b',
'%s_g_s',
'%s_gcd',
'%s_grand',
'%s_h_b',
'%s_h_s',
'%s_i_b',
'%s_i_c',
'%s_i_ce',
'%s_i_h',
'%s_i_hm',
'%s_i_i',
'%s_i_lss',
'%s_i_p',
'%s_i_r',
'%s_i_s',
'%s_i_sp',
'%s_i_spb',
'%s_i_st',
'%s_j_i',
'%s_k_hm',
'%s_k_p',
'%s_k_r',
'%s_k_sp',
'%s_l_b',
'%s_l_hm',
'%s_l_i',
'%s_l_lss',
'%s_l_p',
'%s_l_r',
'%s_l_s',
'%s_l_sp',
'%s_lcm',
'%s_m_b',
'%s_m_hm',
'%s_m_i',
'%s_m_ip',
'%s_m_lss',
'%s_m_msp',
'%s_m_r',
'%s_matrix',
'%s_n_hm',
'%s_n_i',
'%s_n_l',
'%s_n_lss',
'%s_n_r',
'%s_n_st',
'%s_o_hm',
'%s_o_i',
'%s_o_l',
'%s_o_lss',
'%s_o_r',
'%s_o_st',
'%s_or',
'%s_p_b',
'%s_p_i',
'%s_pow',
'%s_q_hm',
'%s_q_i',
'%s_q_p',
'%s_q_r',
'%s_q_sp',
'%s_r_b',
'%s_r_i',
'%s_r_lss',
'%s_r_p',
'%s_r_r',
'%s_r_s',
'%s_r_sp',
'%s_s_b',
'%s_s_hm',
'%s_s_i',
'%s_s_ip',
'%s_s_lss',
'%s_s_r',
'%s_s_sp',
'%s_simp',
'%s_v_lss',
'%s_v_p',
'%s_v_r',
'%s_v_s',
'%s_x_b',
'%s_x_hm',
'%s_x_i',
'%s_x_r',
'%s_y_p',
'%s_y_r',
'%s_y_sp',
'%s_z_p',
'%s_z_r',
'%s_z_sp',
'%sn',
'%sp_a_s',
'%sp_a_sp',
'%sp_and',
'%sp_c_s',
'%sp_ceil',
'%sp_conj',
'%sp_cos',
'%sp_cumprod',
'%sp_cumsum',
'%sp_d_s',
'%sp_d_sp',
'%sp_det',
'%sp_diag',
'%sp_e',
'%sp_exp',
'%sp_f_s',
'%sp_floor',
'%sp_grand',
'%sp_gsort',
'%sp_i_ce',
'%sp_i_h',
'%sp_i_s',
'%sp_i_sp',
'%sp_i_st',
'%sp_int',
'%sp_inv',
'%sp_k_s',
'%sp_k_sp',
'%sp_l_s',
'%sp_l_sp',
'%sp_length',
'%sp_max',
'%sp_min',
'%sp_norm',
'%sp_or',
'%sp_p_s',
'%sp_prod',
'%sp_q_s',
'%sp_q_sp',
'%sp_r_s',
'%sp_r_sp',
'%sp_round',
'%sp_s_s',
'%sp_s_sp',
'%sp_sin',
'%sp_sqrt',
'%sp_string',
'%sp_sum',
'%sp_tril',
'%sp_triu',
'%sp_y_s',
'%sp_y_sp',
'%sp_z_s',
'%sp_z_sp',
'%spb_and',
'%spb_c_b',
'%spb_cumprod',
'%spb_cumsum',
'%spb_diag',
'%spb_e',
'%spb_f_b',
'%spb_g_b',
'%spb_g_spb',
'%spb_h_b',
'%spb_h_spb',
'%spb_i_b',
'%spb_i_ce',
'%spb_i_h',
'%spb_i_st',
'%spb_or',
'%spb_prod',
'%spb_sum',
'%spb_tril',
'%spb_triu',
'%st_6',
'%st_c_st',
'%st_e',
'%st_f_st',
'%st_i_b',
'%st_i_c',
'%st_i_fptr',
'%st_i_h',
'%st_i_i',
'%st_i_ip',
'%st_i_lss',
'%st_i_msp',
'%st_i_p',
'%st_i_r',
'%st_i_s',
'%st_i_sp',
'%st_i_spb',
'%st_i_st',
'%st_matrix',
'%st_n_c',
'%st_n_l',
'%st_n_mc',
'%st_n_p',
'%st_n_s',
'%st_o_c',
'%st_o_l',
'%st_o_mc',
'%st_o_p',
'%st_o_s',
'%st_o_tl',
'%st_p',
'%st_size',
'%st_string',
'%st_t',
'%ticks_i_h',
'%xls_e',
'%xls_p',
'%xlssheet_e',
'%xlssheet_p',
'%xlssheet_size',
'%xlssheet_string',
'DominationRank',
'G_make',
'IsAScalar',
'NDcost',
'OS_Version',
'PlotSparse',
'ReadHBSparse',
'TCL_CreateSlave',
'abcd',
'abinv',
'accept_func_default',
'accept_func_vfsa',
'acf',
'acosd',
'acosh',
'acoshm',
'acosm',
'acot',
'acotd',
'acoth',
'acsc',
'acscd',
'acsch',
'add_demo',
'add_help_chapter',
'add_module_help_chapter',
'add_param',
'add_profiling',
'adj2sp',
'aff2ab',
'ana_style',
'analpf',
'analyze',
'aplat',
'arhnk',
'arl2',
'arma2p',
'arma2ss',
'armac',
'armax',
'armax1',
'arobasestring2strings',
'arsimul',
'ascii2string',
'asciimat',
'asec',
'asecd',
'asech',
'asind',
'asinh',
'asinhm',
'asinm',
'assert_checkalmostequal',
'assert_checkequal',
'assert_checkerror',
'assert_checkfalse',
'assert_checkfilesequal',
'assert_checktrue',
'assert_comparecomplex',
'assert_computedigits',
'assert_cond2reltol',
'assert_cond2reqdigits',
'assert_generror',
'atand',
'atanh',
'atanhm',
'atanm',
'atomsAutoload',
'atomsAutoloadAdd',
'atomsAutoloadDel',
'atomsAutoloadList',
'atomsCategoryList',
'atomsCheckModule',
'atomsDepTreeShow',
'atomsGetConfig',
'atomsGetInstalled',
'atomsGetInstalledPath',
'atomsGetLoaded',
'atomsGetLoadedPath',
'atomsInstall',
'atomsIsInstalled',
'atomsIsLoaded',
'atomsList',
'atomsLoad',
'atomsQuit',
'atomsRemove',
'atomsRepositoryAdd',
'atomsRepositoryDel',
'atomsRepositoryList',
'atomsRestoreConfig',
'atomsSaveConfig',
'atomsSearch',
'atomsSetConfig',
'atomsShow',
'atomsSystemInit',
'atomsSystemUpdate',
'atomsTest',
'atomsUpdate',
'atomsVersion',
'augment',
'auread',
'auwrite',
'balreal',
'bench_run',
'bilin',
'bilt',
'bin2dec',
'binomial',
'bitand',
'bitcmp',
'bitget',
'bitor',
'bitset',
'bitxor',
'black',
'blanks',
'bloc2exp',
'bloc2ss',
'block_parameter_error',
'bode',
'bode_asymp',
'bstap',
'buttmag',
'bvodeS',
'bytecode',
'bytecodewalk',
'cainv',
'calendar',
'calerf',
'calfrq',
'canon',
'casc',
'cat',
'cat_code',
'cb_m2sci_gui',
'ccontrg',
'cell',
'cell2mat',
'cellstr',
'center',
'cepstrum',
'cfspec',
'char',
'chart',
'cheb1mag',
'cheb2mag',
'check_gateways',
'check_modules_xml',
'check_versions',
'chepol',
'chfact',
'chsolve',
'classmarkov',
'clean_help',
'clock',
'cls2dls',
'cmb_lin',
'cmndred',
'cmoment',
'coding_ga_binary',
'coding_ga_identity',
'coff',
'coffg',
'colcomp',
'colcompr',
'colinout',
'colregul',
'companion',
'complex',
'compute_initial_temp',
'cond',
'cond2sp',
'condestsp',
'configure_msifort',
'configure_msvc',
'conjgrad',
'cont_frm',
'cont_mat',
'contrss',
'conv',
'convert_to_float',
'convertindex',
'convol',
'convol2d',
'copfac',
'correl',
'cosd',
'cosh',
'coshm',
'cosm',
'cotd',
'cotg',
'coth',
'cothm',
'cov',
'covar',
'createXConfiguration',
'createfun',
'createstruct',
'cross',
'crossover_ga_binary',
'crossover_ga_default',
'csc',
'cscd',
'csch',
'csgn',
'csim',
'cspect',
'ctr_gram',
'czt',
'dae',
'daeoptions',
'damp',
'datafit',
'date',
'datenum',
'datevec',
'dbphi',
'dcf',
'ddp',
'dec2bin',
'dec2hex',
'dec2oct',
'del_help_chapter',
'del_module_help_chapter',
'demo_begin',
'demo_choose',
'demo_compiler',
'demo_end',
'demo_file_choice',
'demo_folder_choice',
'demo_function_choice',
'demo_gui',
'demo_run',
'demo_viewCode',
'denom',
'derivat',
'derivative',
'des2ss',
'des2tf',
'detectmsifort64tools',
'detectmsvc64tools',
'determ',
'detr',
'detrend',
'devtools_run_builder',
'dhnorm',
'diff',
'diophant',
'dir',
'dirname',
'dispfiles',
'dllinfo',
'dscr',
'dsimul',
'dt_ility',
'dtsi',
'edit',
'edit_error',
'editor',
'eigenmarkov',
'eigs',
'ell1mag',
'enlarge_shape',
'entropy',
'eomday',
'epred',
'eqfir',
'eqiir',
'equil',
'equil1',
'erfinv',
'etime',
'eval',
'evans',
'evstr',
'example_run',
'expression2code',
'extract_help_examples',
'factor',
'factorial',
'factors',
'faurre',
'ffilt',
'fft2',
'fftshift',
'fieldnames',
'filt_sinc',
'filter',
'findABCD',
'findAC',
'findBDK',
'findR',
'find_freq',
'find_links',
'find_scicos_version',
'findm',
'findmsifortcompiler',
'findmsvccompiler',
'findx0BD',
'firstnonsingleton',
'fix',
'fixedpointgcd',
'flipdim',
'flts',
'fminsearch',
'formatBlackTip',
'formatBodeMagTip',
'formatBodePhaseTip',
'formatGainplotTip',
'formatHallModuleTip',
'formatHallPhaseTip',
'formatNicholsGainTip',
'formatNicholsPhaseTip',
'formatNyquistTip',
'formatPhaseplotTip',
'formatSgridDampingTip',
'formatSgridFreqTip',
'formatZgridDampingTip',
'formatZgridFreqTip',
'format_txt',
'fourplan',
'frep2tf',
'freson',
'frfit',
'frmag',
'fseek_origin',
'fsfirlin',
'fspec',
'fspecg',
'fstabst',
'ftest',
'ftuneq',
'fullfile',
'fullrf',
'fullrfk',
'fun2string',
'g_margin',
'gainplot',
'gamitg',
'gcare',
'gcd',
'gencompilationflags_unix',
'generateBlockImage',
'generateBlockImages',
'generic_i_ce',
'generic_i_h',
'generic_i_hm',
'generic_i_s',
'generic_i_st',
'genlib',
'genmarkov',
'geomean',
'getDiagramVersion',
'getModelicaPath',
'getPreferencesValue',
'get_file_path',
'get_function_path',
'get_param',
'get_profile',
'get_scicos_version',
'getd',
'getscilabkeywords',
'getshell',
'gettklib',
'gfare',
'gfrancis',
'givens',
'glever',
'gmres',
'group',
'gschur',
'gspec',
'gtild',
'h2norm',
'h_cl',
'h_inf',
'h_inf_st',
'h_norm',
'hallchart',
'halt',
'hank',
'hankelsv',
'harmean',
'haveacompiler',
'head_comments',
'help_from_sci',
'help_skeleton',
'hermit',
'hex2dec',
'hilb',
'hilbert',
'histc',
'horner',
'householder',
'hrmt',
'htrianr',
'hypermat',
'idct',
'idst',
'ifft',
'ifftshift',
'iir',
'iirgroup',
'iirlp',
'iirmod',
'ilib_build',
'ilib_build_jar',
'ilib_compile',
'ilib_for_link',
'ilib_gen_Make',
'ilib_gen_Make_unix',
'ilib_gen_cleaner',
'ilib_gen_gateway',
'ilib_gen_loader',
'ilib_include_flag',
'ilib_mex_build',
'im_inv',
'importScicosDiagram',
'importScicosPal',
'importXcosDiagram',
'imrep2ss',
'ind2sub',
'inistate',
'init_ga_default',
'init_param',
'initial_scicos_tables',
'input',
'instruction2code',
'intc',
'intdec',
'integrate',
'interp1',
'interpln',
'intersect',
'intl',
'intsplin',
'inttrap',
'inv_coeff',
'invr',
'invrs',
'invsyslin',
'iqr',
'isLeapYear',
'is_absolute_path',
'is_param',
'iscell',
'iscellstr',
'iscolumn',
'isempty',
'isfield',
'isinf',
'ismatrix',
'isnan',
'isrow',
'isscalar',
'issparse',
'issquare',
'isstruct',
'isvector',
'jmat',
'justify',
'kalm',
'karmarkar',
'kernel',
'kpure',
'krac2',
'kroneck',
'lattn',
'lattp',
'launchtest',
'lcf',
'lcm',
'lcmdiag',
'leastsq',
'leqe',
'leqr',
'lev',
'levin',
'lex_sort',
'lft',
'lin',
'lin2mu',
'lincos',
'lindquist',
'linf',
'linfn',
'linsolve',
'linspace',
'list2vec',
'list_param',
'listfiles',
'listfunctions',
'listvarinfile',
'lmisolver',
'lmitool',
'loadXcosLibs',
'loadmatfile',
'loadwave',
'log10',
'log2',
'logm',
'logspace',
'lqe',
'lqg',
'lqg2stan',
'lqg_ltr',
'lqr',
'ls',
'lyap',
'm2sci_gui',
'm_circle',
'macglov',
'macrovar',
'mad',
'makecell',
'manedit',
'mapsound',
'markp2ss',
'matfile2sci',
'mdelete',
'mean',
'meanf',
'median',
'members',
'mese',
'meshgrid',
'mfft',
'mfile2sci',
'minreal',
'minss',
'mkdir',
'modulo',
'moment',
'mrfit',
'msd',
'mstr2sci',
'mtlb',
'mtlb_0',
'mtlb_a',
'mtlb_all',
'mtlb_any',
'mtlb_axes',
'mtlb_axis',
'mtlb_beta',
'mtlb_box',
'mtlb_choices',
'mtlb_close',
'mtlb_colordef',
'mtlb_cond',
'mtlb_cov',
'mtlb_cumprod',
'mtlb_cumsum',
'mtlb_dec2hex',
'mtlb_delete',
'mtlb_diag',
'mtlb_diff',
'mtlb_dir',
'mtlb_double',
'mtlb_e',
'mtlb_echo',
'mtlb_error',
'mtlb_eval',
'mtlb_exist',
'mtlb_eye',
'mtlb_false',
'mtlb_fft',
'mtlb_fftshift',
'mtlb_filter',
'mtlb_find',
'mtlb_findstr',
'mtlb_fliplr',
'mtlb_fopen',
'mtlb_format',
'mtlb_fprintf',
'mtlb_fread',
'mtlb_fscanf',
'mtlb_full',
'mtlb_fwrite',
'mtlb_get',
'mtlb_grid',
'mtlb_hold',
'mtlb_i',
'mtlb_ifft',
'mtlb_image',
'mtlb_imp',
'mtlb_int16',
'mtlb_int32',
'mtlb_int8',
'mtlb_is',
'mtlb_isa',
'mtlb_isfield',
'mtlb_isletter',
'mtlb_isspace',
'mtlb_l',
'mtlb_legendre',
'mtlb_linspace',
'mtlb_logic',
'mtlb_logical',
'mtlb_loglog',
'mtlb_lower',
'mtlb_max',
'mtlb_mean',
'mtlb_median',
'mtlb_mesh',
'mtlb_meshdom',
'mtlb_min',
'mtlb_more',
'mtlb_num2str',
'mtlb_ones',
'mtlb_pcolor',
'mtlb_plot',
'mtlb_prod',
'mtlb_qr',
'mtlb_qz',
'mtlb_rand',
'mtlb_randn',
'mtlb_rcond',
'mtlb_realmax',
'mtlb_realmin',
'mtlb_s',
'mtlb_semilogx',
'mtlb_semilogy',
'mtlb_setstr',
'mtlb_size',
'mtlb_sort',
'mtlb_sortrows',
'mtlb_sprintf',
'mtlb_sscanf',
'mtlb_std',
'mtlb_strcmp',
'mtlb_strcmpi',
'mtlb_strfind',
'mtlb_strrep',
'mtlb_subplot',
'mtlb_sum',
'mtlb_t',
'mtlb_toeplitz',
'mtlb_tril',
'mtlb_triu',
'mtlb_true',
'mtlb_type',
'mtlb_uint16',
'mtlb_uint32',
'mtlb_uint8',
'mtlb_upper',
'mtlb_var',
'mtlb_zeros',
'mu2lin',
'mutation_ga_binary',
'mutation_ga_default',
'mvcorrel',
'mvvacov',
'nancumsum',
'nand2mean',
'nanmax',
'nanmean',
'nanmeanf',
'nanmedian',
'nanmin',
'nanreglin',
'nanstdev',
'nansum',
'narsimul',
'ndgrid',
'ndims',
'nehari',
'neigh_func_csa',
'neigh_func_default',
'neigh_func_fsa',
'neigh_func_vfsa',
'neldermead_cget',
'neldermead_configure',
'neldermead_costf',
'neldermead_defaultoutput',
'neldermead_destroy',
'neldermead_function',
'neldermead_get',
'neldermead_log',
'neldermead_new',
'neldermead_restart',
'neldermead_search',
'neldermead_updatesimp',
'nextpow2',
'nfreq',
'nicholschart',
'nlev',
'nmplot_cget',
'nmplot_configure',
'nmplot_contour',
'nmplot_destroy',
'nmplot_function',
'nmplot_get',
'nmplot_historyplot',
'nmplot_log',
'nmplot_new',
'nmplot_outputcmd',
'nmplot_restart',
'nmplot_search',
'nmplot_simplexhistory',
'noisegen',
'nonreg_test_run',
'now',
'nthroot',
'null',
'num2cell',
'numderivative',
'numdiff',
'numer',
'nyquist',
'nyquistfrequencybounds',
'obs_gram',
'obscont',
'observer',
'obsv_mat',
'obsvss',
'oct2dec',
'odeoptions',
'optim_ga',
'optim_moga',
'optim_nsga',
'optim_nsga2',
'optim_sa',
'optimbase_cget',
'optimbase_checkbounds',
'optimbase_checkcostfun',
'optimbase_checkx0',
'optimbase_configure',
'optimbase_destroy',
'optimbase_function',
'optimbase_get',
'optimbase_hasbounds',
'optimbase_hasconstraints',
'optimbase_hasnlcons',
'optimbase_histget',
'optimbase_histset',
'optimbase_incriter',
'optimbase_isfeasible',
'optimbase_isinbounds',
'optimbase_isinnonlincons',
'optimbase_log',
'optimbase_logshutdown',
'optimbase_logstartup',
'optimbase_new',
'optimbase_outputcmd',
'optimbase_outstruct',
'optimbase_proj2bnds',
'optimbase_set',
'optimbase_stoplog',
'optimbase_terminate',
'optimget',
'optimplotfunccount',
'optimplotfval',
'optimplotx',
'optimset',
'optimsimplex_center',
'optimsimplex_check',
'optimsimplex_compsomefv',
'optimsimplex_computefv',
'optimsimplex_deltafv',
'optimsimplex_deltafvmax',
'optimsimplex_destroy',
'optimsimplex_dirmat',
'optimsimplex_fvmean',
'optimsimplex_fvstdev',
'optimsimplex_fvvariance',
'optimsimplex_getall',
'optimsimplex_getallfv',
'optimsimplex_getallx',
'optimsimplex_getfv',
'optimsimplex_getn',
'optimsimplex_getnbve',
'optimsimplex_getve',
'optimsimplex_getx',
'optimsimplex_gradientfv',
'optimsimplex_log',
'optimsimplex_new',
'optimsimplex_reflect',
'optimsimplex_setall',
'optimsimplex_setallfv',
'optimsimplex_setallx',
'optimsimplex_setfv',
'optimsimplex_setn',
'optimsimplex_setnbve',
'optimsimplex_setve',
'optimsimplex_setx',
'optimsimplex_shrink',
'optimsimplex_size',
'optimsimplex_sort',
'optimsimplex_xbar',
'orth',
'output_ga_default',
'output_moga_default',
'output_nsga2_default',
'output_nsga_default',
'p_margin',
'pack',
'pareto_filter',
'parrot',
'pbig',
'pca',
'pcg',
'pdiv',
'pen2ea',
'pencan',
'pencost',
'penlaur',
'perctl',
'perl',
'perms',
'permute',
'pertrans',
'pfactors',
'pfss',
'phasemag',
'phaseplot',
'phc',
'pinv',
'playsnd',
'plotprofile',
'plzr',
'pmodulo',
'pol2des',
'pol2str',
'polar',
'polfact',
'prbs_a',
'prettyprint',
'primes',
'princomp',
'profile',
'proj',
'projsl',
'projspec',
'psmall',
'pspect',
'qmr',
'qpsolve',
'quart',
'quaskro',
'rafiter',
'randpencil',
'range',
'rank',
'readxls',
'recompilefunction',
'recons',
'reglin',
'regress',
'remezb',
'remove_param',
'remove_profiling',
'repfreq',
'replace_Ix_by_Fx',
'repmat',
'reset_profiling',
'resize_matrix',
'returntoscilab',
'rhs2code',
'ric_desc',
'riccati',
'rmdir',
'routh_t',
'rowcomp',
'rowcompr',
'rowinout',
'rowregul',
'rowshuff',
'rref',
'sample',
'samplef',
'samwr',
'savematfile',
'savewave',
'scanf',
'sci2exp',
'sciGUI_init',
'sci_sparse',
'scicos_getvalue',
'scicos_simulate',
'scicos_workspace_init',
'scisptdemo',
'scitest',
'sdiff',
'sec',
'secd',
'sech',
'selection_ga_elitist',
'selection_ga_random',
'sensi',
'setPreferencesValue',
'set_param',
'setdiff',
'sgrid',
'show_margins',
'show_pca',
'showprofile',
'signm',
'sinc',
'sincd',
'sind',
'sinh',
'sinhm',
'sinm',
'sm2des',
'sm2ss',
'smga',
'smooth',
'solve',
'sound',
'soundsec',
'sp2adj',
'spaninter',
'spanplus',
'spantwo',
'specfact',
'speye',
'sprand',
'spzeros',
'sqroot',
'sqrtm',
'squarewave',
'squeeze',
'srfaur',
'srkf',
'ss2des',
'ss2ss',
'ss2tf',
'sskf',
'ssprint',
'ssrand',
'st_deviation',
'st_i_generic',
'st_ility',
'stabil',
'statgain',
'stdev',
'stdevf',
'steadycos',
'strange',
'strcmpi',
'struct',
'sub2ind',
'sva',
'svplot',
'sylm',
'sylv',
'sysconv',
'sysdiag',
'sysfact',
'syslin',
'syssize',
'system',
'systmat',
'tabul',
'tand',
'tanh',
'tanhm',
'tanm',
'tbx_build_blocks',
'tbx_build_cleaner',
'tbx_build_gateway',
'tbx_build_gateway_clean',
'tbx_build_gateway_loader',
'tbx_build_help',
'tbx_build_help_loader',
'tbx_build_loader',
'tbx_build_localization',
'tbx_build_macros',
'tbx_build_pal_loader',
'tbx_build_src',
'tbx_builder',
'tbx_builder_gateway',
'tbx_builder_gateway_lang',
'tbx_builder_help',
'tbx_builder_help_lang',
'tbx_builder_macros',
'tbx_builder_src',
'tbx_builder_src_lang',
'tbx_generate_pofile',
'temp_law_csa',
'temp_law_default',
'temp_law_fsa',
'temp_law_huang',
'temp_law_vfsa',
'test_clean',
'test_on_columns',
'test_run',
'test_run_level',
'testexamples',
'tf2des',
'tf2ss',
'thrownan',
'tic',
'time_id',
'toc',
'toeplitz',
'tokenpos',
'toolboxes',
'trace',
'trans',
'translatepaths',
'tree2code',
'trfmod',
'trianfml',
'trimmean',
'trisolve',
'trzeros',
'typeof',
'ui_observer',
'union',
'unique',
'unit_test_run',
'unix_g',
'unix_s',
'unix_w',
'unix_x',
'unobs',
'unpack',
'unwrap',
'variance',
'variancef',
'vec2list',
'vectorfind',
'ver',
'warnobsolete',
'wavread',
'wavwrite',
'wcenter',
'weekday',
'wfir',
'wfir_gui',
'whereami',
'who_user',
'whos',
'wiener',
'wigner',
'window',
'winlist',
'with_javasci',
'with_macros_source',
'with_modelica_compiler',
'with_tk',
'xcorr',
'xcosBlockEval',
'xcosBlockInterface',
'xcosCodeGeneration',
'xcosConfigureModelica',
'xcosPal',
'xcosPalAdd',
'xcosPalAddBlock',
'xcosPalExport',
'xcosPalGenerateAllIcons',
'xcosShowBlockWarning',
'xcosValidateBlockSet',
'xcosValidateCompareBlock',
'xcos_compile',
'xcos_debug_gui',
'xcos_run',
'xcos_simulate',
'xcov',
'xmltochm',
'xmltoformat',
'xmltohtml',
'xmltojar',
'xmltopdf',
'xmltops',
'xmltoweb',
'yulewalk',
'zeropen',
'zgrid',
'zpbutt',
'zpch1',
'zpch2',
'zpell',
)
variables_kw = (
'$',
'%F',
'%T',
'%e',
'%eps',
'%f',
'%fftw',
'%gui',
'%i',
'%inf',
'%io',
'%modalWarning',
'%nan',
'%pi',
'%s',
'%t',
'%tk',
'%toolboxes',
'%toolboxes_dir',
'%z',
'PWD',
'SCI',
'SCIHOME',
'TMPDIR',
'arnoldilib',
'assertlib',
'atomslib',
'cacsdlib',
'compatibility_functilib',
'corelib',
'data_structureslib',
'demo_toolslib',
'development_toolslib',
'differential_equationlib',
'dynamic_linklib',
'elementary_functionslib',
'enull',
'evoid',
'external_objectslib',
'fd',
'fileiolib',
'functionslib',
'genetic_algorithmslib',
'helptoolslib',
'home',
'integerlib',
'interpolationlib',
'iolib',
'jnull',
'jvoid',
'linear_algebralib',
'm2scilib',
'matiolib',
'modules_managerlib',
'neldermeadlib',
'optimbaselib',
'optimizationlib',
'optimsimplexlib',
'output_streamlib',
'overloadinglib',
'parameterslib',
'polynomialslib',
'preferenceslib',
'randliblib',
'scicos_autolib',
'scicos_utilslib',
'scinoteslib',
'signal_processinglib',
'simulated_annealinglib',
'soundlib',
'sparselib',
'special_functionslib',
'spreadsheetlib',
'statisticslib',
'stringlib',
'tclscilib',
'timelib',
'umfpacklib',
'xcoslib',
)
if __name__ == '__main__':  # pragma: no cover
    import subprocess
    from pygments.util import format_lines, duplicates_removed

    mapping = {'variables': 'builtin'}

    def extract_completion(var_type):
        s = subprocess.Popen(['scilab', '-nwni'], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = s.communicate('''\
fd = mopen("/dev/stderr", "wt");
mputl(strcat(completion("", "%s"), "||"), fd);
mclose(fd)\n''' % var_type)
        if '||' not in output[1]:
            raise Exception(output[0])
        # Invalid DISPLAY causes this to be output:
        text = output[1].strip()
        if text.startswith('Error: unable to open display \n'):
            text = text[len('Error: unable to open display \n'):]
        return text.split('||')

    new_data = {}
    seen = set()  # only keep first type for a given word
    for t in ('functions', 'commands', 'macros', 'variables'):
        new_data[t] = duplicates_removed(extract_completion(t), seen)
        seen.update(set(new_data[t]))

    with open(__file__) as f:
        content = f.read()

    header = content[:content.find('# Autogenerated')]
    footer = content[content.find("if __name__ == '__main__':"):]

    with open(__file__, 'w') as f:
        f.write(header)
        f.write('# Autogenerated\n\n')
        for k, v in sorted(new_data.iteritems()):
            f.write(format_lines(k + '_kw', v) + '\n\n')
        f.write(footer)
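
# Usage note (an addition, not part of the original module): the __main__
# block above regenerates the keyword tuples in place by querying Scilab's
# own completion() engine, so the lists track whichever Scilab version is
# installed. Regeneration is simply running this module directly under
# Python 2 (which the iteritems() call assumes), with `scilab -nwni`
# available on the PATH:
#
#     $ python <this_module>.py   # hypothetical invocation; the module path
#                                 # is not shown in this excerpt
#
# The script then rewrites everything between the '# Autogenerated' marker
# and the __main__ block.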
| mpl-2.0 |
muffinresearch/addons-server | apps/stats/management/commands/download_counts_from_hive.py | 18 | 1190 | from . import HiveQueryToFileCommand
class Command(HiveQueryToFileCommand):
    """Query the "download counts" requests from HIVE, save them to disk.

    The data stored locally will then be processed by the
    download_counts_from_file.py script.

    Usage:

    ./manage.py download_counts_from_hive <folder> --date YYYY-MM-DD

    If no date is specified, the default is the day before.
    If no folder is specified, the default is "hive_results". This folder is
    located in <settings.NETAPP_STORAGE>/tmp.

    Example row:

    2014-07-01 1 100157 search

    """
    help = __doc__
    filename = 'download_counts.hive'
    query = """
        SELECT
            ds,
            count(1),
            split(request_url,'/')[4],
            parse_url(concat('http://www.a.com', request_url), 'QUERY', 'src')
        FROM v2_raw_logs
        WHERE
            domain='addons.mozilla.org' AND
            ds='{day}' AND
            request_url LIKE '/%/downloads/file/%' AND
            {ip_filtering}
        GROUP BY
            ds,
            split(request_url,'/')[4],
            parse_url(concat('http://www.a.com', request_url), 'QUERY', 'src')
        {limit}
    """
| bsd-3-clause |
gem/oq-hazardlib | openquake/hmtk/plotting/seismicity/completeness/plot_stepp_1972.py | 1 | 5217 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2010-2017, GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM’s OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM’s OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
#!/usr/bin/env python
'''
Module :mod:`openquake.hmtk.plotting.seismicity.completeness.plot_stepp_1972`
creates plot to illustrate outcome of Stepp (1972) method for completeness
analysis
'''
import os.path
import numpy as np
import matplotlib.pyplot as plt
valid_markers = ['*', '+', '1', '2', '3', '4', '8', '<', '>', 'D', 'H', '^',
                 '_', 'd', 'h', 'o', 'p', 's', 'v', 'x', '|']
DEFAULT_SIZE = (8., 6.)
DEFAULT_OFFSET = (1.3, 1.0)
def create_stepp_plot(model, filename, filetype='png', filedpi=300):
    '''
    Creates the classic Stepp (1972) plots for a completed Stepp analysis,
    and exports the figure to a file.

    :param model:
        Completed Stepp (1972) analysis as instance of :class:
        'openquake.hmtk.seismicity.completeness.comp_stepp_1971.Stepp1971'
    :param string filename:
        Name of output file
    :param string filetype:
        Type of file (from list supported by matplotlib)
    :param int filedpi:
        Resolution (dots per inch) of output file
    '''
    plt.figure(figsize=DEFAULT_SIZE)
    if os.path.exists(filename):
        raise IOError('File already exists!')
    legend_list = [(str(model.magnitude_bin[iloc] + 0.01) + ' - ' +
                    str(model.magnitude_bin[iloc + 1]))
                   for iloc in range(0, len(model.magnitude_bin) - 1)]
    rgb_list = []
    marker_vals = []
    # Get marker from valid list; duplicate the marker pool until it is at
    # least as long as the number of magnitude bins (the original appended
    # the list to itself, which inserts a nested list instead of markers)
    while len(valid_markers) < len(model.magnitude_bin):
        valid_markers.extend(list(valid_markers))
    marker_sampler = np.arange(0, len(valid_markers), 1)
    np.random.shuffle(marker_sampler)
    # Get colour for each bin
    for value in range(0, len(model.magnitude_bin) - 1):
        rgb_samp = np.random.uniform(0., 1., 3)
        rgb_list.append((rgb_samp[0], rgb_samp[1], rgb_samp[2]))
        marker_vals.append(valid_markers[marker_sampler[value]])
    # Plot observed Sigma lambda
    for iloc in range(0, len(model.magnitude_bin) - 1):
        plt.loglog(model.time_values,
                   model.sigma[:, iloc],
                   linestyle='None',
                   marker=marker_vals[iloc],
                   color=rgb_list[iloc])
    lgd = plt.legend(legend_list, bbox_to_anchor=DEFAULT_OFFSET)
    plt.grid(True)
    # Plot expected Poisson rate
    for iloc in range(0, len(model.magnitude_bin) - 1):
        plt.loglog(model.time_values,
                   model.model_line[:, iloc],
                   linestyle='-',
                   marker='None',
                   color=rgb_list[iloc])
        plt.xlim(model.time_values[0] / 2., 2. * model.time_values[-1])
        xmarker = model.end_year - model.completeness_table[iloc, 0]
        id0 = model.model_line[:, iloc] > 0.
        ymarker = 10.0 ** np.interp(np.log10(xmarker),
                                    np.log10(model.time_values[id0]),
                                    np.log10(model.model_line[id0, iloc]))
        plt.loglog(xmarker, ymarker, 'ks')
    plt.xlabel('Time (years)', fontsize=15)
    plt.ylabel("$\\sigma_{\\lambda} = \\sqrt{\\lambda} / \\sqrt{T}$",
               fontsize=15)
    # Save figure to file
    plt.tight_layout()
    plt.savefig(filename, dpi=filedpi, format=filetype,
                bbox_extra_artists=(lgd,), bbox_inches="tight")
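
# A hedged usage sketch (an addition, not part of the original module). The
# `model` argument must be a *completed* Stepp (1972) analysis; assuming a
# catalogue and configuration prepared as the openquake.hmtk documentation
# describes, a call would look roughly like:
#
#     from openquake.hmtk.seismicity.completeness.comp_stepp_1971 import \
#         Stepp1971
#     stepp = Stepp1971()
#     stepp.completeness(catalogue, config)  # fills sigma, model_line, ...
#     create_stepp_plot(stepp, 'stepp_completeness.png')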
| agpl-3.0 |
google/flatbuffers | tests/MyGame/Example/Monster.py | 4 | 77583 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: Example
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# an example documentation comment: "monster object"
class Monster(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Monster()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsMonster(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def MonsterBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
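# Read-side sketch (added for illustration, not generated code): given a
# finished buffer `buf` produced by a flatbuffers.Builder, the accessors
# below are used as, e.g.:
#
#     monster = Monster.GetRootAs(buf, 0)
#     print(monster.Hp(), monster.Name())
#
# Each accessor resolves its field's vtable offset lazily, so fields absent
# from the buffer fall back to the schema defaults visible in the methods
# that follow.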
# Monster
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Monster
def Pos(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = o + self._tab.Pos
from MyGame.Example.Vec3 import Vec3
obj = Vec3()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Mana(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 150
# Monster
def Hp(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 100
# Monster
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Monster
def Inventory(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def InventoryAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def InventoryLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def InventoryIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
return o == 0
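# Illustrative note (an addition, not generated code): scalar-vector fields
# such as `inventory` generate a four-accessor family: element access, a
# numpy view, a length, and a None check. A hedged read loop over
# `m = Monster.GetRootAs(buf, 0)`:
#
#     if not m.InventoryIsNone():
#         items = [m.Inventory(j) for j in range(m.InventoryLength())]
#         # or, when numpy is importable: m.InventoryAsNumpy()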
# Monster
def Color(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 8
# Monster
def TestType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def Test(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def Test4(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
from MyGame.Example.Test import Test
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Test4Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Test4IsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
return o == 0
# Monster
def Testarrayofstring(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Monster
def TestarrayofstringLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestarrayofstringIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
return o == 0
# an example documentation comment: this will end up in the generated code
# multiline too
# Monster
def Testarrayoftables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from MyGame.Example.Monster import Monster
obj = Monster()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def TestarrayoftablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestarrayoftablesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
return o == 0
# Monster
def Enemy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from MyGame.Example.Monster import Monster
obj = Monster()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Testnestedflatbuffer(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def TestnestedflatbufferAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def TestnestedflatbufferNestedRoot(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
from MyGame.Example.Monster import Monster
return Monster.GetRootAs(self._tab.Bytes, self._tab.Vector(o))
return 0
# Monster
def TestnestedflatbufferLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestnestedflatbufferIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
return o == 0
# Monster
def Testempty(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from MyGame.Example.Stat import Stat
obj = Stat()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Testbool(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# Monster
def Testhashs32Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu32Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(38))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs64Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(40))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu64Fnv1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(42))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs32Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(44))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu32Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(46))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashs64Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(48))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Monster
def Testhashu64Fnv1a(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(50))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def Testarrayofbools(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def TestarrayofboolsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
return 0
# Monster
def TestarrayofboolsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestarrayofboolsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(52))
return o == 0
# Monster
def Testf(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(54))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 3.14159
# Monster
def Testf2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(56))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 3.0
# Monster
def Testf3(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(58))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# Monster
def Testarrayofstring2(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(60))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Monster
def Testarrayofstring2Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(60))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Testarrayofstring2IsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(60))
return o == 0
# Monster
def Testarrayofsortedstruct(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(62))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 8
from MyGame.Example.Ability import Ability
obj = Ability()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def TestarrayofsortedstructLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(62))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestarrayofsortedstructIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(62))
return o == 0
# Monster
def Flex(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def FlexAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def FlexLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def FlexIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(64))
return o == 0
# Monster
def Test5(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(66))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
from MyGame.Example.Test import Test
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def Test5Length(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(66))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def Test5IsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(66))
return o == 0
# Monster
def VectorOfLongs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfLongsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
return 0
# Monster
def VectorOfLongsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfLongsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(68))
return o == 0
# Monster
def VectorOfDoubles(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfDoublesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o)
return 0
# Monster
def VectorOfDoublesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfDoublesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(70))
return o == 0
# Monster
def ParentNamespaceTest(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(72))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from MyGame.InParentNamespace import InParentNamespace
obj = InParentNamespace()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfReferrables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from MyGame.Example.Referrable import Referrable
obj = Referrable()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfReferrablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfReferrablesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(74))
return o == 0
# Monster
def SingleWeakReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(76))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfWeakReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfWeakReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfWeakReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfWeakReferencesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(78))
return o == 0
# Monster
def VectorOfStrongReferrables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from MyGame.Example.Referrable import Referrable
obj = Referrable()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def VectorOfStrongReferrablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfStrongReferrablesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(80))
return o == 0
# Monster
def CoOwningReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(82))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfCoOwningReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfCoOwningReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfCoOwningReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfCoOwningReferencesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(84))
return o == 0
# Monster
def NonOwningReference(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(86))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Monster
def VectorOfNonOwningReferences(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Monster
def VectorOfNonOwningReferencesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# Monster
def VectorOfNonOwningReferencesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfNonOwningReferencesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(88))
return o == 0
# Monster
def AnyUniqueType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(90))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def AnyUnique(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(92))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def AnyAmbiguousType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(94))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Monster
def AnyAmbiguous(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(96))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Monster
def VectorOfEnums(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def VectorOfEnumsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def VectorOfEnumsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def VectorOfEnumsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(98))
return o == 0
# Monster
def SignedEnum(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(100))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return -1
# Monster
def Testrequirednestedflatbuffer(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Monster
def TestrequirednestedflatbufferAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Monster
def TestrequirednestedflatbufferNestedRoot(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
if o != 0:
from MyGame.Example.Monster import Monster
return Monster.GetRootAs(self._tab.Bytes, self._tab.Vector(o))
return 0
# Monster
def TestrequirednestedflatbufferLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def TestrequirednestedflatbufferIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(102))
return o == 0
# Monster
def ScalarKeySortedTables(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(104))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from MyGame.Example.Stat import Stat
obj = Stat()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Monster
def ScalarKeySortedTablesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(104))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Monster
def ScalarKeySortedTablesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(104))
return o == 0
def Start(builder): builder.StartObject(51)
def MonsterStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
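# Build-side sketch (an addition, not generated code; it assumes the
# End/MonsterEnd finalizers that this generated module defines further down):
#
#     import flatbuffers
#     builder = flatbuffers.Builder(0)
#     name = builder.CreateString('Orc')
#     Start(builder)
#     AddName(builder, name)
#     AddHp(builder, 80)
#     builder.Finish(End(builder))
#     buf = builder.Output()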
def AddPos(builder, pos): builder.PrependStructSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pos), 0)
def MonsterAddPos(builder, pos):
"""This method is deprecated. Please switch to AddPos."""
return AddPos(builder, pos)
def AddMana(builder, mana): builder.PrependInt16Slot(1, mana, 150)
def MonsterAddMana(builder, mana):
"""This method is deprecated. Please switch to AddMana."""
return AddMana(builder, mana)
def AddHp(builder, hp): builder.PrependInt16Slot(2, hp, 100)
def MonsterAddHp(builder, hp):
"""This method is deprecated. Please switch to AddHp."""
return AddHp(builder, hp)
def AddName(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def MonsterAddName(builder, name):
"""This method is deprecated. Please switch to AddName."""
return AddName(builder, name)
def AddInventory(builder, inventory): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(inventory), 0)
def MonsterAddInventory(builder, inventory):
"""This method is deprecated. Please switch to AddInventory."""
return AddInventory(builder, inventory)
def StartInventoryVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterStartInventoryVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartInventoryVector(builder, numElems)
def AddColor(builder, color): builder.PrependUint8Slot(6, color, 8)
def MonsterAddColor(builder, color):
"""This method is deprecated. Please switch to AddColor."""
return AddColor(builder, color)
def AddTestType(builder, testType): builder.PrependUint8Slot(7, testType, 0)
def MonsterAddTestType(builder, testType):
"""This method is deprecated. Please switch to AddTestType."""
return AddTestType(builder, testType)
def AddTest(builder, test): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(test), 0)
def MonsterAddTest(builder, test):
"""This method is deprecated. Please switch to AddTest."""
return AddTest(builder, test)
def AddTest4(builder, test4): builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(test4), 0)
def MonsterAddTest4(builder, test4):
"""This method is deprecated. Please switch to AddTest4."""
return AddTest4(builder, test4)
def StartTest4Vector(builder, numElems): return builder.StartVector(4, numElems, 2)
def MonsterStartTest4Vector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTest4Vector(builder, numElems)
def AddTestarrayofstring(builder, testarrayofstring): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofstring), 0)
def MonsterAddTestarrayofstring(builder, testarrayofstring):
"""This method is deprecated. Please switch to AddTestarrayofstring."""
return AddTestarrayofstring(builder, testarrayofstring)
def StartTestarrayofstringVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterStartTestarrayofstringVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTestarrayofstringVector(builder, numElems)
def AddTestarrayoftables(builder, testarrayoftables): builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayoftables), 0)
def MonsterAddTestarrayoftables(builder, testarrayoftables):
"""This method is deprecated. Please switch to AddTestarrayoftables."""
return AddTestarrayoftables(builder, testarrayoftables)
def StartTestarrayoftablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterStartTestarrayoftablesVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTestarrayoftablesVector(builder, numElems)
def AddEnemy(builder, enemy): builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(enemy), 0)
def MonsterAddEnemy(builder, enemy):
"""This method is deprecated. Please switch to AddEnemy."""
return AddEnemy(builder, enemy)
def AddTestnestedflatbuffer(builder, testnestedflatbuffer): builder.PrependUOffsetTRelativeSlot(13, flatbuffers.number_types.UOffsetTFlags.py_type(testnestedflatbuffer), 0)
def MonsterAddTestnestedflatbuffer(builder, testnestedflatbuffer):
"""This method is deprecated. Please switch to AddTestnestedflatbuffer."""
return AddTestnestedflatbuffer(builder, testnestedflatbuffer)
def StartTestnestedflatbufferVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterStartTestnestedflatbufferVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTestnestedflatbufferVector(builder, numElems)
def MakeVectorFromBytes(builder, bytes):
builder.StartVector(1, len(bytes), 1)
builder.head = builder.head - len(bytes)
builder.Bytes[builder.head : builder.head + len(bytes)] = bytes
return builder.EndVector()
def MakeTestnestedflatbufferVectorFromBytes(builder, bytes):
builder.StartVector(1, len(bytes), 1)
builder.head = builder.head - len(bytes)
builder.Bytes[builder.head : builder.head + len(bytes)] = bytes
return builder.EndVector()
def AddTestempty(builder, testempty): builder.PrependUOffsetTRelativeSlot(14, flatbuffers.number_types.UOffsetTFlags.py_type(testempty), 0)
def MonsterAddTestempty(builder, testempty):
"""This method is deprecated. Please switch to AddTestempty."""
return AddTestempty(builder, testempty)
def AddTestbool(builder, testbool): builder.PrependBoolSlot(15, testbool, 0)
def MonsterAddTestbool(builder, testbool):
"""This method is deprecated. Please switch to AddTestbool."""
return AddTestbool(builder, testbool)
def AddTesthashs32Fnv1(builder, testhashs32Fnv1): builder.PrependInt32Slot(16, testhashs32Fnv1, 0)
def MonsterAddTesthashs32Fnv1(builder, testhashs32Fnv1):
"""This method is deprecated. Please switch to AddTesthashs32Fnv1."""
return AddTesthashs32Fnv1(builder, testhashs32Fnv1)
def AddTesthashu32Fnv1(builder, testhashu32Fnv1): builder.PrependUint32Slot(17, testhashu32Fnv1, 0)
def MonsterAddTesthashu32Fnv1(builder, testhashu32Fnv1):
"""This method is deprecated. Please switch to AddTesthashu32Fnv1."""
return AddTesthashu32Fnv1(builder, testhashu32Fnv1)
def AddTesthashs64Fnv1(builder, testhashs64Fnv1): builder.PrependInt64Slot(18, testhashs64Fnv1, 0)
def MonsterAddTesthashs64Fnv1(builder, testhashs64Fnv1):
"""This method is deprecated. Please switch to AddTesthashs64Fnv1."""
return AddTesthashs64Fnv1(builder, testhashs64Fnv1)
def AddTesthashu64Fnv1(builder, testhashu64Fnv1): builder.PrependUint64Slot(19, testhashu64Fnv1, 0)
def MonsterAddTesthashu64Fnv1(builder, testhashu64Fnv1):
"""This method is deprecated. Please switch to AddTesthashu64Fnv1."""
return AddTesthashu64Fnv1(builder, testhashu64Fnv1)
def AddTesthashs32Fnv1a(builder, testhashs32Fnv1a): builder.PrependInt32Slot(20, testhashs32Fnv1a, 0)
def MonsterAddTesthashs32Fnv1a(builder, testhashs32Fnv1a):
"""This method is deprecated. Please switch to AddTesthashs32Fnv1a."""
return AddTesthashs32Fnv1a(builder, testhashs32Fnv1a)
def AddTesthashu32Fnv1a(builder, testhashu32Fnv1a): builder.PrependUint32Slot(21, testhashu32Fnv1a, 0)
def MonsterAddTesthashu32Fnv1a(builder, testhashu32Fnv1a):
"""This method is deprecated. Please switch to AddTesthashu32Fnv1a."""
return AddTesthashu32Fnv1a(builder, testhashu32Fnv1a)
def AddTesthashs64Fnv1a(builder, testhashs64Fnv1a): builder.PrependInt64Slot(22, testhashs64Fnv1a, 0)
def MonsterAddTesthashs64Fnv1a(builder, testhashs64Fnv1a):
"""This method is deprecated. Please switch to AddTesthashs64Fnv1a."""
return AddTesthashs64Fnv1a(builder, testhashs64Fnv1a)
def AddTesthashu64Fnv1a(builder, testhashu64Fnv1a): builder.PrependUint64Slot(23, testhashu64Fnv1a, 0)
def MonsterAddTesthashu64Fnv1a(builder, testhashu64Fnv1a):
"""This method is deprecated. Please switch to AddTesthashu64Fnv1a."""
return AddTesthashu64Fnv1a(builder, testhashu64Fnv1a)
def AddTestarrayofbools(builder, testarrayofbools): builder.PrependUOffsetTRelativeSlot(24, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofbools), 0)
def MonsterAddTestarrayofbools(builder, testarrayofbools):
"""This method is deprecated. Please switch to AddTestarrayofbools."""
return AddTestarrayofbools(builder, testarrayofbools)
def StartTestarrayofboolsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterStartTestarrayofboolsVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTestarrayofboolsVector(builder, numElems)
def AddTestf(builder, testf): builder.PrependFloat32Slot(25, testf, 3.14159)
def MonsterAddTestf(builder, testf):
"""This method is deprecated. Please switch to AddTestf."""
return AddTestf(builder, testf)
def AddTestf2(builder, testf2): builder.PrependFloat32Slot(26, testf2, 3.0)
def MonsterAddTestf2(builder, testf2):
"""This method is deprecated. Please switch to AddTestf2."""
return AddTestf2(builder, testf2)
def AddTestf3(builder, testf3): builder.PrependFloat32Slot(27, testf3, 0.0)
def MonsterAddTestf3(builder, testf3):
"""This method is deprecated. Please switch to AddTestf3."""
return AddTestf3(builder, testf3)
def AddTestarrayofstring2(builder, testarrayofstring2): builder.PrependUOffsetTRelativeSlot(28, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofstring2), 0)
def MonsterAddTestarrayofstring2(builder, testarrayofstring2):
"""This method is deprecated. Please switch to AddTestarrayofstring2."""
return AddTestarrayofstring2(builder, testarrayofstring2)
def StartTestarrayofstring2Vector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterStartTestarrayofstring2Vector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTestarrayofstring2Vector(builder, numElems)
def AddTestarrayofsortedstruct(builder, testarrayofsortedstruct): builder.PrependUOffsetTRelativeSlot(29, flatbuffers.number_types.UOffsetTFlags.py_type(testarrayofsortedstruct), 0)
def MonsterAddTestarrayofsortedstruct(builder, testarrayofsortedstruct):
"""This method is deprecated. Please switch to AddTestarrayofsortedstruct."""
return AddTestarrayofsortedstruct(builder, testarrayofsortedstruct)
def StartTestarrayofsortedstructVector(builder, numElems): return builder.StartVector(8, numElems, 4)
def MonsterStartTestarrayofsortedstructVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTestarrayofsortedstructVector(builder, numElems)
def AddFlex(builder, flex): builder.PrependUOffsetTRelativeSlot(30, flatbuffers.number_types.UOffsetTFlags.py_type(flex), 0)
def MonsterAddFlex(builder, flex):
"""This method is deprecated. Please switch to AddFlex."""
return AddFlex(builder, flex)
def StartFlexVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterStartFlexVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartFlexVector(builder, numElems)
def AddTest5(builder, test5): builder.PrependUOffsetTRelativeSlot(31, flatbuffers.number_types.UOffsetTFlags.py_type(test5), 0)
def MonsterAddTest5(builder, test5):
"""This method is deprecated. Please switch to AddTest5."""
return AddTest5(builder, test5)
def StartTest5Vector(builder, numElems): return builder.StartVector(4, numElems, 2)
def MonsterStartTest5Vector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTest5Vector(builder, numElems)
def AddVectorOfLongs(builder, vectorOfLongs): builder.PrependUOffsetTRelativeSlot(32, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfLongs), 0)
def MonsterAddVectorOfLongs(builder, vectorOfLongs):
"""This method is deprecated. Please switch to AddVectorOfLongs."""
return AddVectorOfLongs(builder, vectorOfLongs)
def StartVectorOfLongsVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterStartVectorOfLongsVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartVectorOfLongsVector(builder, numElems)
def AddVectorOfDoubles(builder, vectorOfDoubles): builder.PrependUOffsetTRelativeSlot(33, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfDoubles), 0)
def MonsterAddVectorOfDoubles(builder, vectorOfDoubles):
"""This method is deprecated. Please switch to AddVectorOfDoubles."""
return AddVectorOfDoubles(builder, vectorOfDoubles)
def StartVectorOfDoublesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterStartVectorOfDoublesVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartVectorOfDoublesVector(builder, numElems)
def AddParentNamespaceTest(builder, parentNamespaceTest): builder.PrependUOffsetTRelativeSlot(34, flatbuffers.number_types.UOffsetTFlags.py_type(parentNamespaceTest), 0)
def MonsterAddParentNamespaceTest(builder, parentNamespaceTest):
"""This method is deprecated. Please switch to AddParentNamespaceTest."""
return AddParentNamespaceTest(builder, parentNamespaceTest)
def AddVectorOfReferrables(builder, vectorOfReferrables): builder.PrependUOffsetTRelativeSlot(35, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfReferrables), 0)
def MonsterAddVectorOfReferrables(builder, vectorOfReferrables):
"""This method is deprecated. Please switch to AddVectorOfReferrables."""
return AddVectorOfReferrables(builder, vectorOfReferrables)
def StartVectorOfReferrablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterStartVectorOfReferrablesVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartVectorOfReferrablesVector(builder, numElems)
def AddSingleWeakReference(builder, singleWeakReference): builder.PrependUint64Slot(36, singleWeakReference, 0)
def MonsterAddSingleWeakReference(builder, singleWeakReference):
"""This method is deprecated. Please switch to AddSingleWeakReference."""
return AddSingleWeakReference(builder, singleWeakReference)
def AddVectorOfWeakReferences(builder, vectorOfWeakReferences): builder.PrependUOffsetTRelativeSlot(37, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfWeakReferences), 0)
def MonsterAddVectorOfWeakReferences(builder, vectorOfWeakReferences):
"""This method is deprecated. Please switch to AddVectorOfWeakReferences."""
return AddVectorOfWeakReferences(builder, vectorOfWeakReferences)
def StartVectorOfWeakReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterStartVectorOfWeakReferencesVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartVectorOfWeakReferencesVector(builder, numElems)
def AddVectorOfStrongReferrables(builder, vectorOfStrongReferrables): builder.PrependUOffsetTRelativeSlot(38, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfStrongReferrables), 0)
def MonsterAddVectorOfStrongReferrables(builder, vectorOfStrongReferrables):
"""This method is deprecated. Please switch to AddVectorOfStrongReferrables."""
return AddVectorOfStrongReferrables(builder, vectorOfStrongReferrables)
def StartVectorOfStrongReferrablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterStartVectorOfStrongReferrablesVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartVectorOfStrongReferrablesVector(builder, numElems)
def AddCoOwningReference(builder, coOwningReference): builder.PrependUint64Slot(39, coOwningReference, 0)
def MonsterAddCoOwningReference(builder, coOwningReference):
"""This method is deprecated. Please switch to AddCoOwningReference."""
return AddCoOwningReference(builder, coOwningReference)
def AddVectorOfCoOwningReferences(builder, vectorOfCoOwningReferences): builder.PrependUOffsetTRelativeSlot(40, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfCoOwningReferences), 0)
def MonsterAddVectorOfCoOwningReferences(builder, vectorOfCoOwningReferences):
"""This method is deprecated. Please switch to AddVectorOfCoOwningReferences."""
return AddVectorOfCoOwningReferences(builder, vectorOfCoOwningReferences)
def StartVectorOfCoOwningReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterStartVectorOfCoOwningReferencesVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartVectorOfCoOwningReferencesVector(builder, numElems)
def AddNonOwningReference(builder, nonOwningReference): builder.PrependUint64Slot(41, nonOwningReference, 0)
def MonsterAddNonOwningReference(builder, nonOwningReference):
"""This method is deprecated. Please switch to AddNonOwningReference."""
return AddNonOwningReference(builder, nonOwningReference)
def AddVectorOfNonOwningReferences(builder, vectorOfNonOwningReferences): builder.PrependUOffsetTRelativeSlot(42, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfNonOwningReferences), 0)
def MonsterAddVectorOfNonOwningReferences(builder, vectorOfNonOwningReferences):
"""This method is deprecated. Please switch to AddVectorOfNonOwningReferences."""
return AddVectorOfNonOwningReferences(builder, vectorOfNonOwningReferences)
def StartVectorOfNonOwningReferencesVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def MonsterStartVectorOfNonOwningReferencesVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartVectorOfNonOwningReferencesVector(builder, numElems)
def AddAnyUniqueType(builder, anyUniqueType): builder.PrependUint8Slot(43, anyUniqueType, 0)
def MonsterAddAnyUniqueType(builder, anyUniqueType):
"""This method is deprecated. Please switch to AddAnyUniqueType."""
return AddAnyUniqueType(builder, anyUniqueType)
def AddAnyUnique(builder, anyUnique): builder.PrependUOffsetTRelativeSlot(44, flatbuffers.number_types.UOffsetTFlags.py_type(anyUnique), 0)
def MonsterAddAnyUnique(builder, anyUnique):
"""This method is deprecated. Please switch to AddAnyUnique."""
return AddAnyUnique(builder, anyUnique)
def AddAnyAmbiguousType(builder, anyAmbiguousType): builder.PrependUint8Slot(45, anyAmbiguousType, 0)
def MonsterAddAnyAmbiguousType(builder, anyAmbiguousType):
"""This method is deprecated. Please switch to AddAnyAmbiguousType."""
return AddAnyAmbiguousType(builder, anyAmbiguousType)
def AddAnyAmbiguous(builder, anyAmbiguous): builder.PrependUOffsetTRelativeSlot(46, flatbuffers.number_types.UOffsetTFlags.py_type(anyAmbiguous), 0)
def MonsterAddAnyAmbiguous(builder, anyAmbiguous):
"""This method is deprecated. Please switch to AddAnyAmbiguous."""
return AddAnyAmbiguous(builder, anyAmbiguous)
def AddVectorOfEnums(builder, vectorOfEnums): builder.PrependUOffsetTRelativeSlot(47, flatbuffers.number_types.UOffsetTFlags.py_type(vectorOfEnums), 0)
def MonsterAddVectorOfEnums(builder, vectorOfEnums):
"""This method is deprecated. Please switch to AddVectorOfEnums."""
return AddVectorOfEnums(builder, vectorOfEnums)
def StartVectorOfEnumsVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterStartVectorOfEnumsVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartVectorOfEnumsVector(builder, numElems)
def AddSignedEnum(builder, signedEnum): builder.PrependInt8Slot(48, signedEnum, -1)
def MonsterAddSignedEnum(builder, signedEnum):
"""This method is deprecated. Please switch to AddSignedEnum."""
return AddSignedEnum(builder, signedEnum)
def AddTestrequirednestedflatbuffer(builder, testrequirednestedflatbuffer): builder.PrependUOffsetTRelativeSlot(49, flatbuffers.number_types.UOffsetTFlags.py_type(testrequirednestedflatbuffer), 0)
def MonsterAddTestrequirednestedflatbuffer(builder, testrequirednestedflatbuffer):
"""This method is deprecated. Please switch to AddTestrequirednestedflatbuffer."""
return AddTestrequirednestedflatbuffer(builder, testrequirednestedflatbuffer)
def StartTestrequirednestedflatbufferVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def MonsterStartTestrequirednestedflatbufferVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartTestrequirednestedflatbufferVector(builder, numElems)
def MakeVectorFromBytes(builder, bytes):
builder.StartVector(1, len(bytes), 1)
builder.head = builder.head - len(bytes)
builder.Bytes[builder.head : builder.head + len(bytes)] = bytes
return builder.EndVector()
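# NOTE: the MakeVectorFromBytes just above redefines the helper emitted
# earlier for Testnestedflatbuffer. The two bodies are identical, so the
# shadowing is harmless, but only the field-specific Make*VectorFromBytes
# names are unambiguous.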
def MakeTestrequirednestedflatbufferVectorFromBytes(builder, bytes):
builder.StartVector(1, len(bytes), 1)
builder.head = builder.head - len(bytes)
builder.Bytes[builder.head : builder.head + len(bytes)] = bytes
return builder.EndVector()
def AddScalarKeySortedTables(builder, scalarKeySortedTables): builder.PrependUOffsetTRelativeSlot(50, flatbuffers.number_types.UOffsetTFlags.py_type(scalarKeySortedTables), 0)
def MonsterAddScalarKeySortedTables(builder, scalarKeySortedTables):
"""This method is deprecated. Please switch to AddScalarKeySortedTables."""
return AddScalarKeySortedTables(builder, scalarKeySortedTables)
def StartScalarKeySortedTablesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MonsterStartScalarKeySortedTablesVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartScalarKeySortedTablesVector(builder, numElems)
def End(builder): return builder.EndObject()
def MonsterEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
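# A minimal sketch of the builder flow defined above, assuming this generated
# module is importable as MyGame.Example.Monster and the flatbuffers runtime
# is installed; string and vector offsets must be created before Start():
#
#   import flatbuffers
#   import MyGame.Example.Monster as Monster
#
#   builder = flatbuffers.Builder(0)
#   name = builder.CreateString("Orc")
#   Monster.Start(builder)
#   Monster.AddName(builder, name)
#   Monster.AddHp(builder, 300)
#   builder.Finish(Monster.End(builder))
#   buf = builder.Output()  # readable again via Monster.Monster.GetRootAs(buf, 0)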
import MyGame.Example.Ability
import MyGame.Example.Any
import MyGame.Example.AnyAmbiguousAliases
import MyGame.Example.AnyUniqueAliases
import MyGame.Example.Referrable
import MyGame.Example.Stat
import MyGame.Example.Test
import MyGame.Example.TestSimpleTableWithEnum
import MyGame.Example.Vec3
import MyGame.Example2.Monster
import MyGame.InParentNamespace
try:
from typing import List, Optional, Union
except ImportError:
pass
class MonsterT(object):
# MonsterT
def __init__(self):
self.pos = None # type: Optional[MyGame.Example.Vec3.Vec3T]
self.mana = 150 # type: int
self.hp = 100 # type: int
self.name = None # type: str
self.inventory = None # type: List[int]
self.color = 8 # type: int
self.testType = 0 # type: int
self.test = None # type: Union[None, MyGame.Example.Monster.MonsterT, MyGame.Example.TestSimpleTableWithEnum.TestSimpleTableWithEnumT, MyGame.Example2.Monster.MonsterT]
self.test4 = None # type: List[MyGame.Example.Test.TestT]
self.testarrayofstring = None # type: List[str]
self.testarrayoftables = None # type: List[MyGame.Example.Monster.MonsterT]
self.enemy = None # type: Optional[MyGame.Example.Monster.MonsterT]
self.testnestedflatbuffer = None # type: List[int]
self.testempty = None # type: Optional[MyGame.Example.Stat.StatT]
self.testbool = False # type: bool
self.testhashs32Fnv1 = 0 # type: int
self.testhashu32Fnv1 = 0 # type: int
self.testhashs64Fnv1 = 0 # type: int
self.testhashu64Fnv1 = 0 # type: int
self.testhashs32Fnv1a = 0 # type: int
self.testhashu32Fnv1a = 0 # type: int
self.testhashs64Fnv1a = 0 # type: int
self.testhashu64Fnv1a = 0 # type: int
self.testarrayofbools = None # type: List[bool]
self.testf = 3.14159 # type: float
self.testf2 = 3.0 # type: float
self.testf3 = 0.0 # type: float
self.testarrayofstring2 = None # type: List[str]
self.testarrayofsortedstruct = None # type: List[MyGame.Example.Ability.AbilityT]
self.flex = None # type: List[int]
self.test5 = None # type: List[MyGame.Example.Test.TestT]
self.vectorOfLongs = None # type: List[int]
self.vectorOfDoubles = None # type: List[float]
self.parentNamespaceTest = None # type: Optional[MyGame.InParentNamespace.InParentNamespaceT]
self.vectorOfReferrables = None # type: List[MyGame.Example.Referrable.ReferrableT]
self.singleWeakReference = 0 # type: int
self.vectorOfWeakReferences = None # type: List[int]
self.vectorOfStrongReferrables = None # type: List[MyGame.Example.Referrable.ReferrableT]
self.coOwningReference = 0 # type: int
self.vectorOfCoOwningReferences = None # type: List[int]
self.nonOwningReference = 0 # type: int
self.vectorOfNonOwningReferences = None # type: List[int]
self.anyUniqueType = 0 # type: int
self.anyUnique = None # type: Union[None, MyGame.Example.Monster.MonsterT, MyGame.Example.TestSimpleTableWithEnum.TestSimpleTableWithEnumT, MyGame.Example2.Monster.MonsterT]
self.anyAmbiguousType = 0 # type: int
self.anyAmbiguous = None # type: Union[None, MyGame.Example.Monster.MonsterT]
self.vectorOfEnums = None # type: List[int]
self.signedEnum = -1 # type: int
self.testrequirednestedflatbuffer = None # type: List[int]
self.scalarKeySortedTables = None # type: List[MyGame.Example.Stat.StatT]
@classmethod
def InitFromBuf(cls, buf, pos):
monster = Monster()
monster.Init(buf, pos)
return cls.InitFromObj(monster)
@classmethod
def InitFromObj(cls, monster):
x = MonsterT()
x._UnPack(monster)
return x
# MonsterT
def _UnPack(self, monster):
if monster is None:
return
if monster.Pos() is not None:
self.pos = MyGame.Example.Vec3.Vec3T.InitFromObj(monster.Pos())
self.mana = monster.Mana()
self.hp = monster.Hp()
self.name = monster.Name()
if not monster.InventoryIsNone():
if np is None:
self.inventory = []
for i in range(monster.InventoryLength()):
self.inventory.append(monster.Inventory(i))
else:
self.inventory = monster.InventoryAsNumpy()
self.color = monster.Color()
self.testType = monster.TestType()
self.test = MyGame.Example.Any.AnyCreator(self.testType, monster.Test())
if not monster.Test4IsNone():
self.test4 = []
for i in range(monster.Test4Length()):
if monster.Test4(i) is None:
self.test4.append(None)
else:
test_ = MyGame.Example.Test.TestT.InitFromObj(monster.Test4(i))
self.test4.append(test_)
if not monster.TestarrayofstringIsNone():
self.testarrayofstring = []
for i in range(monster.TestarrayofstringLength()):
self.testarrayofstring.append(monster.Testarrayofstring(i))
if not monster.TestarrayoftablesIsNone():
self.testarrayoftables = []
for i in range(monster.TestarrayoftablesLength()):
if monster.Testarrayoftables(i) is None:
self.testarrayoftables.append(None)
else:
monster_ = MyGame.Example.Monster.MonsterT.InitFromObj(monster.Testarrayoftables(i))
self.testarrayoftables.append(monster_)
if monster.Enemy() is not None:
self.enemy = MyGame.Example.Monster.MonsterT.InitFromObj(monster.Enemy())
if not monster.TestnestedflatbufferIsNone():
if np is None:
self.testnestedflatbuffer = []
for i in range(monster.TestnestedflatbufferLength()):
self.testnestedflatbuffer.append(monster.Testnestedflatbuffer(i))
else:
self.testnestedflatbuffer = monster.TestnestedflatbufferAsNumpy()
if monster.Testempty() is not None:
self.testempty = MyGame.Example.Stat.StatT.InitFromObj(monster.Testempty())
self.testbool = monster.Testbool()
self.testhashs32Fnv1 = monster.Testhashs32Fnv1()
self.testhashu32Fnv1 = monster.Testhashu32Fnv1()
self.testhashs64Fnv1 = monster.Testhashs64Fnv1()
self.testhashu64Fnv1 = monster.Testhashu64Fnv1()
self.testhashs32Fnv1a = monster.Testhashs32Fnv1a()
self.testhashu32Fnv1a = monster.Testhashu32Fnv1a()
self.testhashs64Fnv1a = monster.Testhashs64Fnv1a()
self.testhashu64Fnv1a = monster.Testhashu64Fnv1a()
if not monster.TestarrayofboolsIsNone():
if np is None:
self.testarrayofbools = []
for i in range(monster.TestarrayofboolsLength()):
self.testarrayofbools.append(monster.Testarrayofbools(i))
else:
self.testarrayofbools = monster.TestarrayofboolsAsNumpy()
self.testf = monster.Testf()
self.testf2 = monster.Testf2()
self.testf3 = monster.Testf3()
if not monster.Testarrayofstring2IsNone():
self.testarrayofstring2 = []
for i in range(monster.Testarrayofstring2Length()):
self.testarrayofstring2.append(monster.Testarrayofstring2(i))
if not monster.TestarrayofsortedstructIsNone():
self.testarrayofsortedstruct = []
for i in range(monster.TestarrayofsortedstructLength()):
if monster.Testarrayofsortedstruct(i) is None:
self.testarrayofsortedstruct.append(None)
else:
ability_ = MyGame.Example.Ability.AbilityT.InitFromObj(monster.Testarrayofsortedstruct(i))
self.testarrayofsortedstruct.append(ability_)
if not monster.FlexIsNone():
if np is None:
self.flex = []
for i in range(monster.FlexLength()):
self.flex.append(monster.Flex(i))
else:
self.flex = monster.FlexAsNumpy()
if not monster.Test5IsNone():
self.test5 = []
for i in range(monster.Test5Length()):
if monster.Test5(i) is None:
self.test5.append(None)
else:
test_ = MyGame.Example.Test.TestT.InitFromObj(monster.Test5(i))
self.test5.append(test_)
if not monster.VectorOfLongsIsNone():
if np is None:
self.vectorOfLongs = []
for i in range(monster.VectorOfLongsLength()):
self.vectorOfLongs.append(monster.VectorOfLongs(i))
else:
self.vectorOfLongs = monster.VectorOfLongsAsNumpy()
if not monster.VectorOfDoublesIsNone():
if np is None:
self.vectorOfDoubles = []
for i in range(monster.VectorOfDoublesLength()):
self.vectorOfDoubles.append(monster.VectorOfDoubles(i))
else:
self.vectorOfDoubles = monster.VectorOfDoublesAsNumpy()
if monster.ParentNamespaceTest() is not None:
self.parentNamespaceTest = MyGame.InParentNamespace.InParentNamespaceT.InitFromObj(monster.ParentNamespaceTest())
if not monster.VectorOfReferrablesIsNone():
self.vectorOfReferrables = []
for i in range(monster.VectorOfReferrablesLength()):
if monster.VectorOfReferrables(i) is None:
self.vectorOfReferrables.append(None)
else:
referrable_ = MyGame.Example.Referrable.ReferrableT.InitFromObj(monster.VectorOfReferrables(i))
self.vectorOfReferrables.append(referrable_)
self.singleWeakReference = monster.SingleWeakReference()
if not monster.VectorOfWeakReferencesIsNone():
if np is None:
self.vectorOfWeakReferences = []
for i in range(monster.VectorOfWeakReferencesLength()):
self.vectorOfWeakReferences.append(monster.VectorOfWeakReferences(i))
else:
self.vectorOfWeakReferences = monster.VectorOfWeakReferencesAsNumpy()
if not monster.VectorOfStrongReferrablesIsNone():
self.vectorOfStrongReferrables = []
for i in range(monster.VectorOfStrongReferrablesLength()):
if monster.VectorOfStrongReferrables(i) is None:
self.vectorOfStrongReferrables.append(None)
else:
referrable_ = MyGame.Example.Referrable.ReferrableT.InitFromObj(monster.VectorOfStrongReferrables(i))
self.vectorOfStrongReferrables.append(referrable_)
self.coOwningReference = monster.CoOwningReference()
if not monster.VectorOfCoOwningReferencesIsNone():
if np is None:
self.vectorOfCoOwningReferences = []
for i in range(monster.VectorOfCoOwningReferencesLength()):
self.vectorOfCoOwningReferences.append(monster.VectorOfCoOwningReferences(i))
else:
self.vectorOfCoOwningReferences = monster.VectorOfCoOwningReferencesAsNumpy()
self.nonOwningReference = monster.NonOwningReference()
if not monster.VectorOfNonOwningReferencesIsNone():
if np is None:
self.vectorOfNonOwningReferences = []
for i in range(monster.VectorOfNonOwningReferencesLength()):
self.vectorOfNonOwningReferences.append(monster.VectorOfNonOwningReferences(i))
else:
self.vectorOfNonOwningReferences = monster.VectorOfNonOwningReferencesAsNumpy()
self.anyUniqueType = monster.AnyUniqueType()
self.anyUnique = MyGame.Example.AnyUniqueAliases.AnyUniqueAliasesCreator(self.anyUniqueType, monster.AnyUnique())
self.anyAmbiguousType = monster.AnyAmbiguousType()
self.anyAmbiguous = MyGame.Example.AnyAmbiguousAliases.AnyAmbiguousAliasesCreator(self.anyAmbiguousType, monster.AnyAmbiguous())
if not monster.VectorOfEnumsIsNone():
if np is None:
self.vectorOfEnums = []
for i in range(monster.VectorOfEnumsLength()):
self.vectorOfEnums.append(monster.VectorOfEnums(i))
else:
self.vectorOfEnums = monster.VectorOfEnumsAsNumpy()
self.signedEnum = monster.SignedEnum()
if not monster.TestrequirednestedflatbufferIsNone():
if np is None:
self.testrequirednestedflatbuffer = []
for i in range(monster.TestrequirednestedflatbufferLength()):
self.testrequirednestedflatbuffer.append(monster.Testrequirednestedflatbuffer(i))
else:
self.testrequirednestedflatbuffer = monster.TestrequirednestedflatbufferAsNumpy()
if not monster.ScalarKeySortedTablesIsNone():
self.scalarKeySortedTables = []
for i in range(monster.ScalarKeySortedTablesLength()):
if monster.ScalarKeySortedTables(i) is None:
self.scalarKeySortedTables.append(None)
else:
stat_ = MyGame.Example.Stat.StatT.InitFromObj(monster.ScalarKeySortedTables(i))
self.scalarKeySortedTables.append(stat_)
# MonsterT
def Pack(self, builder):
if self.name is not None:
name = builder.CreateString(self.name)
if self.inventory is not None:
if np is not None and type(self.inventory) is np.ndarray:
inventory = builder.CreateNumpyVector(self.inventory)
else:
StartInventoryVector(builder, len(self.inventory))
for i in reversed(range(len(self.inventory))):
builder.PrependUint8(self.inventory[i])
inventory = builder.EndVector()
if self.test is not None:
test = self.test.Pack(builder)
if self.test4 is not None:
StartTest4Vector(builder, len(self.test4))
for i in reversed(range(len(self.test4))):
self.test4[i].Pack(builder)
test4 = builder.EndVector()
if self.testarrayofstring is not None:
testarrayofstringlist = []
for i in range(len(self.testarrayofstring)):
testarrayofstringlist.append(builder.CreateString(self.testarrayofstring[i]))
StartTestarrayofstringVector(builder, len(self.testarrayofstring))
for i in reversed(range(len(self.testarrayofstring))):
builder.PrependUOffsetTRelative(testarrayofstringlist[i])
testarrayofstring = builder.EndVector()
if self.testarrayoftables is not None:
testarrayoftableslist = []
for i in range(len(self.testarrayoftables)):
testarrayoftableslist.append(self.testarrayoftables[i].Pack(builder))
StartTestarrayoftablesVector(builder, len(self.testarrayoftables))
for i in reversed(range(len(self.testarrayoftables))):
builder.PrependUOffsetTRelative(testarrayoftableslist[i])
testarrayoftables = builder.EndVector()
if self.enemy is not None:
enemy = self.enemy.Pack(builder)
if self.testnestedflatbuffer is not None:
if np is not None and type(self.testnestedflatbuffer) is np.ndarray:
testnestedflatbuffer = builder.CreateNumpyVector(self.testnestedflatbuffer)
else:
StartTestnestedflatbufferVector(builder, len(self.testnestedflatbuffer))
for i in reversed(range(len(self.testnestedflatbuffer))):
builder.PrependUint8(self.testnestedflatbuffer[i])
testnestedflatbuffer = builder.EndVector()
if self.testempty is not None:
testempty = self.testempty.Pack(builder)
if self.testarrayofbools is not None:
if np is not None and type(self.testarrayofbools) is np.ndarray:
testarrayofbools = builder.CreateNumpyVector(self.testarrayofbools)
else:
StartTestarrayofboolsVector(builder, len(self.testarrayofbools))
for i in reversed(range(len(self.testarrayofbools))):
builder.PrependBool(self.testarrayofbools[i])
testarrayofbools = builder.EndVector()
if self.testarrayofstring2 is not None:
testarrayofstring2list = []
for i in range(len(self.testarrayofstring2)):
testarrayofstring2list.append(builder.CreateString(self.testarrayofstring2[i]))
StartTestarrayofstring2Vector(builder, len(self.testarrayofstring2))
for i in reversed(range(len(self.testarrayofstring2))):
builder.PrependUOffsetTRelative(testarrayofstring2list[i])
testarrayofstring2 = builder.EndVector()
if self.testarrayofsortedstruct is not None:
StartTestarrayofsortedstructVector(builder, len(self.testarrayofsortedstruct))
for i in reversed(range(len(self.testarrayofsortedstruct))):
self.testarrayofsortedstruct[i].Pack(builder)
testarrayofsortedstruct = builder.EndVector()
if self.flex is not None:
if np is not None and type(self.flex) is np.ndarray:
flex = builder.CreateNumpyVector(self.flex)
else:
StartFlexVector(builder, len(self.flex))
for i in reversed(range(len(self.flex))):
builder.PrependUint8(self.flex[i])
flex = builder.EndVector()
if self.test5 is not None:
StartTest5Vector(builder, len(self.test5))
for i in reversed(range(len(self.test5))):
self.test5[i].Pack(builder)
test5 = builder.EndVector()
if self.vectorOfLongs is not None:
if np is not None and type(self.vectorOfLongs) is np.ndarray:
vectorOfLongs = builder.CreateNumpyVector(self.vectorOfLongs)
else:
StartVectorOfLongsVector(builder, len(self.vectorOfLongs))
for i in reversed(range(len(self.vectorOfLongs))):
builder.PrependInt64(self.vectorOfLongs[i])
vectorOfLongs = builder.EndVector()
if self.vectorOfDoubles is not None:
if np is not None and type(self.vectorOfDoubles) is np.ndarray:
vectorOfDoubles = builder.CreateNumpyVector(self.vectorOfDoubles)
else:
StartVectorOfDoublesVector(builder, len(self.vectorOfDoubles))
for i in reversed(range(len(self.vectorOfDoubles))):
builder.PrependFloat64(self.vectorOfDoubles[i])
vectorOfDoubles = builder.EndVector()
if self.parentNamespaceTest is not None:
parentNamespaceTest = self.parentNamespaceTest.Pack(builder)
if self.vectorOfReferrables is not None:
vectorOfReferrableslist = []
for i in range(len(self.vectorOfReferrables)):
vectorOfReferrableslist.append(self.vectorOfReferrables[i].Pack(builder))
StartVectorOfReferrablesVector(builder, len(self.vectorOfReferrables))
for i in reversed(range(len(self.vectorOfReferrables))):
builder.PrependUOffsetTRelative(vectorOfReferrableslist[i])
vectorOfReferrables = builder.EndVector()
if self.vectorOfWeakReferences is not None:
if np is not None and type(self.vectorOfWeakReferences) is np.ndarray:
vectorOfWeakReferences = builder.CreateNumpyVector(self.vectorOfWeakReferences)
else:
StartVectorOfWeakReferencesVector(builder, len(self.vectorOfWeakReferences))
for i in reversed(range(len(self.vectorOfWeakReferences))):
builder.PrependUint64(self.vectorOfWeakReferences[i])
vectorOfWeakReferences = builder.EndVector()
if self.vectorOfStrongReferrables is not None:
vectorOfStrongReferrableslist = []
for i in range(len(self.vectorOfStrongReferrables)):
vectorOfStrongReferrableslist.append(self.vectorOfStrongReferrables[i].Pack(builder))
StartVectorOfStrongReferrablesVector(builder, len(self.vectorOfStrongReferrables))
for i in reversed(range(len(self.vectorOfStrongReferrables))):
builder.PrependUOffsetTRelative(vectorOfStrongReferrableslist[i])
vectorOfStrongReferrables = builder.EndVector()
if self.vectorOfCoOwningReferences is not None:
if np is not None and type(self.vectorOfCoOwningReferences) is np.ndarray:
vectorOfCoOwningReferences = builder.CreateNumpyVector(self.vectorOfCoOwningReferences)
else:
StartVectorOfCoOwningReferencesVector(builder, len(self.vectorOfCoOwningReferences))
for i in reversed(range(len(self.vectorOfCoOwningReferences))):
builder.PrependUint64(self.vectorOfCoOwningReferences[i])
vectorOfCoOwningReferences = builder.EndVector()
if self.vectorOfNonOwningReferences is not None:
if np is not None and type(self.vectorOfNonOwningReferences) is np.ndarray:
vectorOfNonOwningReferences = builder.CreateNumpyVector(self.vectorOfNonOwningReferences)
else:
StartVectorOfNonOwningReferencesVector(builder, len(self.vectorOfNonOwningReferences))
for i in reversed(range(len(self.vectorOfNonOwningReferences))):
builder.PrependUint64(self.vectorOfNonOwningReferences[i])
vectorOfNonOwningReferences = builder.EndVector()
if self.anyUnique is not None:
anyUnique = self.anyUnique.Pack(builder)
if self.anyAmbiguous is not None:
anyAmbiguous = self.anyAmbiguous.Pack(builder)
if self.vectorOfEnums is not None:
if np is not None and type(self.vectorOfEnums) is np.ndarray:
vectorOfEnums = builder.CreateNumpyVector(self.vectorOfEnums)
else:
StartVectorOfEnumsVector(builder, len(self.vectorOfEnums))
for i in reversed(range(len(self.vectorOfEnums))):
builder.PrependUint8(self.vectorOfEnums[i])
vectorOfEnums = builder.EndVector()
if self.testrequirednestedflatbuffer is not None:
if np is not None and type(self.testrequirednestedflatbuffer) is np.ndarray:
testrequirednestedflatbuffer = builder.CreateNumpyVector(self.testrequirednestedflatbuffer)
else:
StartTestrequirednestedflatbufferVector(builder, len(self.testrequirednestedflatbuffer))
for i in reversed(range(len(self.testrequirednestedflatbuffer))):
builder.PrependUint8(self.testrequirednestedflatbuffer[i])
testrequirednestedflatbuffer = builder.EndVector()
if self.scalarKeySortedTables is not None:
scalarKeySortedTableslist = []
for i in range(len(self.scalarKeySortedTables)):
scalarKeySortedTableslist.append(self.scalarKeySortedTables[i].Pack(builder))
StartScalarKeySortedTablesVector(builder, len(self.scalarKeySortedTables))
for i in reversed(range(len(self.scalarKeySortedTables))):
builder.PrependUOffsetTRelative(scalarKeySortedTableslist[i])
scalarKeySortedTables = builder.EndVector()
Start(builder)
if self.pos is not None:
pos = self.pos.Pack(builder)
AddPos(builder, pos)
AddMana(builder, self.mana)
AddHp(builder, self.hp)
if self.name is not None:
AddName(builder, name)
if self.inventory is not None:
AddInventory(builder, inventory)
AddColor(builder, self.color)
AddTestType(builder, self.testType)
if self.test is not None:
AddTest(builder, test)
if self.test4 is not None:
AddTest4(builder, test4)
if self.testarrayofstring is not None:
AddTestarrayofstring(builder, testarrayofstring)
if self.testarrayoftables is not None:
AddTestarrayoftables(builder, testarrayoftables)
if self.enemy is not None:
AddEnemy(builder, enemy)
if self.testnestedflatbuffer is not None:
AddTestnestedflatbuffer(builder, testnestedflatbuffer)
if self.testempty is not None:
AddTestempty(builder, testempty)
AddTestbool(builder, self.testbool)
AddTesthashs32Fnv1(builder, self.testhashs32Fnv1)
AddTesthashu32Fnv1(builder, self.testhashu32Fnv1)
AddTesthashs64Fnv1(builder, self.testhashs64Fnv1)
AddTesthashu64Fnv1(builder, self.testhashu64Fnv1)
AddTesthashs32Fnv1a(builder, self.testhashs32Fnv1a)
AddTesthashu32Fnv1a(builder, self.testhashu32Fnv1a)
AddTesthashs64Fnv1a(builder, self.testhashs64Fnv1a)
AddTesthashu64Fnv1a(builder, self.testhashu64Fnv1a)
if self.testarrayofbools is not None:
AddTestarrayofbools(builder, testarrayofbools)
AddTestf(builder, self.testf)
AddTestf2(builder, self.testf2)
AddTestf3(builder, self.testf3)
if self.testarrayofstring2 is not None:
AddTestarrayofstring2(builder, testarrayofstring2)
if self.testarrayofsortedstruct is not None:
AddTestarrayofsortedstruct(builder, testarrayofsortedstruct)
if self.flex is not None:
AddFlex(builder, flex)
if self.test5 is not None:
AddTest5(builder, test5)
if self.vectorOfLongs is not None:
AddVectorOfLongs(builder, vectorOfLongs)
if self.vectorOfDoubles is not None:
AddVectorOfDoubles(builder, vectorOfDoubles)
if self.parentNamespaceTest is not None:
AddParentNamespaceTest(builder, parentNamespaceTest)
if self.vectorOfReferrables is not None:
AddVectorOfReferrables(builder, vectorOfReferrables)
AddSingleWeakReference(builder, self.singleWeakReference)
if self.vectorOfWeakReferences is not None:
AddVectorOfWeakReferences(builder, vectorOfWeakReferences)
if self.vectorOfStrongReferrables is not None:
AddVectorOfStrongReferrables(builder, vectorOfStrongReferrables)
AddCoOwningReference(builder, self.coOwningReference)
if self.vectorOfCoOwningReferences is not None:
AddVectorOfCoOwningReferences(builder, vectorOfCoOwningReferences)
AddNonOwningReference(builder, self.nonOwningReference)
if self.vectorOfNonOwningReferences is not None:
AddVectorOfNonOwningReferences(builder, vectorOfNonOwningReferences)
AddAnyUniqueType(builder, self.anyUniqueType)
if self.anyUnique is not None:
AddAnyUnique(builder, anyUnique)
AddAnyAmbiguousType(builder, self.anyAmbiguousType)
if self.anyAmbiguous is not None:
AddAnyAmbiguous(builder, anyAmbiguous)
if self.vectorOfEnums is not None:
AddVectorOfEnums(builder, vectorOfEnums)
AddSignedEnum(builder, self.signedEnum)
if self.testrequirednestedflatbuffer is not None:
AddTestrequirednestedflatbuffer(builder, testrequirednestedflatbuffer)
if self.scalarKeySortedTables is not None:
AddScalarKeySortedTables(builder, scalarKeySortedTables)
monster = End(builder)
return monster
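# A short sketch of the object API round trip, assuming the generated package
# layout MyGame.Example.Monster that this file's own imports rely on:
#
#   import flatbuffers
#   from MyGame.Example.Monster import Monster, MonsterT
#
#   mon = MonsterT()
#   mon.name = "Orc"
#   mon.hp = 300
#   builder = flatbuffers.Builder(0)
#   builder.Finish(mon.Pack(builder))
#   again = MonsterT.InitFromObj(Monster.GetRootAs(builder.Output(), 0))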
| apache-2.0 |
RobSpectre/Caesar-Cipher | setup.py | 1 | 1192 | from caesarcipher import __version__
scripts = ['bin/caesarcipher']
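# (Unused here: setup_args below declares its own 'scripts' entry.)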
setup_args = {
'name': 'caesarcipher',
'version': __version__,
'url': 'https://github.com/RobSpectre/Caesar-Cipher',
'description': 'A Python package and command line script for encoding, '
'decoding and cracking Caesar ciphers.',
'long_description': open('README.rst').read(),
'author': 'Rob Spectre',
'author_email': 'rob@brooklynhacker.com',
'license': 'MIT',
'packages': ['caesarcipher', 'tests'],
'scripts': ['bin/caesarcipher'],
'include_package_data': True,
'classifiers': [
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Environment :: Console',
'Topic :: Security :: Cryptography',
]
}
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(**setup_args)
| mit |
hradec/cortex | test/IECore/LRUCacheTest.py | 3 | 6773 | ##########################################################################
#
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import time
import IECore
class LRUCacheTest( unittest.TestCase ) :
def test( self ) :
self.numGetterCalls = 0
def getter( key ) :
self.numGetterCalls += 1
return (
# value
{
"same" : key,
"times2" : key * 2,
"times4" : key * 4,
},
# cost
1
)
c = IECore.LRUCache( getter, 10 )
self.assertEqual( c.getMaxCost(), 10 )
c.setMaxCost( 20 )
self.assertEqual( c.getMaxCost(), 20 )
c.setMaxCost( 10 )
self.assertEqual( c.getMaxCost(), 10 )
v = c.get( 10 )
self.assertEqual( v,
{
"same" : 10,
"times2" : 20,
"times4" : 40,
}
)
self.assertEqual( c.currentCost(), 1 )
self.assertEqual( self.numGetterCalls, 1 )
v2 = c.get( 10 )
self.assertTrue( v2 is v )
self.assertEqual( c.currentCost(), 1 )
self.assertEqual( self.numGetterCalls, 1 )
for k in range( 11, 10000 ) :
v = c.get( k )
self.assertEqual( v,
{
"same" : k,
"times2" : k * 2,
"times4" : k * 4,
}
)
self.assertFalse( c.currentCost() > 10 )
def testClearCausesReloads( self ) :
self.numGetterCalls = 0
self.multiplier = 2
def getter( key ) :
self.numGetterCalls += 1
return ( key * self.multiplier, 1 )
c = IECore.LRUCache( getter, 10 )
v = c.get( 10 )
self.assertEqual( v, 20 )
self.assertEqual( self.numGetterCalls, 1 )
v = c.get( 10 )
self.assertEqual( v, 20 )
self.assertEqual( self.numGetterCalls, 1 )
c.clear()
self.multiplier = 4
v = c.get( 10 )
self.assertEqual( v, 40 )
self.assertEqual( self.numGetterCalls, 2 )
def testThreadingAndLimitCost( self ) :
def getter( key ) :
return ( key * 2, 1 )
c = IECore.LRUCache( getter, 10 )
def thrash() :
for i in range( 0, 10000 ) :
v = c.get( i )
self.assertEqual( v, i * 2 )
threads = []
for i in range( 0, 10 ) :
thread = threading.Thread( target=thrash )
threads.append( thread )
thread.start()
for thread in threads :
thread.join()
def testThreadingAndClear( self ) :
def getter( key ) :
return ( key * 2, 1 )
c = IECore.LRUCache( getter, 100000 )
def f1() :
for i in range( 0, 10000 ) :
v = c.get( i )
self.assertEqual( v, i * 2 )
def f2() :
for i in range( 0, 10000 ) :
c.clear()
t1 = threading.Thread( target=f1 )
t2 = threading.Thread( target=f1 )
t3 = threading.Thread( target=f2 )
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
c.clear()
self.assertEqual( c.currentCost(), 0 )
def testYieldGILInGetter( self ) :
def getter( key ) :
# This sleep simulates the GIL being yielded for some
# reason - in the real world perhaps an Op call or just
# the Python interpreter deciding to switch threads.
time.sleep( 0.1 )
return ( key, 1 )
c = IECore.LRUCache( getter, 100000 )
def f() :
c.get( 0 )
t1 = threading.Thread( target=f )
t2 = threading.Thread( target=f )
t1.start()
t2.start()
t1.join()
t2.join()
def testRemovalCallback( self ) :
def getter( key ) :
return ( key * 2, 1 )
removed = []
def removalCallback( key, value ) :
removed.append( ( key, value ) )
c = IECore.LRUCache( getter, removalCallback, 5 )
self.assertEqual( c.get( 1 ), 2 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 2 ), 4 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 3 ), 6 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 4 ), 8 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 5 ), 10 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 6 ), 12 )
self.assertEqual( removed, [ ( 1, 2 ) ] )
self.assertEqual( c.get( 7 ), 14 )
self.assertEqual( removed, [ ( 1, 2 ), ( 2, 4 ) ] )
c.clear()
self.assertEqual( len( removed ), 7 )
keys = [ x[0] for x in removed ]
for i in range( 1, 8 ) :
self.assertIn( i, keys )
def testSet( self ) :
def getter( key ) :
return ( None, 1 )
c = IECore.LRUCache( getter, 1000 )
c.set( 5, 10, 1 )
self.assertEqual( c.currentCost(), 1 )
self.assertEqual( c.get( 5 ), 10 )
self.assertEqual( c.currentCost(), 1 )
c.set( 5, 20, 100000 )
self.assertEqual( c.currentCost(), 0 )
self.assertEqual( c.get( 5 ), None )
self.assertEqual( c.currentCost(), 1 )
def testCPPThreading( self ) :
# Arguments are: iterations, number of unique values, maximum cost, clear frequency.
# cache exactly the right size
IECore.testLRUCacheThreading( 100000, 100, 100 )
# cache not quite big enough
IECore.testLRUCacheThreading( 100000, 100, 90 )
# cache thrashing like crazy
IECore.testLRUCacheThreading( 100000, 1000, 2 )
# clearing all the time while doing concurrent lookups
IECore.testLRUCacheThreading( 100000, 1000, 90, 20 )
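# The getter contract exercised throughout: callables passed to IECore.LRUCache
# return a ( value, cost ) pair, and entries are evicted least-recently-used
# once the summed cost exceeds the constructor's limit. A minimal sketch:
#
#   squares = IECore.LRUCache( lambda key : ( key * key, 1 ), 100 )
#   assert squares.get( 3 ) == 9  # computed once, then served from the cache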
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
Menooker/gem5_pcm | configs/ruby/MESI_Three_Level.py | 6 | 8609 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
# Nilay Vaish
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L0Cache(RubyCache):
latency = 1
class L1Cache(RubyCache):
latency = 5
#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
latency = 15
def define_options(parser):
parser.add_option("--num-clusters", type="int", default=1,
help="number of clusters in a design in which there are shared\
caches private to clusters")
return
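# For example, --num-cpus=8 --num-clusters=2 --num-l2caches=4 yields four CPUs
# and two L2 slices per cluster; both divisions must be exact or the asserts
# in create_system() fail.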
def create_system(options, system, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MESI_Three_Level':
fatal("This script requires the MESI_Three_Level protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
l0_cntrl_nodes = []
l1_cntrl_nodes = []
l2_cntrl_nodes = []
dir_cntrl_nodes = []
dma_cntrl_nodes = []
assert (options.num_cpus % options.num_clusters == 0)
num_cpus_per_cluster = options.num_cpus / options.num_clusters
assert (options.num_l2caches % options.num_clusters == 0)
num_l2caches_per_cluster = options.num_l2caches / options.num_clusters
l2_bits = int(math.log(num_l2caches_per_cluster, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
l2_index_start = block_size_bits + l2_bits
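# Worked example: with the default 64-byte cache lines and four L2 slices per
# cluster, block_size_bits = log2(64) = 6 and l2_bits = log2(4) = 2, so L2 set
# indexing starts at bit 8 of the address.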
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
for i in xrange(options.num_clusters):
for j in xrange(num_cpus_per_cluster):
#
# First create the Ruby objects associated with this cpu
#
l0i_cache = L0Cache(size = '4096B', assoc = 1, is_icache = True,
start_index_bit = block_size_bits, replacement_policy="LRU")
l0d_cache = L0Cache(size = '4096B', assoc = 1, is_icache = False,
start_index_bit = block_size_bits, replacement_policy="LRU")
l0_cntrl = L0Cache_Controller(version = i*num_cpus_per_cluster + j,
Icache = l0i_cache, Dcache = l0d_cache,
send_evictions = (options.cpu_type == "detailed"),
ruby_system = ruby_system)
cpu_seq = RubySequencer(version = i, icache = l0i_cache,
dcache = l0d_cache, ruby_system = ruby_system)
l0_cntrl.sequencer = cpu_seq
l1_cache = L1Cache(size = options.l1d_size, assoc = options.l1d_assoc,
start_index_bit = block_size_bits, is_icache = False)
l1_cntrl = L1Cache_Controller(version = i*num_cpus_per_cluster+j,
cache = l1_cache, l2_select_num_bits = l2_bits,
cluster_id = i, ruby_system = ruby_system)
exec("ruby_system.l0_cntrl%d = l0_cntrl" % (
i*num_cpus_per_cluster+j))
exec("ruby_system.l1_cntrl%d = l1_cntrl" % (
i*num_cpus_per_cluster+j))
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l0_cntrl_nodes.append(l0_cntrl)
l1_cntrl_nodes.append(l1_cntrl)
l0_cntrl.peer = l1_cntrl
for j in xrange(num_l2caches_per_cluster):
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = l2_index_start)
l2_cntrl = L2Cache_Controller(
version = i * num_l2caches_per_cluster + j,
L2cache = l2_cache, cluster_id = i,
transitions_per_cycle=options.ports,
ruby_system = ruby_system)
exec("ruby_system.l2_cntrl%d = l2_cntrl" % (
i * num_l2caches_per_cluster + j))
l2_cntrl_nodes.append(l2_cntrl)
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# The clk_divider value is a fix to keep regression tests passing.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
for i in xrange(options.num_dirs):
#
# Create the Ruby objects associated with the directory controller
#
mem_cntrl = RubyMemoryControl(
clk_domain = ruby_system.memctrl_clk_domain,
version = i,
ruby_system = ruby_system)
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
directory = \
RubyDirectoryMemory(version = i,
size = dir_size,
use_map =
options.use_map),
memBuffer = mem_cntrl,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
all_cntrls = l0_cntrl_nodes + \
l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
| bsd-3-clause |
2014cdbg14/2014cdbg14 | wsgi/static/Brython2.1.0-20140419-113919/Lib/abc.py | 765 | 8057 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""
A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
'abstractclassmethod' is deprecated. Use 'classmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""
A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
'abstractstaticmethod' is deprecated. Use 'staticmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
instead.
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return subclass # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
return subclass
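# Illustrative sketch (not part of the original module): after
# registration, issubclass()/isinstance() succeed without real
# inheritance:
#
#     class MyABC(metaclass=ABCMeta):
#         pass
#
#     MyABC.register(tuple)
#     assert issubclass(tuple, MyABC)
#     assert isinstance((), MyABC)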
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
| gpl-2.0 |
norayr/unisubs | apps/teams/migrations/0007_auto__add_field_team_video__add_field_team_video_description__add_fiel.py | 5 | 12312 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Team.video'
db.add_column('teams_team', 'video', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='intro_for_teams', null=True, to=orm['videos.Video']), keep_default=False)
def backwards(self, orm):
# Deleting field 'Team.video'
db.delete_column('teams_team', 'video_id')
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Application'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.invite': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Invite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_invitations'", 'to': "orm['auth.CustomUser']"})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.CustomUser']", 'through': "orm['teams.Invite']", 'symmetrical': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.teamvideolanguage': {
'Meta': {'object_name': 'TeamVideoLanguage'},
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'languages'", 'to': "orm['teams.TeamVideo']"})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'bliptv_fileid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bliptv_flv_url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'dailymotion_videoid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'video_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '2048', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vimeo_videoid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'youtube_videoid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['teams']
| agpl-3.0 |
gh0std4ncer/thug | src/DOM/Plugins.py | 7 | 1540 | #!/usr/bin/env python
#
# Plugins.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
from .Plugin import Plugin
class Plugins(list):
def __init__(self):
list.__init__(self)
@property
def length(self):
return len(self)
def __getattr__(self, key):
return self.namedItem(key)
def __getitem__(self, key):
try:
key = int(key)
return self.item(key)
except (ValueError, TypeError):
return self.namedItem(key)
def item(self, index):
if index >= self.length:
#return Plugin()
return None
return list.__getitem__(self, index)
def namedItem(self, name):
index = 0
while index < self.length:
p = self.item(index)
if p['name'] == name:
return p
index += 1
#return Plugin()
return None
def refresh(self, reloadDocuments = False):
pass
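# Illustrative usage sketch (names are assumptions, mirroring the
# browser's navigator.plugins array that this class emulates):
#
#     plugins = Plugins()
#     plugins.append({'name': 'Shockwave Flash'})
#     plugins.item(0)                        # lookup by index
#     plugins.namedItem('Shockwave Flash')   # lookup by name
#     plugins['Shockwave Flash']             # __getitem__ falls back to namedItem()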
| gpl-2.0 |
jusdng/odoo | addons/account/project/__init__.py | 427 | 1100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
oblique-labs/pyVM | rpython/rlib/rvmprof/__init__.py | 1 | 1786 | from rpython.rlib.objectmodel import specialize
from rpython.rlib.rvmprof.rvmprof import _get_vmprof, VMProfError
from rpython.rlib.rvmprof.rvmprof import vmprof_execute_code, MAX_FUNC_NAME
from rpython.rlib.rvmprof.rvmprof import _was_registered
from rpython.rlib.rvmprof.cintf import VMProfPlatformUnsupported
from rpython.rtyper.lltypesystem import rffi
#
# See README.txt.
#
#vmprof_execute_code(): implemented directly in rvmprof.py
def register_code_object_class(CodeClass, full_name_func):
_get_vmprof().register_code_object_class(CodeClass, full_name_func)
@specialize.argtype(0)
def register_code(code, name):
_get_vmprof().register_code(code, name)
@specialize.call_location()
def get_unique_id(code):
"""Return the internal unique ID of a code object. Can only be
called after register_code(). Call this in the jitdriver's
method 'get_unique_id(*greenkey)'. This always returns 0 if we
didn't call register_code_object_class() on the class.
"""
assert code is not None
if _was_registered(code.__class__):
# '0' can occur here too, if the code object was prebuilt,
# or if register_code() was not called for another reason.
return code._vmprof_unique_id
return 0
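# Illustrative sketch (assumed wiring, not part of this module): the
# docstring above suggests calling get_unique_id() from a JitDriver's
# get_unique_id hook, e.g.:
#
#     from rpython.rlib.jit import JitDriver
#     from rpython.rlib import rvmprof
#
#     def _get_unique_id(code):            # greens are (code,)
#         return rvmprof.get_unique_id(code)
#
#     jitdriver = JitDriver(greens=['code'], reds='auto',
#                           get_unique_id=_get_unique_id)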
def enable(fileno, interval, memory=0, native=0):
_get_vmprof().enable(fileno, interval, memory, native)
def disable():
_get_vmprof().disable()
def is_enabled():
vmp = _get_vmprof()
return vmp.is_enabled
def get_profile_path(space):
vmp = _get_vmprof()
if not vmp.is_enabled:
return None
with rffi.scoped_alloc_buffer(4096) as buf:
length = vmp.cintf.vmprof_get_profile_path(buf.raw, buf.size)
if length == -1:
return ""
return buf.str(length)
return None
| mit |
havard024/prego | crm/lib/python2.7/site-packages/tests/test_build_latex.py | 7 | 3216 | # -*- coding: utf-8 -*-
"""
test_build_latex
~~~~~~~~~~~~~~~~
Test the build process with the LaTeX builder on the test root.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
from StringIO import StringIO
from subprocess import Popen, PIPE
from sphinx.writers.latex import LaTeXTranslator
from util import test_root, SkipTest, remove_unicode_literals, with_app
from test_build_html import ENV_WARNINGS
def teardown_module():
(test_root / '_build').rmtree(True)
latex_warnfile = StringIO()
LATEX_WARNINGS = ENV_WARNINGS + """\
None:None: WARNING: citation not found: missing
None:None: WARNING: no matching candidate for image URI u'foo.\\*'
WARNING: invalid pair index entry u''
WARNING: invalid pair index entry u'keyword; '
"""
if sys.version_info >= (3, 0):
LATEX_WARNINGS = remove_unicode_literals(LATEX_WARNINGS)
@with_app(buildername='latex', warning=latex_warnfile, cleanenv=True)
def test_latex(app):
LaTeXTranslator.ignore_missing_images = True
app.builder.build_all()
latex_warnings = latex_warnfile.getvalue().replace(os.sep, '/')
latex_warnings_exp = LATEX_WARNINGS % {
'root': re.escape(app.srcdir.replace(os.sep, '/'))}
assert re.match(latex_warnings_exp + '$', latex_warnings), \
'Warnings don\'t match:\n' + \
'--- Expected (regex):\n' + latex_warnings_exp + \
'--- Got:\n' + latex_warnings
# file from latex_additional_files
assert (app.outdir / 'svgimg.svg').isfile()
# only run latex if all needed packages are there
def kpsetest(filename):
try:
p = Popen(['kpsewhich', filename], stdout=PIPE)
except OSError:
# no kpsewhich... either no tex distribution is installed or it is
# a "strange" one -- don't bother running latex
return None
else:
p.communicate()
if p.returncode != 0:
# not found
return False
# found
return True
if kpsetest('article.sty') is None:
raise SkipTest('not running latex, it doesn\'t seem to be installed')
for filename in ['fancyhdr.sty', 'fancybox.sty', 'titlesec.sty',
'amsmath.sty', 'framed.sty', 'color.sty', 'fancyvrb.sty',
'threeparttable.sty']:
if not kpsetest(filename):
raise SkipTest('not running latex, the %s package doesn\'t '
'seem to be installed' % filename)
# now, try to run latex over it
cwd = os.getcwd()
os.chdir(app.outdir)
try:
try:
p = Popen(['pdflatex', '--interaction=nonstopmode',
'SphinxTests.tex'], stdout=PIPE, stderr=PIPE)
except OSError:
pass # most likely pdflatex was not found
else:
stdout, stderr = p.communicate()
if p.returncode != 0:
print stdout
print stderr
del app.cleanup_trees[:]
assert False, 'latex exited with return code %s' % p.returncode
finally:
os.chdir(cwd)
| mit |
lqhuang/SAXS-tools | dashboard/base.py | 1 | 1079 | from __future__ import print_function, division
import dash
import dash_core_components as dcc
import dash_html_components as html
from webapp.app import flask_app, DASH_URL_BASE
dash_app = dash.Dash(
__name__, server=flask_app, url_base_pathname=DASH_URL_BASE)
# flask_app = dash_app.server
dash_app.config.update({
# Since we're adding callbacks to elements that don't exist in the
# app.layout, Dash will raise an exception to warn us that we might
# be doing something wrong. In this case, we're adding the elements
# through a callback, so we can ignore the exception.
'suppress_callback_exceptions': True,
})
dash_app.css.config.serve_locally = True
dash_app.scripts.config.serve_locally = True
dash_app.layout = html.Div(children=[
# FIXME: fix this hard-coded path (/static)
html.Link(rel='stylesheet', href='/static/css/bWLwgP.css'),
html.Link(rel='stylesheet', href='/static/css/dash.css'),
dcc.Location(id='url', refresh=False),
html.Div(id='page-info', style={'display': 'none'}),
html.Div(id='page-content'),
])
| gpl-3.0 |
SmallLars/cryptominisat | tests/simp-checks/checks.py | 1 | 3483 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Mate Soos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from __future__ import print_function
import os
import glob
import sys
import optparse
import subprocess
import time
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
def format_description(self, description):
if description:
return description + "\n"
else:
return ""
usage = "usage: %prog [options] cryptominisat4-binary testfile(s)"
desc = """Test solver against some problems
"""
parser = optparse.OptionParser(usage=usage, description=desc,
formatter=PlainHelpFormatter())
parser.add_option("--verbose", "-v", action="store_true", default=False,
dest="verbose", help="Print more output")
(options, args) = parser.parse_args()
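# Illustrative invocation (paths are assumptions; the script also expects a
# MiniSat binary at ./minisat/build/release/bin/minisat, see below):
#
#     ./checks.py ../../build/cryptominisat4 testfiles/*.cnf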
if len(args) < 1:
print("ERROR: You must call this script with at least one argument, the cryptominisat4 binary")
exit(-1)
if len(args) < 2:
print("ERROR: You must call this script with at least one file to check")
exit(-1)
cms4_exe = args[0]
if not os.path.isfile(cms4_exe):
print("CryptoMiniSat executable you gave, '%s' is not a file. Exiting" % cms4_exe)
exit(-1)
if not os.access(cms4_exe, os.X_OK):
print("CryptoMiniSat executable you gave, '%s' is not executable. Exiting." % cms4_exe)
exit(-1)
def test_velim_one_file(fname):
simp_fname = "simp.out"
try:
os.unlink(simp_fname)
except OSError:
pass
toexec = "%s --zero-exit-status -p1 %s %s" % (cms4_exe, fname, simp_fname)
print("Executing: %s" % toexec)
start = time.time()
cms_out_fname = "cms-%s.out" % os.path.split(fname)[1]
with open(cms_out_fname, "w") as f:
subprocess.check_call(toexec.split(), stdout=f)
t_cms = time.time()-start
start = time.time()
with open("minisat_elim_data.out", "w") as f:
subprocess.check_call([minisat_exe, simp_fname], stdout=f)
t_msat = time.time()-start
var_elimed = None
with open("minisat_elim_data.out", "r") as f:
for line in f:
line = line.strip()
if "num-vars-eliminated" in line:
var_elimed = int(line.split()[1])
assert var_elimed is not None, "Couldn't find var-elimed line"
if var_elimed > 30:
print("FAILED file %s" % fname)
exitnum = 1
else:
print("PASSED file %s" % fname)
exitnum = 0
print("-> T-cms: %.2f T-msat: %.2f msat-bve: %d\n" % (t_cms, t_msat, var_elimed))
return exitnum
minisat_exe = os.getcwd() + "/minisat/build/release/bin/minisat"
exitnum = 0
for fname in args[1:]:
exitnum |= test_velim_one_file(fname)
if exitnum == 0:
print("ALL PASSED")
subprocess.check_call("rm *.out", shell=True)
else:
print("SOME CHECKS FAILED")
exit(exitnum)
| lgpl-3.0 |
yangjae/grpc | src/python/src/grpc/framework/face/testing/blocking_invocation_inline_service_test_case.py | 17 | 9079 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A test to verify an implementation of the Face layer of RPC Framework."""
# unittest is referenced by the test specification in this module.
import abc
import unittest # pylint: disable=unused-import
from grpc.framework.face import exceptions
from grpc.framework.face.testing import control
from grpc.framework.face.testing import coverage
from grpc.framework.face.testing import digest
from grpc.framework.face.testing import stock_service
from grpc.framework.face.testing import test_case
_TIMEOUT = 3
_LONG_TIMEOUT = 45
class BlockingInvocationInlineServiceTestCase(
test_case.FaceTestCase, coverage.BlockingCoverage):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must also extend unittest.TestCase.
"""
__metaclass__ = abc.ABCMeta
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self.control = control.PauseFailControl()
self.digest = digest.digest(
stock_service.STOCK_TEST_SERVICE, self.control, None)
self.stub, self.memo = self.set_up_implementation(
self.digest.name, self.digest.methods,
self.digest.inline_method_implementations, None)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self.tear_down_implementation(self.memo)
def testSuccessfulUnaryRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
response = self.stub.blocking_value_in_value_out(
name, request, _LONG_TIMEOUT)
test_messages.verify(request, response, self)
def testSuccessfulUnaryRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self.stub.inline_value_in_stream_out(
name, request, _LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
response = self.stub.blocking_stream_in_value_out(
name, iter(requests), _LONG_TIMEOUT)
test_messages.verify(requests, response, self)
def testSuccessfulStreamRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
response_iterator = self.stub.inline_stream_in_stream_out(
name, iter(requests), _LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response = self.stub.blocking_value_in_value_out(
name, first_request, _TIMEOUT)
test_messages.verify(first_request, first_response, self)
second_response = self.stub.blocking_value_in_value_out(
name, second_request, _TIMEOUT)
test_messages.verify(second_request, second_response, self)
def testExpiredUnaryRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self.control.pause(), self.assertRaises(
exceptions.ExpirationError):
multi_callable = self.stub.unary_unary_multi_callable(name)
multi_callable(request, _TIMEOUT)
def testExpiredUnaryRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self.control.pause(), self.assertRaises(
exceptions.ExpirationError):
response_iterator = self.stub.inline_value_in_stream_out(
name, request, _TIMEOUT)
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self.control.pause(), self.assertRaises(
exceptions.ExpirationError):
multi_callable = self.stub.stream_unary_multi_callable(name)
multi_callable(iter(requests), _TIMEOUT)
def testExpiredStreamRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self.control.pause(), self.assertRaises(
exceptions.ExpirationError):
response_iterator = self.stub.inline_stream_in_stream_out(
name, iter(requests), _TIMEOUT)
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self.control.fail(), self.assertRaises(exceptions.ServicerError):
self.stub.blocking_value_in_value_out(name, request, _TIMEOUT)
def testFailedUnaryRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self.control.fail(), self.assertRaises(exceptions.ServicerError):
response_iterator = self.stub.inline_value_in_stream_out(
name, request, _TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for name, test_messages_sequence in (
self.digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self.control.fail(), self.assertRaises(exceptions.ServicerError):
self.stub.blocking_stream_in_value_out(name, iter(requests), _TIMEOUT)
def testFailedStreamRequestStreamResponse(self):
for name, test_messages_sequence in (
self.digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self.control.fail(), self.assertRaises(exceptions.ServicerError):
response_iterator = self.stub.inline_stream_in_stream_out(
name, iter(requests), _TIMEOUT)
list(response_iterator)
| bsd-3-clause |
STANAPO/slack_bot | slack_bot/plugins/orz.py | 5 | 1618 | # -*-coding:utf-8-*-
"""
Copyright (c) 2012 wong2 <wonderfuly@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# "Come worship me" plugin
import random
description = """
Come and worship me! Triggers: "膜拜 | orz" (also in private chat). For example:
* 膜拜
* orz
"""
def test(data):
message = data['message']
for word in ['膜拜', 'orz']:
if word in message:
return True
return False
def handle(data):
mobai_icon = ':mb:'
return mobai_icon * random.randrange(1, 10)
if __name__ == '__main__':
print test({'message': 'orz'})
print test({'message': 'rz'})
print handle({'message': '来膜拜'})
| mit |
zhuoyang/kernel | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
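# build_ext/install_lib are subclassed below so that the build and install
# locations come from the PYTHON_EXTBUILD_LIB/PYTHON_EXTBUILD_TMP environment
# variables (typically exported by the perf Makefile).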
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
bswartz/manila | manila/share/drivers/dell_emc/driver.py | 1 | 6999 | # Copyright (c) 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
EMC specific NAS storage driver. This driver is a pluggable driver
that allows specific EMC NAS devices to be plugged in as the underlying
backend. Use the Manila configuration variable "emc_share_backend"
to specify which backend plugin to use.
"""
from oslo_config import cfg
from manila.share import driver
from manila.share.drivers.dell_emc import plugin_manager as manager
EMC_NAS_OPTS = [
cfg.StrOpt('emc_nas_login',
help='User name for the EMC server.'),
cfg.StrOpt('emc_nas_password',
help='Password for the EMC server.'),
cfg.HostAddressOpt('emc_nas_server',
help='EMC server hostname or IP address.'),
cfg.PortOpt('emc_nas_server_port',
default=8080,
help='Port number for the EMC server.'),
cfg.BoolOpt('emc_nas_server_secure',
default=True,
help='Use secure connection to server.'),
cfg.StrOpt('emc_share_backend',
ignore_case=True,
choices=['isilon', 'vnx', 'unity', 'vmax'],
help='Share backend.'),
cfg.StrOpt('emc_nas_root_dir',
help='The root directory where shares will be located.'),
cfg.BoolOpt('emc_ssl_cert_verify',
default=True,
help='If set to False the https client will not validate the '
'SSL certificate of the backend endpoint.'),
cfg.StrOpt('emc_ssl_cert_path',
help='Can be used to specify a non default path to a '
'CA_BUNDLE file or directory with certificates of trusted '
'CAs, which will be used to validate the backend.')
]
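# Illustrative manila.conf backend section (section name, host, and
# credential values are placeholders):
#
#     [dellemc_unity]
#     share_driver = manila.share.drivers.dell_emc.driver.EMCShareDriver
#     emc_share_backend = unity
#     emc_nas_server = 192.0.2.10
#     emc_nas_server_port = 8080
#     emc_nas_login = admin
#     emc_nas_password = secret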
CONF = cfg.CONF
CONF.register_opts(EMC_NAS_OPTS)
class EMCShareDriver(driver.ShareDriver):
"""EMC specific NAS driver. Allows for NFS and CIFS NAS storage usage."""
def __init__(self, *args, **kwargs):
self.configuration = kwargs.get('configuration', None)
if self.configuration:
self.configuration.append_config_values(EMC_NAS_OPTS)
self.backend_name = self.configuration.safe_get(
'emc_share_backend')
else:
self.backend_name = CONF.emc_share_backend
self.backend_name = self.backend_name or 'EMC_NAS_Storage'
self.plugin_manager = manager.EMCPluginManager(
namespace='manila.share.drivers.dell_emc.plugins')
self.plugin = self.plugin_manager.load_plugin(
self.backend_name,
configuration=self.configuration)
super(EMCShareDriver, self).__init__(
self.plugin.driver_handles_share_servers, *args, **kwargs)
if hasattr(self.plugin, 'ipv6_implemented'):
self.ipv6_implemented = self.plugin.ipv6_implemented
def create_share(self, context, share, share_server=None):
"""Is called to create share."""
location = self.plugin.create_share(context, share, share_server)
return location
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
"""Is called to create share from snapshot."""
location = self.plugin.create_share_from_snapshot(
context, share, snapshot, share_server)
return location
def extend_share(self, share, new_size, share_server=None):
"""Is called to extend share."""
self.plugin.extend_share(share, new_size, share_server)
def create_snapshot(self, context, snapshot, share_server=None):
"""Is called to create snapshot."""
self.plugin.create_snapshot(context, snapshot, share_server)
def delete_share(self, context, share, share_server=None):
"""Is called to remove share."""
self.plugin.delete_share(context, share, share_server)
def delete_snapshot(self, context, snapshot, share_server=None):
"""Is called to remove snapshot."""
self.plugin.delete_snapshot(context, snapshot, share_server)
def ensure_share(self, context, share, share_server=None):
"""Invoked to sure that share is exported."""
self.plugin.ensure_share(context, share, share_server)
def allow_access(self, context, share, access, share_server=None):
"""Allow access to the share."""
self.plugin.allow_access(context, share, access, share_server)
def deny_access(self, context, share, access, share_server=None):
"""Deny access to the share."""
self.plugin.deny_access(context, share, access, share_server)
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access to the share."""
self.plugin.update_access(context, share, access_rules, add_rules,
delete_rules, share_server)
def check_for_setup_error(self):
"""Check for setup error."""
self.plugin.check_for_setup_error()
def do_setup(self, context):
"""Any initialization the share driver does while starting."""
self.plugin.connect(self, context)
def _update_share_stats(self):
"""Retrieve stats info from share."""
backend_name = self.configuration.safe_get(
'share_backend_name') or "EMC_NAS_Storage"
data = dict(
share_backend_name=backend_name,
vendor_name='Dell EMC',
storage_protocol='NFS_CIFS',
snapshot_support=True,
create_share_from_snapshot_support=True)
self.plugin.update_share_stats(data)
super(EMCShareDriver, self)._update_share_stats(data)
def get_network_allocations_number(self):
"""Returns number of network allocations for creating VIFs."""
return self.plugin.get_network_allocations_number()
def _setup_server(self, network_info, metadata=None):
"""Set up and configures share server with given network parameters."""
return self.plugin.setup_server(network_info, metadata)
def _teardown_server(self, server_details, security_services=None):
"""Teardown share server."""
return self.plugin.teardown_server(server_details, security_services)
def get_configured_ip_versions(self):
if self.ipv6_implemented:
return [4, 6]
else:
return [4]
| apache-2.0 |
skycucumber/xuemc | python/venv/lib/python2.7/site-packages/flask/logging.py | 838 | 1398 | # -*- coding: utf-8 -*-
"""
flask.logging
~~~~~~~~~~~~~
Implements the logging support for Flask.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG
def create_logger(app):
"""Creates a logger for the given application. This logger works
similar to a regular Python logger but changes the effective logging
level based on the application's debug flag. Furthermore this
function also removes all attached handlers in case there was a
logger with the log name before.
"""
Logger = getLoggerClass()
class DebugLogger(Logger):
def getEffectiveLevel(x):
if x.level == 0 and app.debug:
return DEBUG
return Logger.getEffectiveLevel(x)
class DebugHandler(StreamHandler):
def emit(x, record):
StreamHandler.emit(x, record) if app.debug else None
handler = DebugHandler()
handler.setLevel(DEBUG)
handler.setFormatter(Formatter(app.debug_log_format))
logger = getLogger(app.logger_name)
# just in case that was not a new logger, get rid of all the handlers
# already attached to it.
del logger.handlers[:]
logger.__class__ = DebugLogger
logger.addHandler(handler)
return logger
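# Illustrative usage sketch (Flask normally calls create_logger() itself the
# first time app.logger is accessed):
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     app.debug = True
#     logger = create_logger(app)
#     logger.debug('emitted because app.debug is True')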
| gpl-2.0 |
borysiasty/QGIS | tests/src/python/test_qgsrendercontext.py | 11 | 27676 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRenderContext.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '16/01/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsRenderContext,
QgsMapSettings,
QgsDistanceArea,
QgsRectangle, QgsPointXY,
QgsCoordinateReferenceSystem,
QgsMapUnitScale,
QgsUnitTypes,
QgsProject,
QgsRectangle,
QgsVectorSimplifyMethod,
QgsRenderedFeatureHandlerInterface,
QgsDateTimeRange,
QgsMapClippingRegion,
QgsGeometry,
QgsDoubleRange)
from qgis.PyQt.QtCore import QSize, QDateTime
from qgis.PyQt.QtGui import QPainter, QImage
from qgis.testing import start_app, unittest
import math
# Convenience instances in case you may need them
# to find the srs.db
start_app()
class TestFeatureHandler(QgsRenderedFeatureHandlerInterface):
def handleRenderedFeature(self, feature, geometry, context):
pass
class TestQgsRenderContext(unittest.TestCase):
def testGettersSetters(self):
"""
Basic getter/setter tests
"""
c = QgsRenderContext()
c.setTextRenderFormat(QgsRenderContext.TextFormatAlwaysText)
self.assertEqual(c.textRenderFormat(), QgsRenderContext.TextFormatAlwaysText)
c.setTextRenderFormat(QgsRenderContext.TextFormatAlwaysOutlines)
self.assertEqual(c.textRenderFormat(), QgsRenderContext.TextFormatAlwaysOutlines)
c.setMapExtent(QgsRectangle(1, 2, 3, 4))
self.assertEqual(c.mapExtent(), QgsRectangle(1, 2, 3, 4))
self.assertTrue(c.zRange().isInfinite())
c.setZRange(QgsDoubleRange(1, 10))
self.assertEqual(c.zRange(), QgsDoubleRange(1, 10))
def testCopyConstructor(self):
"""
Test the copy constructor
"""
c1 = QgsRenderContext()
c1.setTextRenderFormat(QgsRenderContext.TextFormatAlwaysText)
c1.setMapExtent(QgsRectangle(1, 2, 3, 4))
c1.setZRange(QgsDoubleRange(1, 10))
c2 = QgsRenderContext(c1)
self.assertEqual(c2.textRenderFormat(), QgsRenderContext.TextFormatAlwaysText)
self.assertEqual(c2.mapExtent(), QgsRectangle(1, 2, 3, 4))
self.assertEqual(c2.zRange(), QgsDoubleRange(1, 10))
c1.setTextRenderFormat(QgsRenderContext.TextFormatAlwaysOutlines)
c2 = QgsRenderContext(c1)
self.assertEqual(c2.textRenderFormat(), QgsRenderContext.TextFormatAlwaysOutlines)
c1.setIsTemporal(True)
c1.setTemporalRange(QgsDateTimeRange(QDateTime(2020, 1, 1, 0, 0), QDateTime(2010, 12, 31, 23, 59)))
c2 = QgsRenderContext(c1)
self.assertEqual(c2.isTemporal(), True)
self.assertEqual(c2.temporalRange(),
QgsDateTimeRange(QDateTime(2020, 1, 1, 0, 0), QDateTime(2010, 12, 31, 23, 59)))
def testFromQPainter(self):
""" test QgsRenderContext.fromQPainter """
# no painter
c = QgsRenderContext.fromQPainter(None)
self.assertFalse(c.painter())
# assuming 88 dpi as fallback
self.assertAlmostEqual(c.scaleFactor(), 88 / 25.4, 3)
# no painter destination
p = QPainter()
c = QgsRenderContext.fromQPainter(p)
self.assertEqual(c.painter(), p)
self.assertEqual(c.testFlag(QgsRenderContext.Antialiasing), False)
self.assertEqual(c.testFlag(QgsRenderContext.LosslessImageRendering), False)
self.assertAlmostEqual(c.scaleFactor(), 88 / 25.4, 3)
im = QImage(1000, 600, QImage.Format_RGB32)
dots_per_m = 300 / 25.4 * 1000 # 300 dpi to dots per m
im.setDotsPerMeterX(dots_per_m)
im.setDotsPerMeterY(dots_per_m)
p = QPainter(im)
p.setRenderHint(QPainter.Antialiasing)
try:
p.setRenderHint(QPainter.LosslessImageRendering)
supports_lossless = True
except AttributeError:
supports_lossless = False
c = QgsRenderContext.fromQPainter(p)
self.assertEqual(c.painter(), p)
self.assertEqual(c.testFlag(QgsRenderContext.Antialiasing), True)
self.assertEqual(c.testFlag(QgsRenderContext.LosslessImageRendering), supports_lossless)
self.assertAlmostEqual(c.scaleFactor(), dots_per_m / 1000, 3) # scaleFactor should be pixels/mm
def testFromMapSettings(self):
"""
test QgsRenderContext.fromMapSettings()
"""
ms = QgsMapSettings()
ms.setOutputSize(QSize(1000, 1000))
ms.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
ms.setExtent(QgsRectangle(10000, 20000, 30000, 40000))
ms.setFlag(QgsMapSettings.Antialiasing, True)
ms.setFlag(QgsMapSettings.LosslessImageRendering, True)
ms.setFlag(QgsMapSettings.Render3DMap, True)
ms.setZRange(QgsDoubleRange(1, 10))
ms.setTextRenderFormat(QgsRenderContext.TextFormatAlwaysText)
rc = QgsRenderContext.fromMapSettings(ms)
self.assertEqual(rc.textRenderFormat(), QgsRenderContext.TextFormatAlwaysText)
self.assertTrue(rc.testFlag(QgsRenderContext.Antialiasing))
self.assertTrue(rc.testFlag(QgsRenderContext.LosslessImageRendering))
self.assertTrue(rc.testFlag(QgsRenderContext.Render3DMap))
self.assertEqual(ms.zRange(), QgsDoubleRange(1, 10))
ms.setTextRenderFormat(QgsRenderContext.TextFormatAlwaysOutlines)
ms.setZRange(QgsDoubleRange())
rc = QgsRenderContext.fromMapSettings(ms)
self.assertEqual(rc.textRenderFormat(), QgsRenderContext.TextFormatAlwaysOutlines)
self.assertTrue(ms.zRange().isInfinite())
self.assertEqual(rc.mapExtent(), QgsRectangle(10000, 20000, 30000, 40000))
ms.setIsTemporal(True)
rc = QgsRenderContext.fromMapSettings(ms)
self.assertEqual(rc.isTemporal(), True)
ms.setTemporalRange(QgsDateTimeRange(QDateTime(2020, 1, 1, 0, 0), QDateTime(2010, 12, 31, 23, 59)))
rc = QgsRenderContext.fromMapSettings(ms)
self.assertEqual(rc.temporalRange(),
QgsDateTimeRange(QDateTime(2020, 1, 1, 0, 0), QDateTime(2010, 12, 31, 23, 59)))
def testVectorSimplification(self):
"""
Test vector simplification hints, ensure they are copied correctly from map settings
"""
rc = QgsRenderContext()
self.assertEqual(rc.vectorSimplifyMethod().simplifyHints(), QgsVectorSimplifyMethod.NoSimplification)
ms = QgsMapSettings()
rc = QgsRenderContext.fromMapSettings(ms)
self.assertEqual(rc.vectorSimplifyMethod().simplifyHints(), QgsVectorSimplifyMethod.NoSimplification)
rc2 = QgsRenderContext(rc)
self.assertEqual(rc2.vectorSimplifyMethod().simplifyHints(), QgsVectorSimplifyMethod.NoSimplification)
method = QgsVectorSimplifyMethod()
method.setSimplifyHints(QgsVectorSimplifyMethod.GeometrySimplification)
ms.setSimplifyMethod(method)
rc = QgsRenderContext.fromMapSettings(ms)
self.assertEqual(rc.vectorSimplifyMethod().simplifyHints(), QgsVectorSimplifyMethod.GeometrySimplification)
rc2 = QgsRenderContext(rc)
self.assertEqual(rc2.vectorSimplifyMethod().simplifyHints(), QgsVectorSimplifyMethod.GeometrySimplification)
def testRenderedFeatureHandlers(self):
rc = QgsRenderContext()
self.assertFalse(rc.renderedFeatureHandlers())
self.assertFalse(rc.hasRenderedFeatureHandlers())
ms = QgsMapSettings()
rc = QgsRenderContext.fromMapSettings(ms)
self.assertFalse(rc.renderedFeatureHandlers())
self.assertFalse(rc.hasRenderedFeatureHandlers())
handler = TestFeatureHandler()
handler2 = TestFeatureHandler()
ms.addRenderedFeatureHandler(handler)
ms.addRenderedFeatureHandler(handler2)
rc = QgsRenderContext.fromMapSettings(ms)
self.assertEqual(rc.renderedFeatureHandlers(), [handler, handler2])
self.assertTrue(rc.hasRenderedFeatureHandlers())
rc2 = QgsRenderContext(rc)
self.assertEqual(rc2.renderedFeatureHandlers(), [handler, handler2])
self.assertTrue(rc2.hasRenderedFeatureHandlers())
def testRenderMetersInMapUnits(self):
crs_wsg84 = QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:4326')
rt_extent = QgsRectangle(13.37768985634235, 52.51625705830762, 13.37771931686235, 52.51628651882762)
point_berlin_wsg84 = QgsPointXY(13.37770458660236, 52.51627178856762)
length_wsg84_mapunits = 0.00001473026350140572
meters_test = 2.40
da_wsg84 = QgsDistanceArea()
da_wsg84.setSourceCrs(crs_wsg84, QgsProject.instance().transformContext())
if (da_wsg84.sourceCrs().isGeographic()):
da_wsg84.setEllipsoid(da_wsg84.sourceCrs().ellipsoidAcronym())
length_meter_mapunits = da_wsg84.measureLineProjected(point_berlin_wsg84, 1.0, (math.pi / 2))
meters_test_mapunits = meters_test * length_wsg84_mapunits
meters_test_pixel = meters_test * length_wsg84_mapunits
ms = QgsMapSettings()
ms.setDestinationCrs(crs_wsg84)
ms.setExtent(rt_extent)
r = QgsRenderContext.fromMapSettings(ms)
r.setExtent(rt_extent)
self.assertEqual(r.extent().center().toString(7), point_berlin_wsg84.toString(7))
c = QgsMapUnitScale()
r.setDistanceArea(da_wsg84)
result_test_painterunits = r.convertToPainterUnits(meters_test, QgsUnitTypes.RenderMetersInMapUnits, c)
self.assertEqual(
QgsDistanceArea.formatDistance(result_test_painterunits, 7, QgsUnitTypes.DistanceUnknownUnit, True),
QgsDistanceArea.formatDistance(meters_test_mapunits, 7, QgsUnitTypes.DistanceUnknownUnit, True))
result_test_mapunits = r.convertToMapUnits(meters_test, QgsUnitTypes.RenderMetersInMapUnits, c)
self.assertEqual(QgsDistanceArea.formatDistance(result_test_mapunits, 7, QgsUnitTypes.DistanceDegrees, True),
QgsDistanceArea.formatDistance(meters_test_mapunits, 7, QgsUnitTypes.DistanceDegrees, True))
result_test_meters = r.convertFromMapUnits(meters_test_mapunits, QgsUnitTypes.RenderMetersInMapUnits)
self.assertEqual(QgsDistanceArea.formatDistance(result_test_meters, 1, QgsUnitTypes.DistanceMeters, True),
QgsDistanceArea.formatDistance(meters_test, 1, QgsUnitTypes.DistanceMeters, True))
# attempting to convert to meters in map units when no extent is available should fall back to a
# very approximate degrees -> meters conversion
r.setExtent(QgsRectangle())
self.assertAlmostEqual(r.convertToPainterUnits(5555, QgsUnitTypes.RenderMetersInMapUnits), 0.0499, 3)
def testConvertSingleUnit(self):
ms = QgsMapSettings()
ms.setExtent(QgsRectangle(0, 0, 100, 100))
ms.setOutputSize(QSize(100, 50))
ms.setOutputDpi(300)
r = QgsRenderContext.fromMapSettings(ms)
# renderer scale should be about 1:291937841
# start with no min/max scale
c = QgsMapUnitScale()
# self.assertEqual(r.scaleFactor(),666)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(sf, 0.5, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(sf, 11.8110236, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(sf, 4.166666665625, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(sf, 300.0, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(sf, 1.0, places=5)
# minimum scale greater than the renderer scale, so should be limited to minScale
c.minScale = 150000000.0
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(sf, 3.89250455, places=5)
# only conversion from mapunits should be affected
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(sf, 11.8110236, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(sf, 4.166666665625, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(sf, 300.0, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(sf, 1.0, places=5)
c.minScale = 0
# maximum scale less than the renderer scale, so should be limited to maxScale
c.maxScale = 350000000.0
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(sf, 0.5, places=5)
# only conversion from mapunits should be affected
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(sf, 11.8110236, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(sf, 4.166666665625, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(sf, 300.0, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(sf, 1.0, places=5)
def testConvertToPainterUnits(self):
ms = QgsMapSettings()
ms.setExtent(QgsRectangle(0, 0, 100, 100))
ms.setOutputSize(QSize(100, 50))
ms.setOutputDpi(300)
r = QgsRenderContext.fromMapSettings(ms)
# renderer scale should be about 1:291937841
# start with no min/max scale
c = QgsMapUnitScale()
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(size, 1.0, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(size, 23.622047, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(size, 8.33333333125, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(size, 600.0, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(size, 2.0, places=5)
# minimum size greater than the calculated size, so size should be limited to minSizeMM
c.minSizeMM = 5
c.minSizeMMEnabled = True
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(size, 59.0551181, places=5)
# only conversion from mapunits should be affected
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(size, 23.622047, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(size, 8.33333333125, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(size, 600.0, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(size, 2.0, places=5)
c.minSizeMMEnabled = False
# maximum size less than the calculated size, so size should be limited to maxSizeMM
c.maxSizeMM = 0.1
c.maxSizeMMEnabled = True
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(size, 1.0, places=5)
# only conversion from mapunits should be affected
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(size, 23.622047, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(size, 8.33333333125, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(size, 600.0, places=5)
size = r.convertToPainterUnits(2, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(size, 2.0, places=5)
def testConvertToMapUnits(self):
ms = QgsMapSettings()
ms.setExtent(QgsRectangle(0, 0, 100, 100))
ms.setOutputSize(QSize(100, 50))
ms.setOutputDpi(300)
r = QgsRenderContext.fromMapSettings(ms)
# renderer scale should be about 1:291937841
# start with no min/max scale
c = QgsMapUnitScale()
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMapUnits, c)
self.assertEqual(size, 2.0)
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(size, 47.244094, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(size, 47.2440833, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(size, 3401.574, places=5)
size = r.convertToMapUnits(2, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(size, 4.0, places=5)
# minimum size greater than the calculated size, so size should be limited to minSizeMM
c.minSizeMM = 5
c.minSizeMMEnabled = True
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(size, 118.1102362, places=5)
# only conversion from mapunits should be affected
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(size, 47.244094, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(size, 47.2440833, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(size, 3401.574, places=5)
size = r.convertToMapUnits(2, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(size, 4.0, places=5)
c.minSizeMMEnabled = False
# maximum size less than the calculated size, so size should be limited to maxSizeMM
c.maxSizeMM = 0.05
c.maxSizeMMEnabled = True
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(size, 1.1811023622047245, places=5)
# only conversion from mapunits should be affected
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(size, 47.244094, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(size, 47.2440833, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(size, 3401.574, places=5)
size = r.convertToMapUnits(2, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(size, 4.0, places=5)
c.maxSizeMMEnabled = False
# test with minimum scale set
c.minScale = 150000000.0
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(size, 15.57001821, places=5)
# only conversion from mapunits should be affected
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(size, 47.244094, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(size, 47.2440833, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(size, 3401.574, places=5)
size = r.convertToMapUnits(2, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(size, 4.0, places=5)
c.minScale = 0
# test with maximum scale set
c.maxScale = 1550000000.0
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(size, 1.50677595625, places=5)
# only conversion from mapunits should be affected
size = r.convertToMapUnits(2, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(size, 47.244094, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(size, 47.2440833, places=5)
size = r.convertToMapUnits(5.66929, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(size, 3401.574, places=5)
size = r.convertToMapUnits(2, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(size, 4.0, places=5)
c.maxScale = 0
def testPixelSizeScaleFactor(self):
ms = QgsMapSettings()
ms.setExtent(QgsRectangle(0, 0, 100, 100))
ms.setOutputSize(QSize(100, 50))
ms.setOutputDpi(300)
r = QgsRenderContext.fromMapSettings(ms)
# renderer scale should be about 1:291937841
# start with no min/max scale
c = QgsMapUnitScale()
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(sf, 0.5, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(sf, 11.8110236, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(sf, 4.166666665625, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(sf, 300.0, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(sf, 1.0, places=5)
# minimum scale greater than the renderer scale, so should be limited to minScale
c.minScale = 150000000.0
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(sf, 3.8925045, places=5)
# only conversion from mapunits should be affected
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(sf, 11.811023, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(sf, 4.166666665625, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(sf, 300.0, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(sf, 1.0, places=5)
c.minScale = 0
# maximum scale less than the renderer scale, so should be limited to maxScale
c.maxScale = 350000000.0
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(sf, 0.5, places=5)
# only conversion from mapunits should be affected
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(sf, 11.8110236, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(sf, 4.166666665625, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(sf, 300.0, places=5)
sf = r.convertToPainterUnits(1, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(sf, 1.0, places=5)
def testMapUnitScaleFactor(self):
# test QgsSymbolLayerUtils::mapUnitScaleFactor() using QgsMapUnitScale
ms = QgsMapSettings()
ms.setExtent(QgsRectangle(0, 0, 100, 100))
ms.setOutputSize(QSize(100, 50))
ms.setOutputDpi(300)
r = QgsRenderContext.fromMapSettings(ms)
# renderer scale should be about 1:291937841
c = QgsMapUnitScale()
sf = r.convertToMapUnits(1, QgsUnitTypes.RenderMapUnits, c)
self.assertAlmostEqual(sf, 1.0, places=5)
sf = r.convertToMapUnits(1, QgsUnitTypes.RenderMillimeters, c)
self.assertAlmostEqual(sf, 23.622047, places=5)
sf = r.convertToMapUnits(1, QgsUnitTypes.RenderPoints, c)
self.assertAlmostEqual(sf, 8.33333324723, places=5)
sf = r.convertToMapUnits(1, QgsUnitTypes.RenderInches, c)
self.assertAlmostEqual(sf, 600.0, places=5)
sf = r.convertToMapUnits(1, QgsUnitTypes.RenderPixels, c)
self.assertAlmostEqual(sf, 2.0, places=5)
def testCustomRenderingFlags(self):
rc = QgsRenderContext()
rc.setCustomRenderingFlag('myexport', True)
rc.setCustomRenderingFlag('omitgeometries', 'points')
self.assertTrue(rc.customRenderingFlags()['myexport'])
self.assertEqual(rc.customRenderingFlags()['omitgeometries'], 'points')
# test that custom flags are correctly copied from settings
settings = QgsMapSettings()
settings.setCustomRenderingFlag('myexport', True)
settings.setCustomRenderingFlag('omitgeometries', 'points')
rc = QgsRenderContext.fromMapSettings(settings)
self.assertTrue(rc.customRenderingFlags()['myexport'])
self.assertEqual(rc.customRenderingFlags()['omitgeometries'], 'points')
def testTemporalState(self):
rc = QgsRenderContext()
self.assertEqual(rc.isTemporal(), False)
self.assertIsNotNone(rc.temporalRange())
def testClippingRegion(self):
ms = QgsMapSettings()
rc = QgsRenderContext.fromMapSettings(ms)
self.assertFalse(rc.clippingRegions())
ms.addClippingRegion(QgsMapClippingRegion(QgsGeometry.fromWkt('Polygon(( 0 0, 1 0 , 1 1 , 0 1, 0 0 ))')))
ms.addClippingRegion(QgsMapClippingRegion(QgsGeometry.fromWkt('Polygon(( 10 0, 11 0 , 11 1 , 10 1, 10 0 ))')))
rc = QgsRenderContext.fromMapSettings(ms)
self.assertEqual(len(rc.clippingRegions()), 2)
self.assertEqual(rc.clippingRegions()[0].geometry().asWkt(), 'Polygon ((0 0, 1 0, 1 1, 0 1, 0 0))')
self.assertEqual(rc.clippingRegions()[1].geometry().asWkt(), 'Polygon ((10 0, 11 0, 11 1, 10 1, 10 0))')
def testFeatureClipGeometry(self):
rc = QgsRenderContext()
self.assertTrue(rc.featureClipGeometry().isNull())
rc.setFeatureClipGeometry(QgsGeometry.fromWkt('Polygon(( 0 0, 1 0 , 1 1 , 0 1, 0 0 ))'))
self.assertEqual(rc.featureClipGeometry().asWkt(), 'Polygon ((0 0, 1 0, 1 1, 0 1, 0 0))')
rc2 = QgsRenderContext(rc)
self.assertEqual(rc2.featureClipGeometry().asWkt(), 'Polygon ((0 0, 1 0, 1 1, 0 1, 0 0))')
def testSetPainterFlags(self):
rc = QgsRenderContext()
p = QPainter()
im = QImage(1000, 600, QImage.Format_RGB32)
p.begin(im)
rc.setPainterFlagsUsingContext(p)
self.assertFalse(p.testRenderHint(QPainter.Antialiasing))
try:
self.assertFalse(p.testRenderHint(QPainter.LosslessImageRendering))
except AttributeError:
pass
rc.setPainter(p)
rc.setFlag(QgsRenderContext.Antialiasing, True)
rc.setFlag(QgsRenderContext.LosslessImageRendering, True)
rc.setPainterFlagsUsingContext(p)
self.assertTrue(p.testRenderHint(QPainter.Antialiasing))
try:
self.assertTrue(p.testRenderHint(QPainter.LosslessImageRendering))
except AttributeError:
pass
p.end()
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
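The painter-unit expectations in the conversion tests above follow directly from DPI arithmetic. A minimal sketch in plain Python, with no QGIS dependency, reproducing those constants; the function names here are illustrative, not QGIS API:
MM_PER_INCH = 25.4
POINTS_PER_INCH = 72.0
def mm_to_painter_units(mm, dpi=300):
    # 1 mm at 300 dpi -> 300 / 25.4 ~= 11.8110236 pixels, matching the assertions above
    return mm * dpi / MM_PER_INCH
def points_to_painter_units(pt, dpi=300):
    # 1 pt at 300 dpi -> 300 / 72 ~= 4.1666667 pixels
    return pt * dpi / POINTS_PER_INCH
assert abs(mm_to_painter_units(1) - 11.8110236) < 1e-5
assert abs(points_to_painter_units(1) - 4.1666667) < 1e-5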
kevin-zhaoshuai/zun | zun/common/context.py | 1 | 5431 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from eventlet.green import threading
from oslo_context import context
from zun.common import policy
class RequestContext(context.RequestContext):
"""Extends security contexts from the OpenStack common library."""
def __init__(self, auth_token=None, auth_url=None, domain_id=None,
domain_name=None, user_name=None, user_id=None,
user_domain_name=None, user_domain_id=None,
project_name=None, project_id=None, roles=None,
is_admin=None, read_only=False, show_deleted=False,
request_id=None, trust_id=None, auth_token_info=None,
all_tenants=False, password=None, **kwargs):
"""Stores several additional request parameters:
:param domain_id: The ID of the domain.
:param domain_name: The name of the domain.
:param user_domain_id: The ID of the domain to
authenticate a user against.
:param user_domain_name: The name of the domain to
authenticate a user against.
"""
super(RequestContext, self).__init__(auth_token=auth_token,
user=user_name,
tenant=project_name,
is_admin=is_admin,
read_only=read_only,
show_deleted=show_deleted,
request_id=request_id,
roles=roles)
self.user_name = user_name
self.user_id = user_id
self.project_name = project_name
self.project_id = project_id
self.domain_id = domain_id
self.domain_name = domain_name
self.user_domain_id = user_domain_id
self.user_domain_name = user_domain_name
self.auth_url = auth_url
self.auth_token_info = auth_token_info
self.trust_id = trust_id
self.all_tenants = all_tenants
self.password = password
if is_admin is None:
self.is_admin = policy.check_is_admin(self)
else:
self.is_admin = is_admin
def to_dict(self):
value = super(RequestContext, self).to_dict()
value.update({'auth_token': self.auth_token,
'auth_url': self.auth_url,
'domain_id': self.domain_id,
'domain_name': self.domain_name,
'user_domain_id': self.user_domain_id,
'user_domain_name': self.user_domain_name,
'user_name': self.user_name,
'user_id': self.user_id,
'project_name': self.project_name,
'project_id': self.project_id,
'is_admin': self.is_admin,
'read_only': self.read_only,
'roles': self.roles,
'show_deleted': self.show_deleted,
'request_id': self.request_id,
'trust_id': self.trust_id,
'auth_token_info': self.auth_token_info,
'password': self.password,
'all_tenants': self.all_tenants})
return value
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
# context.roles must be deepcopied to leave original roles
# without changes
context.roles = copy.deepcopy(self.roles)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
return context
def make_context(*args, **kwargs):
return RequestContext(*args, **kwargs)
def get_admin_context(show_deleted=False, all_tenants=False):
"""Create an administrator context.
:param show_deleted: if True, show deleted items when querying the db
"""
context = RequestContext(user_id=None,
project=None,
is_admin=True,
show_deleted=show_deleted,
all_tenants=all_tenants)
return context
_CTX_STORE = threading.local()
_CTX_KEY = 'current_ctx'
def has_ctx():
return hasattr(_CTX_STORE, _CTX_KEY)
def ctx():
return getattr(_CTX_STORE, _CTX_KEY)
def set_ctx(new_ctx):
if not new_ctx and has_ctx():
delattr(_CTX_STORE, _CTX_KEY)
if hasattr(context._request_store, 'context'):
delattr(context._request_store, 'context')
if new_ctx:
setattr(_CTX_STORE, _CTX_KEY, new_ctx)
setattr(context._request_store, 'context', new_ctx)
| apache-2.0 |
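The module above keeps the "current" request context in thread-local storage. A self-contained sketch of that pattern using only the standard library; the names are illustrative, not zun API:
import threading
_STORE = threading.local()
_KEY = 'current_ctx'
def has_ctx():
    return hasattr(_STORE, _KEY)
def set_ctx(new_ctx):
    # clearing: delete the attribute; setting: bind it for this thread only
    if new_ctx is None and has_ctx():
        delattr(_STORE, _KEY)
    elif new_ctx is not None:
        setattr(_STORE, _KEY, new_ctx)
set_ctx({'user_id': 'demo'})
assert has_ctx()
set_ctx(None)
assert not has_ctx()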
modelblocks/modelblocks-release | resource-incrsem/scripts/semprocpredrs.py | 1 | 4026 | import sys, os, re, collections
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'resource-gcg', 'scripts'))
import tree
from storestate import Sign, StoreState, getArity #, getParentContexts, getRchildContexts
def arityGivenCat(cat):
arity = getArity(cat)
return arity+1 if cat[0]=='N' and not cat.startswith('N-b{N-aD}') else arity
def getOp(cat,siblingcat,parentcat):
if '-l' not in cat: return 'I'
if '-lM' in cat: return 'M'
if '-lA' in cat and '\\' in parentcat: return str(arityGivenCat(parentcat.partition('\\')[2]))
if '-lA' in cat and '\\' not in parentcat: return str(arityGivenCat(siblingcat))
sys.stderr.write('WARN: illegal cat in '+cat+'\n')
return 'I'
def getBaseForm(cat, w):
baseCat = ""
m = re.match("(.*)\-xX[^\*]*\*([^\*]*)\*([^ ]*)", cat)
if m != None and m.group(1) != None and m.group(2) != None and m.group(3) != None:
if m.group(1)[0] in ["V","B","L","G"]:
baseCat = "B"+m.group(1)[1:]
baseW = re.sub(m.group(3)+'$',m.group(2),w) #w[:-len(m.group(3))]+m.group(2)
return (baseCat, baseW)
else:
return (cat, w)
def calccontext ( tr, Cx, t=0, s=0, d=0 ):
#### at preterminal node...
if len(tr.ch)==1 and len(tr.ch[0].ch)==0:
## increment time step at word encounter...
t += 1
## set fork value for operation after word at t...
Cx[t,'f']=1-s
## calc Cx[t,d,s]...
if tr.c == ':': tr.c = 'Pk'
if tr.ch[0].c[0].isdigit(): tr.ch[0].c = '!num!'
if ',' in tr.ch[0].c: tr.ch[0].c = '!containscomma!'
category,predicate = getBaseForm ( tr.c, tr.ch[0].c.lower() if all(ord(c) < 128 for c in tr.ch[0].c) else '!loanword!' )
Cx[t,'p'] = Sign( [ re.sub(':','Pk',category) + ':' + predicate + ('_1' if category.startswith('N') and not category.startswith('N-b') else '_0') ], tr.c ) if tr.c[0]>='A' and tr.c[0]<='Z' else\
Sign( [ ], re.sub(':','Pk',category) )
print 'F', ','.join(Cx[t-1].calcForkPredictors()), ':', ('f1' if Cx[t,'f']==1 else 'f0') + '&' + (Cx[t,'p'].sk[0] if len(Cx[t,'p'].sk)>0 else 'bot')
print 'P', ' '.join(Cx[t-1].calcPretrmCatPredictors(Cx[t,'f'],Cx[t,'p'].sk[0] if len(Cx[t,'p'].sk)>0 else 'bot')), ':', Cx[t,'p'].l
print 'W', (Cx[t,'p'].sk[0] if len(Cx[t,'p'].sk)>0 else 'bot'), Cx[t,'p'].l, ':', tr.ch[0].c if all(ord(c) < 128 for c in tr.ch[0].c) else '!loanword!'
# ## if f==1, merge p contexts into b...
# if Cx[t,'f']==0:
# Cx[t-1][-1].b.sk += Cx[t,'p'].sk
# if '"' in Cx[t-1][-1].a.sk: Cx[t-1][-1].a.sk = Cx[t-1][-1].b.sk
#### at non-preterminal unary node...
elif len(tr.ch)==1:
t = calccontext ( tr.ch[0], Cx, t, s, d )
#### at binary nonterminal node...
elif len(tr.ch)==2:
## traverse left child...
t = calccontext ( tr.ch[0], Cx, t, 0, d if s==0 else d+1 )
f = Cx[t,'f']
j = s
pretrm = Cx[t,'p']
ancstr = Cx[t-1].getAncstr(f)
lchild = Cx[t-1].getLchild(f,pretrm)
opL = getOp( tr.ch[0].c, tr.ch[1].c, tr.c )
opR = getOp( tr.ch[1].c, tr.ch[0].c, tr.c )
print 'J', ','.join(Cx[t-1].calcJoinPredictors(f,pretrm)), ':', ('j1' if j==1 else 'j0') + '&' + opL + '&' + opR
print 'A', ' '.join(Cx[t-1].calcApexCatPredictors(f,j,opL,pretrm)), ':', re.sub('-l.','',tr.c)
print 'B', ' '.join(Cx[t-1].calcBrinkCatPredictors(f,j,opL,opR,pretrm,re.sub('-l.','',tr.c))), ':', re.sub('-l.','',tr.ch[1].c)
Cx[t] = StoreState( Cx[t-1], f, j, opL, opR, re.sub('-l.','',tr.c), re.sub(':','Pk',re.sub('-l.','',tr.ch[1].c)), pretrm )
# print str(Cx[t-1]) + ' ===(' + str(lchild.sk) + ')==> ' + str(Cx[t])
## traverse right child...
t = calccontext ( tr.ch[1], Cx, t, 1, d )
return t
for line in sys.stdin:
# print line
tr = tree.Tree()
tr.read ( line )
Cx = collections.defaultdict(list)
Cx[0] = StoreState()
t = calccontext ( tr, Cx )
print 'J', ','.join(Cx[t-1].calcJoinPredictors(Cx[t,'f'],Cx[t,'p'])), ': j1&S&I'
# for k in sorted(Cx):
# print str(k) + str(Cx[k])
| gpl-3.0 |
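getBaseForm() above strips an inflectional suffix from a word and substitutes the base form encoded in the category tag. A standalone check of that rewrite rule; the category string below is a made-up example, not corpus data:
import re
def get_base_form(cat, w):
    m = re.match(r"(.*)\-xX[^\*]*\*([^\*]*)\*([^ ]*)", cat)
    if m and m.group(1) and m.group(1)[0] in ["V", "B", "L", "G"]:
        # strip the suffix (group 3) and splice in the base morph (group 2)
        return "B" + m.group(1)[1:], re.sub(m.group(3) + '$', m.group(2), w)
    return cat, w
print(get_base_form('V-xX%|*be*is', 'is'))  # -> ('B', 'be')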
oihane/odoo | addons/stock/wizard/stock_move.py | 242 | 4332 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_move_scrap(osv.osv_memory):
_name = "stock.move.scrap"
_description = "Scrap Products"
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot'),
}
_defaults = {
'location_id': lambda *x: False
}
def default_get(self, cr, uid, fields, context=None):
""" Get default values
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for default value
@param context: A standard dictionary
@return: default values of fields
"""
if context is None:
context = {}
res = super(stock_move_scrap, self).default_get(cr, uid, fields, context=context)
move = self.pool.get('stock.move').browse(cr, uid, context['active_id'], context=context)
location_obj = self.pool.get('stock.location')
scrap_location_id = location_obj.search(cr, uid, [('scrap_location','=',True)])
if 'product_id' in fields:
res.update({'product_id': move.product_id.id})
if 'product_uom' in fields:
res.update({'product_uom': move.product_uom.id})
if 'location_id' in fields:
if scrap_location_id:
res.update({'location_id': scrap_location_id[0]})
else:
res.update({'location_id': False})
return res
def move_scrap(self, cr, uid, ids, context=None):
""" To move scrapped products
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs if we want more than one
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
move_ids = context['active_ids']
for data in self.browse(cr, uid, ids):
move_obj.action_scrap(cr, uid, move_ids,
data.product_qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id,
context=context)
if context.get('active_id'):
move = self.pool.get('stock.move').browse(cr, uid, context['active_id'], context=context)
if move.picking_id:
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.picking',
'type': 'ir.actions.act_window',
'res_id': move.picking_id.id,
'context': context
}
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
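The default_get() override above pre-fills the wizard's location with the first location flagged as a scrap location, if any. A framework-free sketch of that rule; the data is illustrative, not an Odoo API call:
def pick_scrap_location(locations):
    scrap_ids = [loc['id'] for loc in locations if loc.get('scrap_location')]
    return scrap_ids[0] if scrap_ids else False
locations = [{'id': 7, 'scrap_location': False}, {'id': 12, 'scrap_location': True}]
assert pick_scrap_location(locations) == 12
assert pick_scrap_location([]) is False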
rsvip/Django | django/utils/datetime_safe.py | 535 | 2836 | # Python's datetime strftime doesn't handle dates before 1900.
# These classes override date and datetime to support the formatting of a date
# through its full "proleptic Gregorian" date range.
#
# Based on code submitted to comp.lang.python by Andrew Dalke
#
# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
# '1850/08/02 was a Friday'
import re
import time as ttime
from datetime import (
date as real_date, datetime as real_datetime, time as real_time,
)
class date(real_date):
def strftime(self, fmt):
return strftime(self, fmt)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(self, fmt)
@classmethod
def combine(cls, date, time):
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second,
time.microsecond, time.tzinfo)
def date(self):
return date(self.year, self.month, self.day)
class time(real_time):
pass
def new_date(d):
"Generate a safe date from a datetime.date object."
return date(d.year, d.month, d.day)
def new_datetime(d):
"""
Generate a safe datetime from a datetime.date or datetime.datetime object.
"""
kw = [d.year, d.month, d.day]
if isinstance(d, real_datetime):
kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
return datetime(*kw)
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while True:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
def strftime(dt, fmt):
if dt.year >= 1900:
return super(type(dt), dt).strftime(fmt)
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
raise TypeError("strftime of dates before 1900 does not handle " + illegal_formatting.group(0))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = ttime.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = ttime.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = _findall(s2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%04d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site + 4:]
return s
| bsd-3-clause |
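The strftime() workaround above exploits the fact that the Gregorian calendar repeats every 28 years (after correcting for non-leap century years). A condensed, standalone sketch that handles a single occurrence of the year, purely for illustration:
import time as ttime
from datetime import date
def strftime_pre1900(d, fmt):
    delta = 2000 - d.year
    off = 6 * (delta // 100 + delta // 400)  # correct for non-leap century years
    year = d.year + off
    year += ((2000 - year) // 28) * 28       # move into the repeat cycle near 2000
    s = ttime.strftime(fmt, (year,) + d.timetuple()[1:])
    return s.replace(str(year), '%04d' % d.year)
print(strftime_pre1900(date(1850, 8, 2), '%Y/%m/%d was a %A'))  # 1850/08/02 was a Friday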
juharris/tensorflow | tensorflow/python/platform/flags.py | 7 | 4414 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the flags interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
_global_parser = argparse.ArgumentParser()
class _FlagValues(object):
def __init__(self):
"""Global container and accessor for flags and their values."""
self.__dict__['__flags'] = {}
self.__dict__['__parsed'] = False
def _parse_flags(self):
result, unparsed = _global_parser.parse_known_args()
for flag_name, val in vars(result).items():
self.__dict__['__flags'][flag_name] = val
self.__dict__['__parsed'] = True
return unparsed
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
if name not in self.__dict__['__flags']:
raise AttributeError(name)
return self.__dict__['__flags'][name]
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
self.__dict__['__flags'][name] = value
def _define_helper(flag_name, default_value, docstring, flagtype):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
_global_parser.add_argument("--" + flag_name,
default=default_value,
help=docstring,
type=flagtype)
# Provides the global object that can be used to access flags.
FLAGS = _FlagValues()
def DEFINE_string(flag_name, default_value, docstring):
"""Defines a flag of type 'string'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a string.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, str)
def DEFINE_integer(flag_name, default_value, docstring):
"""Defines a flag of type 'int'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as an int.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, int)
def DEFINE_boolean(flag_name, default_value, docstring):
"""Defines a flag of type 'boolean'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a boolean.
docstring: A helpful message explaining the use of the flag.
"""
# Register a custom function for 'bool' so --flag=True works.
def str2bool(v):
return v.lower() in ('true', 't', '1')
_global_parser.add_argument('--' + flag_name,
nargs='?',
const=True,
help=docstring,
default=default_value,
type=str2bool)
# Add negated version, stay consistent with argparse with regard to
# dashes in flag names.
_global_parser.add_argument('--no' + flag_name,
action='store_false',
dest=flag_name.replace('-', '_'))
# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name
def DEFINE_float(flag_name, default_value, docstring):
"""Defines a flag of type 'float'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a float.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, float)
| apache-2.0 |
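The FLAGS object above is a thin wrapper over a shared argparse parser. A minimal standalone sketch of the same pattern, parsing an explicit argv list so it runs anywhere; the flag names are illustrative:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.01, help='step size')
parser.add_argument('--train', nargs='?', const=True, default=False,
                    type=lambda v: v.lower() in ('true', 't', '1'))  # mirrors str2bool above
flags = parser.parse_args(['--learning_rate', '0.1', '--train', 'false'])
assert flags.learning_rate == 0.1 and flags.train is False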
dgarros/ansible | lib/ansible/modules/network/a10/a10_service_group.py | 32 | 13714 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb service-group objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>,
Eric Chou <ericc@a10networks.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_service_group
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' service groups.
description:
- Manage SLB (Server Load Balancing) service-group objects on A10 Networks devices via aXAPIv2.
author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- Requires A10 Networks aXAPI 2.1.
- Servers listed in the C(servers) option must already exist on the device; the module fails if a listed server is not found.
extends_documentation_fragment: a10
options:
partition:
version_added: "2.3"
description:
- set active-partition
required: false
default: null
service_group:
description:
- The SLB (Server Load Balancing) service-group name
required: true
default: null
aliases: ['service', 'pool', 'group']
service_group_protocol:
description:
- The SLB service-group protocol of TCP or UDP.
required: false
default: tcp
aliases: ['proto', 'protocol']
choices: ['tcp', 'udp']
service_group_method:
description:
- The SLB service-group load balancing method, such as round-robin or weighted-rr.
required: false
default: round-robin
aliases: ['method']
choices:
- 'round-robin'
- 'weighted-rr'
- 'least-connection'
- 'weighted-least-connection'
- 'service-least-connection'
- 'service-weighted-least-connection'
- 'fastest-response'
- 'least-request'
- 'round-robin-strict'
- 'src-ip-only-hash'
- 'src-ip-hash'
servers:
description:
- A list of servers to add to the service group. Each list item should be a
dictionary which specifies the C(server:) and C(port:), but can also optionally
specify the C(status:). See the examples below for details.
required: false
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new service-group
- a10_service_group:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
service_group: sg-80-tcp
servers:
- server: foo1.mydomain.com
port: 8080
- server: foo2.mydomain.com
port: 8080
- server: foo3.mydomain.com
port: 8080
- server: foo4.mydomain.com
port: 8080
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_service_group
returned: success
type: string
sample: "mynewservicegroup"
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled
VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
VALID_SERVER_FIELDS = ['server', 'port', 'status']
def validate_servers(module, servers):
for item in servers:
for key in item:
if key not in VALID_SERVER_FIELDS:
module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS)))
# validate the server name is present
if 'server' not in item:
module.fail_json(msg="server definitions must define the server field")
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
except (TypeError, ValueError):
module.fail_json(msg="server port definitions must be integers")
else:
module.fail_json(msg="server definitions must define the port field")
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True),
service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']),
service_group_method=dict(type='str', default='round-robin',
aliases=['method'],
choices=['round-robin',
'weighted-rr',
'least-connection',
'weighted-least-connection',
'service-least-connection',
'service-weighted-least-connection',
'fastest-response',
'least-request',
'round-robin-strict',
'src-ip-only-hash',
'src-ip-hash']),
servers=dict(type='list', aliases=['server', 'member'], default=[]),
partition=dict(type='str', default=[]),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
partition = module.params['partition']
state = module.params['state']
write_config = module.params['write_config']
slb_service_group = module.params['service_group']
slb_service_group_proto = module.params['service_group_protocol']
slb_service_group_method = module.params['service_group_method']
slb_servers = module.params['servers']
if slb_service_group is None:
module.fail_json(msg='service_group is required')
axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json'
load_balancing_methods = {'round-robin': 0,
'weighted-rr': 1,
'least-connection': 2,
'weighted-least-connection': 3,
'service-least-connection': 4,
'service-weighted-least-connection': 5,
'fastest-response': 6,
'least-request': 7,
'round-robin-strict': 8,
'src-ip-only-hash': 14,
'src-ip-hash': 15}
if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp':
protocol = 2
else:
protocol = 3
# validate the server data list structure
validate_servers(module, slb_servers)
json_post = {
'service_group': {
'name': slb_service_group,
'protocol': protocol,
'lb_method': load_balancing_methods[slb_service_group_method],
}
}
# first we authenticate to get a session id
session_url = axapi_authenticate(module, axapi_base_url, username, password)
# then we select the active-partition
slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
# then we check to see if the specified group exists
slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
slb_service_group_exist = not axapi_failure(slb_result)
changed = False
if state == 'present':
# before creating/updating we need to validate that servers
# defined in the servers list exist to prevent errors
checked_servers = []
for server in slb_servers:
result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']}))
if axapi_failure(result):
module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server'])
checked_servers.append(server['server'])
if not slb_service_group_exist:
result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg=result['response']['err']['msg'])
changed = True
else:
# check to see if the service group definition without the
# server members is different, and update that individually
# if it needs it
do_update = False
for field in VALID_SERVICE_GROUP_FIELDS:
if json_post['service_group'][field] != slb_result['service_group'][field]:
do_update = True
break
if do_update:
result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg=result['response']['err']['msg'])
changed = True
# next we pull the defined list of servers out of the returned
# results to make it a bit easier to iterate over
defined_servers = slb_result.get('service_group', {}).get('member_list', [])
# next we add/update new member servers from the user-specified
# list if they're different or not on the target device
for server in slb_servers:
found = False
different = False
for def_server in defined_servers:
if server['server'] == def_server['server']:
found = True
for valid_field in VALID_SERVER_FIELDS:
if server[valid_field] != def_server[valid_field]:
different = True
break
if found or different:
break
# add or update as required
server_data = {
"name": slb_service_group,
"member": server,
}
if not found:
result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data))
changed = True
elif different:
result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data))
changed = True
# finally, remove any servers that are on the target
# device but were not specified in the list given
for server in defined_servers:
found = False
for slb_server in slb_servers:
if server['server'] == slb_server['server']:
found = True
break
# remove if not found
server_data = {
"name": slb_service_group,
"member": server,
}
if not found:
result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data))
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
else:
result = slb_result
elif state == 'absent':
if slb_service_group_exist:
result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group}))
changed = True
else:
result = dict(msg="the service group was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
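validate_servers() above normalizes each server entry before it is posted to the aXAPI. A standalone sketch of that normalization, assuming the enabled=1/disabled=0 integer mapping used by axapi_enabled_disabled:
VALID = {'server', 'port', 'status'}
def normalize(servers):
    for item in servers:
        unknown = set(item) - VALID
        if unknown:
            raise ValueError('invalid server field(s): %s' % ', '.join(sorted(unknown)))
        item['port'] = int(item['port'])  # ports must be integers
        item['status'] = 0 if item.get('status') == 'disabled' else 1
    return servers
print(normalize([{'server': 'foo1', 'port': '8080', 'status': 'disabled'}]))
# -> [{'server': 'foo1', 'port': 8080, 'status': 0}]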
valhallasw/phabricator-tools | testbed/lockfile/looping_writer.py | 5 | 1514 | import os
import sys
# append our module dirs to sys.path, which is the list of paths to search
# for modules this is so we can import our libraries directly
# N.B. this magic is only really passable up-front in the entrypoint module
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
BASE_DIR = os.path.dirname(PARENT_DIR)
sys.path.append(os.path.join(BASE_DIR, "py", "phl"))
import phlsys_fs
import phlsys_signal
phlsys_signal.set_exit_on_sigterm()
while True:
filename = 'testfile'
try:
with phlsys_fs.lockfile_context('lockfile'):
handle = os.open(filename, os.O_CREAT | os.O_EXCL)
os.close(handle)
os.remove(filename)
except phlsys_fs.LockfileExistsError:
pass
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| apache-2.0 |
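The writer above relies on os.O_CREAT | os.O_EXCL, which makes lock-file creation atomic: exactly one process can create the file. A self-contained sketch of a lockfile context manager built on the same idea; phlsys_fs provides the real implementation, and these names are illustrative:
import contextlib
import os
@contextlib.contextmanager
def lockfile_context(path):
    try:
        handle = os.open(path, os.O_CREAT | os.O_EXCL)  # atomic create-or-fail
    except FileExistsError:
        raise RuntimeError('lockfile already held: %s' % path)
    try:
        os.close(handle)
        yield
    finally:
        os.remove(path)  # release the lock
with lockfile_context('demo.lock'):
    pass  # critical section runs while the lock file exists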
lin-credible/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
# NOTE: the early return disables signature output; the branch below is unreachable.
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
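_str_member_list() above falls back to a fixed-width reST table when autosummary cannot be used; the '=' rules must match the column widths derived from the longest entries exactly, or Sphinx rejects the table. A standalone sketch of that construction with illustrative sample rows:
rows = [('n_samples', 'int', 'number of rows'), ('tol', 'float', 'stopping tolerance')]
w0 = max(len(r[0]) for r in rows)
w1 = max(len(r[1]) for r in rows)
hdr = '=' * w0 + '  ' + '=' * w1 + '  ' + '=' * 10
fmt = '%%-%ds  %%-%ds  %%s' % (w0, w1)
print(hdr)
for name, typ, desc in rows:
    print(fmt % (name, typ, desc))
print(hdr)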
mheap/ansible | lib/ansible/modules/storage/netapp/netapp_e_storagepool.py | 22 | 36782 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_storagepool
short_description: Manage disk groups and disk pools
version_added: '2.2'
description:
- Create or remove disk groups and disk pools for NetApp E-series storage arrays.
extends_documentation_fragment:
- netapp.eseries
options:
state:
required: true
description:
- Whether the specified storage pool should exist or not.
- Note that removing a storage pool currently requires the removal of all defined volumes first.
choices: ['present', 'absent']
name:
required: true
description:
- The name of the storage pool to manage
criteria_drive_count:
description:
- The number of disks to use for building the storage pool. The pool will be expanded if this number exceeds the number of disks already in place
criteria_drive_type:
description:
- The type of disk (hdd or ssd) to use when searching for candidates to use.
choices: ['hdd','ssd']
criteria_size_unit:
description:
- The unit used to interpret size parameters
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
criteria_drive_min_size:
description:
- The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
criteria_min_usable_capacity:
description:
- The minimum size of the storage pool (in size_unit). The pool will be expanded if this value exceeds its current size.
criteria_drive_interface_type:
description:
- The interface type to use when selecting drives for the storage pool (no value means all interface types will be considered)
choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
criteria_drive_require_fde:
description:
- Whether full disk encryption ability is required for drives to be added to the storage pool
raid_level:
required: true
choices: ['raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']
description:
- "Only required when the requested state is 'present'. The RAID level of the storage pool to be created."
erase_secured_drives:
required: false
type: bool
description:
- Whether to erase secured disks before adding to storage pool
secure_pool:
required: false
type: bool
description:
- Whether to convert to a secure storage pool. Will only work if all drives in the pool are security capable.
reserve_drive_count:
required: false
description:
- Set the number of drives reserved by the storage pool for reconstruction operations. Only valid on RAID disk pools.
remove_volumes:
required: false
default: False
description:
- Prior to removing a storage pool, delete all volumes in the pool.
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
- name: No disk groups
netapp_e_storagepool:
ssid: "{{ ssid }}"
name: "{{ item }}"
state: absent
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
'''
RETURN = '''
msg:
description: Success message
returned: success
type: string
sample: Json facts for the pool that was created.
'''
import json
import logging
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
def select(predicate, iterable):
# python 2, 3 generic filtering.
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
def _identity(obj):
return obj
class GroupBy(object):
# python 2, 3 generic grouping.
def __init__(self, iterable, key=None):
self.keyfunc = key if key else _identity
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = object()
def __iter__(self):
return self
def next(self):
while self.currkey == self.tgtkey:
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
self.tgtkey = self.currkey
return (self.currkey, self._grouper(self.tgtkey))
__next__ = next # Python 3 compatibility: alias the Python 2 iterator protocol method
def _grouper(self, tgtkey):
while self.currkey == tgtkey:
yield self.currvalue
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
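# Usage sketch (illustrative only): like itertools.groupby, GroupBy assumes its
# input is already sorted by the grouping key, e.g.:
#   drives = sorted(drives, key=lambda d: int(d['rawCapacity']))
#   for capacity, group in GroupBy(drives, lambda d: int(d['rawCapacity'])):
#       candidates = list(group)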
class NetAppESeriesStoragePool(object):
def __init__(self):
self._sp_drives_cached = None
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
api_url=dict(type='str', required=True),
state=dict(required=True, choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
criteria_size_unit=dict(default='gb', type='str'),
criteria_drive_count=dict(type='int'),
criteria_drive_interface_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
type='str'),
criteria_drive_type=dict(choices=['ssd', 'hdd'], type='str'),
criteria_drive_min_size=dict(type='int'),
criteria_drive_require_fde=dict(type='bool'),
criteria_min_usable_capacity=dict(type='int'),
raid_level=dict(
choices=['raidUnsupported', 'raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']),
erase_secured_drives=dict(type='bool'),
log_path=dict(type='str'),
remove_drives=dict(type='list'),
secure_pool=dict(type='bool', default=False),
reserve_drive_count=dict(type='int'),
remove_volumes=dict(type='bool', default=False)
))
self.module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['raid_level'])
],
mutually_exclusive=[
],
# TODO: update validation for various selection criteria
supports_check_mode=True
)
p = self.module.params
log_path = p['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
self.debug = self._logger.debug
if log_path:
logging.basicConfig(level=logging.DEBUG, filename=log_path)
self.state = p['state']
self.ssid = p['ssid']
self.name = p['name']
self.validate_certs = p['validate_certs']
self.criteria_drive_count = p['criteria_drive_count']
self.criteria_drive_type = p['criteria_drive_type']
self.criteria_size_unit = p['criteria_size_unit']
self.criteria_drive_min_size = p['criteria_drive_min_size']
self.criteria_min_usable_capacity = p['criteria_min_usable_capacity']
self.criteria_drive_interface_type = p['criteria_drive_interface_type']
self.criteria_drive_require_fde = p['criteria_drive_require_fde']
self.raid_level = p['raid_level']
self.erase_secured_drives = p['erase_secured_drives']
self.remove_drives = p['remove_drives']
self.secure_pool = p['secure_pool']
self.reserve_drive_count = p['reserve_drive_count']
self.remove_volumes = p['remove_volumes']
try:
self.api_usr = p['api_username']
self.api_pwd = p['api_password']
self.api_url = p['api_url']
except KeyError:
self.module.fail_json(msg="You must pass in api_username "
"and api_password and api_url to the module.")
self.post_headers = dict(Accept="application/json")
self.post_headers['Content-Type'] = 'application/json'
    # Quick and dirty drive selector, since the one provided by the web service proxy is broken for min_disk_size as of 2016-03-12.
    # Doesn't really need to be a class once this is in module_utils or retired - it just groups everything together so we
    # can copy/paste to other modules more easily.
    # Filters all disks by the specified criteria, then groups the remaining disks by capacity, interface and disk type, and selects
    # the first set that matches the specified count and/or aggregate capacity (see the illustrative call sketch after filter_drives).
# class DriveSelector(object):
def filter_drives(
self,
drives, # raw drives resp
interface_type=None, # sas, sata, fibre, etc
drive_type=None, # ssd/hdd
spindle_speed=None, # 7200, 10000, 15000, ssd (=0)
min_drive_size=None,
max_drive_size=None,
fde_required=None,
size_unit='gb',
min_total_capacity=None,
min_drive_count=None,
exact_drive_count=None,
raid_level=None
):
if min_total_capacity is None and exact_drive_count is None:
raise Exception("One of criteria_min_total_capacity or criteria_drive_count must be specified.")
if min_total_capacity:
min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]
# filter clearly invalid/unavailable drives first
drives = select(self._is_valid_drive, drives)
if interface_type:
drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
if drive_type:
drives = select(lambda d: d['driveMediaType'] == drive_type, drives)
if spindle_speed is not None: # 0 is valid for ssds
drives = select(lambda d: d['spindleSpeed'] == spindle_speed, drives)
if min_drive_size:
min_drive_size_bytes = min_drive_size * self._size_unit_map[size_unit]
drives = select(lambda d: int(d['rawCapacity']) >= min_drive_size_bytes, drives)
if max_drive_size:
max_drive_size_bytes = max_drive_size * self._size_unit_map[size_unit]
drives = select(lambda d: int(d['rawCapacity']) <= max_drive_size_bytes, drives)
if fde_required:
drives = select(lambda d: d['fdeCapable'], drives)
# initial implementation doesn't have a preference for any of these values...
# just return the first set we find that matches the requested disk count and/or minimum total capacity
for (cur_capacity, drives_by_capacity) in GroupBy(drives, lambda d: int(d['rawCapacity'])):
for (cur_interface_type, drives_by_interface_type) in GroupBy(drives_by_capacity,
lambda d: d['phyDriveType']):
for (cur_drive_type, drives_by_drive_type) in GroupBy(drives_by_interface_type,
lambda d: d['driveMediaType']):
# listify so we can consume more than once
drives_by_drive_type = list(drives_by_drive_type)
candidate_set = list() # reset candidate list on each iteration of the innermost loop
if exact_drive_count:
if len(drives_by_drive_type) < exact_drive_count:
continue # we know this set is too small, move on
for drive in drives_by_drive_type:
candidate_set.append(drive)
if self._candidate_set_passes(candidate_set, min_capacity_bytes=min_total_capacity,
min_drive_count=min_drive_count,
exact_drive_count=exact_drive_count, raid_level=raid_level):
return candidate_set
raise Exception("couldn't find an available set of disks to match specified criteria")
def _is_valid_drive(self, d):
is_valid = d['available'] \
and d['status'] == 'optimal' \
and not d['pfa'] \
and not d['removed'] \
and not d['uncertified'] \
and not d['invalidDriveData'] \
and not d['nonRedundantAccess']
return is_valid
def _candidate_set_passes(self, candidate_set, min_capacity_bytes=None, min_drive_count=None,
exact_drive_count=None, raid_level=None):
if not self._is_drive_count_valid(len(candidate_set), min_drive_count=min_drive_count,
exact_drive_count=exact_drive_count, raid_level=raid_level):
return False
# TODO: this assumes candidate_set is all the same size- if we want to allow wastage, need to update to use min size of set
if min_capacity_bytes is not None and self._calculate_usable_capacity(int(candidate_set[0]['rawCapacity']),
len(candidate_set),
raid_level=raid_level) < min_capacity_bytes:
return False
return True
def _calculate_usable_capacity(self, disk_size_bytes, disk_count, raid_level=None):
if raid_level in [None, 'raid0']:
return disk_size_bytes * disk_count
if raid_level == 'raid1':
return (disk_size_bytes * disk_count) // 2
if raid_level in ['raid3', 'raid5']:
return (disk_size_bytes * disk_count) - disk_size_bytes
if raid_level in ['raid6', 'raidDiskPool']:
return (disk_size_bytes * disk_count) - (disk_size_bytes * 2)
raise Exception("unsupported raid_level: %s" % raid_level)
def _is_drive_count_valid(self, drive_count, min_drive_count=0, exact_drive_count=None, raid_level=None):
if exact_drive_count and exact_drive_count != drive_count:
return False
if raid_level == 'raidDiskPool':
if drive_count < 11:
return False
if raid_level == 'raid1':
if drive_count % 2 != 0:
return False
if raid_level in ['raid3', 'raid5']:
if drive_count < 3:
return False
if raid_level == 'raid6':
if drive_count < 4:
return False
if min_drive_count and drive_count < min_drive_count:
return False
return True
def get_storage_pool(self, storage_pool_name):
# global ifilter
self.debug("fetching storage pools")
# map the storage pool name to its id
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception as err:
rc = err.args[0]
if rc == 404 and self.state == 'absent':
self.module.exit_json(
msg="Storage pool [%s] did not exist." % (self.name))
else:
self.module.exit_json(
msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
(self.ssid, to_native(err), self.state, rc))
self.debug("searching for storage pool '%s'", storage_pool_name)
pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)
if pool_detail:
found = 'found'
else:
found = 'not found'
self.debug(found)
return pool_detail
def get_candidate_disks(self):
self.debug("getting candidate disks...")
# driveCapacityMin is broken on /drives POST. Per NetApp request we built our own
# switch back to commented code below if it gets fixed
# drives_req = dict(
# driveCount = self.criteria_drive_count,
# sizeUnit = 'mb',
# raidLevel = self.raid_level
# )
#
# if self.criteria_drive_type:
# drives_req['driveType'] = self.criteria_drive_type
# if self.criteria_disk_min_aggregate_size_mb:
# drives_req['targetUsableCapacity'] = self.criteria_disk_min_aggregate_size_mb
#
# # TODO: this arg appears to be ignored, uncomment if it isn't
# #if self.criteria_disk_min_size_gb:
# # drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024
# (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers,
# method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
#
# if rc == 204:
# self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool')
# disk_ids = [d['id'] for d in drives_resp]
try:
(rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs)
except Exception as err:
self.module.exit_json(
msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." % (self.ssid, to_native(err)))
try:
candidate_set = self.filter_drives(drives_resp,
exact_drive_count=self.criteria_drive_count,
drive_type=self.criteria_drive_type,
min_drive_size=self.criteria_drive_min_size,
raid_level=self.raid_level,
size_unit=self.criteria_size_unit,
min_total_capacity=self.criteria_min_usable_capacity,
interface_type=self.criteria_drive_interface_type,
fde_required=self.criteria_drive_require_fde
)
except Exception as err:
self.module.fail_json(
msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
disk_ids = [d['id'] for d in candidate_set]
return disk_ids
def create_storage_pool(self):
self.debug("creating storage pool...")
sp_add_req = dict(
raidLevel=self.raid_level,
diskDriveIds=self.disk_ids,
name=self.name
)
if self.erase_secured_drives:
sp_add_req['eraseSecuredDrives'] = self.erase_secured_drives
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
data=json.dumps(sp_add_req), headers=self.post_headers, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs,
timeout=120)
except Exception as err:
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to create storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
to_native(err)))
self.pool_detail = self.get_storage_pool(self.name)
if self.secure_pool:
secure_pool_data = dict(securePool=True)
try:
(retc, r) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
except Exception as err:
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to update storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
to_native(err)))
@property
def needs_raid_level_migration(self):
current_raid_level = self.pool_detail['raidLevel']
needs_migration = self.raid_level != current_raid_level
if needs_migration: # sanity check some things so we can fail early/check-mode
if current_raid_level == 'raidDiskPool':
self.module.fail_json(msg="raid level cannot be changed for disk pools")
return needs_migration
def migrate_raid_level(self):
self.debug("migrating storage pool to raid level '%s'...", self.raid_level)
sp_raid_migrate_req = dict(
raidLevel=self.raid_level
)
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s/raid-type-migration" % (self.ssid,
self.name),
data=json.dumps(sp_raid_migrate_req), headers=self.post_headers, method='POST',
url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except Exception as err:
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to change the raid level of storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
pool_id, self.ssid, to_native(err)))
@property
def sp_drives(self, exclude_hotspares=True):
if not self._sp_drives_cached:
self.debug("fetching drive list...")
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs)
except Exception as err:
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to fetch disk drives. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, self.ssid, to_native(err)))
sp_id = self.pool_detail['id']
if exclude_hotspares:
self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id and not d['hotSpare']]
else:
self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id]
return self._sp_drives_cached
@property
def reserved_drive_count_differs(self):
if int(self.pool_detail['volumeGroupData']['diskPoolData']['reconstructionReservedDriveCount']) != self.reserve_drive_count:
return True
return False
@property
def needs_expansion(self):
if self.criteria_drive_count > len(self.sp_drives):
return True
# TODO: is totalRaidedSpace the best attribute for "how big is this SP"?
if self.criteria_min_usable_capacity and \
(self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]) > int(self.pool_detail['totalRaidedSpace']):
return True
return False
def get_expansion_candidate_drives(self):
# sanity checks; don't call this if we can't/don't need to expand
if not self.needs_expansion:
self.module.fail_json(msg="can't get expansion candidates when pool doesn't need expansion")
self.debug("fetching expansion candidate drives...")
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
self.pool_detail['id']),
method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except Exception as err:
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to fetch candidate drives for storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
pool_id, self.ssid, to_native(err)))
current_drive_count = len(self.sp_drives)
current_capacity_bytes = int(self.pool_detail['totalRaidedSpace']) # TODO: is this the right attribute to use?
if self.criteria_min_usable_capacity:
requested_capacity_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]
else:
requested_capacity_bytes = current_capacity_bytes
if self.criteria_drive_count:
minimum_disks_to_add = max((self.criteria_drive_count - current_drive_count), 1)
else:
minimum_disks_to_add = 1
minimum_bytes_to_add = max(requested_capacity_bytes - current_capacity_bytes, 0)
# FUTURE: allow more control over expansion candidate selection?
# loop over candidate disk sets and add until we've met both criteria
added_drive_count = 0
added_capacity_bytes = 0
drives_to_add = set()
for s in resp:
# don't trust the API not to give us duplicate drives across candidate sets, especially in multi-drive sets
candidate_drives = s['drives']
if len(drives_to_add.intersection(candidate_drives)) != 0:
# duplicate, skip
continue
drives_to_add.update(candidate_drives)
added_drive_count += len(candidate_drives)
added_capacity_bytes += int(s['usableCapacity'])
if added_drive_count >= minimum_disks_to_add and added_capacity_bytes >= minimum_bytes_to_add:
break
if (added_drive_count < minimum_disks_to_add) or (added_capacity_bytes < minimum_bytes_to_add):
self.module.fail_json(
msg="unable to find at least %s drives to add that would add at least %s bytes of capacity" % (
minimum_disks_to_add, minimum_bytes_to_add))
return list(drives_to_add)
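    # Worked example (illustrative numbers): if the pool needs at least 3 more
    # disks and 2 TB more capacity, and each candidate set contributes two
    # drives and 1 TB of usable capacity, the loop above accumulates two
    # non-overlapping sets (4 drives, 2 TB) and then breaks, since both
    # thresholds are satisfied.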
def expand_storage_pool(self):
drives_to_add = self.get_expansion_candidate_drives()
self.debug("adding %s drives to storage pool...", len(drives_to_add))
sp_expand_req = dict(
drives=drives_to_add
)
try:
request(
self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
self.pool_detail['id']),
data=json.dumps(sp_expand_req), headers=self.post_headers, method='POST', url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except Exception as err:
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
to_native(err)))
# TODO: check response
# TODO: support blocking wait?
def reduce_drives(self, drive_list):
if all(drive in drive_list for drive in self.sp_drives):
# all the drives passed in are present in the system
pass
else:
self.module.fail_json(
msg="One of the drives you wish to remove does not currently exist in the storage pool you specified")
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s/reduction" % (self.ssid,
self.pool_detail['id']),
data=json.dumps(drive_list), headers=self.post_headers, method='POST', url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except Exception as err:
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to remove drives from storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
pool_id, self.ssid, to_native(err)))
def update_reserve_drive_count(self, qty):
data = dict(reservedDriveCount=qty)
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
data=json.dumps(data), headers=self.post_headers, method='POST', url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except Exception as err:
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to update reserve drive count. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
to_native(err)))
def apply(self):
changed = False
pool_exists = False
self.pool_detail = self.get_storage_pool(self.name)
if self.pool_detail:
pool_exists = True
pool_id = self.pool_detail['id']
if self.state == 'absent':
self.debug("CHANGED: storage pool exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# sanity checks first- we can't change these, so we'll bomb if they're specified
if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail['driveMediaType']:
self.module.fail_json(
msg="drive media type %s cannot be changed to %s" % (self.pool_detail['driveMediaType'],
self.criteria_drive_type))
# now the things we can change...
if self.needs_expansion:
self.debug("CHANGED: storage pool needs expansion")
changed = True
if self.needs_raid_level_migration:
self.debug(
"CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'",
self.pool_detail['raidLevel'], self.raid_level)
changed = True
# if self.reserved_drive_count_differs:
# changed = True
# TODO: validate other state details? (pool priority, alert threshold)
# per FPoole and others, pool reduce operations will not be supported. Automatic "smart" reduction
# presents a difficult parameter issue, as the disk count can increase due to expansion, so we
# can't just use disk count > criteria_drive_count.
else: # pool does not exist
if self.state == 'present':
self.debug("CHANGED: storage pool does not exist, but requested state is 'present'")
changed = True
# ensure we can get back a workable set of disks
# (doing this early so candidate selection runs under check mode)
self.disk_ids = self.get_candidate_disks()
else:
self.module.exit_json(msg="Storage pool [%s] did not exist." % (self.name))
if changed and not self.module.check_mode:
# apply changes
if self.state == 'present':
if not pool_exists:
self.create_storage_pool()
else: # pool exists but differs, modify...
if self.needs_expansion:
self.expand_storage_pool()
if self.remove_drives:
self.reduce_drives(self.remove_drives)
if self.needs_raid_level_migration:
self.migrate_raid_level()
# if self.reserved_drive_count_differs:
# self.update_reserve_drive_count(self.reserve_drive_count)
if self.secure_pool:
secure_pool_data = dict(securePool=True)
try:
(retc, r) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid,
self.pool_detail[
'id']),
data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
except Exception as err:
self.module.exit_json(
msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
pool_id, self.ssid, to_native(err)))
if int(retc) == 422:
self.module.fail_json(
msg="Error in enabling secure pool. One of the drives in the specified storage pool is likely not security capable")
elif self.state == 'absent':
# delete the storage pool
try:
remove_vol_opt = ''
if self.remove_volumes:
remove_vol_opt = '?delete-volumes=true'
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s%s" % (self.ssid, pool_id,
remove_vol_opt),
method='DELETE',
url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except Exception as err:
self.module.exit_json(
msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
to_native(err)))
self.module.exit_json(changed=changed, **self.pool_detail)
def main():
sp = NetAppESeriesStoragePool()
try:
sp.apply()
except Exception as e:
sp.debug("Exception in apply(): \n%s", format_exc())
raise
if __name__ == '__main__':
main()
| gpl-3.0 |
bhearsum-bors/servo | src/etc/tidy.py | 1 | 2251 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
#!/usr/bin/env python
import fileinput, sys, os
from licenseck import *
err = 0
def report_error_name_no(name, no, s):
global err
print("%s:%d: %s" % (name, no, s))
    err = 1
def report_err(s):
report_error_name_no(fileinput.filename(), fileinput.filelineno(), s)
def report_warn(s):
print("%s:%d: %s" % (fileinput.filename(),
fileinput.filelineno(),
s))
def do_license_check(name, contents):
if not check_license(name, contents):
report_error_name_no(name, 1, "incorrect license")
exceptions = [
"src/support", # Upstream
"src/platform", # Upstream
"src/compiler", # Upstream
"src/components/servo/dom/bindings/codegen", # Generated and upstream code combined with our own. Could use cleanup
]
def should_check(name):
if ".#" in name:
return False
if not (name.endswith(".rs")
or name.endswith(".rc")
or name.endswith(".cpp")
or name.endswith(".c")
or name.endswith(".h")
or name.endswith(".py")):
return False
for exception in exceptions:
if exception in name:
return False
return True
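# Illustrative behavior (hypothetical paths): should_check("src/foo/bar.rs")
# returns True, while should_check("src/support/lib.rs") returns False because
# "src/support" is an upstream exception, and any path containing ".#"
# (an editor lock file) is always skipped.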
file_names = []
for root, dirs, files in os.walk(sys.argv[1]):
for myfile in files:
file_name = root + "/" + myfile
if should_check(file_name):
file_names.append(file_name)
current_name = ""
current_contents = ""
for line in fileinput.input(file_names):
if fileinput.isfirstline() and current_name != "":
do_license_check(current_name, current_contents)
if fileinput.isfirstline():
current_name = fileinput.filename()
current_contents = ""
current_contents += line
if current_name != "":
do_license_check(current_name, current_contents)
sys.exit(err)
| mpl-2.0 |
raschuetz/foundations-homework | 02/homework-2-schuetz_graded.py | 1 | 3937 | # Grade: 14.5 / 5
#Rebecca Schuetz
#May 25, 2016
#Homework 2
#1) Make a list of the following numbers: 22, 90, 0, -10, 3, 22, and 48
numbers = [22, 90, 0, -10, 3, 22, 48]
#1) Display the number of elements in the list
print(len(numbers))
#2) Display the 4th element of this list.
print(numbers[3])
#3) Display the sum of the 2nd and 4th element of the list.
print(numbers[1] + numbers[3])
#4) Display the 2nd-largest value in the list.
print(sorted(numbers)[-2])
#5) Display the last element of the original unsorted list
print(list(numbers)[-1])
#6) For each number, display a number: if your original number is less than 10,
#multiply it by thirty. If it's also even, add six.
#If it's greater than 50 subtract ten.
#If it's not negative ten, subtract one.
#(For example: 2 is less than 10, so 2 * 30 = 60, 2 is also even,
#so 60 + 6 = 66, 2 is not negative ten, so 66 - 1 = 65.)
#print('The answers I know are right')
#for number in numbers:
# if number < 10:
# number_less_than_10 = number * 30
# if number % 2 == 0:
# if number == -10:
# print(number_less_than_10 + 6)
# else:
# print(number_less_than_10 + 5)
# else:
# print(number_less_than_10 - 1)
# elif number > 50:
# print(number - 11)
# else:
# print(number - 1)
#print('A way of doing it without the awkward minus ones')
# TA-COMMENT: Beautifullll!
for number in numbers:
newnumber = number
if number < 10:
newnumber = number * 30
if number % 2 == 0:
newnumber = newnumber + 6
elif number > 50:
newnumber = number - 10
if number == -10: # TA-COMMENT: No need to include an else if you make your condition != -10.
print(newnumber)
else:
print(newnumber - 1)
#7) Sum the result of each of the numbers divided by two.
print(sum(numbers) / 2)
#DICTIONARIES
#8) Sometimes dictionaries are used to describe multiple aspects of a single object.
#Like, say, a movie. Define a dictionary called movie that works with the following code.
movie = {'title': 'The Life Aquatic', 'year': 2004, 'director': 'Wes Anderson',
'budget': 50000000, 'revenue': 34806726}
# TA-COMMENT: (-0.5) We can add entries to a dictionary AFTER making it. We wanted to see:
# movie['budget'] = 65000000
# rather than "hard coding" budget and revenue into the initial dictionary.
print("My favorite movie is", movie['title'], "which was released in", movie['year'],
"and was directed by", movie['director'])
#9) Add entries to the movie dictionary for budget and revenue
#(you'll use code like movie['budget'] = 1000), and print out the difference between the two.
#10) If the movie cost more to make than it made in theaters, print "It was a flop".
#If the film's revenue was more than five times the amount it cost to make, print "That was a good investment."
if movie['revenue'] < movie['budget']:
print("It was a flop.")
if movie['revenue'] > (movie['budget'] * 5):
print("That was a good investment.")
#11) Sometimes dictionaries are used to describe the same aspects of many different objects.
#Make ONE dictionary that describes the population of the boroughs of NYC.
#Manhattan has 1.6 million residents,
#Brooklyn has 2.6m,
#Bronx has 1.4m,
#Queens has 2.3m and
#Staten Island has 470,000.
#(Tip: keeping it all in either millions or thousands is a good idea)
population = {'Manhattan': 1.6, 'Brooklyn': 2.6, 'Bronx': 1.4, 'Queens': 2.3,
'Staten Island': .47 }
#12) Display the population of Brooklyn.
print("Brooklyn has", population['Brooklyn'], 'million people.')
#13) Display the combined population of all five boroughs.
print("All five buroughs have", round(sum(population.values()),2), 'million people.')
#14) Display what percent of NYC's population lives in Manhattan.
print(round(population['Manhattan'] / sum(population.values()) * 100,2), "percent of NYC's population lives in Manhattan.")
| mit |
lukas/ml-class | videos/text-gen/char-gen.py | 2 | 3208 | import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM, SimpleRNN, GRU
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import io
import wandb
from wandb.keras import WandbCallback
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("text", type=str)
args = parser.parse_args()
run = wandb.init()
config = run.config
config.hidden_nodes = 128
config.batch_size = 256
config.file = args.text
config.maxlen = 200
config.step = 3
text = io.open(config.file, encoding='utf-8').read()
chars = sorted(list(set(text)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# build a sequence for every <config.step>-th character in the text
sentences = []
next_chars = []
for i in range(0, len(text) - config.maxlen, config.step):
sentences.append(text[i: i + config.maxlen])
next_chars.append(text[i + config.maxlen])
# build up one-hot encoded input x and output y where x is a character
# in the text y is the next character in the text
x = np.zeros((len(sentences), config.maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
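# Shape note (illustrative): with config.maxlen = 200 and, say, 60 distinct
# characters, x has shape (len(sentences), 200, 60) and y has shape
# (len(sentences), 60) -- one one-hot row per training sequence.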
model = Sequential()
model.add(SimpleRNN(config.hidden_nodes, input_shape=(config.maxlen, len(chars))))  # use the tracked config value rather than a hard-coded 128
model.add(Dense(len(chars), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer="rmsprop")
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
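# Worked example (illustrative): for preds = [0.7, 0.2, 0.1], temperature 0.5
# sharpens the distribution to roughly [0.91, 0.07, 0.02], while temperature
# 1.2 flattens it to roughly [0.64, 0.23, 0.13] -- so lower temperatures make
# the generated text more conservative and higher ones more diverse.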
class SampleText(keras.callbacks.Callback):
def on_epoch_end(self, batch, logs={}):
start_index = random.randint(0, len(text) - config.maxlen - 1)
for diversity in [0.5, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + config.maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(200):
x_pred = np.zeros((1, config.maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.
preds = model.predict(x_pred, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
model.fit(x, y, batch_size=config.batch_size,
epochs=100, callbacks=[SampleText(), WandbCallback()])
| gpl-2.0 |
MSusik/invenio | setup.py | 1 | 8823 | ## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio is Fun.
Links
-----
* `website <http://invenio-software.org/>`_
* `documentation <http://invenio.readthedocs.org/en/latest/>`_
* `development version <https://github.com/inveniosoftware/invenio>`_
"""
import os
import sys
from setuptools import setup, find_packages
install_requires = [
"alembic==0.6.2",
"Babel==1.3",
"BeautifulSoup==3.2.1",
"BeautifulSoup4==4.3.2",
"celery==3.1.12",
"Cerberus==0.7",
"dictdiffer==0.0.3",
"feedparser==5.1.3",
"fixture==1.5",
"Flask==0.10.1",
"Flask-Admin==1.0.7",
"Flask-Assets==0.10",
"Flask-Babel==0.9",
"Flask-Breadcrumbs==0.1",
"Flask-Cache==0.12",
"Flask-Collect>=0.2.3",
"Flask-Email==1.4.4",
"Flask-Gravatar==0.4.0",
"Flask-Login==0.2.7",
"Flask-Menu==0.1",
"Flask-OAuthlib==0.4.3",
"Flask-Principal==0.4.0",
"Flask-Registry>0.1",
"Flask-RESTful==0.2.12",
"Flask-Script>=2.0.5",
"Flask-SQLAlchemy>1.9",
"Flask-WTF==0.9.5",
"fs==0.4.0",
"intbitset==2.0",
"jellyfish>=0.2",
"Jinja2==2.7.3",
"libmagic==1.0",
"lxml==3.1.2",
"mechanize==0.2.5",
"msgpack-python==0.3.0",
"MySQL-python==1.2.5",
"numpy==1.7.0",
"pyparsing==2.0.1",
"python-twitter==0.8.7",
"pyPDF==1.13",
"pyPDF2",
"PyLD>=0.5.2",
"pyRXP==1.16",
"pyStemmer==1.3.0",
# python-dateutil>=2.0 is only for Python3
"python-dateutil>=1.5,<2.0",
"python-magic==0.4.6",
"pytz",
"rauth",
"raven==4.2.1",
"rdflib==2.4.2",
"redis==2.8.0", # Is it explicitly required?
"reportlab==2.5",
"requests==1.2.3",
"setuptools>=2.0", # dad?
"setuptools-bower>=0.2,<1.0",
"six>=1.7.2",
"Sphinx",
"SQLAlchemy==0.8.3",
"SQLAlchemy-Utils>=0.23.5,<0.24",
"unidecode",
"workflow==1.1.0",
# Flask-WTF 0.9.5 doesn't support WTForms 2.0 as of yet.
"WTForms>=1.0.5,<2.0",
"wtforms-alchemy==0.12.6"
]
extras_require = {
"docs": [
"sphinx_rtd_theme"
],
"development": [
"Flask-DebugToolbar==0.9.0"
],
"elasticsearch": [
"pyelasticsearch>=0.6.1"
],
"img": [
"qrcode",
"Pillow"
],
"mongo": [
"pymongo"
],
"misc": [ # was requirements-extras
"apiclient", # extra=cloud?
"dropbox", # extra=cloud?
"gnuplot-py==1.8",
"flake8", # extra=kwalitee?
"pep8", # extra=kwalitee?
"pychecker==0.8.19", # extra=kwalitee?
"pylint", # extra=kwalitee?
"nosexcover", # test?
"oauth2client", # extra=cloud?
"python-onedrive", # extra=cloud?
"python-openid", # extra=sso?
"urllib3", # extra=cloud?
],
"sso": [
"Flask-SSO>=0.1"
]
}
extras_require["docs"] += extras_require["elasticsearch"]
extras_require["docs"] += extras_require["img"]
extras_require["docs"] += extras_require["mongo"]
extras_require["docs"] += extras_require["sso"]
tests_require = [
"httpretty==0.8.0",
"Flask-Testing==0.4.1",
"mock",
"nose",
"selenium",
"unittest2==0.5.1",
]
# Compatibility with Python 2.6
if sys.version_info < (2, 7):
install_requires += [
"argparse",
"importlib"
]
# Get the version string. Cannot be done with import, because importing
# invenio would require its dependencies to be installed already.
g = {}
with open(os.path.join("invenio", "version.py"), "rt") as fp:
exec(fp.read(), g)
version = g["__version__"]
packages = find_packages(exclude=['docs'])
packages.append('invenio_docs')
setup(
name='Invenio',
version=version,
url='https://github.com/inveniosoftware/invenio',
license='GPLv2',
author='CERN',
author_email='info@invenio-software.org',
description='Digital library software',
long_description=__doc__,
packages=packages,
package_dir={'invenio_docs': 'docs'},
include_package_data=True,
zip_safe=False,
platforms='any',
entry_points={
'console_scripts': [
'inveniomanage = invenio.base.manage:main',
'plotextractor = invenio.utils.scripts.plotextractor:main',
# Legacy
'alertengine = invenio.legacy.webalert.scripts.alertengine:main',
'batchuploader = invenio.legacy.bibupload.scripts.batchuploader',
'bibcircd = invenio.legacy.bibcirculation.scripts.bibcircd:main',
'bibauthorid = invenio.legacy.bibauthorid.scripts.bibauthorid:main',
'bibclassify = invenio.modules.classifier.scripts.classifier:main',
'bibconvert = invenio.legacy.bibconvert.scripts.bibconvert:main',
'bibdocfile = invenio.legacy.bibdocfile.scripts.bibdocfile:main',
'bibedit = invenio.legacy.bibedit.scripts.bibedit:main',
'bibencode = invenio.modules.encoder.scripts.encoder:main',
'bibindex = invenio.legacy.bibindex.scripts.bibindex:main',
'bibmatch = invenio.legacy.bibmatch.scripts.bibmatch:main',
'bibrank = invenio.legacy.bibrank.scripts.bibrank:main',
'bibrankgkb = invenio.legacy.bibrank.scripts.bibrankgkb:main',
'bibreformat = invenio.legacy.bibformat.scripts.bibreformat:main',
'bibsort = invenio.legacy.bibsort.scripts.bibsort:main',
'bibsched = invenio.legacy.bibsched.scripts.bibsched:main',
'bibstat = invenio.legacy.bibindex.scripts.bibstat:main',
'bibtaskex = invenio.legacy.bibsched.scripts.bibtaskex:main',
'bibtasklet = invenio.legacy.bibsched.scripts.bibtasklet:main',
'bibupload = invenio.legacy.bibupload.scripts.bibupload:main',
'dbexec = invenio.legacy.miscutil.scripts.dbexec:main',
'dbdump = invenio.legacy.miscutil.scripts.dbdump:main',
'docextract = invenio.legacy.docextract.scripts.docextract:main',
'elmsubmit = invenio.legacy.elmsubmit.scripts.elmsubmit:main',
'gotoadmin = invenio.modules.redirector.scripts.redirector:main',
'inveniocfg = invenio.legacy.inveniocfg:main',
'inveniogc = invenio.legacy.websession.scripts.inveniogc:main',
'inveniounoconv = invenio.legacy.websubmit.scripts.inveniounoconv:main',
'oaiharvest = invenio.legacy.oaiharvest.scripts.oaiharvest:main',
'oairepositoryupdater = invenio.legacy.oairepository.scripts.oairepositoryupdater:main',
'arxiv-pdf-checker = invenio.legacy.pdfchecker:main',
'refextract = invenio.legacy.refextract.scripts.refextract:main',
'textmarc2xmlmarc = invenio.legacy.bibrecord.scripts.textmarc2xmlmarc:main',
'webaccessadmin = invenio.modules.access.scripts.webaccessadmin:main',
'webauthorprofile = invenio.legacy.webauthorprofile.scripts.webauthorprofile:main',
'webcoll = invenio.legacy.websearch.scripts.webcoll:main',
'webmessageadmin = invenio.legacy.webmessage.scripts.webmessageadmin:main',
'webstatadmin = invenio.legacy.webstat.scripts.webstatadmin:main',
'websubmitadmin = invenio.legacy.websubmit.scripts.websubmitadmin:main',
'xmlmarc2textmarc = invenio.legacy.bibrecord.scripts.xmlmarc2textmarc:main',
'xmlmarclint = invenio.legacy.bibrecord.scripts.xmlmarclint:main',
],
"distutils.commands": [
"inveniomanage = invenio.base.setuptools:InvenioManageCommand",
]
},
install_requires=install_requires,
extras_require=extras_require,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GPLv2 License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
test_suite='invenio.testsuite.suite',
tests_require=tests_require
)
| gpl-2.0 |
matthappens/taskqueue | taskqueue/venv_tq/lib/python2.7/site-packages/boto/gs/bucket.py | 18 | 42084 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import re
import urllib
import xml.sax
import boto
from boto import handler
from boto.resultset import ResultSet
from boto.exception import GSResponseError
from boto.exception import InvalidAclError
from boto.gs.acl import ACL, CannedACLStrings
from boto.gs.acl import SupportedPermissions as GSPermissions
from boto.gs.bucketlistresultset import VersionedBucketListResultSet
from boto.gs.cors import Cors
from boto.gs.lifecycle import LifecycleConfig
from boto.gs.key import Key as GSKey
from boto.s3.acl import Policy
from boto.s3.bucket import Bucket as S3Bucket
from boto.utils import get_utf8_value
# constants for http query args
DEF_OBJ_ACL = 'defaultObjectAcl'
STANDARD_ACL = 'acl'
CORS_ARG = 'cors'
LIFECYCLE_ARG = 'lifecycle'
ERROR_DETAILS_REGEX = re.compile(r'<Details>(?P<details>.*)</Details>')
class Bucket(S3Bucket):
"""Represents a Google Cloud Storage bucket."""
VersioningBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<VersioningConfiguration><Status>%s</Status>'
'</VersioningConfiguration>')
WebsiteBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<WebsiteConfiguration>%s%s</WebsiteConfiguration>')
WebsiteMainPageFragment = '<MainPageSuffix>%s</MainPageSuffix>'
WebsiteErrorFragment = '<NotFoundPage>%s</NotFoundPage>'
def __init__(self, connection=None, name=None, key_class=GSKey):
super(Bucket, self).__init__(connection, name, key_class)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'CreationDate':
self.creation_date = value
else:
setattr(self, name, value)
def get_key(self, key_name, headers=None, version_id=None,
response_headers=None, generation=None):
"""Returns a Key instance for an object in this bucket.
Note that this method uses a HEAD request to check for the existence of
the key.
:type key_name: string
:param key_name: The name of the key to retrieve
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/06N3b for details.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: A specific generation number to fetch the key at. If
not specified, the latest generation is fetched.
:rtype: :class:`boto.gs.key.Key`
:returns: A Key object from this bucket.
"""
query_args_l = []
if generation:
query_args_l.append('generation=%s' % generation)
if response_headers:
for rk, rv in response_headers.iteritems():
query_args_l.append('%s=%s' % (rk, urllib.quote(rv)))
try:
key, resp = self._get_key_internal(key_name, headers,
query_args_l=query_args_l)
except GSResponseError, e:
if e.status == 403 and 'Forbidden' in e.reason:
# If we failed getting an object, let the user know which object
# failed rather than just returning a generic 403.
e.reason = ("Access denied to 'gs://%s/%s'." %
(self.name, key_name))
raise
return key
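    # Illustrative usage (names hypothetical; assumes an authenticated
    # connection):
    #
    #     bucket = conn.get_bucket('my-bucket')
    #     key = bucket.get_key('path/to/object', generation=3)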
def copy_key(self, new_key_name, src_bucket_name, src_key_name,
metadata=None, src_version_id=None, storage_class='STANDARD',
preserve_acl=False, encrypt_key=False, headers=None,
query_args=None, src_generation=None):
"""Create a new key in the bucket by copying an existing key.
:type new_key_name: string
:param new_key_name: The name of the new key
:type src_bucket_name: string
:param src_bucket_name: The name of the source bucket
:type src_key_name: string
:param src_key_name: The name of the source key
:type src_generation: int
:param src_generation: The generation number of the source key to copy.
If not specified, the latest generation is copied.
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type version_id: string
:param version_id: Unused in this subclass.
:type storage_class: string
:param storage_class: The storage class of the new key. By
default, the new key will use the standard storage class.
Possible values are: STANDARD | DURABLE_REDUCED_AVAILABILITY
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to GCS, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL (or if you have a default ACL set
on the bucket), a value of False will be significantly more
efficient.
:type encrypt_key: bool
:param encrypt_key: Included for compatibility with S3. This argument is
ignored.
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:type query_args: string
:param query_args: A string of additional querystring arguments
to append to the request
:rtype: :class:`boto.gs.key.Key`
:returns: An instance of the newly created key object
"""
if src_generation:
headers = headers or {}
headers['x-goog-copy-source-generation'] = str(src_generation)
return super(Bucket, self).copy_key(
new_key_name, src_bucket_name, src_key_name, metadata=metadata,
storage_class=storage_class, preserve_acl=preserve_acl,
encrypt_key=encrypt_key, headers=headers, query_args=query_args)
def list_versions(self, prefix='', delimiter='', marker='',
generation_marker='', headers=None):
"""
List versioned objects within a bucket. This returns an
instance of an VersionedBucketListResultSet that automatically
handles all of the result paging, etc. from GCS. You just need
to keep iterating until there are no more results. Called
with no arguments, this will return an iterator object across
all keys within the bucket.
:type prefix: string
:param prefix: allows you to limit the listing to a particular
prefix. For example, if you call the method with
prefix='/foo/' then the iterator will only cycle through
the keys that begin with the string '/foo/'.
:type delimiter: string
:param delimiter: can be used in conjunction with the prefix
to allow you to organize and browse your keys
hierarchically. See:
https://developers.google.com/storage/docs/reference-headers#delimiter
for more details.
:type marker: string
:param marker: The "marker" of where you are in the result set
:type generation_marker: string
:param generation_marker: The "generation marker" of where you are in
the result set.
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:rtype:
:class:`boto.gs.bucketlistresultset.VersionedBucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc.
"""
return VersionedBucketListResultSet(self, prefix, delimiter,
marker, generation_marker,
headers)
def validate_get_all_versions_params(self, params):
"""
See documentation in boto/s3/bucket.py.
"""
self.validate_kwarg_names(params,
['version_id_marker', 'delimiter', 'marker',
'generation_marker', 'prefix', 'max_keys'])
def delete_key(self, key_name, headers=None, version_id=None,
mfa_token=None, generation=None):
"""
Deletes a key from the bucket.
:type key_name: string
:param key_name: The key name to delete
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:type version_id: string
:param version_id: Unused in this subclass.
:type mfa_token: tuple or list of strings
:param mfa_token: Unused in this subclass.
:type generation: int
:param generation: The generation number of the key to delete. If not
specified, the latest generation number will be deleted.
:rtype: :class:`boto.gs.key.Key`
:returns: A key object holding information on what was
deleted.
"""
query_args_l = []
if generation:
query_args_l.append('generation=%s' % generation)
self._delete_key_internal(key_name, headers=headers,
version_id=version_id, mfa_token=mfa_token,
query_args_l=query_args_l)
def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None,
generation=None, if_generation=None, if_metageneration=None):
"""Sets or changes a bucket's or key's ACL.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
else:
self.set_canned_acl(acl_or_str, key_name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_def_acl(self, acl_or_str, headers=None):
"""Sets or changes a bucket's default ACL.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_def_xml_acl(acl_or_str.to_xml(), headers=headers)
else:
self.set_def_canned_acl(acl_or_str, headers=headers)
def _get_xml_acl_helper(self, key_name, headers, query_args):
"""Provides common functionality for get_xml_acl and _get_acl_helper."""
response = self.connection.make_request('GET', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
if response.status != 200:
if response.status == 403:
match = ERROR_DETAILS_REGEX.search(body)
details = match.group('details') if match else None
if details:
details = (('<Details>%s. Note that Full Control access'
' is required to access ACLs.</Details>') %
details)
body = re.sub(ERROR_DETAILS_REGEX, details, body)
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
return body
def _get_acl_helper(self, key_name, headers, query_args):
"""Provides common functionality for get_acl and get_def_acl."""
body = self._get_xml_acl_helper(key_name, headers, query_args)
acl = ACL(self)
h = handler.XmlHandler(acl, self)
xml.sax.parseString(body, h)
return acl
def get_acl(self, key_name='', headers=None, version_id=None,
generation=None):
"""Returns the ACL of the bucket or an object in the bucket.
:param str key_name: The name of the object to get the ACL for. If not
specified, the ACL for the bucket will be returned.
:param dict headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned. This parameter is only valid when retrieving
the ACL of an object, not a bucket.
:rtype: :class:`.gs.acl.ACL`
"""
query_args = STANDARD_ACL
if generation:
query_args += '&generation=%s' % generation
return self._get_acl_helper(key_name, headers, query_args)
def get_xml_acl(self, key_name='', headers=None, version_id=None,
generation=None):
"""Returns the ACL string of the bucket or an object in the bucket.
:param str key_name: The name of the object to get the ACL for. If not
specified, the ACL for the bucket will be returned.
:param dict headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned. This parameter is only valid when retrieving
the ACL of an object, not a bucket.
:rtype: str
"""
query_args = STANDARD_ACL
if generation:
query_args += '&generation=%s' % generation
return self._get_xml_acl_helper(key_name, headers, query_args)
def get_def_acl(self, headers=None):
"""Returns the bucket's default ACL.
:param dict headers: Additional headers to set during the request.
:rtype: :class:`.gs.acl.ACL`
"""
return self._get_acl_helper('', headers, DEF_OBJ_ACL)
def _set_acl_helper(self, acl_or_str, key_name, headers, query_args,
generation, if_generation, if_metageneration,
canned=False):
"""Provides common functionality for set_acl, set_xml_acl,
set_canned_acl, set_def_acl, set_def_xml_acl, and
set_def_canned_acl()."""
headers = headers or {}
data = ''
if canned:
headers[self.connection.provider.acl_header] = acl_or_str
else:
data = acl_or_str
if generation:
query_args += '&generation=%s' % generation
if if_metageneration is not None and if_generation is None:
raise ValueError("Received if_metageneration argument with no "
"if_generation argument. A metageneration has no "
"meaning without a content generation.")
if not key_name and (if_generation or if_metageneration):
raise ValueError("Received if_generation or if_metageneration "
"parameter while setting the ACL of a bucket.")
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if if_metageneration is not None:
headers['x-goog-if-metageneration-match'] = str(if_metageneration)
response = self.connection.make_request(
'PUT', get_utf8_value(self.name), get_utf8_value(key_name),
data=get_utf8_value(data), headers=headers, query_args=query_args)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
query_args='acl', generation=None, if_generation=None,
if_metageneration=None):
"""Sets a bucket's or objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type query_args: str
:param query_args: The query parameters to pass with the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
return self._set_acl_helper(acl_str, key_name=key_name, headers=headers,
query_args=query_args,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, key_name='', headers=None,
version_id=None, generation=None, if_generation=None,
if_metageneration=None):
"""Sets a bucket's or objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if acl_str not in CannedACLStrings:
raise ValueError("Provided canned ACL string (%s) is not valid."
% acl_str)
query_args = STANDARD_ACL
return self._set_acl_helper(acl_str, key_name, headers, query_args,
generation, if_generation,
if_metageneration, canned=True)
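    # Illustrative usage (hypothetical values): make generation 5 of an object
    # public, but only if its content generation is still 5 and its
    # metageneration is still 2.
    #
    #     bucket.set_canned_acl('public-read', 'obj.txt', generation=5,
    #                           if_generation=5, if_metageneration=2)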
def set_def_canned_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
if acl_str not in CannedACLStrings:
raise ValueError("Provided canned ACL string (%s) is not valid."
% acl_str)
query_args = DEF_OBJ_ACL
return self._set_acl_helper(acl_str, '', headers, query_args,
generation=None, if_generation=None,
if_metageneration=None, canned=True)
def set_def_xml_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
return self.set_xml_acl(acl_str, '', headers,
query_args=DEF_OBJ_ACL)
def get_cors(self, headers=None):
"""Returns a bucket's CORS XML document.
:param dict headers: Additional headers to send with the request.
:rtype: :class:`~.cors.Cors`
"""
response = self.connection.make_request('GET', self.name,
query_args=CORS_ARG,
headers=headers)
body = response.read()
if response.status == 200:
# Success - parse XML and return Cors object.
cors = Cors()
h = handler.XmlHandler(cors, self)
xml.sax.parseString(body, h)
return cors
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_cors(self, cors, headers=None):
"""Sets a bucket's CORS XML document.
:param str cors: A string containing the CORS XML.
:param dict headers: Additional headers to send with the request.
"""
response = self.connection.make_request(
'PUT', get_utf8_value(self.name), data=get_utf8_value(cors),
query_args=CORS_ARG, headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
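# Usage sketch for the two CORS helpers above, assuming `bucket` is a
# Bucket obtained from a connected GSConnection (the XML document shown
# is illustrative only):
#
#   >>> cors_xml = ('<CorsConfig><Cors><Origins>'
#   ...             '<Origin>http://example.com</Origin>'
#   ...             '</Origins></Cors></CorsConfig>')
#   >>> bucket.set_cors(cors_xml)   # PUTs the document; raises on non-200
#   >>> cors = bucket.get_cors()    # parsed ~.cors.Cors instance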
def get_storage_class(self):
"""
Returns the StorageClass for the bucket.
:rtype: str
:return: The StorageClass for the bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args='storageClass')
body = response.read()
if response.status == 200:
rs = ResultSet(self)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs.StorageClass
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
# Method with same signature as boto.s3.bucket.Bucket.add_email_grant(),
# to allow polymorphic treatment at application layer.
def add_email_grant(self, permission, email_address,
recursive=False, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the GS
account you are granting the permission to.
:type recursive: bool
:param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_email_grant(permission, email_address, headers=headers)
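# Illustrative call for the convenience method above; `bucket` and the
# grantee address are assumptions, not values from this module. With
# recursive=True the same grant is re-applied key by key, one ACL
# round trip per key:
#
#   >>> bucket.add_email_grant('READ', 'someone@example.com')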
# Method with same signature as boto.s3.bucket.Bucket.add_user_grant(),
# to allow polymorphic treatment at application layer.
def add_user_grant(self, permission, user_id, recursive=False,
headers=None):
"""
Convenience method that provides a quick way to add a canonical user
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUTs the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ|WRITE|FULL_CONTROL)
:type user_id: string
:param user_id: The canonical user id associated with the GS account
you are granting the permission to.
:type recursive: bool
:param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_user_grant(permission, user_id)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_user_grant(permission, user_id, headers=headers)
def add_group_email_grant(self, permission, email_address, recursive=False,
headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|WRITE|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
:type recursive: bool
:param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_group_email_grant(permission, email_address,
headers=headers)
# Method with same input signature as boto.s3.bucket.Bucket.list_grants()
# (but returning different object type), to allow polymorphic treatment
# at application layer.
def list_grants(self, headers=None):
"""Returns the ACL entries applied to this bucket.
:param dict headers: Additional headers to send with the request.
:rtype: list containing :class:`~.gs.acl.Entry` objects.
"""
acl = self.get_acl(headers=headers)
return acl.entries
def disable_logging(self, headers=None):
"""Disable logging on this bucket.
:param dict headers: Additional headers to send with the request.
"""
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging/>'
self.set_subresource('logging', xml_str, headers=headers)
def enable_logging(self, target_bucket, target_prefix=None, headers=None):
"""Enable logging on a bucket.
:type target_bucket: bucket or string
:param target_bucket: The bucket to log to.
:type target_prefix: string
:param target_prefix: The prefix which should be prepended to the
generated log files written to the target_bucket.
:param dict headers: Additional headers to send with the request.
"""
if isinstance(target_bucket, Bucket):
target_bucket = target_bucket.name
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging>'
xml_str = (xml_str + '<LogBucket>%s</LogBucket>' % target_bucket)
if target_prefix:
xml_str = (xml_str +
'<LogObjectPrefix>%s</LogObjectPrefix>' % target_prefix)
xml_str = xml_str + '</Logging>'
self.set_subresource('logging', xml_str, headers=headers)
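# For reference, enable_logging('logs-bucket', 'access/') would PUT the
# following subresource body (sketch; the real string is sent unindented):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <Logging>
#     <LogBucket>logs-bucket</LogBucket>
#     <LogObjectPrefix>access/</LogObjectPrefix>
#   </Logging>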
def get_logging_config_with_xml(self, headers=None):
"""Returns the current status of logging configuration on the bucket as
unparsed XML.
:param dict headers: Additional headers to send with the request.
:rtype: 2-Tuple
:returns: 2-tuple containing:
1) A dictionary containing the parsed XML response from GCS. The
overall structure is:
* Logging
* LogObjectPrefix: Prefix that is prepended to log objects.
* LogBucket: Target bucket for log objects.
2) Unparsed XML describing the bucket's logging configuration.
"""
response = self.connection.make_request('GET', self.name,
query_args='logging',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e, body
def get_logging_config(self, headers=None):
"""Returns the current status of logging configuration on the bucket.
:param dict headers: Additional headers to send with the request.
:rtype: dict
:returns: A dictionary containing the parsed XML response from GCS. The
overall structure is:
* Logging
* LogObjectPrefix: Prefix that is prepended to log objects.
* LogBucket: Target bucket for log objects.
"""
return self.get_logging_config_with_xml(headers)[0]
def configure_website(self, main_page_suffix=None, error_key=None,
headers=None):
"""Configure this bucket to act as a website
:type main_page_suffix: str
:param main_page_suffix: Suffix that is appended to a request that is
for a "directory" on the website endpoint (e.g. if the suffix is
index.html and you make a request to samplebucket/images/ the data
that is returned will be for the object with the key name
images/index.html). The suffix must not be empty and must not
include a slash character. This parameter is optional and the
property is disabled if excluded.
:type error_key: str
:param error_key: The object key name to use when a 404 error occurs.
This parameter is optional and the property is disabled if excluded.
:param dict headers: Additional headers to send with the request.
"""
if main_page_suffix:
main_page_frag = self.WebsiteMainPageFragment % main_page_suffix
else:
main_page_frag = ''
if error_key:
error_frag = self.WebsiteErrorFragment % error_key
else:
error_frag = ''
body = self.WebsiteBody % (main_page_frag, error_frag)
response = self.connection.make_request(
'PUT', get_utf8_value(self.name), data=get_utf8_value(body),
query_args='websiteConfig', headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
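# Illustrative call only; WebsiteMainPageFragment, WebsiteErrorFragment
# and WebsiteBody are class-level templates defined earlier in this file
# and not shown in this excerpt:
#
#   >>> bucket.configure_website(main_page_suffix='index.html',
#   ...                          error_key='404.html')
#   True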
def get_website_configuration(self, headers=None):
"""Returns the current status of website configuration on the bucket.
:param dict headers: Additional headers to send with the request.
:rtype: dict
:returns: A dictionary containing the parsed XML response from GCS. The
overall structure is:
* WebsiteConfiguration
* MainPageSuffix: suffix that is appended to a request that
is for a "directory" on the website endpoint.
* NotFoundPage: name of an object to serve when site visitors
encounter a 404.
"""
return self.get_website_configuration_with_xml(headers)[0]
def get_website_configuration_with_xml(self, headers=None):
"""Returns the current status of website configuration on the bucket as
unparsed XML.
:param dict headers: Additional headers to send with the request.
:rtype: 2-Tuple
:returns: 2-tuple containing:
1) A dictionary containing the parsed XML response from GCS. The
overall structure is:
* WebsiteConfiguration
* MainPageSuffix: suffix that is appended to a request that is for
a "directory" on the website endpoint.
* NotFoundPage: name of an object to serve when site visitors
encounter a 404
2) Unparsed XML describing the bucket's website configuration.
"""
response = self.connection.make_request('GET', self.name,
query_args='websiteConfig', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e, body
def delete_website_configuration(self, headers=None):
"""Remove the website configuration from this bucket.
:param dict headers: Additional headers to send with the request.
"""
self.configure_website(headers=headers)
def get_versioning_status(self, headers=None):
"""Returns the current status of versioning configuration on the bucket.
:rtype: bool
"""
response = self.connection.make_request('GET', self.name,
query_args='versioning',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
resp_json = boto.jsonresponse.Element()
boto.jsonresponse.XmlHandler(resp_json, None).parse(body)
resp_json = resp_json['VersioningConfiguration']
return ('Status' in resp_json) and (resp_json['Status'] == 'Enabled')
def configure_versioning(self, enabled, headers=None):
"""Configure versioning for this bucket.
:param bool enabled: If set to True, enables versioning on this bucket.
If set to False, disables versioning.
:param dict headers: Additional headers to send with the request.
"""
if enabled == True:
req_body = self.VersioningBody % ('Enabled')
else:
req_body = self.VersioningBody % ('Suspended')
self.set_subresource('versioning', req_body, headers=headers)
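# Sketch of the versioning round trip, assuming a connected `bucket`
# (VersioningBody is a class template defined elsewhere in this file):
#
#   >>> bucket.configure_versioning(True)   # PUTs <Status>Enabled</Status>
#   >>> bucket.get_versioning_status()
#   True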
def get_lifecycle_config(self, headers=None):
"""
Returns the current lifecycle configuration on the bucket.
:rtype: :class:`boto.gs.lifecycle.LifecycleConfig`
:returns: A LifecycleConfig object that describes all current
lifecycle rules in effect for the bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args=LIFECYCLE_ARG, headers=headers)
body = response.read()
boto.log.debug(body)
if response.status == 200:
lifecycle_config = LifecycleConfig()
h = handler.XmlHandler(lifecycle_config, self)
xml.sax.parseString(body, h)
return lifecycle_config
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def configure_lifecycle(self, lifecycle_config, headers=None):
"""
Configure lifecycle for this bucket.
:type lifecycle_config: :class:`boto.gs.lifecycle.LifecycleConfig`
:param lifecycle_config: The lifecycle configuration you want
to configure for this bucket.
"""
xml = lifecycle_config.to_xml()
response = self.connection.make_request(
'PUT', get_utf8_value(self.name), data=get_utf8_value(xml),
query_args=LIFECYCLE_ARG, headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
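# Sketch of the lifecycle round trip, assuming a connected `bucket`; the
# config is fetched, left unchanged, and PUT back:
#
#   >>> config = bucket.get_lifecycle_config()   # LifecycleConfig instance
#   >>> bucket.configure_lifecycle(config)
#   True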
| mit |
uiri/pxqz | venv/lib/python2.7/site-packages/django/conf/locale/id/formats.py | 355 | 1818 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i.s"
TIME_FORMAT = 'G.i.s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i.s'
FIRST_DAY_OF_WEEK = 1 #Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d-%m-%y', '%d/%m/%y', # '25-10-09', '25/10/09'
'%d-%m-%Y', '%d/%m/%Y', # '25-10-2009', '25/10/2009'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006'
)
TIME_INPUT_FORMATS = (
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
'%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
'%d-%m-%Y %H.%M', # '25-10-2009 14.30'
'%d-%m-%Y', # '25-10-2009'
'%d-%m-%y %H.%M.%S', # '25-10-09 14.30.59'
'%d-%m-%y %H.%M', # '25-10-09 14.30'
'%d-%m-%y', # '25-10-09'
'%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
'%m/%d/%y %H.%M', # '10/25/06 14.30'
'%m/%d/%y', # '10/25/06'
'%m/%d/%Y %H.%M.%S', # '10/25/2009 14.30.59'
'%m/%d/%Y %H.%M', # '10/25/2009 14.30'
'%m/%d/%Y', # '10/25/2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
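# Quick standalone check of one of the input formats above with the
# stdlib (illustrative; Django tries these formats in the order listed):
#
#   >>> from datetime import datetime
#   >>> datetime.strptime('25-10-2009 14.30', '%d-%m-%Y %H.%M')
#   datetime.datetime(2009, 10, 25, 14, 30)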
| gpl-3.0 |
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/compiler/syntax.py | 369 | 1444 | """Check for errors in the AST.
The Python parser does not catch all syntax errors. Others, like
assignments with invalid targets, are caught in the code generation
phase.
The compiler package catches some errors in the transformer module.
But it seems clearer to write checkers that use the AST to detect
errors.
"""
from compiler import ast, walk
def check(tree, multi=None):
v = SyntaxErrorChecker(multi)
walk(tree, v)
return v.errors
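# Minimal driver for check() (Python 2 only, like the rest of this module;
# the source string is an assumption). compiler.parse() builds the AST
# that check() walks:
#
#   >>> from compiler import parse
#   >>> check(parse("x = 1\n"))   # returns the error count
#   0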
class SyntaxErrorChecker:
"""A visitor to find syntax errors in the AST."""
def __init__(self, multi=None):
"""Create new visitor object.
If optional argument multi is not None, then print messages
for each error rather than raising a SyntaxError for the
first.
"""
self.multi = multi
self.errors = 0
def error(self, node, msg):
self.errors = self.errors + 1
if self.multi is not None:
print "%s:%s: %s" % (node.filename, node.lineno, msg)
else:
raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)
def visitAssign(self, node):
# the transformer module handles many of these
pass
## for target in node.nodes:
## if isinstance(target, ast.AssList):
## if target.lineno is None:
## target.lineno = node.lineno
## self.error(target, "can't assign to list comprehension")
| gpl-3.0 |
microcom/odoo | addons/crm_claim/report/crm_claim_report.py | 42 | 3581 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields,osv
from openerp import tools
AVAILABLE_PRIORITIES = [
('0', 'Low'),
('1', 'Normal'),
('2', 'High')
]
class crm_claim_report(osv.osv):
""" CRM Claim Report"""
_name = "crm.claim.report"
_auto = False
_description = "CRM Claim Report"
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'team_id':fields.many2one('crm.team', 'Team', oldname='section_id', readonly=True),
'nbr': fields.integer('# of Claims', readonly=True), # TDE FIXME master: rename into nbr_claims
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'claim_date': fields.datetime('Claim Date', readonly=True),
'delay_close': fields.float('Delay to close', digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'stage_id': fields.many2one ('crm.claim.stage', 'Stage', readonly=True,domain="[('team_ids','=',team_id)]"),
'categ_id': fields.many2one('crm.claim.category', 'Category',readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'priority': fields.selection(AVAILABLE_PRIORITIES, 'Priority'),
'type_action': fields.selection([('correction','Corrective Action'),('prevention','Preventive Action')], 'Action Type'),
'date_closed': fields.datetime('Close Date', readonly=True, select=True),
'date_deadline': fields.date('Deadline', readonly=True, select=True),
'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"),
'email': fields.integer('# Emails', size=128, readonly=True),
'subject': fields.char('Claim Subject', readonly=True)
}
def init(self, cr):
""" Display Number of cases And Team Name
@param cr: the current row, from the database cursor,
"""
tools.drop_view_if_exists(cr, 'crm_claim_report')
cr.execute("""
create or replace view crm_claim_report as (
select
min(c.id) as id,
c.date as claim_date,
c.date_closed as date_closed,
c.date_deadline as date_deadline,
c.user_id,
c.stage_id,
c.team_id,
c.partner_id,
c.company_id,
c.categ_id,
c.name as subject,
count(*) as nbr,
c.priority as priority,
c.type_action as type_action,
c.create_date as create_date,
avg(extract('epoch' from (c.date_closed-c.create_date)))/(3600*24) as delay_close,
(SELECT count(id) FROM mail_message WHERE model='crm.claim' AND res_id=c.id) AS email,
extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24) as delay_expected
from
crm_claim c
group by c.date,\
c.user_id,c.team_id, c.stage_id,\
c.categ_id,c.partner_id,c.company_id,c.create_date,
c.priority,c.type_action,c.date_deadline,c.date_closed,c.id
)""")
| agpl-3.0 |
Aliced3645/DataCenterMarketing | impacket/examples/os_ident.py | 6 | 76129 | #--
# $Id$
#
# Copyright (c) 2001-2003 CORE Security Technologies, CORE SDI Inc.
# All rights reserved.
#
# This computer software is owned by Core SDI Inc. and is
# protected by U.S. copyright laws and other laws and by international
# treaties. This computer software is furnished by CORE SDI Inc.
# pursuant to a written license agreement and may be used, copied,
# transmitted, and stored only in accordance with the terms of such
# license and with the inclusion of the above copyright notice. This
# computer software or any other copies thereof may not be provided or
# otherwise made available to any other person.
#
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED. IN NO EVENT SHALL CORE SDI Inc. BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR
# CONSEQUENTIAL DAMAGES RESULTING FROM THE USE OR MISUSE OF
# THIS SOFTWARE
#
#--
from impacket.ImpactPacket import *
from impacket.ImpactDecoder import *
g_nmap1_signature_filename="nmap-os-fingerprints"
g_nmap2_signature_filename="nmap-os-db"
class os_id_exception:
def __init__(self, value):
self.value = value
def __str__(self):
return `self.value`
class os_id_test:
def __init__(self, id):
self.__id = id
self.__my_packet = None
self.__result_dict = {}
def test_id(self):
return self.__class__.__name__
def get_test_packet(self):
return self.__my_packet.get_packet()
def set_packet(self, packet):
self.__my_packet = packet
def get_packet(self):
return self.__my_packet
def process(self, packet):
pass
def add_result(self, name, value):
self.__result_dict[name] = value
def get_id(self):
return self.__id
def is_mine(self, packet):
pass
def get_result_dict(self):
return self.__result_dict;
def get_final_result(self):
"Returns a string representation of the final result of this test or None if no response was received"
pass
class icmp_request(os_id_test):
type_filter = { ICMP.ICMP_ECHO : ICMP.ICMP_ECHOREPLY,
ICMP.ICMP_IREQ : ICMP.ICMP_IREQREPLY,
ICMP.ICMP_MASKREQ : ICMP.ICMP_MASKREPLY,
ICMP.ICMP_TSTAMP : ICMP.ICMP_TSTAMPREPLY }
def __init__(self, id, addresses, type):
os_id_test.__init__(self, id)
self.e = Ethernet()
self.i = IP()
self.icmp = ICMP()
self.i.set_ip_src(addresses[0])
self.i.set_ip_dst(addresses[1])
self.__type = type
self.icmp.set_icmp_type(type)
self.e.contains(self.i)
self.i.contains(self.icmp)
self.set_packet(self.e)
def is_mine(self, packet):
if packet.get_ether_type() != ImpactPacket.IP.ethertype:
return 0
ip = packet.child()
if not ip or ip.get_ip_p() != ImpactPacket.ICMP.protocol:
return 0
icmp = ip.child()
# icmp_request.type_filter is a dictionary that maps request
# type codes to the reply codes
if not icmp or \
icmp.get_icmp_type() != icmp_request.type_filter[self.__type]:
return 0
if icmp.get_icmp_id() != self.get_id():
return 0
return 1
def process(self, packet):
pass
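# Worked example of the type_filter mapping used by is_mine() above: a
# timestamp request probe is matched only by a timestamp reply carrying
# the same ICMP identifier.
#
#   >>> icmp_request.type_filter[ICMP.ICMP_TSTAMP] == ICMP.ICMP_TSTAMPREPLY
#   True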
class nmap2_icmp_echo_probe_1(icmp_request):
# The first one has the IP DF bit set, a type-of-service (TOS) byte
# value of zero, a code of nine (even though it should be zero),
# the sequence number 295, a random IP ID and ICMP request identifier,
# and a random character repeated 120 times for the data payload.
sequence_number = 295
id = 0x5678
def __init__(self, id, addresses):
icmp_request.__init__(self, id, addresses, ICMP.ICMP_ECHO)
self.i.set_ip_df(True)
self.i.set_ip_tos(0)
self.icmp.set_icmp_code(9)
self.icmp.set_icmp_seq(nmap2_icmp_echo_probe_1.sequence_number)
self.i.set_ip_id(nmap2_icmp_echo_probe_1.id)
self.icmp.set_icmp_id(nmap2_icmp_echo_probe_1.id)
self.icmp.contains(Data("I" * 120))
def process(self, packet):
pass
class nmap2_icmp_echo_probe_2(icmp_request):
# The second ping query is similar, except a TOS of four
# (IP_TOS_RELIABILITY) is used, the code is zero, 150 bytes of data is
# sent, and the IP ID, request ID, and sequence numbers are incremented
# by one from the previous query values.
def __init__(self, id, addresses):
icmp_request.__init__(self, id, addresses, ICMP.ICMP_ECHO)
self.i.set_ip_df(False)
self.i.set_ip_tos(4)
self.icmp.set_icmp_code(0)
self.icmp.set_icmp_seq(nmap2_icmp_echo_probe_1.sequence_number + 1)
self.i.set_ip_id(nmap2_icmp_echo_probe_1.id + 1)
self.icmp.set_icmp_id(nmap2_icmp_echo_probe_1.id + 1)
self.icmp.contains(Data("I" * 150))
def process(self, packet):
pass
class udp_closed_probe(os_id_test):
ip_id = 0x1234 # HARDCODED
def __init__(self, id, addresses, udp_closed ):
os_id_test.__init__(self, id )
self.e = Ethernet()
self.i = IP()
self.u = UDP()
self.i.set_ip_src(addresses[0])
self.i.set_ip_dst(addresses[1])
self.i.set_ip_id(udp_closed_probe.ip_id)
self.u.set_uh_sport(id)
self.u.set_uh_dport( udp_closed )
self.e.contains(self.i)
self.i.contains(self.u)
self.set_packet(self.e)
def is_mine(self, packet):
if packet.get_ether_type() != ImpactPacket.IP.ethertype:
return 0
ip = packet.child()
if not ip or ip.get_ip_p() != ImpactPacket.ICMP.protocol:
return 0
icmp = ip.child()
if not icmp or icmp.get_icmp_type() != ICMP.ICMP_UNREACH:
return 0
if icmp.get_icmp_code() != ICMP.ICMP_UNREACH_PORT:
return 0;
self.err_data = icmp.child()
if not self.err_data:
return 0
return 1
class tcp_probe(os_id_test):
def __init__(self, id, addresses, tcp_ports, open_port ):
self.result_string = "[]"
os_id_test.__init__(self, id)
self.e = Ethernet()
self.i = IP()
self.t = TCP()
self.i.set_ip_src(addresses[0])
self.i.set_ip_dst(addresses[1])
self.i.set_ip_id(0x2323) # HARDCODED
self.t.set_th_sport(id)
if open_port:
self.target_port = tcp_ports[0]
else:
self.target_port = tcp_ports[1]
self.t.set_th_dport(self.target_port)
self.e.contains(self.i)
self.i.contains(self.t)
self.set_packet(self.e)
self.source_ip = addresses[0]
self.target_ip = addresses[1]
def socket_match(self, ip, tcp):
# scr ip and port
if (ip.get_ip_src() != self.target_ip) or (tcp.get_th_sport() != self.target_port):
return 0
# dst ip and port
if(ip.get_ip_dst() != self.source_ip) or (tcp.get_th_dport() != self.get_id()):
return 0
return 1
def is_mine(self, packet):
if packet.get_ether_type() != ImpactPacket.IP.ethertype:
return 0
ip = packet.child()
if not ip or ip.get_ip_p() != ImpactPacket.TCP.protocol:
return 0
tcp = ip.child()
if self.socket_match(ip, tcp):
return 1
return 0
class nmap_tcp_probe(tcp_probe):
def __init__(self, id, addresses, tcp_ports, open_port, sequence, options):
tcp_probe.__init__(self, id, addresses, tcp_ports, open_port)
self.t.set_th_seq(sequence)
self.set_resp(False)
for op in options:
self.t.add_option(op)
def set_resp(self,resp):
pass
class nmap1_tcp_probe(nmap_tcp_probe):
sequence = 0x8453 # 0xBASE, obviously
mss = 265
# From: http://nmap.org/nmap-fingerprinting-old.html
# [...]
# Nmap sends these options along with almost every probe packet:
# Window Scale=10; NOP; Max Segment Size = 265; Timestamp; End of Ops;
# [...]
# From nmap-4.22SOC8/osscan.cc:get_fingerprint(...)
# [...]
# "\003\003\012\001\002\004\001\011\010\012\077\077\077\077\000\000\000\000\000\000"
# [...]
tcp_options = [
TCPOption(TCPOption.TCPOPT_WINDOW, 012), #\003\003\012
TCPOption(TCPOption.TCPOPT_NOP), #\001
TCPOption(TCPOption.TCPOPT_MAXSEG, mss), #\002\004\001\011
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0x3F3F3F3F), #\010\012\077\077\077\077\000\000\000\000
TCPOption(TCPOption.TCPOPT_EOL), #\000
TCPOption(TCPOption.TCPOPT_EOL) #\000
]
def __init__(self, id, addresses, tcp_ports, open_port):
nmap_tcp_probe.__init__(self, id, addresses, tcp_ports, open_port,
self.sequence, self.tcp_options)
def set_resp(self,resp):
if resp:
self.add_result("Resp", "Y")
else:
self.add_result("Resp", "N")
def process(self, packet):
ip = packet.child()
tcp = ip.child()
self.set_resp(True)
if ip.get_ip_df():
self.add_result("DF", "Y")
else:
self.add_result("DF", "N")
self.add_result("W", tcp.get_th_win())
if tcp.get_th_ack() == self.sequence + 1:
self.add_result("ACK", "S++")
elif tcp.get_th_ack() == self.sequence:
self.add_result("ACK", "S")
else:
self.add_result("ACK", "O")
flags = []
# TCP flags
if tcp.get_ECE():
flags.append("B")
if tcp.get_URG():
flags.append("U")
if tcp.get_ACK():
flags.append("A")
if tcp.get_PSH():
flags.append("P")
if tcp.get_RST():
flags.append("R")
if tcp.get_SYN():
flags.append("S")
if tcp.get_FIN():
flags.append("F")
self.add_result("FLAGS", flags)
options = []
for op in tcp.get_options():
if op.get_kind() == TCPOption.TCPOPT_EOL:
options.append("L")
elif op.get_kind() == TCPOption.TCPOPT_MAXSEG:
options.append("M")
if op.get_mss() == self.mss:
options.append("E") # Echoed
elif op.get_kind() == TCPOption.TCPOPT_NOP:
options.append("N")
elif op.get_kind() == TCPOption.TCPOPT_TIMESTAMP:
options.append("T")
elif op.get_kind() == TCPOption.TCPOPT_WINDOW:
options.append("W")
self.add_result("OPTIONS", options)
def get_final_result(self):
return {self.test_id(): self.get_result_dict()}
class nmap2_tcp_probe(nmap_tcp_probe):
acknowledgment = 0x181d4f7b
def __init__(self, id, addresses, tcp_ports, open_port, sequence, options):
nmap_tcp_probe.__init__(self, id, addresses, tcp_ports, open_port,
sequence, options)
self.t.set_th_ack(self.acknowledgment)
def set_resp(self,resp):
# Responsiveness (R)
# This test simply records whether the target responded to a given probe.
# Possible values are Y and N. If there is no reply, remaining fields
# for the test are omitted.
if resp:
self.add_result("R", "Y")
else:
self.add_result("R", "N")
def process(self, packet):
ip = packet.child()
tcp = ip.child()
# R, DF, T*, TG*, W, S, A, F, O, RD*, Q
self.set_resp(True)
tests = nmap2_tcp_tests(ip, tcp, self.sequence, self.acknowledgment)
self.add_result("DF", tests.get_df())
self.add_result("W", tests.get_win())
self.add_result("S", tests.get_seq())
self.add_result("A", tests.get_ack())
self.add_result("F", tests.get_flags())
self.add_result("O", tests.get_options())
self.add_result("Q", tests.get_quirks())
def get_final_result(self):
return {self.test_id() : self.get_result_dict()}
class nmap2_ecn_probe(nmap_tcp_probe):
# From nmap-4.22SOC8/osscan2.cc:
# [...]
# "\003\003\012\001\002\004\005\264\004\002\001\001"
# [...]
# From: http://nmap.org/book/osdetect-methods.html
# [...]
# This probe tests for explicit congestion notification (ECN) support
# in the target TCP stack. ECN is a method for improving Internet
# performance by allowing routers to signal congestion problems before
# they start having to drop packets. It is documented in RFC 3168.
# Nmap tests this by sending a SYN packet which also has the ECN CWR
# and ECE congestion control flags set. For an unrelated (to ECN) test,
# the urgent field value of 0xF7F5 is used even though the urgent flag
# is not set. The acknowledgment number is zero, sequence number is
# random, window size field is three, and the reserved bit which
# immediately precedes the CWR bit is set. TCP options are WScale (10),
# NOP, MSS (1460), SACK permitted, NOP, NOP. The probe is sent to an
# open port.
# [...]
tcp_options = [
TCPOption(TCPOption.TCPOPT_WINDOW, 012), #\003\003\012
TCPOption(TCPOption.TCPOPT_NOP), #\001
TCPOption(TCPOption.TCPOPT_MAXSEG, 1460), #\002\004\005\0264
TCPOption(TCPOption.TCPOPT_SACK_PERMITTED), #\004\002
TCPOption(TCPOption.TCPOPT_NOP), #\001
TCPOption(TCPOption.TCPOPT_NOP) #\001
]
def __init__(self, id, addresses, tcp_ports):
nmap_tcp_probe.__init__(self, id, addresses, tcp_ports, 1,
0x8b6a, self.tcp_options)
self.t.set_SYN()
self.t.set_CWR()
self.t.set_ECE()
self.t.set_flags(0x800)
self.t.set_th_urp(0xF7F5)
self.t.set_th_ack(0)
self.t.set_th_win(3)
#self.t.set_th_flags(self.t.get_th_flags() | 0x0100) # 0000 0001 00000000
def test_id(self):
return "ECN"
def set_resp(self,resp):
if resp:
self.add_result("R", "Y")
else:
self.add_result("R", "N")
def process(self, packet):
ip = packet.child()
tcp = ip.child()
# R, DF, T*, TG*, W, O, CC, Q
self.set_resp(True)
tests = nmap2_tcp_tests(ip, tcp, 0, 0)
self.add_result("DF", tests.get_df())
self.add_result("W", tests.get_win())
self.add_result("O", tests.get_options())
self.add_result("CC", tests.get_cc())
self.add_result("Q", tests.get_quirks())
def get_final_result(self):
return {self.test_id() : self.get_result_dict()}
class nmap2_tcp_tests:
def __init__(self, ip, tcp, sequence, acknowledgment):
self.__ip = ip
self.__tcp = tcp
self.__sequence = sequence
self.__acknowledgment = acknowledgment
def get_df(self):
# IP don't fragment bit (DF)
# The IP header contains a single bit which forbids routers from fragmenting
# a packet. If the packet is too large for routers to handle, they will just
# have to drop it (and ideally return a "destination unreachable,
# fragmentation needed" response). This test records Y if the bit is set,
# and N if it isn't.
if self.__ip.get_ip_df():
return "Y"
else:
return "N"
def get_win(self):
# TCP initial window size (W, W1-W6)
# This test simply records the 16-bit TCP window size of the received packet.
return "%X" % self.__tcp.get_th_win()
def get_ack(self):
# TCP acknowledgment number (A)
# This test is the same as S except that it tests how the acknowledgment
# number in the response compares to the sequence number in the
# respective probe.
# Value Description
# Z Acknowledgment number is zero.
# S Acknowledgment number is the same as the sequence number in the probe.
# S+ Acknowledgment number is the same as the sequence number in the probe plus one.
# O Acknowledgment number is something else (other).
if self.__tcp.get_th_ack() == self.__sequence + 1:
return "S+"
elif self.__tcp.get_th_ack() == self.__sequence:
return "S"
elif self.__tcp.get_th_ack() == 0:
return "Z"
else:
return "O"
def get_seq(self):
# TCP sequence number (S)
# This test examines the 32-bit sequence number field in the TCP
# header. Rather than record the field value as some other tests
# do, this one examines how it compares to the TCP acknowledgment
# number from the probe that elicited the response.
# Value Description
# Z Sequence number is zero.
# A Sequence number is the same as the acknowledgment number in the probe.
# A+ Sequence number is the same as the acknowledgment number in the probe plus one.
# O Sequence number is something else (other).
if self.__tcp.get_th_seq() == self.__acknowledgment + 1:
return "A+"
elif self.__tcp.get_th_seq() == self.__acknowledgment:
return "A"
elif self.__tcp.get_th_seq() == 0:
return "Z"
else:
return "O"
def get_flags(self):
# TCP flags (F)
# This field records the TCP flags in the response. Each letter represents
# one flag, and they occur in the same order as in a TCP packet (from
# high-bit on the left, to the low ones). So the value SA represents the
# SYN and ACK bits set, while the value AS is illegal (wrong order).
# The possible flags are shown in Table 8.7.
# Character Flag name Flag byte value
# E ECN Echo (ECE) 64
# U Urgent Data (URG) 32
# A Acknowledgment (ACK) 16
# P Push (PSH) 8
# R Reset (RST) 4
# S Synchronize (SYN) 2
# F Final (FIN) 1
flags = ""
if self.__tcp.get_ECE():
flags += "E"
if self.__tcp.get_URG():
flags += "U"
if self.__tcp.get_ACK():
flags += "A"
if self.__tcp.get_PSH():
flags += "P"
if self.__tcp.get_RST():
flags += "R"
if self.__tcp.get_SYN():
flags += "S"
if self.__tcp.get_FIN():
flags += "F"
return flags
def get_options(self):
# Option Name Character Argument (if any)
# End of Options List (EOL) L
# No operation (NOP) N
# Maximum Segment Size (MSS) M The value is appended. Many systems
# echo the value used in the corresponding probe.
# Window Scale (WS) W The actual value is appended.
# Timestamp (TS) T The T is followed by two binary characters
# representing the TSval and TSecr values respectively.
# The characters are 0 if the field is zero
# and 1 otherwise.
# Selective ACK permitted (SACK) S
options = ""
for op in self.__tcp.get_options():
if op.get_kind() == TCPOption.TCPOPT_EOL:
options += "L"
elif op.get_kind() == TCPOption.TCPOPT_MAXSEG:
options += "M%X" % (op.get_mss())
elif op.get_kind() == TCPOption.TCPOPT_NOP:
options += "N"
elif op.get_kind() == TCPOption.TCPOPT_TIMESTAMP:
options += "T%i%i" % (int(op.get_ts()!=0),
int(op.get_ts_echo()!=0))
elif op.get_kind() == TCPOption.TCPOPT_WINDOW:
options += "W%X" % (op.get_shift_cnt())
elif op.get_kind() == TCPOption.TCPOPT_SACK_PERMITTED:
options += "S"
return options
def get_cc(self):
# Explicit congestion notification (CC)
# This test is only used for the ECN probe. That probe is a SYN packet
# which includes the CWR and ECE congestion control flags. When the
# response SYN/ACK is received, those flags are examined to set the
# CC (congestion control) test value as described in Table 8.3.
# Table 8.3. CC test values
# Value Description
# Y Only the ECE bit is set (not CWR). This host supports ECN.
# N Neither of these two bits is set. The target does not support
# ECN.
# S Both bits are set. The target does not support ECN, but it
# echoes back what it thinks is a reserved bit.
# O The one remaining combination of these two bits (other).
ece, cwr = self.__tcp.get_ECE(), self.__tcp.get_CWR()
if ece and not cwr:
return "Y"
elif not ece and not cwr:
return "N"
elif ece and cwr:
return "S"
else:
return "O"
def get_quirks(self):
# TCP miscellaneous quirks (Q)
# This tests for two quirks that a few implementations have in their
# TCP stack. The first is that the reserved field in the TCP header
# (right after the header length) is nonzero. This is particularly
# likely to happen in response to the ECN test as that one sets a
# reserved bit in the probe. If this is seen in a packet, an "R"
# is recorded in the Q string.
# The other quirk Nmap tests for is a nonzero urgent pointer field
# value when the URG flag is not set. This is also particularly
# likely to be seen in response to the ECN probe, which sets a
# non-zero urgent field. A "U" is appended to the Q string when
# this is seen.
# The Q string must always be generated in alphabetical order.
# If no quirks are present, the Q test is empty but still shown.
quirks = ""
if ((self.__tcp.get_th_flags() >> 8) & 0x0f) != 0:
quirks += "R"
if self.__tcp.get_URG() == 0 and self.__tcp.get_th_urp() != 0:
quirks += "U"
return quirks
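# Sketch of how the probe classes consume nmap2_tcp_tests; `ip` and `tcp`
# stand for already-decoded reply layers and are assumptions here:
#
#   tests = nmap2_tcp_tests(ip, tcp, sequence, acknowledgment)
#   tests.get_flags()    # e.g. "AS" for a SYN/ACK reply
#   tests.get_options()  # e.g. "M5B4NW0NNT11" for MSS/WScale/Timestamp
#   tests.get_quirks()   # "" when neither quirk is present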
class nmap2_tcp_probe_2_6(nmap2_tcp_probe):
sequence = 0x8453 # 0xBASE, obviously
mss = 265
# From nmap-4.22SOC8/osscan2.cc:
# [...]
# "\003\003\012\001\002\004\001\011\010\012\377\377\377\377\000\000\000\000\004\002"
# [...]
# From: http://nmap.org/book/osdetect-methods.html
# [...]
# The six T2 through T7 tests each send one TCP probe packet.
# With one exception, the TCP options data in each case is (in hex)
# 03030A0102040109080AFFFFFFFF000000000402.
# Those 20 bytes correspond to window scale (10), NOP, MSS (265),
# Timestamp (TSval: 0xFFFFFFFF; TSecr: 0), then SACK permitted.
# (...
tcp_options = [
TCPOption(TCPOption.TCPOPT_WINDOW, 012), #\003\003\012
TCPOption(TCPOption.TCPOPT_NOP), #\001
TCPOption(TCPOption.TCPOPT_MAXSEG, mss), #\002\004\001\011
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0xFFFFFFFF), #\010\012\377\377\377\377\000\000\000\000
TCPOption(TCPOption.TCPOPT_SACK_PERMITTED) #\004\002
]
def __init__(self, id, addresses, tcp_ports, open_port):
nmap2_tcp_probe.__init__(self, id, addresses, tcp_ports, open_port,
self.sequence, self.tcp_options)
class nmap2_tcp_probe_7(nmap2_tcp_probe):
sequence = 0x8453 # 0xBASE, obviously
mss = 265
# ...)
# The exception is that T7 uses a Window scale value of 15 rather than 10
# [...]
tcp_options = [
TCPOption(TCPOption.TCPOPT_WINDOW, 017), #\003\003\017
TCPOption(TCPOption.TCPOPT_NOP), #\001
TCPOption(TCPOption.TCPOPT_MAXSEG, mss), #\002\004\001\011
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0xFFFFFFFF), #\010\012\377\377\377\377\000\000\000\000
TCPOption(TCPOption.TCPOPT_SACK_PERMITTED) #\004\002
]
def __init__(self, id, addresses, tcp_ports, open_port):
nmap2_tcp_probe.__init__(self, id, addresses, tcp_ports, open_port,
self.sequence, self.tcp_options)
class nmap_port_unreachable(udp_closed_probe):
def __init__(self, id, addresses, ports):
udp_closed_probe.__init__(self, id, addresses, ports[2])
self.set_resp(False)
def test_id(self):
pass
def set_resp(self, resp):
pass
def process(self, packet):
pass
class nmap1_port_unreachable(nmap_port_unreachable):
def __init__(self, id, addresses, ports):
nmap_port_unreachable.__init__(self, id, addresses, ports)
self.u.contains(Data("A" * 300))
def test_id(self):
return "PU"
def set_resp(self,resp):
if resp:
self.add_result("Resp", "Y")
else:
self.add_result("Resp", "N")
def process(self, packet):
ip_orig = self.err_data
if ip_orig.get_ip_p() != ImpactPacket.UDP.protocol:
return
udp = ip_orig.child()
if not udp:
return
ip = packet.child()
self.set_resp(True)
if ip.get_ip_df():
self.add_result("DF", "Y")
else:
self.add_result("DF", "N")
self.add_result("TOS", ip.get_ip_tos())
self.add_result("IPLEN", ip.get_ip_len())
self.add_result("RIPTL", ip_orig.get_ip_len()) # Some systems return a different IPLEN
recv_ip_id = ip_orig.get_ip_id()
if 0 == recv_ip_id:
self.add_result("RID", "0")
elif udp_closed_probe.ip_id == recv_ip_id:
self.add_result("RID", "E")
else:
self.add_result("RID", "F")
ip_sum = ip_orig.get_ip_sum()
ip_orig.set_ip_sum(0)
checksum = ip_orig.compute_checksum(ip_orig.get_bytes())
if 0 == checksum:
self.add_result("RIPCK", "0")
elif checksum == ip_sum:
self.add_result("RIPCK", "E")
else:
self.add_result("RIPCK", "F")
udp_sum = udp.get_uh_sum()
udp.set_uh_sum(0)
udp.auto_checksum = 1
udp.calculate_checksum()
if 0 == udp_sum:
self.add_result("UCK", "0")
elif self.u.get_uh_sum() == udp_sum:
self.add_result("UCK", "E")
else:
self.add_result("UCK", "F")
self.add_result("ULEN", udp.get_uh_ulen())
if ip.child().child().child().child() == udp.child(): # Some systems meddle with the data
self.add_result("DAT", "E")
else:
self.add_result("DAT", "F")
def get_final_result(self):
return {self.test_id(): self.get_result_dict()}
class nmap2_port_unreachable(nmap_port_unreachable):
# UDP (U1)
# This probe is a UDP packet sent to a closed port. The character 'C'
# (0x43) is repeated 300 times for the data field. The IP ID value is
# set to 0x1042 for operating systems which allow us to set this. If
# the port is truly closed and there is no firewall in place, Nmap
# expects to receive an ICMP port unreachable message in return.
# That response is then subjected to the R, DF, T, TG, TOS, IPL, UN,
# RIPL, RID, RIPCK, RUCK, RUL, and RUD tests.
def __init__(self, id, addresses, ports):
nmap_port_unreachable.__init__(self, id, addresses, ports)
self.u.contains(Data("C" * 300))
self.i.set_ip_id(0x1042)
def test_id(self):
return "U1"
def set_resp(self,resp):
if resp:
self.add_result("R", "Y")
else:
self.add_result("R", "N")
def process(self, packet):
ip_orig = self.err_data
if ip_orig.get_ip_p() != ImpactPacket.UDP.protocol:
return
udp = ip_orig.child()
if not udp:
return
ip = packet.child()
icmp = ip.child()
if ip.get_ip_df():
self.add_result("DF", "Y")
else:
self.add_result("DF", "N")
# XXX T
# IP initial time-to-live (T)
# IP packets contain a field named time-to-live (TTL) which is
# decremented every time they traverse a router. If the field
# reaches zero, the packet must be discarded. This prevents
# packets from looping endlessly. Because operating systems differ
# on which TTL they start with, it can be used for OS detection.
# Nmap determines how many hops away it is from the target by
# examining the ICMP port unreachable response to the U1 probe.
# That response includes the original IP packet, including the
# already-decremented TTL field, received by the target. By
# subtracting that value from our as-sent TTL, we learn how many
# hops away the machine is. Nmap then adds that hop distance to
# the probe response TTL to determine what the initial TTL was
# when that ICMP probe response packet was sent. That initial TTL
# value is stored in the fingerprint as the T result.
# Even though an eight-bit field like TTL can never hold values
# greater than 0xFF, this test occasionally results in values of
# 0x100 or higher. This occurs when a system (could be the source,
# a target, or a system in between) corrupts or otherwise fails to
# correctly decrement the TTL. It can also occur due to asymmetric
# routes.
# XXX TG
# IP initial time-to-live guess (TG)
# It is not uncommon for Nmap to receive no response to the U1 probe,
# which prevents Nmap from learning how many hops away a target is.
# Firewalls and NAT devices love to block unsolicited UDP packets.
# But since common TTL values are spread well apart and targets are
# rarely more than 20 hops away, Nmap can make a pretty good guess
# anyway. Most systems send packets with an initial TTL of 32, 60, 64,
# 128, or 255. So the TTL value received in the response is rounded
# up to the next value out of 32, 64, 128, or 255. 60 is not in that
# list because it cannot be reliably distinguished from 64. It is
# rarely seen anyway.
# The resulting guess is stored in the TG field. This TTL guess field
# is not printed in a subject fingerprint if the actual TTL (T) value
# was discovered.
# IP type of service (TOS)
# This test simply records the type of service byte from the
# IP header of ICMP port unreachable packets.
# This byte is described in RFC 791
self.add_result("TOS", "%X" % ip.get_ip_tos())
# IP total length (IPL)
# This test records the total length (in octets) of an IP packet.
# It is only used for the port unreachable response elicited by the
# U1 test.
self.add_result("IPL", "%X" % ip.get_ip_len())
# Unused port unreachable field nonzero (UN)
# An ICMP port unreachable message header is eight bytes long, but
# only the first four are used. RFC 792 states that the last four
# bytes must be zero. A few implementations (mostly ethernet switches
# and some specialized embedded devices) set it anyway. The value of
# those last four bytes is recorded in this field.
self.add_result("UN", "%X" % icmp.get_icmp_void())
# Returned probe IP total length value (RIPL)
# ICMP port unreachable messages (as are sent in response to the U1
# probe) are required to include the IP header which generated them.
# This header should be returned just as they received it, but some
# implementations send back a corrupted version due to changes they
# made during IP processing. This test simply records the returned
# IP total length value. If the correct value of 0x148 (328) is
# returned, the value G (for good) is stored instead of the actual value.
if ip_orig.get_ip_len() == 0x148:
self.add_result("RIPL","G")
else:
self.add_result("RIPL", "%X" % ip_orig.get_ip_len())
# Returned probe IP ID value (RID)
# The U1 probe has a static IP ID value of 0x1042. If that value is
# returned in the port unreachable message, the value G is stored for
# this test. Otherwise the exact value returned is stored. Some systems,
# such as Solaris, manipulate IP ID values for raw IP packets that
# Nmap sends. In such cases, this test is skipped. We have found
# that some systems, particularly HP and Xerox printers, flip the bytes
# and return 0x4210 instead.
if 0x1042 == ip_orig.get_ip_id():
self.add_result("RID", "G")
else:
self.add_result("RID", "%X" % ip_orig.get_ip_id())
# Integrity of returned probe IP checksum value (RIPCK)
# The IP checksum is one value that we don't expect to remain the same
# when returned in a port unreachable message. After all, each network
# hop during transit changes the checksum as the TTL is decremented.
# However, the checksum we receive should match the enclosing IP packet.
# If it does, the value G (good) is stored for this test. If the returned
# value is zero, then Z is stored. Otherwise the result is I (invalid).
ip_sum = ip_orig.get_ip_sum()
ip_orig.set_ip_sum(0)
checksum = ip_orig.compute_checksum(ip_orig.get_bytes())
if 0 == checksum:
self.add_result("RIPCK", "Z")
elif checksum == ip_sum:
self.add_result("RIPCK", "G")
else:
self.add_result("RIPCK", "I")
# Integrity of returned probe UDP length and checksum (RUL and RUCK)
# The UDP header length and checksum values should be returned exactly
# as they were sent. If so, G is recorded for these tests. Otherwise
# the value actually returned is recorded. The proper length is 0x134 (308).
udp_sum = udp.get_uh_sum()
udp.set_uh_sum(0)
udp.auto_checksum = 1
udp.calculate_checksum()
if self.u.get_uh_sum() == udp_sum:
self.add_result("RUCK", "G")
else:
self.add_result("RUCK", "%X" % udp_sum)
if udp.get_uh_ulen() == 0x134:
self.add_result("RUL","G")
else:
self.add_result("RUL", "%X" % udp.get_uh_ulen())
# Integrity of returned UDP data (RUD)
# If the UDP payload returned consists of 300 'C' (0x43)
# characters as expected, a G is recorded for this test.
# Otherwise I (invalid) is recorded.
if ip.child().child().child().child() == udp.child():
self.add_result("RUD", "G")
else:
self.add_result("RUD", "I")
def get_final_result(self):
return {self.test_id(): self.get_result_dict()}
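# The RID/RIPCK/RUCK tests above share one pattern: zero the stored
# checksum, recompute it over the echoed bytes, and compare. Condensed
# sketch of the IP-header variant (the packet contents are whatever the
# target echoed back):
#
#   ip_sum = ip_orig.get_ip_sum()    # checksum as returned by the target
#   ip_orig.set_ip_sum(0)            # must be zeroed before recomputing
#   ok = ip_orig.compute_checksum(ip_orig.get_bytes()) == ip_sum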
class OS_ID:
def __init__(self, target, ports):
pcap_dev = pcap.lookupdev()
self.p = pcap.open_live(pcap_dev, 600, 0, 3000)
self.__source = self.p.getlocalip()
self.__target = target
self.p.setfilter("src host %s and dst host %s" % (target, self.__source), 1, 0xFFFFFF00)
self.p.setmintocopy(10)
self.decoder = EthDecoder()
self.tests_sent = []
self.outstanding_count = 0
self.results = {}
self.current_id = 12345
self.__ports = ports
def releasePcap(self):
if not (self.p is None):
self.p.close()
def get_new_id(self):
id = self.current_id
self.current_id += 1
self.current_id &= 0xFFFF
return id
def send_tests(self, tests):
self.outstanding_count = 0
for t_class in tests:
# Ok, I need to know if the constructor accepts the parameter port
# We could also ask via co_varnames, but the port parameter is not standardized... asking by arg count :(
if t_class.__init__.im_func.func_code.co_argcount == 4:
test = t_class(self.get_new_id(), [self.__source, self.__target], self.__ports )
else:
test = t_class(self.get_new_id(), [self.__source, self.__target] )
self.p.sendpacket(test.get_test_packet())
self.outstanding_count += 1
self.tests_sent.append(test)
while self.p.readready():
self.p.dispatch(1, self.packet_handler)
while self.outstanding_count > 0:
data = self.p.next()[0]
if data:
self.packet_handler(0, data)
else:
break
def run(self):
pass
def get_source(self):
return self.__source
def get_target(self):
return self.__target
def get_ports(self):
return self.__ports
def packet_handler(self, len, data):
packet = self.decoder.decode(data)
for t in self.tests_sent:
if t.is_mine(packet):
t.process(packet)
self.outstanding_count -= 1
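# OS_ID is driven by the concrete test classes below; a rough usage sketch
# (target address and port triple are assumptions -- open TCP, closed TCP,
# closed UDP, as consumed by tcp_probe and nmap_port_unreachable):
#
#   scanner = OS_ID('192.168.1.10', (80, 113, 33434))
#   scanner.send_tests([nmap2_tcp_open_2, nmap2_tcp_open_3, nmap2_tcp_open_4])
#   scanner.releasePcap()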
class nmap1_tcp_open_1(nmap1_tcp_probe):
def __init__(self, id, addresses, tcp_ports):
nmap1_tcp_probe.__init__(self, id, addresses, tcp_ports, 1)
self.t.set_ECE()
self.t.set_SYN()
def test_id(self):
return "T1"
def is_mine(self, packet):
if tcp_probe.is_mine(self, packet):
ip = packet.child()
if not ip:
return 0
tcp = ip.child()
if not tcp:
return 0
if tcp.get_SYN() and tcp.get_ACK():
return 1
else:
return 0
else:
return 0
class nmap1_tcp_open_2(nmap1_tcp_probe):
def __init__(self, id, addresses, tcp_ports):
nmap1_tcp_probe.__init__(self, id, addresses, tcp_ports, 1)
def test_id(self):
return "T2"
class nmap2_tcp_open_2(nmap2_tcp_probe_2_6):
# From: http://nmap.org/book/osdetect-methods.html
# [...]
# T2 sends a TCP null (no flags set) packet with the IP DF bit set and a
# window field of 128 to an open port.
# ...
def __init__(self, id, addresses, tcp_ports):
nmap2_tcp_probe_2_6.__init__(self, id, addresses, tcp_ports, 1)
self.i.set_ip_df(1)
self.t.set_th_win(128)
def test_id(self):
return "T2"
class nmap1_tcp_open_3(nmap1_tcp_probe):
def __init__(self, id, addresses, tcp_ports ):
nmap1_tcp_probe.__init__(self, id, addresses, tcp_ports, 1)
self.t.set_SYN()
self.t.set_FIN()
self.t.set_URG()
self.t.set_PSH()
def test_id(self):
return "T3"
class nmap2_tcp_open_3(nmap2_tcp_probe_2_6):
# ...
# T3 sends a TCP packet with the SYN, FIN, URG, and PSH flags set and a
# window field of 256 to an open port. The IP DF bit is not set.
# ...
def __init__(self, id, addresses, tcp_ports ):
nmap2_tcp_probe_2_6.__init__(self, id, addresses, tcp_ports, 1)
self.t.set_SYN()
self.t.set_FIN()
self.t.set_URG()
self.t.set_PSH()
self.t.set_th_win(256)
self.i.set_ip_df(0)
def test_id(self):
return "T3"
class nmap1_tcp_open_4(nmap1_tcp_probe):
def __init__(self, id, addresses, tcp_ports):
nmap1_tcp_probe.__init__(self, id, addresses, tcp_ports, 1)
self.t.set_ACK()
def test_id(self):
return "T4"
class nmap2_tcp_open_4(nmap2_tcp_probe_2_6):
# ...
# T4 sends a TCP ACK packet with IP DF and a window field of 1024 to
# an open port.
# ...
def __init__(self, id, addresses, tcp_ports ):
nmap2_tcp_probe_2_6.__init__(self, id, addresses, tcp_ports, 1)
self.t.set_ACK()
self.i.set_ip_df(1)
self.t.set_th_win(1024)
def test_id(self):
return "T4"
class nmap1_seq(nmap1_tcp_probe):
SEQ_UNKNOWN = 0
SEQ_64K = 1
SEQ_TD = 2
SEQ_RI = 4
SEQ_TR = 8
SEQ_i800 = 16
SEQ_CONSTANT = 32
TS_SEQ_UNKNOWN = 0
TS_SEQ_ZERO = 1 # At least one of the timestamps we received back was 0
TS_SEQ_2HZ = 2
TS_SEQ_100HZ = 3
TS_SEQ_1000HZ = 4
TS_SEQ_UNSUPPORTED = 5 # System didn't send back a timestamp
IPID_SEQ_UNKNOWN = 0
IPID_SEQ_INCR = 1 # simple increment by one each time
IPID_SEQ_BROKEN_INCR = 2 # Stupid MS -- forgot htons() so it counts by 256 on little-endian platforms
IPID_SEQ_RPI = 3 # Goes up each time but by a "random" positive increment
IPID_SEQ_RD = 4 # Appears to select IPID using a "random" distributions (meaning it can go up or down)
IPID_SEQ_CONSTANT = 5 # Contains 1 or more sequential duplicates
IPID_SEQ_ZERO = 6 # Every packet that comes back has an IP.ID of 0 (eg Linux 2.4 does this)
def __init__(self, id, addresses, tcp_ports):
nmap1_tcp_probe.__init__(self, id, addresses, tcp_ports, 1)
self.t.set_SYN()
self.t.set_th_seq(id) # Used to match results with sent packets.
def process(self, p):
raise Exception("Method process is meaningless for class %s." % self.__class__.__name__)
class nmap2_seq(nmap2_tcp_probe):
TS_SEQ_UNKNOWN = 0
TS_SEQ_ZERO = 1 # At least one of the timestamps we received back was 0
TS_SEQ_UNSUPPORTED = 5 # System didn't send back a timestamp
IPID_SEQ_UNKNOWN = 0
IPID_SEQ_INCR = 1 # simple increment by one each time
IPID_SEQ_BROKEN_INCR = 2 # Stupid MS -- forgot htons() so it counts by 256 on little-endian platforms
IPID_SEQ_RPI = 3 # Goes up each time but by a "random" positive increment
IPID_SEQ_RD = 4 # Appears to select IPID using a "random" distributions (meaning it can go up or down)
IPID_SEQ_CONSTANT = 5 # Contains 1 or more sequential duplicates
IPID_SEQ_ZERO = 6 # Every packet that comes back has an IP.ID of 0 (eg Linux 2.4 does this)
def __init__(self, id, addresses, tcp_ports, options):
nmap2_tcp_probe.__init__(self, id, addresses, tcp_ports, 1,
id, options)
self.t.set_SYN()
def process(self, p):
raise Exception("Method process is meaningless for class %s." % self.__class__.__name__)
class nmap2_seq_1(nmap2_seq):
# Packet #1: window scale (10),
# NOP,
# MSS (1460),
# timestamp (TSval: 0xFFFFFFFF; TSecr: 0),
# SACK permitted.
# The window field is 1.
tcp_options = [
TCPOption(TCPOption.TCPOPT_WINDOW, 10),
TCPOption(TCPOption.TCPOPT_NOP),
TCPOption(TCPOption.TCPOPT_MAXSEG, 1460),
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0xFFFFFFFF),
TCPOption(TCPOption.TCPOPT_SACK_PERMITTED)
]
def __init__(self, id, addresses, tcp_ports):
nmap2_seq.__init__(self, id, addresses, tcp_ports, self.tcp_options)
self.t.set_th_win(1)
class nmap2_seq_2(nmap2_seq):
# Packet #2: MSS (1400),
# window scale (0),
# SACK permitted,
# timestamp (TSval: 0xFFFFFFFF; TSecr: 0),
# EOL.
# The window field is 63.
tcp_options = [
TCPOption(TCPOption.TCPOPT_MAXSEG, 1400),
TCPOption(TCPOption.TCPOPT_WINDOW, 0),
TCPOption(TCPOption.TCPOPT_SACK_PERMITTED),
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0xFFFFFFFF),
TCPOption(TCPOption.TCPOPT_EOL)
]
def __init__(self, id, addresses, tcp_ports):
nmap2_seq.__init__(self, id, addresses, tcp_ports, self.tcp_options)
self.t.set_th_win(63)
class nmap2_seq_3(nmap2_seq):
# Packet #3: Timestamp (TSval: 0xFFFFFFFF; TSecr: 0),
# NOP,
# NOP,
# window scale (5),
# NOP,
# MSS (640).
# The window field is 4.
tcp_options = [
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0xFFFFFFFF),
TCPOption(TCPOption.TCPOPT_NOP),
TCPOption(TCPOption.TCPOPT_NOP),
TCPOption(TCPOption.TCPOPT_WINDOW, 5),
TCPOption(TCPOption.TCPOPT_NOP),
TCPOption(TCPOption.TCPOPT_MAXSEG, 640)
]
def __init__(self, id, addresses, tcp_ports):
nmap2_seq.__init__(self, id, addresses, tcp_ports, self.tcp_options)
self.t.set_th_win(4)
class nmap2_seq_4(nmap2_seq):
# Packet #4: SACK permitted,
# Timestamp (TSval: 0xFFFFFFFF; TSecr: 0),
# window scale (10),
# EOL.
# The window field is 4.
tcp_options = [
TCPOption(TCPOption.TCPOPT_SACK_PERMITTED),
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0xFFFFFFFF),
TCPOption(TCPOption.TCPOPT_WINDOW, 10),
TCPOption(TCPOption.TCPOPT_EOL)
]
def __init__(self, id, addresses, tcp_ports):
nmap2_seq.__init__(self, id, addresses, tcp_ports, self.tcp_options)
self.t.set_th_win(4)
class nmap2_seq_5(nmap2_seq):
# Packet #5: MSS (536),
# SACK permitted,
# Timestamp (TSval: 0xFFFFFFFF; TSecr: 0),
# window scale (10),
# EOL.
# The window field is 16.
tcp_options = [
TCPOption(TCPOption.TCPOPT_MAXSEG, 536),
TCPOption(TCPOption.TCPOPT_SACK_PERMITTED),
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0xFFFFFFFF),
TCPOption(TCPOption.TCPOPT_WINDOW, 10),
TCPOption(TCPOption.TCPOPT_EOL)
]
def __init__(self, id, addresses, tcp_ports):
nmap2_seq.__init__(self, id, addresses, tcp_ports, self.tcp_options)
self.t.set_th_win(16)
class nmap2_seq_6(nmap2_seq):
# Packet #6: MSS (265),
# SACK permitted,
# Timestamp (TSval: 0xFFFFFFFF; TSecr: 0).
# The window field is 512.
tcp_options = [
TCPOption(TCPOption.TCPOPT_MAXSEG, 265),
TCPOption(TCPOption.TCPOPT_SACK_PERMITTED),
TCPOption(TCPOption.TCPOPT_TIMESTAMP, 0xFFFFFFFF)
]
def __init__(self, id, addresses, tcp_ports):
nmap2_seq.__init__(self, id, addresses, tcp_ports, self.tcp_options)
self.t.set_th_win(512)
class nmap1_seq_container(os_id_test):
def __init__(self, num_seq_samples, responses, seq_diffs, ts_diffs, time_diffs):
os_id_test.__init__(self, 0)
self.num_seq_samples = num_seq_samples
self.seq_responses = responses
self.seq_num_responses = len(responses)
self.seq_diffs = seq_diffs
self.ts_diffs = ts_diffs
self.time_diffs = time_diffs
self.pre_ts_seqclass = nmap1_seq.TS_SEQ_UNKNOWN
def test_id(self):
return "TSEQ"
def set_ts_seqclass(self, ts_seqclass):
self.pre_ts_seqclass = ts_seqclass
def process(self):
ipid_seqclass = self.ipid_sequence()
if nmap1_seq.TS_SEQ_UNKNOWN != self.pre_ts_seqclass:
ts_seqclass = self.pre_ts_seqclass
else:
ts_seqclass = self.ts_sequence()
if self.seq_num_responses >= 4:
seq_seqclass = self.seq_sequence()
if nmap1_seq.SEQ_UNKNOWN != seq_seqclass: self.add_seqclass(seq_seqclass)
if nmap1_seq.IPID_SEQ_UNKNOWN != ipid_seqclass: self.add_ipidclass(ipid_seqclass)
if nmap1_seq.TS_SEQ_UNKNOWN != ts_seqclass: self.add_tsclass(ts_seqclass)
else:
PyImpact.t_log(1, "Insufficient responses for TCP sequencing (%d out of %d), OS detection may be less accurate."
% (self.seq_num_responses, self.num_seq_samples))
def get_final_result(self):
"Returns a string representation of the final result of this test or None if no response was received"
return {self.test_id(): self.get_result_dict()}
def ipid_sequence(self):
if self.seq_num_responses < 2: return nmap1_seq.IPID_SEQ_UNKNOWN
ipid_diffs = array.array('H', [0] * (self.seq_num_responses - 1))
null_ipids = 1
for i in xrange(1, self.seq_num_responses):
prev_ipid = self.seq_responses[i-1].get_ipid()
cur_ipid = self.seq_responses[i].get_ipid()
if cur_ipid < prev_ipid and (cur_ipid > 500 or prev_ipid < 65000):
return nmap1_seq.IPID_SEQ_RD
if prev_ipid != 0 or cur_ipid != 0: null_ipids = 0
ipid_diffs[i-1] = abs(cur_ipid - prev_ipid)
if null_ipids: return nmap1_seq.IPID_SEQ_ZERO
# Battle plan:
# If any diff is > 1000, set to random, if 0, set to constant.
# If any of the diffs are exactly 1, or all are 9 or less, set to incremental.
for i in xrange(0, self.seq_num_responses - 1):
if ipid_diffs[i] > 1000: return nmap1_seq.IPID_SEQ_RPI
if ipid_diffs[i] == 0: return nmap1_seq.IPID_SEQ_CONSTANT
is_incremental = 1 # All differences are less than 10
is_ms = 1 # All differences are multiples of 256
for i in xrange(0, self.seq_num_responses - 1):
if ipid_diffs[i] == 1: return nmap1_seq.IPID_SEQ_INCR
if is_ms and ipid_diffs[i] < 2560 and (ipid_diffs[i] % 256) != 0: is_ms = 0
if ipid_diffs[i] > 9: is_incremental = 0
if is_ms: return nmap1_seq.IPID_SEQ_BROKEN_INCR
if is_incremental: return nmap1_seq.IPID_SEQ_INCR
return nmap1_seq.IPID_SEQ_UNKNOWN
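# Worked examples with hand-made diff series (hypothetical values):
# ipid_diffs == [1, 1, 1] -> IPID_SEQ_INCR (a diff of exactly 1),
# ipid_diffs == [256, 512, 256] -> IPID_SEQ_BROKEN_INCR (all multiples of 256),
# ipid_diffs == [1200, 80, 3] -> IPID_SEQ_RPI (a diff above 1000),
# ipid_diffs == [0, 5, 2] -> IPID_SEQ_CONSTANT (a zero diff means a duplicate).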
def ts_sequence(self):
if self.seq_num_responses < 2: return nmap1_seq.TS_SEQ_UNKNOWN
# Battle plan:
# 1) Compute average increments per second, and variance in incr. per second.
# 2) If any are 0, set to constant.
# 3) If variance is high, set to random incr. [ skip for now ]
# 4) if ~10/second, set to appropriate thing.
# 5) Same with ~100/s.
avg_freq = 0.0
for i in xrange(0, self.seq_num_responses - 1):
dhz = self.ts_diffs[i] / self.time_diffs[i]
avg_freq += dhz / (self.seq_num_responses - 1)
PyImpact.t_log(2, "The avg TCP TS HZ is: %f" % avg_freq)
if 0 < avg_freq and avg_freq < 3.9: return nmap1_seq.TS_SEQ_2HZ
if 85 < avg_freq and avg_freq < 115: return nmap1_seq.TS_SEQ_100HZ
if 900 < avg_freq and avg_freq < 1100: return nmap1_seq.TS_SEQ_1000HZ
return nmap1_seq.TS_SEQ_UNKNOWN
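# Worked example (hypothetical samples): ts_diffs == [100, 100, 100] over
# time_diffs == [1.0, 1.0, 1.0] gives avg_freq == 100.0, which falls in the
# 85..115 window above and is classified as TS_SEQ_100HZ.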
def seq_sequence(self):
self.seq_gcd = reduce(my_gcd, self.seq_diffs)
avg_incr = 0
seqclass = nmap1_seq.SEQ_UNKNOWN
if 0 != self.seq_gcd:
self.seq_diffs = map(lambda x, gcd = self.seq_gcd: x / gcd, self.seq_diffs) # scale the diffs down by their common divisor
for i in xrange(0, self.seq_num_responses - 1):
if abs(self.seq_responses[i+1].get_seq() - self.seq_responses[i].get_seq()) > 50000000:
seqclass = nmap1_seq.SEQ_TR
self.index = 9999999
break
avg_incr += self.seq_diffs[i]
if 0 == self.seq_gcd:
seqclass = nmap1_seq.SEQ_CONSTANT
self.index = 0
elif 0 == self.seq_gcd % 64000:
seqclass = nmap1_seq.SEQ_64K
self.index = 1
elif 0 == self.seq_gcd % 800:
seqclass = nmap1_seq.SEQ_i800
self.index = 10
elif nmap1_seq.SEQ_UNKNOWN == seqclass:
avg_incr = int(.5 + avg_incr / (self.seq_num_responses - 1))
sum_incr = 0.0
for i in range(0, self.seq_num_responses - 1):
d = abs(self.seq_diffs[i] - avg_incr)
sum_incr += float(d * d)
sum_incr /= self.seq_num_responses - 1
self.index = int(.5 + math.sqrt(sum_incr))
if self.index < 75:
seqclass = nmap1_seq.SEQ_TD
else:
seqclass = nmap1_seq.SEQ_RI
return seqclass
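# Worked examples (hypothetical diffs): [64000, 128000, 64000] -> GCD 64000
# -> SEQ_64K (index 1); [800, 1600, 2400] -> GCD 800 -> SEQ_i800 (index 10);
# otherwise the standard deviation of the diffs picks SEQ_TD (index < 75)
# or SEQ_RI.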
seqclasses = {
nmap1_seq.SEQ_64K: '64K',
nmap1_seq.SEQ_TD: 'TD',
nmap1_seq.SEQ_RI: 'RI',
nmap1_seq.SEQ_TR: 'TR',
nmap1_seq.SEQ_i800: 'i800',
nmap1_seq.SEQ_CONSTANT: 'C',
}
def add_seqclass(self, id):
self.add_result('CLASS', nmap1_seq_container.seqclasses[id])
if nmap1_seq.SEQ_CONSTANT == id:
self.add_result('VAL', '%i' % self.seq_responses[0].get_seq())
elif id in (nmap1_seq.SEQ_TD, nmap1_seq.SEQ_RI):
self.add_result('GCD', '%i' % self.seq_gcd)
self.add_result('SI', '%i' % self.index)
tsclasses = {
nmap1_seq.TS_SEQ_ZERO: '0',
nmap1_seq.TS_SEQ_2HZ: '2HZ',
nmap1_seq.TS_SEQ_100HZ: '100HZ',
nmap1_seq.TS_SEQ_1000HZ: '1000HZ',
nmap1_seq.TS_SEQ_UNSUPPORTED: 'U',
}
def add_tsclass(self, id):
self.add_result('TS', nmap1_seq_container.tsclasses[id])
ipidclasses = {
nmap1_seq.IPID_SEQ_INCR: 'I',
nmap1_seq.IPID_SEQ_BROKEN_INCR: 'BI',
nmap1_seq.IPID_SEQ_RPI: 'RPI',
nmap1_seq.IPID_SEQ_RD: 'RD',
nmap1_seq.IPID_SEQ_CONSTANT: 'C',
nmap1_seq.IPID_SEQ_ZERO: 'Z',
}
def add_ipidclass(self, id):
self.add_result('IPID', nmap1_seq_container.ipidclasses[id])
class nmap2_seq_container(os_id_test):
def __init__(self, num_seq_samples, responses, seq_diffs, ts_diffs, time_diffs):
os_id_test.__init__(self, 0)
self.num_seq_samples = num_seq_samples
self.seq_responses = responses
self.seq_num_responses = len(responses)
self.seq_diffs = seq_diffs
self.ts_diffs = ts_diffs
self.time_diffs = time_diffs
self.pre_ts_seqclass = nmap2_seq.TS_SEQ_UNKNOWN
def test_id(self):
return "SEQ"
def set_ts_seqclass(self, ts_seqclass):
self.pre_ts_seqclass = ts_seqclass
def process(self):
if self.seq_num_responses >= 4:
self.calc_ti()
self.calc_ts()
self.calc_sp()
else:
self.add_result('R', 'N')
PyImpact.t_log(1, "Insufficient responses for TCP sequencing (%d out of %d), OS detection may be less accurate."
% (self.seq_num_responses, self.num_seq_samples))
def get_final_result(self):
return {self.test_id(): self.get_result_dict()}
def calc_ti(self):
if self.seq_num_responses < 2:
return
ipidclasses = {
nmap2_seq.IPID_SEQ_INCR: 'I',
nmap2_seq.IPID_SEQ_BROKEN_INCR: 'BI',
nmap2_seq.IPID_SEQ_RPI: 'RI',
nmap2_seq.IPID_SEQ_RD: 'RD',
nmap2_seq.IPID_SEQ_CONSTANT: 'C',
nmap2_seq.IPID_SEQ_ZERO: 'Z',
}
ipid_diffs = array.array('H', [0] * (self.seq_num_responses - 1))
# Random and zero
null_ipids = 1
for i in xrange(1, self.seq_num_responses):
prev_ipid = self.seq_responses[i-1].get_ipid()
cur_ipid = self.seq_responses[i].get_ipid()
if prev_ipid != 0 or cur_ipid != 0:
null_ipids = 0
if prev_ipid <= cur_ipid:
ipid_diffs[i-1] = cur_ipid - prev_ipid
else:
ipid_diffs[i-1] = (cur_ipid - prev_ipid + 65536) & 0xffff
if self.seq_num_responses > 2 and ipid_diffs[i-1] > 20000:
self.add_result('TI', ipidclasses[nmap2_seq.IPID_SEQ_RD])
return
if null_ipids:
self.add_result('TI', ipidclasses[nmap2_seq.IPID_SEQ_ZERO])
return
# Constant
all_zero = 1
for i in xrange(0, self.seq_num_responses - 1):
if ipid_diffs[i] != 0:
all_zero = 0
break
if all_zero:
self.add_result('TI', ipidclasses[nmap2_seq.IPID_SEQ_CONSTANT])
return
# Random positive increments
for i in xrange(0, self.seq_num_responses - 1):
if ipid_diffs[i] > 1000 and \
((ipid_diffs[i] % 256 != 0) or \
((ipid_diffs[i] % 256 == 0) and (ipid_diffs[i] >= 25600))):
self.add_result('TI', ipidclasses[nmap2_seq.IPID_SEQ_RPI])
return
# Broken Increment and Incremental
is_incremental = 1 # All differences are less than 10
is_ms = 1 # All differences are multiples of 256 and no greater than 5120
for i in xrange(0, self.seq_num_responses - 1):
if is_ms and ((ipid_diffs[i] > 5120) or (ipid_diffs[i] % 256) != 0):
is_ms = 0
if is_incremental and ipid_diffs[i] > 9:
is_incremental = 0
if is_ms:
self.add_result('TI', ipidclasses[nmap2_seq.IPID_SEQ_BROKEN_INCR])
elif is_incremental:
self.add_result('TI', ipidclasses[nmap2_seq.IPID_SEQ_INCR])
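# Worked examples with hand-made wrapped diffs (hypothetical values):
# [1, 2, 1] -> TI=I (all <= 9); [256, 512, 256] -> TI=BI (multiples of 256,
# none above 5120); [1100, 1500, 2000] -> TI=RI (a diff above 1000 that is
# not a clean multiple of 256); identical IPIDs -> TI=C; all-zero -> TI=Z.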
def calc_ts(self):
# 1. If any of the responses have no timestamp option, TS
# is set to U (unsupported).
# 2. If any of the timestamp values are zero, TS is set to 0.
# 3. If the average increments per second falls within the
# ranges 0-5.66, 70-150, or 150-350, TS is set to 1, 7, or 8,
# respectively. These three ranges get special treatment
# because they correspond to the 2 Hz, 100 Hz, and 200 Hz
# frequencies used by many hosts.
# 4. In all other cases, Nmap records the binary logarithm of
# the average increments per second, rounded to the nearest
# integer. Since most hosts use 1,000 Hz frequencies, A is
# a common result.
if self.pre_ts_seqclass == nmap2_seq.TS_SEQ_ZERO:
self.add_result('TS', '0')
elif self.pre_ts_seqclass == nmap2_seq.TS_SEQ_UNSUPPORTED:
self.add_result('TS', 'U')
elif self.seq_num_responses < 2:
return
avg_freq = 0.0
for i in xrange(0, self.seq_num_responses - 1):
dhz = self.ts_diffs[i] / self.time_diffs[i]
avg_freq += dhz / (self.seq_num_responses - 1)
PyImpact.t_log(2, "The avg TCP TS HZ is: %f" % avg_freq)
if avg_freq <= 5.66:
self.add_result('TS', "1")
elif 70 < avg_freq and avg_freq <= 150:
self.add_result('TS', "7")
elif 150 < avg_freq and avg_freq <= 350:
self.add_result('TS', "8")
else:
ts = int(round(.5 + math.log(avg_freq)/math.log(2)))
self.add_result('TS', "%X" % ts)
def calc_sp(self):
seq_gcd = reduce(my_gcd, self.seq_diffs)
seq_avg_rate = 0.0
for i in xrange(0, self.seq_num_responses - 1):
seq_avg_rate += self.seq_diffs[i] / self.time_diffs[i]
seq_avg_rate /= (self.seq_num_responses - 1)
seq_rate = seq_avg_rate
si_index = 0
seq_stddev = 0
if 0 == seq_gcd:
seq_rate = 0
else:
seq_rate = int(round(.5 + (math.log(seq_rate) / math.log(2)) * 8))
div_gcd = 1
if seq_gcd > 9:
div_gcd = seq_gcd
for i in xrange(0, self.seq_num_responses - 1):
rtmp = (self.seq_diffs[i] / self.time_diffs[i]) / div_gcd - \
seq_avg_rate / div_gcd
seq_stddev += rtmp * rtmp
seq_stddev /= self.seq_num_responses - 2
seq_stddev = math.sqrt(seq_stddev)
if seq_stddev <= 1:
si_index = 0
else:
si_index = int(round(.5 + (math.log(seq_stddev) / math.log(2)) * 8.0))
self.add_result('SP', "%X" % si_index)
self.add_result('GCD', "%X" % seq_gcd)
self.add_result('ISR', "%X" % seq_rate)
class nmap2_ops_container(os_id_test):
def __init__(self, responses):
os_id_test.__init__(self, 0)
self.seq_responses = responses
self.seq_num_responses = len(responses)
def test_id(self):
return "OPS"
def process(self):
if self.seq_num_responses != 6:
self.add_result('R', 'N')
return
for i in xrange(0, self.seq_num_responses):
tests = nmap2_tcp_tests(self.seq_responses[i].get_ip(),
self.seq_responses[i].get_tcp(),
0,
0)
self.add_result("O%i" % (i+1), tests.get_options())
def get_final_result(self):
if not self.get_result_dict():
return None
else:
return {self.test_id(): self.get_result_dict()}
class nmap2_win_container(os_id_test):
def __init__(self, responses):
os_id_test.__init__(self, 0)
self.seq_responses = responses
self.seq_num_responses = len(responses)
def test_id(self):
return "WIN"
def process(self):
if self.seq_num_responses != 6:
self.add_result('R', 'N')
return
for i in xrange(0, self.seq_num_responses):
tests = nmap2_tcp_tests(self.seq_responses[i].get_ip(),
self.seq_responses[i].get_tcp(),
0,
0)
self.add_result("W%i" % (i+1), tests.get_win())
def get_final_result(self):
if not self.get_result_dict():
return None
else:
return {self.test_id(): self.get_result_dict()}
class nmap2_t1_container(os_id_test):
def __init__(self, responses, seq_base):
os_id_test.__init__(self, 0)
self.seq_responses = responses
self.seq_num_responses = len(responses)
self.seq_base = seq_base
def test_id(self):
return "T1"
def process(self):
# R, DF, T*, TG*, W-, S, A, F, O-, RD*, Q
if self.seq_num_responses < 1:
self.add_result("R","N")
return
response = self.seq_responses[0]
tests = nmap2_tcp_tests(response.get_ip(),
response.get_tcp(),
self.seq_base,
nmap2_tcp_probe.acknowledgment)
self.add_result("R", "Y")
self.add_result("DF", tests.get_df())
self.add_result("S", tests.get_seq())
self.add_result("A", tests.get_ack())
self.add_result("F", tests.get_flags())
self.add_result("Q", tests.get_quirks())
def get_final_result(self):
if not self.get_result_dict():
return None
else:
return {self.test_id(): self.get_result_dict()}
class nmap2_icmp_container(os_id_test):
def __init__(self, responses):
os_id_test.__init__(self, 0)
self.icmp_responses = responses
self.icmp_num_responses = len(responses)
def test_id(self):
return "IE"
def process(self):
# R, DFI, T*, TG*, TOSI, CD, SI, DLI*
if self.icmp_num_responses != 2:
self.add_result("R","N")
return
ip1 = self.icmp_responses[0].child()
ip2 = self.icmp_responses[1].child()
icmp1 = ip1.child()
icmp2 = ip2.child()
self.add_result("R", "Y")
# Value Description
# N Neither of the ping responses have the DF bit set.
# S Both responses echo the DF value of the probe.
# Y Both of the response DF bits are set.
# O The one remaining combination: both responses have the DF bit toggled.
if not ip1.get_ip_df() and not ip2.get_ip_df():
self.add_result("DFI","N")
elif ip1.get_ip_df() and not ip2.get_ip_df():
self.add_result("DFI","S")
elif ip1.get_ip_df() and ip2.get_ip_df():
self.add_result("DFI","Y")
else:
self.add_result("DFI","O")
# Value Description
# Z Both TOS values are zero.
# S Both TOS values are each the same as in the corresponding probe.
# <NN> When they both use the same non-zero number, it is recorded here.
# O Any other combination.
if ip1.get_ip_tos() == 0 and ip2.get_ip_tos() == 0:
self.add_result("TOSI","Z")
elif ip1.get_ip_tos() == 0 and ip2.get_ip_tos() == 4:
self.add_result("TOSI","S")
elif ip1.get_ip_tos() == ip2.get_ip_tos():
self.add_result("TOSI","%X" % ip1.get_ip_tos())
else:
self.add_result("TOSI","O")
# Value Description
# Z Both code values are zero.
# S Both code values are the same as in the corresponding probe.
# <NN> When they both use the same non-zero number, it is shown here.
# O Any other combination.
if icmp1.get_icmp_code() == 0 and icmp2.get_icmp_code() == 0:
self.add_result("CD","Z")
elif icmp1.get_icmp_code() == 9 and icmp2.get_icmp_code() == 0:
self.add_result("CD","S")
elif icmp1.get_icmp_code() == icmp2.get_icmp_code():
self.add_result("CD","%X" % icmp1.get_icmp_code())
else:
self.add_result("CD","O")
# Value Description
# Z Both sequence numbers are set to 0.
# S Both sequence numbers echo the ones from the probes.
# <NNNN> When they both use the same non-zero number, it is recorded here.
# O Any other combination.
if icmp1.get_icmp_seq() == 0 and icmp2.get_icmp_seq() == 0:
self.add_result("SI","Z")
elif (icmp1.get_icmp_seq() == nmap2_icmp_echo_probe_1.sequence_number and
icmp2.get_icmp_seq() == nmap2_icmp_echo_probe_1.sequence_number + 1):
self.add_result("SI","S")
elif icmp1.get_icmp_seq() == icmp2.get_icmp_seq():
self.add_result("SI","%X" % icmp1.get_icmp_code())
else:
self.add_result("SI","O")
def get_final_result(self):
if not self.get_result_dict():
return None
else:
return {self.test_id(): self.get_result_dict()}
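# A populated result has the shape (field values hypothetical):
# {'IE': {'R': 'Y', 'DFI': 'N', 'TOSI': 'Z', 'CD': 'Z', 'SI': 'S'}}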
class nmap1_tcp_closed_1(nmap1_tcp_probe):
def __init__(self, id, addresses, tcp_ports):
nmap1_tcp_probe.__init__(self, id, addresses, tcp_ports, 0)
self.t.set_SYN()
def test_id(self):
return "T5"
def is_mine(self, packet):
if tcp_probe.is_mine(self, packet):
ip = packet.child()
if not ip:
return 0
tcp = ip.child()
if not tcp:
return 0
if tcp.get_RST():
return 1
else:
return 0
else:
return 0
class nmap2_tcp_closed_1(nmap2_tcp_probe_2_6):
# ...
# T5 sends a TCP SYN packet without IP DF and a window field of
# 31337 to a closed port
# ...
def __init__(self, id, addresses, tcp_ports):
nmap2_tcp_probe_2_6.__init__(self, id, addresses, tcp_ports, 0)
self.t.set_SYN()
self.i.set_ip_df(0)
self.t.set_th_win(31337)
def test_id(self):
return "T5"
class nmap1_tcp_closed_2(nmap1_tcp_probe):
def __init__(self, id, addresses, tcp_ports):
nmap1_tcp_probe.__init__(self, id, addresses, tcp_ports, 0)
self.t.set_ACK()
def test_id(self):
return "T6"
class nmap2_tcp_closed_2(nmap2_tcp_probe_2_6):
# ...
# T6 sends a TCP ACK packet with IP DF and a window field of
# 32768 to a closed port.
# ...
def __init__(self, id, addresses, tcp_ports):
nmap2_tcp_probe_2_6.__init__(self, id, addresses, tcp_ports, 0)
self.t.set_ACK()
self.i.set_ip_df(1)
self.t.set_th_win(32768)
def test_id(self):
return "T6"
class nmap1_tcp_closed_3(nmap1_tcp_probe):
def __init__(self, id, addresses, tcp_ports):
nmap1_tcp_probe.__init__(self, id, addresses, tcp_ports, 0)
self.t.set_FIN()
self.t.set_URG()
self.t.set_PSH()
def test_id(self):
return "T7"
class nmap2_tcp_closed_3(nmap2_tcp_probe_7):
# ...
# T7 sends a TCP packet with the FIN, PSH, and URG flags set and a
# window field of 65535 to a closed port. The IP DF bit is not set.
# ...
def __init__(self, id, addresses, tcp_ports):
nmap2_tcp_probe_7.__init__(self, id, addresses, tcp_ports, 0)
self.t.set_FIN()
self.t.set_URG()
self.t.set_PSH()
self.t.set_th_win(65535)
self.i.set_ip_df(0)
def test_id(self):
return "T7"
class NMAP2_OS_Class:
def __init__(self, vendor, name, family, device_type):
self.__vendor = vendor
self.__name = name
self.__family = family
self.__device_type = device_type
def get_vendor(self): return self.__vendor
def get_name(self): return self.__name
def get_family(self): return self.__family
def get_device_type(self): return self.__device_type
class NMAP2_Fingerprint:
def __init__(self, id, os_class, tests):
self.__id = id
self.__os_class = os_class
self.__tests = tests
def get_id(self): return self.__id
def get_os_class(self): return self.__os_class
def get_tests(self): return self.__tests
def __str__(self):
ret = "FP: [%s]" % self.__id
ret += "\n vendor: %s" % self.__os_class.get_vendor()
ret += "\n name: %s" % self.__os_class.get_name()
ret += "\n family: %s" % self.__os_class.get_family()
ret += "\n device_type: %s" % self.__os_class.get_device_type()
for test in self.__tests:
ret += "\n test: %s" % test
for pair in self.__tests[test]:
ret += "\n %s = [%s]" % (pair, self.__tests[test][pair])
return ret
literal_conv = { "RIPL" : { "G" : 0x148 },
"RID" : { "G" : 0x1042 },
"RUL" : { "G" : 0x134 } }
def parse_int(self, field, value):
try:
return int(value, 16)
except ValueError, err:
if NMAP2_Fingerprint.literal_conv.has_key( field ):
if NMAP2_Fingerprint.literal_conv[field].has_key(value):
return NMAP2_Fingerprint.literal_conv[field][value]
return 0
def match(self, field, ref, value):
options = ref.split("|")
for option in options:
if option.startswith(">"):
if self.parse_int(field, value) > \
self.parse_int(field, option[1:]):
return True
elif option.startswith("<"):
if self.parse_int(field, value) < \
self.parse_int(field, option[1:]):
return True
elif option.find("-") > -1:
range = option.split("-")
if (self.parse_int(field, value) >= \
self.parse_int(field, range[0]) and \
self.parse_int(field, value) <= \
self.parse_int(field, range[1])):
return True
else:
if str(value) == str(option):
return True
return False
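# Worked examples against nmap-os-db style expressions (hypothetical values):
# match('W1', '0|5B4', '5B4') -> True (literal alternative),
# match('T', '>3C', '40') -> True (0x40 > 0x3C),
# match('SP', '0-5', '3') -> True (hex range test).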
def compare(self, sample, mp):
max_points = 0
total_points = 0
for test in self.__tests:
# ignore unknown response lines:
if not sample.has_key(test):
continue
for field in self.__tests[test]:
# ignore unsupported fields:
if not sample[test].has_key(field) or \
not mp.has_key(test) or \
not mp[test].has_key(field):
continue
ref = self.__tests[test][field]
value = sample[test][field]
points = int(mp[test][field])
max_points += points
if self.match(field, ref, value):
total_points += points
return (total_points / float(max_points)) * 100
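# Scoring sketch: each field present in the sample, in this fingerprint and
# in the MatchPoints table adds its weight to max_points, and to total_points
# when match() succeeds; e.g. 3 matched points out of 4 yields 75.0.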
class NMAP2_Fingerprint_Matcher:
def __init__(self, filename):
self.__filename = filename
def find_matches(self, res, threshold):
output = []
try:
infile = open(self.__filename,"r")
mp = self.parse_mp(self.matchpoints(infile))
for fingerprint in self.fingerprints(infile):
fp = self.parse_fp(fingerprint)
similarity = fp.compare(res, mp)
if similarity >= threshold:
print "\"%s\" matches with an accuracy of %.2f%%" \
% (fp.get_id(), similarity)
output.append((similarity / 100,
fp.get_id(),
(fp.get_os_class().get_vendor(),
fp.get_os_class().get_name(),
fp.get_os_class().get_family(),
fp.get_os_class().get_device_type())))
infile.close()
except IOError, err:
print "IOError: %s", err
return output
def sections(self, infile, token):
OUT = 0
IN = 1
state = OUT
output = []
for line in infile:
line = line.strip()
if state == OUT:
if line.startswith(token):
state = IN
output = [line]
elif state == IN:
if line:
output.append(line)
else:
state = OUT
yield output
output = []
if output:
yield output
def fingerprints(self, infile):
for section in self.sections(infile,"Fingerprint"):
yield section
def matchpoints(self, infile):
return self.sections(infile,"MatchPoints").next()
def parse_line(self, line):
name = line[:line.find("(")]
pairs = line[line.find("(") + 1 : line.find(")")]
test = {}
for pair in pairs.split("%"):
pair = pair.split("=")
test[pair[0]] = pair[1]
return (name, test)
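# Worked example on an nmap-os-db style line:
# parse_line("SEQ(SP=C9%GCD=1%ISR=CD)")
# -> ('SEQ', {'SP': 'C9', 'GCD': '1', 'ISR': 'CD'})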
def parse_fp(self, fp):
tests = {}
for line in fp:
if line.startswith("#"):
continue
elif line.startswith("Fingerprint"):
fingerprint = line[len("Fingerprint") + 1:]
elif line.startswith("Class"):
(vendor,
name,
family,
device_type) = line[len("Class") + 1:].split("|")
os_class = NMAP2_OS_Class(vendor.strip(),
name.strip(),
family.strip(),
device_type.strip())
else:
test = self.parse_line(line)
tests[test[0]] = test[1]
return NMAP2_Fingerprint(fingerprint, os_class, tests)
def parse_mp(self, fp):
tests = {}
for line in fp:
if line.startswith("#"):
continue
elif line.startswith("MatchPoints"):
continue
else:
test = self.parse_line(line)
tests[test[0]] = test[1]
return tests
| apache-2.0 |
NL66278/odoo | addons/sale/report/invoice_report.py | 336 | 1680 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_inherit = 'account.invoice.report'
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_depends = {
'account.invoice': ['section_id'],
}
def _select(self):
return super(account_invoice_report, self)._select() + ", sub.section_id as section_id"
def _sub_select(self):
return super(account_invoice_report, self)._sub_select() + ", ai.section_id as section_id"
def _group_by(self):
return super(account_invoice_report, self)._group_by() + ", ai.section_id"
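# Net effect on the generated view (abbreviated sketch): the overrides append
# ", sub.section_id as section_id" to the outer SELECT, ", ai.section_id as
# section_id" to the sub-select over account_invoice ai, and ", ai.section_id"
# to the GROUP BY, so invoice analysis can be grouped by sales team.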
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kevin8909/xjerp | openerp/addons/account/wizard/account_report_general_ledger.py | 56 | 3202 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_report_general_ledger(osv.osv_memory):
_inherit = "account.common.account.report"
_name = "account.report.general.ledger"
_description = "General Ledger Report"
_columns = {
'landscape': fields.boolean("Landscape Mode"),
'initial_balance': fields.boolean('Include Initial Balances',
help='If you selected to filter by date or period, this field allow you to add a row to display the amount of debit/credit/balance that precedes the filter you\'ve set.'),
'amount_currency': fields.boolean("With Currency", help="It adds the currency column on report if the currency differs from the company currency."),
'sortby': fields.selection([('sort_date', 'Date'), ('sort_journal_partner', 'Journal & Partner')], 'Sort by', required=True),
'journal_ids': fields.many2many('account.journal', 'account_report_general_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'landscape': True,
'amount_currency': True,
'sortby': 'sort_date',
'initial_balance': False,
}
def onchange_fiscalyear(self, cr, uid, ids, fiscalyear=False, context=None):
res = {}
if not fiscalyear:
res['value'] = {'initial_balance': False}
return res
def _print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['landscape', 'initial_balance', 'amount_currency', 'sortby'])[0])
if not data['form']['fiscalyear_id']:# GTK client problem onchange does not consider in save record
data['form'].update({'initial_balance': False})
if data['form']['landscape']:
return { 'type': 'ir.actions.report.xml', 'report_name': 'account.general.ledger_landscape', 'datas': data}
return { 'type': 'ir.actions.report.xml', 'report_name': 'account.general.ledger', 'datas': data}
account_report_general_ledger()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/OpenGL/constants.py | 1 | 5094 | """OpenGL-wide constant types (not OpenGL.GL-specific)
These are basically the fundamental data-types that OpenGL
uses (note, doesn't include the OpenGL-ES types!)
"""
import ctypes
from OpenGL.constant import Constant
GL_FALSE = Constant( 'GL_FALSE', 0x0 )
GL_TRUE = Constant( 'GL_TRUE', 0x1 )
GL_BYTE = Constant( 'GL_BYTE', 0x1400 )
GL_UNSIGNED_BYTE = Constant( 'GL_UNSIGNED_BYTE', 0x1401 )
GL_SHORT = Constant( 'GL_SHORT', 0x1402 )
GL_UNSIGNED_SHORT = Constant( 'GL_UNSIGNED_SHORT', 0x1403 )
GL_INT = Constant( 'GL_INT', 0x1404 )
GL_UNSIGNED_INT = Constant( 'GL_UNSIGNED_INT', 0x1405 )
GL_UNSIGNED_INT64 = Constant( 'GL_UNSIGNED_INT64_AMD', 0x8BC2 )
GL_FLOAT = Constant( 'GL_FLOAT', 0x1406 )
GL_DOUBLE = Constant( 'GL_DOUBLE', 0x140a )
GL_CHAR = str
GL_HALF_NV = Constant( 'GL_HALF_NV', 0x1401 )
GL_VOID_P = object()
ctypes_version = [int(i) for i in ctypes.__version__.split('.')[:3]]
# Basic OpenGL data-types as ctypes declarations...
def _defineType( name, baseType, convertFunc = long ):
import OpenGL
do_wrapping = (
OpenGL.ALLOW_NUMPY_SCALARS or # explicitly require
(( # or we are using Python 2.5.x ctypes which doesn't support uint type numpy scalars
ctypes_version < [1,1,0]
and baseType in (ctypes.c_uint,ctypes.c_uint64,ctypes.c_ulong,ctypes.c_ushort)
) or
( # or we are using Python 2.5.x (x < 2) ctypes which doesn't support any numpy int scalars
ctypes_version < [1,0,2]
and baseType in (ctypes.c_int,ctypes.c_int64,ctypes.c_long,ctypes.c_short)
))
)
if do_wrapping:
original = baseType.from_param
if not getattr( original, 'from_param_numpy_scalar', False ):
def from_param( x, typeCode=None ):
try:
return original( x )
except TypeError, err:
try:
return original( convertFunc(x) )
except TypeError, err2:
raise err
from_param = staticmethod( from_param )
setattr( baseType, 'from_param', from_param )
baseType.from_param_numpy_scalar = True
return baseType
else:
return baseType
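# Illustrative effect (assuming numpy is installed and an affected ctypes):
# GLuint.from_param(numpy.uint32(7)) would raise TypeError on old ctypes;
# the wrapper retries via convertFunc, i.e. original(long(value)), so numpy
# integer scalars are accepted transparently.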
GLvoid = None
GLboolean = _defineType( 'GLboolean', ctypes.c_ubyte, bool )
GLenum = _defineType( 'GLenum', ctypes.c_uint )
GLfloat = _defineType( 'GLfloat', ctypes.c_float, float )
GLfloat_2 = GLfloat * 2
GLfloat_3 = GLfloat * 3
GLfloat_4 = GLfloat * 4
GLdouble = _defineType( 'GLdouble', ctypes.c_double, float )
GLdouble_2 = GLdouble * 2
GLdouble_3 = GLdouble * 3
GLdouble_4 = GLdouble * 4
GLbyte = ctypes.c_byte
GLshort = _defineType( 'GLshort', ctypes.c_short, int )
GLint = _defineType( 'GLint', ctypes.c_int, int )
GLuint = _defineType( 'GLuint', ctypes.c_uint, long )
GLsizei = _defineType( 'GLsizei', ctypes.c_int, int )
GLubyte = ctypes.c_ubyte
GLubyte_3 = GLubyte * 3
GLushort = _defineType( 'GLushort', ctypes.c_ushort, int )
GLhandleARB = _defineType( 'GLhandleARB', ctypes.c_uint, long )
GLhandle = _defineType( 'GLhandle', ctypes.c_uint, long )
GLchar = GLcharARB = ctypes.c_char
GLbitfield = _defineType( 'GLbitfield', ctypes.c_uint, long )
GLclampd = _defineType( 'GLclampd', ctypes.c_double, float )
GLclampf = _defineType( 'GLclampf', ctypes.c_float, float )
GLuint64 = GLuint64EXT = _defineType('GLuint64', ctypes.c_uint64, long )
GLint64 = GLint64EXT = _defineType('GLint64', ctypes.c_int64, long )
# ptrdiff_t, actually...
GLsizeiptrARB = GLsizeiptr = GLsizei
GLvdpauSurfaceNV = GLintptrARB = GLintptr = GLint
size_t = ctypes.c_ulong
void = None
GLhalfNV = GLhalfARB = ctypes.c_ushort
# GL.ARB.sync extension, GLsync is an opaque pointer to a struct
# in the extensions header, basically just a "token" that can be
# passed to the various operations...
class _GLsync( ctypes.Structure ):
"""Opaque structure definition to fool ctypes into treating us as a real structure"""
GLsync = ctypes.POINTER( _GLsync ) # ctypes.c_void_p does *not* work as a return type...
GLvoidp = ctypes.c_void_p
ARRAY_TYPE_TO_CONSTANT = [
('GLclampd', GL_DOUBLE),
('GLclampf', GL_FLOAT),
('GLfloat', GL_FLOAT),
('GLdouble', GL_DOUBLE),
('GLbyte', GL_BYTE),
('GLshort', GL_SHORT),
('GLint', GL_INT),
('GLubyte', GL_UNSIGNED_BYTE),
('GLushort', GL_UNSIGNED_SHORT),
('GLuint', GL_UNSIGNED_INT),
('GLenum', GL_UNSIGNED_INT),
]
from OpenGL.platform import PLATFORM
_FUNCTION_TYPE = PLATFORM.functionTypeFor(PLATFORM.GL)
GLDEBUGPROCARB = _FUNCTION_TYPE(
void,
GLenum, # source,
GLenum, #type,
GLuint, # id
GLenum, # severity
GLsizei, # length
ctypes.c_char_p, # message
GLvoidp, # userParam
)
class _cl_context( ctypes.Structure ):
"""Placeholder/empty structure for _cl_context"""
class _cl_event( ctypes.Structure ):
"""Placeholder/empty structure for _cl_event"""
GLDEBUGPROCAMD = _FUNCTION_TYPE(
void,
GLuint,# id,
GLenum,# category,
GLenum,# severity,
GLsizei,# length,
ctypes.c_char_p,# message,
GLvoidp,# userParam
)
| mit |
just4jin/3D_Scanner | Python/Step6 GUI/HHSL3DScanner.fish/camlcoloc.py | 3 | 1563 | # NOTE: standalone fragment; the calling scan script is assumed to define vertlino, horzlino, img_names and imgdesig(). The imports below cover the numpy/cv2 calls used here.
import numpy as np
import cv2
grayimg=np.zeros((vertlino, horzlino), dtype=np.int16)
rightcamcode=np.zeros((vertlino, horzlino,2), dtype=np.int16)
##=======================================================
#Horizontal gray code
for ii in range(3,22,2):
xx=ii-3
xx=xx//2
filename1 = img_names[ii]
filename2 = img_names[ii-1]
ff=imgdesig(filename1,filename2)
print 'processing %s...' % filename1, (2**xx)
grayimg=grayimg+(2**xx)*ff
imgbin3=np.zeros((vertlino, horzlino,3), dtype=np.uint8)
for ii in range(0, horzlino):
for jj in range(0, vertlino):
rightcamcode[jj][ii][0]=grayimg[jj][ii]
imgbin3[jj][ii][1]= grayimg[jj][ii]%256
imgbin3[jj][ii][2]= 40*grayimg[jj][ii]//256
imgbin3[jj][ii][0]= 4
img1=(grayimg%255)
cv2.imshow("PWindow2",imgbin3)
cv2.waitKey(100)
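#At this point grayimg holds, per pixel, the 10-bit horizontal stripe code:
#bit k was set whenever imgdesig() (presumably a thresholded difference of
#the pattern/inverse pair) reported that pixel lit for pattern k; the loop
#below rebuilds the same code for the vertical patterns.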
##=======================================================
#Vertical gray code
img1=cv2.imread(img_names[0],cv2.IMREAD_GRAYSCALE)
grayimg=(img1*0)+1023
grayimg=grayimg*0
for ii in range(23,42,2):
xx=ii-22
xx=xx//2
filename1 = img_names[ii]
filename2 = img_names[ii-1]
ff=imgdesig(filename1,filename2)
print 'processing %s...' % filename1, (2**xx)
grayimg=grayimg+(2**xx)*ff
for ii in range(0, horzlino):
for jj in range(0, vertlino):
rightcamcode[jj][ii][1]=grayimg[jj][ii]
imgbin3[jj][ii][0]= (imgbin3[jj][ii][0]+grayimg[jj][ii]%256)%256
imgbin3[jj][ii][2]= 40*(imgbin3[jj][ii][2]+grayimg[jj][ii]%256)//80
imgbin3[jj][ii][1]= 4
img1=(grayimg%255)
cv2.imshow("PWindow2",imgbin3)
cv2.waitKey(2000)
| gpl-3.0 |
ehocchen/trading-with-python | lib/classes.py | 76 | 7847 | """
worker classes
@author: Jev Kuznetsov
Licence: GPL v2
"""
__docformat__ = 'restructuredtext'
import os
import logger as logger
import yahooFinance as yahoo
from functions import returns, rank
from datetime import date
from pandas import DataFrame, Series
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Symbol(object):
'''
Symbol class, the foundation of Trading With Python library,
This class acts as an interface to Yahoo data, Interactive Brokers etc
'''
def __init__(self,name):
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('class created.')
self.dataDir = os.getenv("USERPROFILE")+'\\twpData\\symbols\\'+self.name
self.log.debug('Data dir:'+self.dataDir)
self.ohlc = None # historic OHLC data
def downloadHistData(self, startDate=(2010,1,1),endDate=date.today().timetuple()[:3],\
source = 'yahoo'):
'''
get historical OHLC data from a data source (yahoo is default)
startDate and endDate are tuples in the form (y,m,d)
'''
self.log.debug('Getting OHLC data')
self.ohlc = yahoo.getHistoricData(self.name,startDate,endDate)
def histData(self,column='adj_close'):
'''
Return a column of historic data.
Returns
-------------
df : DataFrame
'''
s = self.ohlc[column]
return DataFrame(s.values,s.index,[self.name])
@property
def dayReturns(self):
''' close-close returns '''
return (self.ohlc['adj_close']/self.ohlc['adj_close'].shift(1)-1)
#return DataFrame(s.values,s.index,[self.name])
class Portfolio(object):
def __init__(self,histPrice,name=''):
"""
Constructor
Parameters
----------
histPrice : historic price
"""
self.histPrice = histPrice
self.params = DataFrame(index=self.symbols)
self.params['capital'] = 100*np.ones(self.histPrice.shape[1],dtype=np.float)
self.params['last'] = self.histPrice.tail(1).T.ix[:,0]
self.params['shares'] = self.params['capital']/self.params['last']
self.name= name
def setHistPrice(self,histPrice):
self.histPrice = histPrice
def setShares(self,shares):
""" set number of shares, adjust capital
shares: list, np array or Series
"""
if len(shares) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['shares'] = shares
self.params['capital'] = self.params['shares']*self.params['last']
def setCapital(self,capital):
""" Set target captial, adjust number of shares """
if len(capital) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['capital'] = capital
self.params['shares'] = self.params['capital']/self.params['last']
def calculateStatistics(self,other=None):
''' calculate spread statistics, save internally '''
res = {}
res['micro'] = rank(self.returns[-1],self.returns)
res['macro'] = rank(self.value[-1], self.value)
res['last'] = self.value[-1]
if other is not None:
res['corr'] = self.returns.corr(returns(other))
return Series(res,name=self.name)
@property
def symbols(self):
return self.histPrice.columns.tolist()
@property
def returns(self):
return (returns(self.histPrice)*self.params['capital']).sum(axis=1)
@property
def value(self):
return (self.histPrice*self.params['shares']).sum(axis=1)
def __repr__(self):
return ("Portfolio %s \n" % self.name ) + str(self.params)
#return ('Spread %s :' % self.name ) + str.join(',',
# ['%s*%.2f' % t for t in zip(self.symbols,self.capital)])
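# Usage sketch (hypothetical data; histPrice is a DataFrame of adjusted
# closes, one column per symbol):
# p = Portfolio(histPrice, name='demo')
# p.setCapital([100., 100.]) # target capital per symbol; shares follow
# p.calculateStatistics() # Series with micro/macro rank and last value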
class Spread(object):
'''
Spread class, used to build a spread out of two symbols.
'''
def __init__(self,stock,hedge,beta=None):
''' init with symbols or price series '''
if isinstance(stock,str) and isinstance(hedge,str):
self.symbols = [stock,hedge]
self._getYahooData()
elif isinstance(stock,pd.Series) and isinstance(hedge,pd.Series):
self.symbols = [stock.name,hedge.name]
self.price = pd.DataFrame(dict(zip(self.symbols,[stock,hedge]))).dropna()
else:
raise ValueError('Both stock and hedge should be of the same type, symbol string or Series')
# calculate returns
self.returns = self.price.pct_change()
if beta is not None:
self.beta = beta
else:
self.estimateBeta()
# set data
self.data = pd.DataFrame(index = self.symbols)
self.data['beta'] = pd.Series({self.symbols[0]:1., self.symbols[1]:-self.beta})
def calculateShares(self,bet):
''' set number of shares based on last quote '''
if 'price' not in self.data.columns:
print 'Getting quote...'
self.getQuote()
self.data['shares'] = bet*self.data['beta']/self.data['price']
def estimateBeta(self,plotOn=False):
""" linear estimation of beta """
x = self.returns[self.symbols[1]] # hedge
y = self.returns[self.symbols[0]] # stock
#avoid extremes
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
if plotOn:
plt.plot(x,y,'o')
plt.grid(True)
iteration = 1
nrOutliers = 1
while iteration < 3 and nrOutliers > 0 :
(a,b) = np.polyfit(x,y,1)
yf = np.polyval([a,b],x)
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers = sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
if plotOn:
yf = x*beta
plt.plot(x,yf,'-',color='red')
plt.xlabel(self.symbols[1])
plt.ylabel(self.symbols[0])
self.beta = beta
return beta
@property
def spread(self):
''' return daily returns of the pair '''
return (self.returns*self.data['beta']).sum(1)
def getQuote(self):
''' get current quote from yahoo '''
q = yahoo.getQuote(self.symbols)
self.data['price'] = q['last']
def _getYahooData(self, startDate=(2007,1,1)):
""" fetch historic data """
data = {}
for symbol in self.symbols:
print 'Downloading %s' % symbol
data[symbol]=(yahoo.getHistoricData(symbol,sDate=startDate)['adj_close'] )
self.price = pd.DataFrame(data).dropna()
def __repr__(self):
return 'Spread 1*%s & %.2f*%s ' % (self.symbols[0],-self.beta,self.symbols[1])
@property
def name(self):
return str.join('_',self.symbols)
if __name__=='__main__':
s = Spread('SPY','IWM')
| bsd-3-clause |
redhat-openstack/neutron | neutron/tests/unit/midonet/test_midonet_lib.py | 5 | 7151 | # Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
import testtools
import webob.exc as w_exc
from neutron.openstack.common import uuidutils
with mock.patch.dict(sys.modules, {'midonetclient': mock.Mock()}):
from neutron.plugins.midonet import midonet_lib
import neutron.tests.unit.midonet.mock_lib as mock_lib
def _create_test_chain(id, name, tenant_id):
return {'id': id, 'name': name, 'tenant_id': tenant_id}
def _create_test_port_group(id, name, tenant_id):
return {"id": id, "name": name, "tenant_id": tenant_id}
class MidoClientTestCase(testtools.TestCase):
def setUp(self):
super(MidoClientTestCase, self).setUp()
self._tenant_id = 'test-tenant'
self.mock_api = mock.Mock()
self.mock_api_cfg = mock_lib.MidoClientMockConfig(self.mock_api)
self.mock_api_cfg.setup()
self.client = midonet_lib.MidoClient(self.mock_api)
def test_delete_chains_by_names(self):
tenant_id = uuidutils.generate_uuid()
chain1_id = uuidutils.generate_uuid()
chain1 = _create_test_chain(chain1_id, "chain1", tenant_id)
chain2_id = uuidutils.generate_uuid()
chain2 = _create_test_chain(chain2_id, "chain2", tenant_id)
calls = [mock.call.delete_chain(chain1_id),
mock.call.delete_chain(chain2_id)]
self.mock_api_cfg.chains_in = [chain2, chain1]
self.client.delete_chains_by_names(tenant_id, ["chain1", "chain2"])
self.mock_api.assert_has_calls(calls, any_order=True)
def test_delete_port_group_by_name(self):
tenant_id = uuidutils.generate_uuid()
pg1_id = uuidutils.generate_uuid()
pg1 = _create_test_port_group(pg1_id, "pg1", tenant_id)
pg2_id = uuidutils.generate_uuid()
pg2 = _create_test_port_group(pg2_id, "pg2", tenant_id)
self.mock_api_cfg.port_groups_in = [pg1, pg2]
self.client.delete_port_group_by_name(tenant_id, "pg1")
self.mock_api.delete_port_group.assert_called_once_with(pg1_id)
def test_create_dhcp(self):
bridge = mock.Mock()
gateway_ip = "192.168.1.1"
cidr = "192.168.1.0/24"
host_rts = [{'destination': '10.0.0.0/24', 'nexthop': '10.0.0.1'},
{'destination': '10.0.1.0/24', 'nexthop': '10.0.1.1'}]
dns_servers = ["8.8.8.8", "8.8.4.4"]
dhcp_call = mock.call.add_bridge_dhcp(bridge, gateway_ip, cidr,
host_rts=host_rts,
dns_nservers=dns_servers)
self.client.create_dhcp(bridge, gateway_ip, cidr, host_rts=host_rts,
dns_servers=dns_servers)
self.mock_api.assert_has_calls([dhcp_call])
def test_delete_dhcp(self):
bridge = mock.Mock()
subnet = mock.Mock()
subnet.get_subnet_prefix.return_value = "10.0.0.0"
subnets = mock.MagicMock(return_value=[subnet])
bridge.get_dhcp_subnets.side_effect = subnets
self.client.delete_dhcp(bridge, "10.0.0.0/24")
bridge.assert_has_calls(mock.call.get_dhcp_subnets)
subnet.assert_has_calls([mock.call.get_subnet_prefix(),
mock.call.delete()])
def test_add_dhcp_host(self):
bridge = mock.Mock()
dhcp_subnet_call = mock.call.get_dhcp_subnet("10.0.0.0_24")
ip_addr_call = dhcp_subnet_call.add_dhcp_host().ip_addr("10.0.0.10")
mac_addr_call = ip_addr_call.mac_addr("2A:DB:6B:8C:19:99")
calls = [dhcp_subnet_call, ip_addr_call, mac_addr_call,
mac_addr_call.create()]
self.client.add_dhcp_host(bridge, "10.0.0.0/24", "10.0.0.10",
"2A:DB:6B:8C:19:99")
bridge.assert_has_calls(calls, any_order=True)
def test_add_dhcp_route_option(self):
bridge = mock.Mock()
subnet = bridge.get_dhcp_subnet.return_value
subnet.get_opt121_routes.return_value = None
dhcp_subnet_call = mock.call.get_dhcp_subnet("10.0.0.0_24")
dst_ip = "10.0.0.3/24"
gw_ip = "10.0.0.1"
prefix, length = dst_ip.split("/")
routes = [{'destinationPrefix': prefix, 'destinationLength': length,
'gatewayAddr': gw_ip}]
opt121_routes_call = dhcp_subnet_call.opt121_routes(routes)
calls = [dhcp_subnet_call, opt121_routes_call,
opt121_routes_call.update()]
self.client.add_dhcp_route_option(bridge, "10.0.0.0/24",
gw_ip, dst_ip)
bridge.assert_has_calls(calls, any_order=True)
def test_get_router_error(self):
self.mock_api.get_router.side_effect = w_exc.HTTPInternalServerError()
self.assertRaises(midonet_lib.MidonetApiException,
self.client.get_router, uuidutils.generate_uuid())
def test_get_router_not_found(self):
self.mock_api.get_router.side_effect = w_exc.HTTPNotFound()
self.assertRaises(midonet_lib.MidonetResourceNotFound,
self.client.get_router, uuidutils.generate_uuid())
def test_get_bridge_error(self):
self.mock_api.get_bridge.side_effect = w_exc.HTTPInternalServerError()
self.assertRaises(midonet_lib.MidonetApiException,
self.client.get_bridge, uuidutils.generate_uuid())
def test_get_bridge_not_found(self):
self.mock_api.get_bridge.side_effect = w_exc.HTTPNotFound()
self.assertRaises(midonet_lib.MidonetResourceNotFound,
self.client.get_bridge, uuidutils.generate_uuid())
def test_get_bridge(self):
bridge_id = uuidutils.generate_uuid()
bridge = self.client.get_bridge(bridge_id)
self.assertIsNotNone(bridge)
self.assertEqual(bridge.get_id(), bridge_id)
self.assertTrue(bridge.get_admin_state_up())
def test_add_bridge_port(self):
bridge_id = uuidutils.generate_uuid()
bridge = self.client.get_bridge(bridge_id)
self.assertIsNotNone(bridge)
port = self.client.add_bridge_port(bridge)
self.assertEqual(bridge.get_id(), port.get_bridge_id())
self.assertTrue(port.get_admin_state_up())
def test_get_router(self):
router_id = uuidutils.generate_uuid()
router = self.client.get_router(router_id)
self.assertIsNotNone(router)
self.assertEqual(router.get_id(), router_id)
self.assertTrue(router.get_admin_state_up())
| apache-2.0 |
mccheung/kbengine | kbe/res/scripts/common/Lib/trace.py | 85 | 31475 | #!/usr/bin/env python3
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,],
trace=0, count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
__all__ = ['Trace', 'CoverageResults']
import linecache
import os
import re
import sys
import token
import tokenize
import inspect
import gc
import dis
import pickle
from warnings import warn as _warn
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
try:
import threading
except ImportError:
_settrace = sys.settrace
def _unsettrace():
sys.settrace(None)
else:
def _settrace(func):
threading.settrace(func)
sys.settrace(func)
def _unsettrace():
sys.settrace(None)
threading.settrace(None)
def _usage(outfile):
outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]
Meta-options:
--help Display this help then exit.
--version Output version information then exit.
Otherwise, exactly one of the following three options must be given:
-t, --trace Print each line to sys.stdout before it is executed.
-c, --count Count the number of times each line is executed
and write the counts to <module>.cover for each
module executed, in the module's directory.
See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs Keep track of which functions are executed at least
once and write the results to sys.stdout after the
program exits.
-T, --trackcalls Keep track of caller/called pairs and write the
results to sys.stdout after the program exits.
-r, --report Generate a report from a counts file; do not execute
any code. `--file' must specify the results file to
read, which must have been created in a previous run
with `--count --file=FILE'.
Modifiers:
-f, --file=<file> File to accumulate counts over several runs.
-R, --no-report Do not generate the coverage report files.
Useful if you want to accumulate over several runs.
-C, --coverdir=<dir> Directory where the report files go. The coverage
report for <package>.<module> is written to file
<dir>/<package>/<module>.cover.
-m, --missing Annotate executable lines that were not executed
with '>>>>>> '.
-s, --summary Write a brief summary on stdout for each file.
(Can only be used with --count or --report.)
-g, --timing Prefix each line with the time since the program started.
Only used while tracing.
Filters, may be repeated multiple times:
--ignore-module=<mod> Ignore the given module(s) and its submodules
(if it is a package). Accepts comma separated
list of module names
--ignore-dir=<dir> Ignore files in the given directory (multiple
directories can be joined by os.pathsep).
""" % sys.argv[0])
PRAGMA_NOCOVER = "#pragma NO COVER"
# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
class _Ignore:
def __init__(self, modules=None, dirs=None):
self._mods = set() if not modules else set(modules)
self._dirs = [] if not dirs else [os.path.normpath(d)
for d in dirs]
self._ignore = { '<string>': 1 }
def names(self, filename, modulename):
if modulename in self._ignore:
return self._ignore[modulename]
# haven't seen this one before, so see if the module name is
# on the ignore list.
if modulename in self._mods: # Identical names, so ignore
self._ignore[modulename] = 1
return 1
# check if the module is a proper submodule of something on
# the ignore list
for mod in self._mods:
# Need to take some care since ignoring
# "cmp" mustn't mean ignoring "cmpcache" but ignoring
# "Spam" must also mean ignoring "Spam.Eggs".
if modulename.startswith(mod + '.'):
self._ignore[modulename] = 1
return 1
# Now check that filename isn't in one of the directories
if filename is None:
# must be a built-in, so we must ignore
self._ignore[modulename] = 1
return 1
# Ignore a file when it contains one of the ignorable paths
for d in self._dirs:
# The '+ os.sep' is to ensure that d is a parent directory,
# as compared to cases like:
# d = "/usr/local"
# filename = "/usr/local.py"
# or
# d = "/usr/local.py"
# filename = "/usr/local.py"
if filename.startswith(d + os.sep):
self._ignore[modulename] = 1
return 1
# Tried the different ways, so we don't ignore this module
self._ignore[modulename] = 0
return 0
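# Example (hypothetical arguments, POSIX paths): with modules={'spam'} and
# dirs=['/usr/lib'], names('/usr/lib/eggs.py', 'eggs') returns 1 via the
# directory rule, and names('./spam/ham.py', 'spam.ham') returns 1 because
# spam.ham is a submodule of an ignored module.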
def _modname(path):
"""Return a plausible module name for the patch."""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
def _fullmodname(path):
"""Return a plausible module name for the path."""
# If the file 'path' is part of a package, then the filename isn't
# enough to uniquely identify it. Try to do the right thing by
# looking in sys.path for the longest matching prefix. We'll
# assume that the rest is the package name.
comparepath = os.path.normcase(path)
longest = ""
for dir in sys.path:
dir = os.path.normcase(dir)
if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
if len(dir) > len(longest):
longest = dir
if longest:
base = path[len(longest) + 1:]
else:
base = path
# the drive letter is never part of the module name
drive, base = os.path.splitdrive(base)
base = base.replace(os.sep, ".")
if os.altsep:
base = base.replace(os.altsep, ".")
filename, ext = os.path.splitext(base)
return filename.lstrip(".")
class CoverageResults:
def __init__(self, counts=None, calledfuncs=None, infile=None,
callers=None, outfile=None):
self.counts = counts
if self.counts is None:
self.counts = {}
self.counter = self.counts.copy() # map (filename, lineno) to count
self.calledfuncs = calledfuncs
if self.calledfuncs is None:
self.calledfuncs = {}
self.calledfuncs = self.calledfuncs.copy()
self.callers = callers
if self.callers is None:
self.callers = {}
self.callers = self.callers.copy()
self.infile = infile
self.outfile = outfile
if self.infile:
# Try to merge existing counts file.
try:
counts, calledfuncs, callers = \
pickle.load(open(self.infile, 'rb'))
self.update(self.__class__(counts, calledfuncs, callers))
except (OSError, EOFError, ValueError) as err:
print(("Skipping counts file %r: %s"
% (self.infile, err)), file=sys.stderr)
def is_ignored_filename(self, filename):
"""Return True if the filename does not refer to a file
we want to have reported.
"""
return filename.startswith('<') and filename.endswith('>')
def update(self, other):
"""Merge in the data from another CoverageResults"""
counts = self.counts
calledfuncs = self.calledfuncs
callers = self.callers
other_counts = other.counts
other_calledfuncs = other.calledfuncs
other_callers = other.callers
for key in other_counts:
counts[key] = counts.get(key, 0) + other_counts[key]
for key in other_calledfuncs:
calledfuncs[key] = 1
for key in other_callers:
callers[key] = 1
def write_results(self, show_missing=True, summary=False, coverdir=None):
"""
@param coverdir
"""
if self.calledfuncs:
print()
print("functions called:")
calls = self.calledfuncs
for filename, modulename, funcname in sorted(calls):
print(("filename: %s, modulename: %s, funcname: %s"
% (filename, modulename, funcname)))
if self.callers:
print()
print("calling relationships:")
lastfile = lastcfile = ""
for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \
in sorted(self.callers):
if pfile != lastfile:
print()
print("***", pfile, "***")
lastfile = pfile
lastcfile = ""
if cfile != pfile and lastcfile != cfile:
print(" -->", cfile)
lastcfile = cfile
print(" %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc))
# turn the counts data ("(filename, lineno) = count") into something
# accessible on a per-file basis
per_file = {}
for filename, lineno in self.counts:
lines_hit = per_file[filename] = per_file.get(filename, {})
lines_hit[lineno] = self.counts[(filename, lineno)]
# accumulate summary info, if needed
sums = {}
for filename, count in per_file.items():
if self.is_ignored_filename(filename):
continue
if filename.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if coverdir is None:
dir = os.path.dirname(os.path.abspath(filename))
modulename = _modname(filename)
else:
dir = coverdir
if not os.path.exists(dir):
os.makedirs(dir)
modulename = _fullmodname(filename)
# If desired, get a list of the line numbers which represent
# executable content (returned as a dict for better lookup speed)
if show_missing:
lnotab = _find_executable_linenos(filename)
else:
lnotab = {}
source = linecache.getlines(filename)
coverpath = os.path.join(dir, modulename + ".cover")
with open(filename, 'rb') as fp:
encoding, _ = tokenize.detect_encoding(fp.readline)
n_hits, n_lines = self.write_results_file(coverpath, source,
lnotab, count, encoding)
if summary and n_lines:
percent = int(100 * n_hits / n_lines)
sums[modulename] = n_lines, percent, modulename, filename
if summary and sums:
print("lines cov% module (path)")
for m in sorted(sums):
n_lines, percent, modulename, filename = sums[m]
print("%5d %3d%% %s (%s)" % sums[m])
if self.outfile:
# try and store counts and module info into self.outfile
try:
pickle.dump((self.counts, self.calledfuncs, self.callers),
open(self.outfile, 'wb'), 1)
except OSError as err:
print("Can't save counts files because %s" % err, file=sys.stderr)
def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None):
"""Return a coverage results file in path."""
try:
outfile = open(path, "w", encoding=encoding)
except OSError as err:
print(("trace: Could not open %r for writing: %s"
"- skipping" % (path, err)), file=sys.stderr)
return 0, 0
n_lines = 0
n_hits = 0
for lineno, line in enumerate(lines, 1):
# do the blank/comment match to try to mark more lines
# (help the reader find stuff that hasn't been covered)
if lineno in lines_hit:
outfile.write("%5d: " % lines_hit[lineno])
n_hits += 1
n_lines += 1
elif rx_blank.match(line):
outfile.write(" ")
else:
# lines preceded by no marks weren't hit
# Highlight them if so indicated, unless the line contains
# #pragma: NO COVER
if lineno in lnotab and not PRAGMA_NOCOVER in line:
outfile.write(">>>>>> ")
n_lines += 1
else:
outfile.write(" ")
outfile.write(line.expandtabs(8))
outfile.close()
return n_hits, n_lines
def _find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
for _, lineno in dis.findlinestarts(code):
if lineno not in strs:
linenos[lineno] = 1
return linenos
def _find_lines(code, strs):
"""Return lineno dict for all code objects reachable from code."""
# get all of the lineno information from the code of this scope level
linenos = _find_lines_from_code(code, strs)
# and check the constants for references to other code objects
for c in code.co_consts:
if inspect.iscode(c):
# find another code object, so recurse into it
linenos.update(_find_lines(c, strs))
return linenos
def _find_strings(filename, encoding=None):
"""Return a dict of possible docstring positions.
The dict maps line numbers to strings. There is an entry for
each line that contains only a string or a part of a triple-quoted
string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
with open(filename, encoding=encoding) as f:
tok = tokenize.generate_tokens(f.readline)
for ttype, tstr, start, end, line in tok:
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
return d
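# Sketch: for a source file such as
#
#     """Module docstring."""
#     def f():
#         "f docstring"
#         return 1
#
# _find_strings() maps lines 1 and 3 to 1, since each STRING token there is
# preceded by an INDENT (line 1 by the synthetic one seeded above); those
# lines are then excluded from the executable set by _find_lines_from_code().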
def _find_executable_linenos(filename):
"""Return dict where keys are line numbers in the line number table."""
try:
with tokenize.open(filename) as f:
prog = f.read()
encoding = f.encoding
except OSError as err:
print(("Not printing coverage data for %r: %s"
% (filename, err)), file=sys.stderr)
return {}
code = compile(prog, filename, "exec")
strs = _find_strings(filename, encoding)
return _find_lines(code, strs)
class Trace:
def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
ignoremods=(), ignoredirs=(), infile=None, outfile=None,
timing=False):
"""
@param count true iff it should count the number of times each
line is executed
@param trace true iff it should print out each line that is
being counted
@param countfuncs true iff it should just output a list of
(filename, modulename, funcname,) for functions
that were called at least once; This overrides
`count' and `trace'
@param ignoremods a list of the names of modules to ignore
@param ignoredirs a list of the names of directories to ignore
all of the (recursive) contents of
@param infile file from which to read stored counts to be
added into the results
@param outfile file in which to write the results
@param timing true iff timing information should be displayed
"""
self.infile = infile
self.outfile = outfile
self.ignore = _Ignore(ignoremods, ignoredirs)
self.counts = {} # keys are (filename, linenumber)
self.pathtobasename = {} # for memoizing os.path.basename
self.donothing = 0
self.trace = trace
self._calledfuncs = {}
self._callers = {}
self._caller_cache = {}
self.start_time = None
if timing:
self.start_time = _time()
if countcallers:
self.globaltrace = self.globaltrace_trackcallers
elif countfuncs:
self.globaltrace = self.globaltrace_countfuncs
elif trace and count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace_and_count
elif trace:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace
elif count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_count
else:
# Ahem -- do nothing? Okay.
self.donothing = 1
def run(self, cmd):
import __main__
dict = __main__.__dict__
self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals=None, locals=None):
if globals is None: globals = {}
if locals is None: locals = {}
if not self.donothing:
_settrace(self.globaltrace)
try:
exec(cmd, globals, locals)
finally:
if not self.donothing:
_unsettrace()
def runfunc(self, func, *args, **kw):
result = None
if not self.donothing:
sys.settrace(self.globaltrace)
try:
result = func(*args, **kw)
finally:
if not self.donothing:
sys.settrace(None)
return result
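# Typical programmatic use, sketched after the stdlib trace documentation
# (the traced function and cover directory are placeholders):
#
#     tracer = Trace(count=1, trace=0)
#     tracer.runfunc(some_function, arg1, arg2)
#     results = tracer.results()
#     results.write_results(show_missing=True, coverdir="/tmp/cover")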
def file_module_function_of(self, frame):
code = frame.f_code
filename = code.co_filename
if filename:
modulename = _modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
# require len(funcs) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = classes[0].__name__
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
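# Sketch of the gc-based class-name recovery above (relies on CPython gc
# semantics): given
#
#     class C:
#         def m(self):
#             pass
#
# the referrers of C.m.__code__ include the function m; the function's
# referrers include the class attribute dict; and that dict's referrers
# include C itself (it has __bases__), so the reported funcname becomes
# "C.m". Any ambiguity along the chain falls back to the bare name.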
def globaltrace_trackcallers(self, frame, why, arg):
"""Handler for call events.
Adds information about who called who to the self._callers dict.
"""
if why == 'call':
# XXX Should do a better job of identifying methods
this_func = self.file_module_function_of(frame)
parent_func = self.file_module_function_of(frame.f_back)
self._callers[(parent_func, this_func)] = 1
def globaltrace_countfuncs(self, frame, why, arg):
"""Handler for call events.
Adds (filename, modulename, funcname) to the self._calledfuncs dict.
"""
if why == 'call':
this_func = self.file_module_function_of(frame)
self._calledfuncs[this_func] = 1
def globaltrace_lt(self, frame, why, arg):
"""Handler for call events.
If the code block being entered is to be ignored, returns `None',
else returns self.localtrace.
"""
if why == 'call':
code = frame.f_code
filename = frame.f_globals.get('__file__', None)
if filename:
# XXX _modname() doesn't work right for packages, so
# the ignore support won't work right for packages
modulename = _modname(filename)
if modulename is not None:
ignore_it = self.ignore.names(filename, modulename)
if not ignore_it:
if self.trace:
print((" --- modulename: %s, funcname: %s"
% (modulename, code.co_name)))
return self.localtrace
else:
return None
def localtrace_trace_and_count(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
if self.start_time:
print('%.2f' % (_time() - self.start_time), end=' ')
bname = os.path.basename(filename)
print("%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)), end='')
return self.localtrace
def localtrace_trace(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if self.start_time:
print('%.2f' % (_time() - self.start_time), end=' ')
bname = os.path.basename(filename)
print("%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)), end='')
return self.localtrace
def localtrace_count(self, frame, why, arg):
if why == "line":
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
return self.localtrace
def results(self):
return CoverageResults(self.counts, infile=self.infile,
outfile=self.outfile,
calledfuncs=self._calledfuncs,
callers=self._callers)
def _err_exit(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.exit(1)
def main(argv=None):
import getopt
if argv is None:
argv = sys.argv
try:
opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lTg",
["help", "version", "trace", "count",
"report", "no-report", "summary",
"file=", "missing",
"ignore-module=", "ignore-dir=",
"coverdir=", "listfuncs",
"trackcalls", "timing"])
except getopt.error as msg:
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Try `%s --help' for more information\n"
% sys.argv[0])
sys.exit(1)
trace = 0
count = 0
report = 0
no_report = 0
counts_file = None
missing = 0
ignore_modules = []
ignore_dirs = []
coverdir = None
summary = 0
listfuncs = False
countcallers = False
timing = False
for opt, val in opts:
if opt == "--help":
_usage(sys.stdout)
sys.exit(0)
if opt == "--version":
sys.stdout.write("trace 2.0\n")
sys.exit(0)
if opt == "-T" or opt == "--trackcalls":
countcallers = True
continue
if opt == "-l" or opt == "--listfuncs":
listfuncs = True
continue
if opt == "-g" or opt == "--timing":
timing = True
continue
if opt == "-t" or opt == "--trace":
trace = 1
continue
if opt == "-c" or opt == "--count":
count = 1
continue
if opt == "-r" or opt == "--report":
report = 1
continue
if opt == "-R" or opt == "--no-report":
no_report = 1
continue
if opt == "-f" or opt == "--file":
counts_file = val
continue
if opt == "-m" or opt == "--missing":
missing = 1
continue
if opt == "-C" or opt == "--coverdir":
coverdir = val
continue
if opt == "-s" or opt == "--summary":
summary = 1
continue
if opt == "--ignore-module":
for mod in val.split(","):
ignore_modules.append(mod.strip())
continue
if opt == "--ignore-dir":
for s in val.split(os.pathsep):
s = os.path.expandvars(s)
# should I also call expanduser? (after all, could use $HOME)
s = s.replace("$prefix",
os.path.join(sys.base_prefix, "lib",
"python" + sys.version[:3]))
s = s.replace("$exec_prefix",
os.path.join(sys.base_exec_prefix, "lib",
"python" + sys.version[:3]))
s = os.path.normpath(s)
ignore_dirs.append(s)
continue
assert 0, "Should never get here"
if listfuncs and (count or trace):
_err_exit("cannot specify both --listfuncs and (--trace or --count)")
if not (count or trace or report or listfuncs or countcallers):
_err_exit("must specify one of --trace, --count, --report, "
"--listfuncs, or --trackcalls")
if report and no_report:
_err_exit("cannot specify both --report and --no-report")
if report and not counts_file:
_err_exit("--report requires a --file")
if no_report and len(prog_argv) == 0:
_err_exit("missing name of file to run")
# everything is ready
if report:
results = CoverageResults(infile=counts_file, outfile=counts_file)
results.write_results(missing, summary=summary, coverdir=coverdir)
else:
sys.argv = prog_argv
progname = prog_argv[0]
sys.path[0] = os.path.split(progname)[0]
t = Trace(count, trace, countfuncs=listfuncs,
countcallers=countcallers, ignoremods=ignore_modules,
ignoredirs=ignore_dirs, infile=counts_file,
outfile=counts_file, timing=timing)
try:
with open(progname) as fp:
code = compile(fp.read(), progname, 'exec')
# try to emulate __main__ namespace as much as possible
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
t.runctx(code, globs, globs)
except OSError as err:
_err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
except SystemExit:
pass
results = t.results()
if not no_report:
results.write_results(missing, summary=summary, coverdir=coverdir)
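# Command-line usage, sketched from the options parsed above (file names
# are hypothetical):
#
#     python trace.py --count --file prog.counts prog.py
#     python trace.py --report --missing --file prog.counts --coverdir /tmp/cover
#
# The first invocation runs prog.py while counting executed lines and
# pickles the counts; the second re-reads those counts and writes the
# per-module ".cover" annotations without running anything.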
# Deprecated API
def usage(outfile):
_warn("The trace.usage() function is deprecated",
DeprecationWarning, 2)
_usage(outfile)
class Ignore(_Ignore):
def __init__(self, modules=None, dirs=None):
_warn("The class trace.Ignore is deprecated",
DeprecationWarning, 2)
_Ignore.__init__(self, modules, dirs)
def modname(path):
_warn("The trace.modname() function is deprecated",
DeprecationWarning, 2)
return _modname(path)
def fullmodname(path):
_warn("The trace.fullmodname() function is deprecated",
DeprecationWarning, 2)
return _fullmodname(path)
def find_lines_from_code(code, strs):
_warn("The trace.find_lines_from_code() function is deprecated",
DeprecationWarning, 2)
return _find_lines_from_code(code, strs)
def find_lines(code, strs):
_warn("The trace.find_lines() function is deprecated",
DeprecationWarning, 2)
return _find_lines(code, strs)
def find_strings(filename, encoding=None):
_warn("The trace.find_strings() function is deprecated",
DeprecationWarning, 2)
return _find_strings(filename, encoding=encoding)
def find_executable_linenos(filename):
_warn("The trace.find_executable_linenos() function is deprecated",
DeprecationWarning, 2)
return _find_executable_linenos(filename)
if __name__=='__main__':
main()
| lgpl-3.0 |
GenericStudent/home-assistant | tests/components/uvc/test_camera.py | 13 | 13925 | """The tests for UVC camera module."""
import asyncio
import socket
import unittest
from unittest import mock
import pytest
import requests
from uvcclient import camera, nvr
from homeassistant.components.camera import SUPPORT_STREAM
from homeassistant.components.uvc import camera as uvc
from homeassistant.exceptions import PlatformNotReady
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
class TestUVCSetup(unittest.TestCase):
"""Test the UVC camera platform."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.hass.stop)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_full_config(self, mock_uvc, mock_remote):
"""Test the setup with full configuration."""
config = {
"platform": "uvc",
"nvr": "foo",
"password": "bar",
"port": 123,
"key": "secret",
}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
{"uuid": "three", "name": "Old AirCam", "id": "id3"},
]
def mock_get_camera(uuid):
"""Create a mock camera."""
if uuid == "id3":
return {"model": "airCam"}
return {"model": "UVC"}
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.side_effect = mock_get_camera
mock_remote.return_value.server_version = (3, 2, 0)
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 123, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "id1", "Front", "bar"),
mock.call(mock_remote.return_value, "id2", "Back", "bar"),
]
)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_partial_config(self, mock_uvc, mock_remote):
"""Test the setup with partial configuration."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
]
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.return_value = {"model": "UVC"}
mock_remote.return_value.server_version = (3, 2, 0)
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 7080, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "id1", "Front", "ubnt"),
mock.call(mock_remote.return_value, "id2", "Back", "ubnt"),
]
)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_partial_config_v31x(self, mock_uvc, mock_remote):
"""Test the setup with a v3.1.x server."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
]
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.return_value = {"model": "UVC"}
mock_remote.return_value.server_version = (3, 1, 3)
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 7080, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "one", "Front", "ubnt"),
mock.call(mock_remote.return_value, "two", "Back", "ubnt"),
]
)
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_incomplete_config(self, mock_uvc):
"""Test the setup with incomplete configuration."""
assert setup_component(self.hass, "camera", {"platform": "uvc", "nvr": "foo"})
self.hass.block_till_done()
assert not mock_uvc.called
assert setup_component(
self.hass, "camera", {"platform": "uvc", "key": "secret"}
)
self.hass.block_till_done()
assert not mock_uvc.called
assert setup_component(
self.hass, "camera", {"platform": "uvc", "port": "invalid"}
)
self.hass.block_till_done()
assert not mock_uvc.called
@mock.patch.object(uvc, "UnifiVideoCamera")
@mock.patch("uvcclient.nvr.UVCRemote")
def setup_nvr_errors_during_indexing(self, error, mock_remote, mock_uvc):
"""Set up test for NVR errors during indexing."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_remote.return_value.index.side_effect = error
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert not mock_uvc.called
def test_setup_nvr_error_during_indexing_notauthorized(self):
"""Test for error: nvr.NotAuthorized."""
self.setup_nvr_errors_during_indexing(nvr.NotAuthorized)
def test_setup_nvr_error_during_indexing_nvrerror(self):
"""Test for error: nvr.NvrError."""
self.setup_nvr_errors_during_indexing(nvr.NvrError)
# nvr.NvrError is expected to surface as PlatformNotReady (see the
# sketch after these error tests).
def test_setup_nvr_error_during_indexing_connectionerror(self):
"""Test for error: requests.exceptions.ConnectionError."""
self.setup_nvr_errors_during_indexing(requests.exceptions.ConnectionError)
# requests.exceptions.ConnectionError is expected to surface as
# PlatformNotReady (see the sketch after these error tests).
@mock.patch.object(uvc, "UnifiVideoCamera")
@mock.patch("uvcclient.nvr.UVCRemote.__init__")
def setup_nvr_errors_during_initialization(self, error, mock_remote, mock_uvc):
"""Set up test for NVR errors during initialization."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_remote.return_value = None
mock_remote.side_effect = error
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert not mock_remote.index.called
assert not mock_uvc.called
def test_setup_nvr_error_during_initialization_notauthorized(self):
"""Test for error: nvr.NotAuthorized."""
self.setup_nvr_errors_during_initialization(nvr.NotAuthorized)
def test_setup_nvr_error_during_initialization_nvrerror(self):
"""Test for error: nvr.NvrError."""
self.setup_nvr_errors_during_initialization(nvr.NvrError)
# nvr.NvrError is expected to surface as PlatformNotReady (see the
# sketch after these error tests).
def test_setup_nvr_error_during_initialization_connectionerror(self):
"""Test for error: requests.exceptions.ConnectionError."""
self.setup_nvr_errors_during_initialization(requests.exceptions.ConnectionError)
# requests.exceptions.ConnectionError is expected to surface as
# PlatformNotReady (see the sketch below).
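# Illustrative note, not part of the original tests: the four error tests
# above ended with bare ``pytest.raises(PlatformNotReady)`` calls, which
# never execute as assertions. If the platform re-raised these errors, a
# working assertion would use the context-manager form, e.g.
#
#     with pytest.raises(PlatformNotReady):
#         uvc.setup_platform(hass, config, mock.MagicMock())
#
# (the exact setup_platform call signature here is an assumption).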
class TestUVC(unittest.TestCase):
"""Test class for UVC."""
def setup_method(self, method):
"""Set up the mock camera."""
self.nvr = mock.MagicMock()
self.uuid = "uuid"
self.name = "name"
self.password = "seekret"
self.uvc = uvc.UnifiVideoCamera(self.nvr, self.uuid, self.name, self.password)
self.nvr.get_camera.return_value = {
"model": "UVC Fake",
"recordingSettings": {"fullTimeRecordEnabled": True},
"host": "host-a",
"internalHost": "host-b",
"username": "admin",
"channels": [
{
"id": "0",
"width": 1920,
"height": 1080,
"fps": 25,
"bitrate": 6000000,
"isRtspEnabled": True,
"rtspUris": [
"rtsp://host-a:7447/uuid_rtspchannel_0",
"rtsp://foo:7447/uuid_rtspchannel_0",
],
},
{
"id": "1",
"width": 1024,
"height": 576,
"fps": 15,
"bitrate": 1200000,
"isRtspEnabled": False,
"rtspUris": [
"rtsp://host-a:7447/uuid_rtspchannel_1",
"rtsp://foo:7447/uuid_rtspchannel_1",
],
},
],
}
self.nvr.server_version = (3, 2, 0)
self.uvc.update()
def test_properties(self):
"""Test the properties."""
assert self.name == self.uvc.name
assert self.uvc.is_recording
assert "Ubiquiti" == self.uvc.brand
assert "UVC Fake" == self.uvc.model
assert SUPPORT_STREAM == self.uvc.supported_features
def test_stream(self):
"""Test the RTSP stream URI."""
# stream_source is a coroutine; drive it to completion (a bare ``yield
# from`` would make this test a generator that unittest never runs).
stream_source = asyncio.new_event_loop().run_until_complete(
self.uvc.stream_source()
)
assert stream_source == "rtsp://foo:7447/uuid_rtspchannel_0"
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login(self, mock_camera, mock_store):
"""Test the login."""
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-a", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClient")
def test_login_v31x(self, mock_camera, mock_store):
"""Test login with v3.1.x server."""
self.nvr.server_version = (3, 1, 3)
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-a", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login_tries_both_addrs_and_caches(self, mock_camera, mock_store):
"""Test the login tries."""
responses = [0]
def mock_login(*a):
"""Mock login."""
try:
responses.pop(0)
raise OSError
except IndexError:
pass
mock_store.return_value.get_camera_password.return_value = None
mock_camera.return_value.login.side_effect = mock_login
self.uvc._login()
assert 2 == mock_camera.call_count
assert "host-b" == self.uvc._connect_addr
mock_camera.reset_mock()
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-b", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login_fails_both_properly(self, mock_camera, mock_store):
"""Test if login fails properly."""
mock_camera.return_value.login.side_effect = socket.error
assert self.uvc._login() is None
assert self.uvc._connect_addr is None
def test_camera_image_tries_login_bails_on_failure(self):
"""Test retrieving failure."""
with mock.patch.object(self.uvc, "_login") as mock_login:
mock_login.return_value = False
assert self.uvc.camera_image() is None
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
def test_camera_image_logged_in(self):
"""Test the login state."""
self.uvc._camera = mock.MagicMock()
assert self.uvc._camera.get_snapshot.return_value == self.uvc.camera_image()
def test_camera_image_error(self):
"""Test the camera image error."""
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = camera.CameraConnectError
assert self.uvc.camera_image() is None
def test_camera_image_reauths(self):
"""Test the re-authentication."""
responses = [0]
def mock_snapshot():
"""Mock snapshot."""
try:
responses.pop()
raise camera.CameraAuthError()
except IndexError:
pass
return "image"
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = mock_snapshot
with mock.patch.object(self.uvc, "_login") as mock_login:
assert "image" == self.uvc.camera_image()
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
assert [] == responses
def test_camera_image_reauths_only_once(self):
"""Test if the re-authentication only happens once."""
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = camera.CameraAuthError
with mock.patch.object(self.uvc, "_login") as mock_login:
with pytest.raises(camera.CameraAuthError):
self.uvc.camera_image()
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
| apache-2.0 |
bocaaust/FreshLife | django_project/env/lib/python2.7/site-packages/pip/_vendor/html5lib/tokenizer.py | 1710 | 76929 | from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True, parser=None):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
self.parser = parser
# Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
We do our usual processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
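# Illustrative driver, not part of the class: each yielded token is a dict
# keyed by "type" (an integer code from tokenTypes) plus type-specific
# fields, e.g.
#
#     for tok in HTMLTokenizer("<p class=x>hi</p>"):
#         print(tok["type"], tok.get("name", tok.get("data")))
#
# walks the StartTag, Characters and EndTag tokens for this input.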
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichr only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
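# Sketch of the mapping implemented above (per the HTML5 numeric
# character reference rules):
#
#     "&#65;"    -> "A"       (decimal)
#     "&#x41;"   -> "A"       (hexadecimal)
#     "&#x80;"   -> "\u20ac"  (0x80 remapped via replacementCharacters)
#     "&#xD800;" -> "\uFFFD"  (lone surrogate)
#
# The last two also queue an illegal-codepoint-for-numeric-entity
# ParseError, and a missing ";" queues numeric-entity-without-semicolon.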
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process we might have a named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of &noti for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
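# Illustrative input: for "<!foo bar>" the markup-declaration-open state
# ungets its lookahead and lands here, so this method emits a Comment
# token whose data is "foo bar" and then consumes the closing ">".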
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char is EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
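# For orientation: each state method above consumes input and returns True,
# so a driver can pump the machine and drain the token queue between steps.
# Minimal sketch (hedged: assumes tokenQueue is a collections.deque, as in
# upstream html5lib, whose real driver also drains stream errors):
#
#     def __iter__(self):
#         self.tokenQueue = deque([])
#         while self.state():
#             while self.tokenQueue:
#                 yield self.tokenQueue.popleft()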
| apache-2.0 |
flipchan/LayerProx | extra_editions/2600-hacker-edition/marionette_tg/channel_test.py | 10 | 2287 | import sys
import time
import threading
from twisted.internet import reactor
sys.path.append('.')
import marionette_tg.channel as channel
class ServerThread(threading.Thread):
def run(self):
print 'Starting server...'
self.channel_ = None
while True:
ch = channel.accept_new_channel(
'udp', 8080)
if ch:
self.channel_ = ch
break
def recv(self):
return self.channel_.recv()
class ClientThread(threading.Thread):
channel_ = None
def run(self):
print 'Starting client...'
self.channel_ = None
channel.open_new_channel(
'udp', 8080, self.set_channel)
def send(self, data):
return self.channel_.send(data)
def set_channel(self, ch):
self.channel_ = ch
finished = False
already_sent = False
def test_udp_send(msg_lens):
global already_sent
expected_msg = 'X'*(msg_lens[0]-28) # Subtract 28 for IP (20) and UDP (8) headers
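# (IPv4 header = 20 bytes, UDP header = 8 bytes, hence 28; e.g. for
# msg_lens[0] == 512 the payload actually sent is 484 bytes.)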
if client.channel_ and server.channel_:
if not already_sent:
try:
print "Test: sending message %d bytes" % msg_lens[0]
client.send(expected_msg)
already_sent = True
except:
print "FAILURE: Error sending message"
reactor.stop()
return
recvd = server.recv()
if len(recvd) != 0:
assert len(recvd) == len(expected_msg)
print 'SUCCESS'
already_sent = False
if len(msg_lens) > 1:
reactor.callFromThread(test_udp_send, msg_lens[1:])
else:
reactor.stop()
return
else:
reactor.callFromThread(test_udp_send, msg_lens)
else:
reactor.callFromThread(test_udp_send, msg_lens)
def timeout_failure():
print "FAILURE: time out"
reactor.stop()
if __name__ == '__main__':
server = ServerThread()
client = ClientThread()
reactor.callFromThread(server.start)
reactor.callFromThread(client.start)
msg_lens = [512, 1024, 2048, 4096, 8192, 16384, 32768, 65535]
reactor.callInThread(test_udp_send, msg_lens)
reactor.callLater(30, timeout_failure)
reactor.run()
| apache-2.0 |
Lujeni/ansible | lib/ansible/plugins/cliconf/slxos.py | 31 | 3651 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: slxos
short_description: Use slxos cliconf to run command on Extreme SLX-OS platform
description:
- This slxos plugin provides low level abstraction apis for
sending and receiving CLI commands from Extreme SLX-OS network devices.
version_added: "2.6"
"""
import re
import json
from itertools import chain
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'slxos'
reply = self.get('show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'SLX\-OS Operating System Version: (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
reply = self.get('show chassis')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'^Chassis Name:(\s+)(\S+)', data, re.M)
if match:
device_info['network_os_model'] = match.group(2)
reply = self.get('show running-config | inc "switch-attributes host-name"')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'switch-attributes host-name (\S+)', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
def get_config(self, source='running', flags=None):
if source not in ('running', 'startup'):
raise ValueError("fetching configuration from %s is not supported" % source)
if source == 'running':
cmd = 'show running-config'
else:
cmd = 'show startup-config'
flags = [] if flags is None else flags
cmd += ' ' + ' '.join(flags)
cmd = cmd.strip()
return self.send_command(cmd)
def edit_config(self, command):
for cmd in chain(['configure terminal'], to_list(command), ['end']):
if isinstance(cmd, dict):
command = cmd['command']
prompt = cmd['prompt']
answer = cmd['answer']
newline = cmd.get('newline', True)
else:
command = cmd
prompt = None
answer = None
newline = True
self.send_command(command, prompt, answer, False, newline)
def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
return json.dumps(result)
| gpl-3.0 |
akhilari7/pa-dude | lib/python2.7/site-packages/django/template/backends/django.py | 240 | 5574 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import sys
import warnings
from importlib import import_module
from pkgutil import walk_packages
from django.apps import apps
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.template.context import Context, RequestContext, make_context
from django.template.engine import Engine, _dirs_undefined
from django.template.library import InvalidTemplateLibrary
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from .base import BaseEngine
class DjangoTemplates(BaseEngine):
app_dirname = 'templates'
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
options.setdefault('debug', settings.DEBUG)
options.setdefault('file_charset', settings.FILE_CHARSET)
libraries = options.get('libraries', {})
options['libraries'] = self.get_templatetag_libraries(libraries)
super(DjangoTemplates, self).__init__(params)
self.engine = Engine(self.dirs, self.app_dirs, **options)
def from_string(self, template_code):
return Template(self.engine.from_string(template_code), self)
def get_template(self, template_name, dirs=_dirs_undefined):
try:
return Template(self.engine.get_template(template_name, dirs), self)
except TemplateDoesNotExist as exc:
reraise(exc, self)
def get_templatetag_libraries(self, custom_libraries):
"""
Return a collation of template tag libraries from installed
applications and the supplied custom_libraries argument.
"""
libraries = get_installed_libraries()
libraries.update(custom_libraries)
return libraries
class Template(object):
def __init__(self, template, backend):
self.template = template
self.backend = backend
@property
def origin(self):
return self.template.origin
def render(self, context=None, request=None):
# A deprecation path is required here to cover the following usage:
# >>> from django.template import Context
# >>> from django.template.loader import get_template
# >>> template = get_template('hello.html')
# >>> template.render(Context({'name': 'world'}))
# In Django 1.7 get_template() returned a django.template.Template.
# In Django 1.8 it returns a django.template.backends.django.Template.
# In Django 1.10 the isinstance checks should be removed. If passing a
# Context or a RequestContext works by accident, it won't be an issue
# per se, but it won't be officially supported either.
if isinstance(context, RequestContext):
if request is not None and request is not context.request:
raise ValueError(
"render() was called with a RequestContext and a request "
"argument which refer to different requests. Make sure "
"that the context argument is a dict or at least that "
"the two arguments refer to the same request.")
warnings.warn(
"render() must be called with a dict, not a RequestContext.",
RemovedInDjango110Warning, stacklevel=2)
elif isinstance(context, Context):
warnings.warn(
"render() must be called with a dict, not a Context.",
RemovedInDjango110Warning, stacklevel=2)
else:
context = make_context(context, request)
try:
return self.template.render(context)
except TemplateDoesNotExist as exc:
reraise(exc, self.backend)
def reraise(exc, backend):
"""
Reraise TemplateDoesNotExist while maintaining template debug information.
"""
new = exc.__class__(*exc.args, tried=exc.tried, backend=backend)
if hasattr(exc, 'template_debug'):
new.template_debug = exc.template_debug
six.reraise(exc.__class__, new, sys.exc_info()[2])
def get_installed_libraries():
"""
Return the built-in template tag libraries and those from installed
applications. Libraries are stored in a dictionary where keys are the
individual module names, not the full module paths. Example:
django.templatetags.i18n is stored as i18n.
"""
libraries = {}
candidates = ['django.templatetags']
candidates.extend(
'%s.templatetags' % app_config.name
for app_config in apps.get_app_configs())
for candidate in candidates:
try:
pkg = import_module(candidate)
except ImportError:
# No templatetags package defined. This is safe to ignore.
continue
if hasattr(pkg, '__path__'):
for name in get_package_libraries(pkg):
libraries[name[len(candidate) + 1:]] = name
return libraries
def get_package_libraries(pkg):
"""
Recursively yield template tag libraries defined in submodules of a
package.
"""
for entry in walk_packages(pkg.__path__, pkg.__name__ + '.'):
try:
module = import_module(entry[1])
except ImportError as e:
raise InvalidTemplateLibrary(
"Invalid template library specified. ImportError raised when "
"trying to load '%s': %s" % (entry[1], e)
)
if hasattr(module, 'register'):
yield entry[1]
| mit |
umjembersoft/picoCTF-Platform-2 | api/tests/team_test.py | 10 | 3187 | """
Team Testing Module
"""
import pytest
import api.user
import api.team
import api.common
import bcrypt
from api.common import WebException, InternalException
from common import clear_collections, ensure_empty_collections
from common import base_team, base_user
from conftest import setup_db, teardown_db
class TestTeams(object):
"""
API Tests for team.py
"""
def setup_class(self):
setup_db()
def teardown_class(self):
teardown_db()
@ensure_empty_collections("teams")
@clear_collections("teams")
def test_create_batch_teams(self, teams=10):
"""
Tests team creation.
Covers:
team.create_team
team.get_team
team.get_all_teams
"""
tids = []
for i in range(teams):
team = base_team.copy()
team["team_name"] += str(i)
tids.append(api.team.create_team(team))
assert len(set(tids)) == len(tids), "tids are not unique."
assert len(api.team.get_all_teams()) == len(tids), "Not all teams were created."
for i, tid in enumerate(tids):
name = base_team['team_name'] + str(i)
team_from_tid = api.team.get_team(tid=tid)
team_from_name = api.team.get_team(name=name)
assert team_from_tid == team_from_name, "Team lookup from tid and name are not the same."
@ensure_empty_collections("teams", "users")
@clear_collections("teams", "users")
def test_get_team_uids(self):
"""
Tests the code that retrieves the list of uids on a team
Covers:
team.create_team
user.create_user_request
team.get_team_uids
"""
tid = api.team.create_team(base_team.copy())
uids = []
for i in range(api.team.max_team_users):
test_user = base_user.copy()
test_user['username'] += str(i)
uids.append(api.user.create_user_request(test_user))
team_uids = api.team.get_team_uids(tid)
assert len(team_uids) == api.team.max_team_users, "Team does not have correct number of members"
assert sorted(uids) == sorted(team_uids), "Team does not have the correct members"
@ensure_empty_collections("teams", "users")
@clear_collections("teams", "users")
def test_create_user_request_team_size_validation(self):
"""
Tests the team size restriction
Covers:
team.create_team
user.create_user_request
"""
api.team.create_team(base_team.copy())
uid = None
for i in range(api.team.max_team_users):
test_user = base_user.copy()
test_user['username'] += str(i)
uid = api.user.create_user_request(test_user)
with pytest.raises(WebException):
api.user.create_user_request(base_user.copy())
assert False, "Team has too many users"
api.user.disable_account(uid)
#Should be able to add another user after disabling one.
test_user = base_user.copy()
test_user['username'] += "addition"
api.user.create_user_request(test_user)
| mit |
goulu/networkx | networkx/algorithms/isomorphism/tests/test_isomorphism.py | 99 | 1183 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
from networkx.algorithms import isomorphism as iso
class TestIsomorph:
def setUp(self):
self.G1=nx.Graph()
self.G2=nx.Graph()
self.G3=nx.Graph()
self.G4=nx.Graph()
self.G1.add_edges_from([ [1,2],[1,3],[1,5],[2,3] ])
self.G2.add_edges_from([ [10,20],[20,30],[10,30],[10,50] ])
self.G3.add_edges_from([ [1,2],[1,3],[1,5],[2,5] ])
self.G4.add_edges_from([ [1,2],[1,3],[1,5],[2,4] ])
def test_could_be_isomorphic(self):
assert_true(iso.could_be_isomorphic(self.G1,self.G2))
assert_true(iso.could_be_isomorphic(self.G1,self.G3))
assert_false(iso.could_be_isomorphic(self.G1,self.G4))
assert_true(iso.could_be_isomorphic(self.G3,self.G2))
def test_fast_could_be_isomorphic(self):
assert_true(iso.fast_could_be_isomorphic(self.G3,self.G2))
def test_faster_could_be_isomorphic(self):
assert_true(iso.faster_could_be_isomorphic(self.G3,self.G2))
def test_is_isomorphic(self):
assert_true(iso.is_isomorphic(self.G1,self.G2))
assert_false(iso.is_isomorphic(self.G1,self.G4))
| bsd-3-clause |
AmandaMoen/AmandaMoen | notes/resources/UW_IntroClass/class06/code/html_render/Solutions/gen_4.py | 1 | 2982 | #!/usr/bin/env python
"""
Python class example.
Add the ability to handle tag attributes.
"""
class Element(object):
"""
An element with optional attributes and multiple items in the content
"""
tag = ""
indent = " "
def __init__(self, content=None, **attributes):
"""
initialize an element with optional attributes, and any number of sub-elements and content
:param content: content of the element: single string or another element.
an empty string will be ignored
:param [attributes]: optional attributes as keyword parameters.
example: Element("some text", id="intro", style="text-align: center;")
"""
if not content:
self.children = []
else:
self.children = [content]
self.attributes = attributes
def append(self, element):
self.children.append(element)
def render(self, file_out, ind = ""):
"""
an html rendering method for elements that have attributes and content
"""
file_out.write("\n")
file_out.write(ind)
file_out.write("<%s"%self.tag)
for key, value in self.attributes.items():
file_out.write(' %s="%s"'%(key, value) )
file_out.write(">")
for child in self.children:
try:
child.render(file_out, ind + self.indent)
except AttributeError:
file_out.write("\n")
file_out.write(ind + self.indent)
file_out.write(str(child))
file_out.write("\n")
file_out.write(ind)
file_out.write('</%s>'%self.tag)
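# Rendered output sketch (illustrative) for P("some text", style="x")
# rendered with an empty starting indent:
#
# <p style="x">
#   some text
# </p>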
class Html(Element):
tag = "html"
class Head(Element):
tag = "head"
class Body(Element):
tag = "body"
class P(Element):
tag = "p"
class OneLineTag(Element):
def render(self, file_out, ind = ""):
"""
an html rendering method for elements that have attributes and content
"""
file_out.write("\n")
file_out.write(ind)
file_out.write("<%s"%self.tag)
for key, value in self.attributes.items():
file_out.write(' %s="%s"'%(key, value) )
file_out.write(">")
for child in self.children:
try:
child.render(file_out)
except AttributeError:
file_out.write(str(child))
file_out.write('</%s>'%self.tag)
class Title(OneLineTag):
tag = "title"
if __name__ == "__main__":
import sys, cStringIO
page = Html()
head = Head()
head.append(Title("PythonClass = Revision 1087:"))
page.append(head)
body = Body()
body.append(P("Here is a paragraph of text -- there could be more of them, but this is enough to show that we can do some text",
style="text-align: center; font-style: oblique;"))
page.append(body)
f = cStringIO.StringIO()
page.render(f)
f.reset()
print f.read()
f.reset()
open("test_html.html", 'w').write(f.read())
| gpl-2.0 |
chyeh727/django | django/utils/feedgenerator.py | 183 | 17059 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
import warnings
from django.utils import datetime_safe, six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
def rfc2822_date(date):
# We can't use strftime() because it produces locale-dependent results, so
# we have to map english month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
offset = date.utcoffset()
# Historically, this function assumes that naive datetimes are in UTC.
if offset is None:
return time_str + '-0000'
else:
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
offset = date.utcoffset()
# Historically, this function assumes that naive datetimes are in UTC.
if offset is None:
return time_str + 'Z'
else:
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
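# Illustrative outputs for a naive datetime (treated as UTC, per the
# comments above):
#     rfc2822_date(datetime.datetime(2015, 6, 1, 12, 0))
#     # -> 'Mon, 01 Jun 2015 12:00:00 -0000'
#     rfc3339_date(datetime.datetime(2015, 6, 1, 12, 0))
#     # -> '2015-06-01T12:00:00Z'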
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
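# e.g. get_tag_uri('http://example.com/a/b#frag', datetime.datetime(2015, 6, 1))
# -> 'tag:example.com,2015-06-01:/a/b/frag'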
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, updateddate=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate and updateddate, which are datetime.datetime
objects, and enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate or updateddate. If no items
have either of these attributes this returns the current date/time.
"""
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
return latest_date or datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
content_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
@property
def mime_type(self):
warnings.warn(
'The mime_type attribute of RssFeed is deprecated. '
'Use content_type instead.',
RemovedInDjango20Warning, stacklevel=2
)
return self.content_type
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" %
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"],
{"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(
item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
content_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
@property
def mime_type(self):
warnings.warn(
'The mime_type attribute of Atom1Feed is deprecated. '
'Use content_type instead.',
RemovedInDjango20Warning, stacklevel=2
)
return self.content_type
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| bsd-3-clause |
steveblamey/uzerp-locust | locustfile.py | 1 | 2547 | import csv
from datetime import datetime
from subprocess import call
from bs4 import BeautifulSoup
import numpy as np
from locust import events
from locust import HttpLocust
from locust import TaskSet
from locust.log import console_logger
import config
from uztasks import tasks
# Hook into some events to collect statistics
def collect_stats(request_type, name, response_time, response_length):
stats.append(
[datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
request_type,
name,
response_time])
def write_stats(filename=config.STATS_FILE):
"""
Log some info and write collected response times to a CSV file.
"""
response_times = [r[3] for r in stats]
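# NB: "std_dev" below is pre-multiplied by 3, so the |x - mean| > std_dev
# comparison implements the usual three-sigma outlier rule.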
std_dev = 3 * np.std(response_times)
mean = np.mean(response_times)
outliers = [r for r in stats if abs(r[3] - mean) > std_dev]
console_logger.info("\n uzERP Performance Summary:")
console_logger.info('3 x Std. Dev: {}, with {} Outliers ({:.2f}%)'.format(
std_dev,
len(outliers),
(float(len(outliers)) / len(response_times) * 100)))
console_logger.info('Min request time: {}'.format(np.amin(response_times)))
console_logger.info('Max request time: {}'.format(np.amax(response_times)))
csv_file = open(filename, 'w')
csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
stats.insert(0, ['Time', 'Request Type', 'URL', 'Response Time'])
for stat in stats:
csv_writer.writerow(stat)
csv_file.close()
def chart_stats():
call('gnuplot -c utils/gnuplot/chart.plt -p', shell=True)
if config.COLLECT_STATS:
stats = []
events.request_success += collect_stats
events.quitting += write_stats
if config.CHART_STATS:
events.quitting += chart_stats
class UserBehaviour(TaskSet):
"""Primary TaskSet, all other tasks are nested within."""
tasks = {tasks.SalesTasks: 1,
tasks.PurchasingTasks: 1}
def on_start(self):
"""Log-in to the application."""
response = self.client.get('/')
soup = BeautifulSoup(response.content, 'html.parser')
csrf_hidden = soup.find('input', id='csrf_token_id')
self.client.post('/?action=login', {
'username': config.APP_USERNAME,
'password': config.APP_PASSWORD,
'csrf_token': csrf_hidden['value'],
'rememberUser': 'true',
})
class WebsiteUser(HttpLocust):
host = config.APP_HOST
min_wait = config.MIN_WAIT
max_wait = config.MAX_WAIT
task_set = UserBehaviour
| mit |
WatanabeYasumasa/edx-platform | lms/djangoapps/pdfgen/views.py | 1 | 11480 | """
pdfgen views
"""
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.utils import ImageReader
from reportlab.lib.pagesizes import A4, landscape
from PyPDF2 import PdfFileWriter, PdfFileReader
from django.conf import settings
from boto.s3.connection import S3Connection, Location
from boto.s3.key import Key
from boto.exception import BotoClientError, BotoServerError, S3ResponseError
from tempfile import mkstemp
import os
import json
import logging
import StringIO
from datetime import datetime
"""
from django.http import HttpResponse
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.core.context_processors import csrf
from django_future.csrf import ensure_csrf_cookie
"""
log = logging.getLogger("pdfgen")
class CertException(Exception):
pass
class PDFBaseNotFound(CertException):
pass
class PDFBaseIsNotPDF(CertException):
pass
class PDFBaseIsNotImage(CertException):
pass
class CertificateBase(object):
"""Certificate base class."""
def create(self):
"""Create certificate."""
raise NotImplementedError
def get(self):
"""Get certificate."""
raise NotImplementedError
def delete(self):
"""Delete certificate."""
raise NotImplementedError
def verify(self):
"""Verify certificate."""
raise NotImplementedError
class CertificateHonor(CertificateBase):
"""Certificate of Honor"""
def __init__(self, username, course_id, key, display_name=None,
course_name=None, grade=None):
self.username = username
self.display_name = display_name
self.course_id = course_id
self.course_name = course_name
self.grade = grade
self.key = key
self.store = CertS3Store()
"""
self.enroll_mode = "honor"
self.is_staff = False
"""
def create(self):
"""Create certificate."""
if not self.course_name:
msg = "course_name is required."
log.error(msg)
return json.dumps({"error": msg})
if not self.grade:
msg = "grade is required."
log.error(msg)
return json.dumps({"error": msg})
try:
temp = mkstemp(suffix="-certificate.pdf")
os.close(temp[0])  # close the raw fd returned by mkstemp; the path is reopened below
with open(temp[1], 'wb') as fp:
form = CertPDF(fp, self.display_name, self.course_id,
self.course_name)
form.create_pdf()
response_json = self.store.save("_".join([self.username, self.key[:5]]),
self.course_id, temp[1])
except OSError, e:
msg = "OS Error: (%s)" % (e)
return json.dumps({"error": msg})
finally:
try:
os.remove(temp[1])
except UnboundLocalError:
pass
return response_json
def delete(self):
"""Delete certificate."""
return self.store.delete("_".join([self.username, self.key[:5]]), self.course_id)
class CertPDF(object):
def __init__(self, fp, username, course_id, course_name):
self.fp = fp
self.username = username
self.course_id = course_id
self.course_name = course_name
self.author = settings.PDFGEN_CERT_AUTHOR
self.title = settings.PDFGEN_CERT_TITLE
self.base_img_dir = settings.PDFGEN_BASE_IMG_DIR
self.base_pdf_dir = settings.PDFGEN_BASE_PDF_DIR
pdfmetrics.registerFont(TTFont("Ubuntu-R",
"/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-R.ttf"))
pdfmetrics.registerFont(TTFont("VL-Gothic-Regular",
"/usr/share/fonts/truetype/vlgothic/VL-Gothic-Regular.ttf"))
"""
pdfmetrics.registerFont(UnicodeCIDFont("HeiseiKakuGo-W5"))
pdfmetrics.registerFont(UnicodeCIDFont("HeiseiMin-W3"))
"""
def create_pdf(self):
""" crate pdf """
if os.path.isdir(self.base_pdf_dir):
base_pdf = self.base_pdf_dir + "/" + "-".join(
self.course_id.split('/')) + ".pdf"
self.create_based_on_pdf(base_pdf)
return
if os.path.isdir(self.base_img_dir):
base_img = self.base_img_dir + "/" + "-".join(
self.course_id.split('/')) + ".pdf"
self.create_based_on_image(base_img)
return
msg = "settings.PDFGEN_BASE_PDF_DIR" + \
"(%s) and settings.PDFGEN_BASE_IMG_DIR(%s) dose not exists." % (
self.base_pdf_dir, self.base_img_dir)
log.error(msg)
raise PDFBaseNotFound(msg)
def create_based_on_pdf(self, base_pdf):
"""create pdf based on pdf"""
if not os.path.isfile(base_pdf):
msg = "%s is not exists." % (base_pdf)
log.error(msg)
raise PDFBaseNotFound(msg)
fileobj = StringIO.StringIO()
pdf = canvas.Canvas(fileobj, bottomup=True,
pageCompression=1, pagesize=landscape(A4))
pdf.setFont("VL-Gothic-Regular", 27)
pdf.drawString(260, 450, self.username)
now = datetime.now()
pdf.setFont("Ubuntu-R", 15)
pdf.drawRightString(750, 125, now.strftime('%B %d, %Y'))
pdf.showPage()
pdf.save()
fileobj.seek(0)
merge = PdfFileReader(fileobj)
try:
base = PdfFileReader(file(base_pdf, "rb"))
page = base.getPage(0)
page.mergePage(merge.getPage(0))
output = PdfFileWriter()
output.addMetadata({'/Title': self.title, '/Author': self.author,
'/Subject': u"{}".format(self.course_name)})
output.addPage(page)
output.write(self.fp)
except (TypeError, AssertionError), e:
log.error(e)
raise PDFBaseIsNotPDF(e)
def create_based_on_image(self, base_img):
"""create pdf based on image"""
if not os.path.isfile(base_img):
msg = "%s is not exists." % (base_img)
log.error(msg)
raise PDFBaseNotFound(msg)
pdf = canvas.Canvas(self.fp, bottomup=True,
pageCompression=1, pagesize=landscape(A4))
pdf.setAuthor(self.author)
pdf.setTitle(self.title)
pdf.setSubject(u"{}".format(self.course_name))
try:
bg = ImageReader(base_img)
pdf.drawImage(bg, 0, 0)
except IOError, e:
log.error(e)
raise PDFBaseIsNotImage(e)
pdf.setFont("VL-Gothic-Regular", 27)
pdf.drawString(260, 440, self.username)
now = datetime.now()
pdf.setFont("Ubuntu-R", 15)
pdf.drawRightString(740, 115, now.strftime('%B %d, %Y'))
pdf.showPage()
pdf.save()
class CertStoreBase(object):
"""Certificate Store."""
def save(self):
"""Save certificate."""
raise NotImplementedError
def get(self):
"""Get certificate."""
raise NotImplementedError
def get_url(self):
"""Get download url of the certificate"""
raise NotImplementedError
def get_all(self):
"""Get all certificates."""
raise NotImplementedError
def delete(self):
"""Delete certificate."""
raise NotImplementedError
class CertS3Store(CertStoreBase):
"""S3 store."""
def __init__(self):
self.bucket_name = settings.PDFGEN_BUCKET_NAME
self.location = Location.APNortheast
self.conn = self._connect()
def _connect(self):
return S3Connection(
settings.PDFGEN_ACCESS_KEY_ID,
settings.PDFGEN_SECRET_ACCESS_KEY)
def save(self, username, course_id, filepath):
"""Save certificate."""
try:
bucket = self.conn.get_bucket(self.bucket_name)
except S3ResponseError as e:
if e.status == 404:
bucket = self.conn.create_bucket(self.bucket_name,
location=self.location)
log.info("Cresate bucket(%s)", self.bucket_name)
else:
return json.dumps({"error": "{}".format(e)})
try:
s3key = Key(bucket)
s3key.key = "{cid}/{name}.pdf".format(
cid=course_id, name=username)
# headers meta? encrypt_key true?
s3key.set_contents_from_filename(filepath)
url = s3key.generate_url(expires_in=0, query_auth=False,
force_http=True)
finally:
s3key.close()
return json.dumps({'download_url': url, })
def delete(self, username, course_id):
"""Delete certificate."""
try:
bucket = self.conn.get_bucket(self.bucket_name)
s3key = Key(bucket)
s3key.key = "{cid}/{name}.pdf".format(
cid=course_id, name=username)
if s3key.exists():
s3key.delete()
else:
return json.dumps({'error': "file does not exists.({})".format(
s3key.key)})
finally:
s3key.close()
return json.dumps({'error': None})
def create_cert_pdf(username, course_id, key, display_name, course_name,
grade):
"""Create pdf of certificate."""
try:
cert = CertificateHonor(username, course_id, key, display_name,
course_name, grade)
contents = cert.create()
except BotoServerError as e:
log.error("Cannot get bucket: BotoServerError = %s", e)
contents = json.dumps({"error": "{}".format(e)})
except BotoClientError as e:
log.error("Cannot access S3: BotoClientError = %s", e)
contents = json.dumps({"error": "{}".format(e)})
return contents
def delete_cert_pdf(username, course_id, key):
"""Delete pdf of certificate."""
try:
cert = CertificateHonor(username, course_id, key)
contents = cert.delete()
except BotoServerError as e:
log.error("Cannot get bucket: BotoServerError = %s", e)
contents = json.dumps({"error": "{}".format(e)})
except BotoClientError as e:
log.error("Cannot access S3: BotoClientError = %s", e)
contents = json.dumps({"error": "{}".format(e)})
return contents
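# Illustrative call sketch; the argument values below are assumptions,
# not taken from this module:
#
#   contents = create_cert_pdf('alice', 'org/course/run', 'somecertkey',
#                              u'Alice', u'Intro Course', '0.9')
#   result = json.loads(contents)  # {'download_url': ...} on success,
#                                  # {'error': ...} on failure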
"""
@require_POST
@ensure_csrf_cookie
def pdfgen(request):
log.info("csrf = %s", csrf(request))
post = request.POST
username = post.get('username', '')
display_name = post.get('display_name', '')
course_id = post.get('course_id', '')
course_name = post.get('course_name', '')
grade = post.get('grade', '')
key = post.get('key', '')
if not (username and course_id and course_name and grade):
msg = "Invalid Post data."
contents = json.dumps({'error':msg})
return HttpResponse(contents, mimetype='application/json')
try:
cert = CertificateHonor(username, display_name, course_id,
course_name, grade, key)
contents = cert.create()
except BotoServerError as e:
log.error("Cannot get bucket: BotoServerError = %s", e)
contents = json.dumps({"error":e})
except BotoClientError as e:
log.error("Cannot access S3: BotoClientError = %s", e)
contents = json.dumps({"error":e})
return HttpResponse(contents, mimetype='application/json')
"""
| agpl-3.0 |
danielneis/osf.io | website/notifications/emails.py | 12 | 6559 | from babel import dates, core, Locale
from website import mails
from website import models as website_models
from website.notifications import constants
from website.notifications import utils
from website.notifications.model import NotificationDigest
from website.notifications.model import NotificationSubscription
from website.util import web_url_for
def notify(event, user, node, timestamp, **context):
"""Retrieve appropriate ***subscription*** and passe user list
:param event: event that triggered the notification
:param user: user who triggered notification
:param node: instance of Node
:param timestamp: time event happened
:param context: optional variables specific to templates
target_user: used with comment_replies
:return: List of user ids notifications were sent to
"""
event_type = utils.find_subscription_type(event)
subscriptions = compile_subscriptions(node, event_type, event)
sent_users = []
target_user = context.get('target_user', None)
if target_user:
target_user_id = target_user._id
for notification_type in subscriptions:
if notification_type != 'none' and subscriptions[notification_type]:
if user in subscriptions[notification_type]:
subscriptions[notification_type].pop(subscriptions[notification_type].index(user))
if target_user and target_user_id in subscriptions[notification_type]:
subscriptions[notification_type].pop(subscriptions[notification_type].index(target_user_id))
if target_user_id != user._id:
store_emails([target_user_id], notification_type, 'comment_replies', user, node,
timestamp, **context)
sent_users.append(target_user_id)
if subscriptions[notification_type]:
store_emails(subscriptions[notification_type], notification_type, event_type, user, node,
timestamp, **context)
sent_users.extend(subscriptions[notification_type])
return sent_users
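# Hypothetical usage sketch (event name and objects are assumptions):
#
#   sent_ids = notify('comment_replies', user=commenter, node=project,
#                     timestamp=now, target_user=parent_author)
#   # returns the list of user ids for which digest entries were stored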
def store_emails(recipient_ids, notification_type, event, user, node, timestamp, **context):
"""Store notification emails
Emails are sent via celery beat as digests
:param recipient_ids: List of user ids to send mail to.
:param notification_type: from constants.Notification_types
:param event: event that triggered notification
:param user: user who triggered the notification
:param node: instance of Node
:param timestamp: time event happened
:param context:
:return: --
"""
if notification_type == 'none':
return
template = event + '.html.mako'
context['user'] = user
node_lineage_ids = get_node_lineage(node) if node else []
for user_id in recipient_ids:
if user_id == user._id:
continue
recipient = website_models.User.load(user_id)
context['localized_timestamp'] = localize_timestamp(timestamp, recipient)
message = mails.render_message(template, **context)
digest = NotificationDigest(
timestamp=timestamp,
send_type=notification_type,
event=event,
user_id=user_id,
message=message,
node_lineage=node_lineage_ids
)
digest.save()
def compile_subscriptions(node, event_type, event=None, level=0):
"""Recurse through node and parents for subscriptions.
:param node: current node
:param event_type: Generally node_subscriptions_available
:param event: Particular event such as file_updated that has specific file subs
:param level: How deep the recursion is
:return: a dict of notification types with lists of users.
"""
subscriptions = check_node(node, event_type)
if event:
subscriptions = check_node(node, event) # Gets particular event subscriptions
parent_subscriptions = compile_subscriptions(node, event_type, level=level + 1) # get node and parent subs
elif node.parent_id:
parent_subscriptions = \
compile_subscriptions(website_models.Node.load(node.parent_id), event_type, level=level + 1)
else:
parent_subscriptions = check_node(None, event_type)
for notification_type in parent_subscriptions:
p_sub_n = parent_subscriptions[notification_type]
p_sub_n.extend(subscriptions[notification_type])
for nt in subscriptions:
if notification_type != nt:
p_sub_n = list(set(p_sub_n).difference(set(subscriptions[nt])))
if level == 0:
p_sub_n, removed = utils.separate_users(node, p_sub_n)
parent_subscriptions[notification_type] = p_sub_n
return parent_subscriptions
def check_node(node, event):
"""Return subscription for a particular node and event."""
node_subscriptions = {key: [] for key in constants.NOTIFICATION_TYPES}
if node:
subscription = NotificationSubscription.load(utils.to_subscription_key(node._id, event))
for notification_type in node_subscriptions:
users = getattr(subscription, notification_type, [])
for user in users:
if node.has_permission(user, 'read'):
node_subscriptions[notification_type].append(user._id)
return node_subscriptions
def get_node_lineage(node):
""" Get a list of node ids in order from the node to top most project
e.g. [parent._id, node._id]
"""
lineage = [node._id]
while node.parent_id:
node = website_models.Node.load(node.parent_id)
lineage = [node._id] + lineage
return lineage
def get_settings_url(uid, user):
if uid == user._id:
return web_url_for('user_notifications', _absolute=True)
node = website_models.Node.load(uid)
assert node, 'get_settings_url received an invalid Node id'
return node.web_url_for('node_setting', _guid=True, _absolute=True)
def localize_timestamp(timestamp, user):
try:
user_timezone = dates.get_timezone(user.timezone)
except LookupError:
user_timezone = dates.get_timezone('Etc/UTC')
try:
user_locale = Locale(user.locale)
except core.UnknownLocaleError:
user_locale = 'en'
formatted_date = dates.format_date(timestamp, format='full', locale=user_locale)
formatted_time = dates.format_time(timestamp, format='short', tzinfo=user_timezone, locale=user_locale)
return u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
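# Output sketch (assuming an en-US user; exact wording varies with the
# user's locale and timezone):
#
#   localize_timestamp(timestamp, user)
#   # -> u'2:30 PM on Monday, June 1, 2015'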
| apache-2.0 |
ianctse/pvlib-python | pvlib/test/test_tracking.py | 1 | 11782 | import logging
pvl_logger = logging.getLogger('pvlib')
import datetime
import numpy as np
from numpy import nan
import pandas as pd
from nose.tools import raises, assert_almost_equals
from nose.plugins.skip import SkipTest
from pandas.util.testing import assert_frame_equal
from pvlib.location import Location
from pvlib import solarposition
from pvlib import tracking
def test_solar_noon():
apparent_zenith = pd.Series([10])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 60, 'tracker_theta': -60},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=False,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 80, 'tracker_theta': 80},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_axis_tilt():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741, 'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 47.6632, 'surface_azimuth': 50.96969,
'surface_tilt': 42.5152, 'tracker_theta': 31.6655},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_axis_azimuth():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 30, 'surface_azimuth': 180,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 180,
'surface_tilt': 30, 'tracker_theta': 30},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
@raises(ValueError)
def test_index_mismatch():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90,180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
def test_SingleAxisTracker_creation():
system = tracking.SingleAxisTracker(max_angle=45,
gcr=.25,
module='blah',
inverter='blarg')
assert system.max_angle == 45
assert system.gcr == .25
assert system.module == 'blah'
assert system.inverter == 'blarg'
def test_SingleAxisTracker_tracking():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730 ,
'surface_tilt': 35.98741, 'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
### results calculated using PVsyst
pvsyst_solar_azimuth = 7.1609
pvsyst_solar_height = 27.315
pvsyst_axis_tilt = 20.
pvsyst_axis_azimuth = 20.
pvsyst_system = tracking.SingleAxisTracker(max_angle=60.,
axis_tilt=pvsyst_axis_tilt,
axis_azimuth=180+pvsyst_axis_azimuth,
backtrack=False)
# the definition of azimuth is different from PVsyst
apparent_azimuth = pd.Series([180+pvsyst_solar_azimuth])
apparent_zenith = pd.Series([90-pvsyst_solar_height])
tracker_data = pvsyst_system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 41.07852 , 'surface_azimuth': 180-18.432 ,
'surface_tilt': 24.92122 , 'tracker_theta': -15.18391},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_LocalizedSingleAxisTracker_creation():
localized_system = tracking.LocalizedSingleAxisTracker(latitude=32,
longitude=-111,
module='blah',
inverter='blarg')
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_SingleAxisTracker_localize():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
localized_system = system.localize(latitude=32, longitude=-111)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_SingleAxisTracker_localize_location():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
location = Location(latitude=32, longitude=-111)
localized_system = system.localize(location=location)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_get_irradiance():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
location = Location(latitude=32, longitude=-111)
solar_position = location.get_solarposition(times)
irrads = pd.DataFrame({'dni':[900,0], 'ghi':[600,0], 'dhi':[100,0]},
index=times)
solar_zenith = solar_position['apparent_zenith']
solar_azimuth = solar_position['azimuth']
tracker_data = system.singleaxis(solar_zenith, solar_azimuth)
irradiance = system.get_irradiance(irrads['dni'],
irrads['ghi'],
irrads['dhi'],
solar_zenith=solar_zenith,
solar_azimuth=solar_azimuth,
surface_tilt=tracker_data['surface_tilt'],
surface_azimuth=tracker_data['surface_azimuth'])
expected = pd.DataFrame(data=np.array(
[[ 961.80070, 815.94490, 145.85580, 135.32820,
10.52757492],
[ nan, nan, nan, nan,
nan]]),
columns=['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=times)
irradiance = np.round(irradiance, 4)
expected = np.round(expected, 4)
assert_frame_equal(irradiance, expected)
| bsd-3-clause |
haripradhan/MissionPlanner | Lib/xml/dom/domreg.py | 63 | 3577 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
from xml.dom.minicompat import * # isinstance, StringTypes
# This is a list of well-known implementations. Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
well_known_implementations = {
'minidom':'xml.dom.minidom',
'4DOM': 'xml.dom.DOMImplementation',
}
# DOM implementations not officially registered should register
# themselves with their
registered = {}
def registerDOMImplementation(name, factory):
"""registerDOMImplementation(name, factory)
Register the factory function with the name. The factory function
should return an object which implements the DOMImplementation
interface. The factory function can either return the same object,
or a new one (e.g. if that implementation supports some
customization)."""
registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name = None, features = ()):
"""getDOMImplementation(name = None, features = ()) -> DOM implementation.
Return a suitable DOM implementation. The name is either
well-known, the module name of a DOM implementation, or None. If
it is not None, imports the corresponding module and returns
DOMImplementation object if the import succeeds.
If name is not given, consider the available implementations to
find one with the required feature set. If no implementation can
be found, raise an ImportError. The features list must be a sequence
of (feature, version) pairs which are passed to hasFeature."""
import os
creator = None
mod = well_known_implementations.get(name)
if mod:
mod = __import__(mod, {}, {}, ['getDOMImplementation'])
return mod.getDOMImplementation()
elif name:
return registered[name]()
elif "PYTHON_DOM" in os.environ:
return getDOMImplementation(name = os.environ["PYTHON_DOM"])
# User did not specify a name, try implementations in arbitrary
# order, returning the one that has the required features
if isinstance(features, StringTypes):
features = _parse_feature_string(features)
for creator in registered.values():
dom = creator()
if _good_enough(dom, features):
return dom
for creator in well_known_implementations.keys():
try:
dom = getDOMImplementation(name = creator)
except StandardError: # typically ImportError, or AttributeError
continue
if _good_enough(dom, features):
return dom
raise ImportError,"no suitable DOM implementation found"
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError, "bad feature name: %r" % (feature,)
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
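# For example, _parse_feature_string("core 2.0 events") returns
# (('core', '2.0'), ('events', None)).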
| gpl-3.0 |
rrooij/youtube-dl | youtube_dl/extractor/googleplus.py | 27 | 2570 | # coding: utf-8
from __future__ import unicode_literals
import re
import codecs
from .common import InfoExtractor
from ..utils import unified_strdate
class GooglePlusIE(InfoExtractor):
IE_DESC = 'Google Plus'
_VALID_URL = r'https?://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
IE_NAME = 'plus.google'
_TEST = {
'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
'info_dict': {
'id': 'ZButuJc6CtH',
'ext': 'flv',
'title': '嘆きの天使 降臨',
'upload_date': '20120613',
'uploader': '井上ヨシマサ',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
# Step 1, Retrieve post webpage to extract further information
webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
title = self._og_search_description(webpage).splitlines()[0]
upload_date = unified_strdate(self._html_search_regex(
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
webpage, 'upload date', fatal=False, flags=re.VERBOSE))
uploader = self._html_search_regex(
r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
# Step 2, Simulate clicking the image box to launch video
DOMAIN = 'https://plus.google.com/'
video_page = self._search_regex(
r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
webpage, 'video page URL')
if not video_page.startswith(DOMAIN):
video_page = DOMAIN + video_page
webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
def unicode_escape(s):
decoder = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4,}',
lambda m: decoder(m.group(0))[0],
s)
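# e.g. (sketch): unicode_escape(r'caf\u00e9') -> u'caf\xe9'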
# Extract video links all sizes
formats = [{
'url': unicode_escape(video_url),
'ext': 'flv',
'width': int(width),
'height': int(height),
} for width, height, video_url in re.findall(
r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent\.com.*?)"', webpage)]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'uploader': uploader,
'upload_date': upload_date,
'formats': formats,
}
| unlicense |
kawamon/hue | desktop/core/ext-py/repoze.who-2.3/repoze/who/tests/test__compat.py | 2 | 4701 | import unittest
class CompatTests(unittest.TestCase):
def test_REQUEST_METHOD_miss(self):
# PEP 3333 says REQUEST_METHOD is mandatory
from .._compat import REQUEST_METHOD
self.assertRaises(KeyError, REQUEST_METHOD, {})
def test_REQUEST_METHOD_hit(self):
from .._compat import REQUEST_METHOD
self.assertEqual(REQUEST_METHOD({'REQUEST_METHOD': 'FOO'}), 'FOO')
def test_CONTENT_TYPE_miss(self):
# PEP 3333 says CONTENT_TYPE is optional
from .._compat import CONTENT_TYPE
self.assertEqual(CONTENT_TYPE({}), '')
def test_CONTENT_TYPE_hit(self):
from .._compat import CONTENT_TYPE
self.assertEqual(CONTENT_TYPE({'CONTENT_TYPE': 'text/html'}),
'text/html')
def test_USER_AGENT_miss(self):
from .._compat import USER_AGENT
self.assertEqual(USER_AGENT({}), None)
def test_USER_AGENT_hit(self):
from .._compat import USER_AGENT
self.assertEqual(USER_AGENT({'HTTP_USER_AGENT': 'FOO'}), 'FOO')
def test_AUTHORIZATION_miss(self):
from .._compat import AUTHORIZATION
self.assertEqual(AUTHORIZATION({}), '')
def test_AUTHORIZATION_hit(self):
from .._compat import AUTHORIZATION
self.assertEqual(AUTHORIZATION({'HTTP_AUTHORIZATION': 'FOO'}), 'FOO')
def test_get_cookies_no_cache_ok_header_value(self):
from .._compat import get_cookies
from .._compat import SimpleCookie
environ = {'HTTP_COOKIE': 'qux=spam'}
cookies = get_cookies(environ)
self.assertTrue(isinstance(cookies, SimpleCookie))
self.assertEqual(len(cookies), 1)
self.assertEqual(cookies['qux'].value, 'spam')
self.assertEqual(environ['paste.cookies'], (cookies, 'qux=spam'))
def test_get_cookies_w_cache_miss(self):
from .._compat import get_cookies
from .._compat import SimpleCookie
environ = {'HTTP_COOKIE': 'qux=spam',
'paste.cookies': (object(), 'foo=bar'),
}
cookies = get_cookies(environ)
self.assertTrue(isinstance(cookies, SimpleCookie))
self.assertEqual(len(cookies), 1)
self.assertEqual(cookies['qux'].value, 'spam')
self.assertEqual(environ['paste.cookies'], (cookies, 'qux=spam'))
def test_get_cookies_w_cache_hit(self):
from .._compat import get_cookies
from .._compat import SimpleCookie
existing = SimpleCookie()
existing['foo'] = 'bar'
environ = {'HTTP_COOKIE': 'qux=spam',
'paste.cookies': (existing, 'qux=spam'),
}
cookies = get_cookies(environ)
self.assertTrue(cookies is existing)
def test_construct_url(self):
from .._compat import construct_url
environ = {'wsgi.url_scheme': 'http',
'HTTP_HOST': 'example.com',
}
self.assertEqual(construct_url(environ), 'http://example.com/')
def test_header_value_miss(self):
from .._compat import header_value
self.assertEqual(header_value([], 'nonesuch'), '')
def test_header_value_simple(self):
from .._compat import header_value
self.assertEqual(header_value([('simple', 'SIMPLE')], 'simple'),
'SIMPLE')
def test_must_decode_non_string(self):
from .._compat import must_decode
foo = object()
self.assertTrue(must_decode(foo) is foo)
def test_must_decode_unicode(self):
from .._compat import must_decode
from .._compat import u
foo = u('foo')
self.assertTrue(must_decode(foo) is foo)
def test_must_decode_utf8(self):
from .._compat import must_decode
foo = b'b\xc3\xa2tard'
self.assertEqual(must_decode(foo), foo.decode('utf-8'))
def test_must_decode_latin1(self):
from .._compat import must_decode
foo = b'b\xe2tard'
self.assertEqual(must_decode(foo), foo.decode('latin1'))
def test_must_encode_non_string(self):
from .._compat import must_encode
foo = object()
self.assertTrue(must_encode(foo) is foo)
def test_must_encode_unicode(self):
from .._compat import must_encode
from .._compat import u
foo = u('foo')
self.assertEqual(must_encode(foo), foo.encode('utf-8'))
def test_must_encode_utf8(self):
from .._compat import must_encode
foo = b'b\xc3\xa2tard'
self.assertTrue(must_encode(foo) is foo)
def test_must_encode_latin1(self):
from .._compat import must_encode
foo = b'b\xe2tard'
self.assertTrue(must_encode(foo) is foo)
| apache-2.0 |
n0trax/ansible | lib/ansible/plugins/action/net_base.py | 15 | 7207 | # (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.module_utils.network_common import load_provider
from imp import find_module, load_module
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
play_context = copy.deepcopy(self._play_context)
play_context.network_os = self._get_network_os(task_vars)
# we should be able to streamline this a bit by creating a common
# provider argument spec in module_utils/network_common.py; another
# option is to not push provider into the module at all, since the
# connection is already started in the action handler.
f, p, d = find_module('ansible')
f2, p2, d2 = find_module('module_utils', [p])
f3, p3, d3 = find_module(play_context.network_os, [p2])
module = load_module('ansible.module_utils.' + play_context.network_os, f3, p3, d3)
self.provider = load_provider(module.get_provider_argspec(), self._task.args)
if play_context.network_os == 'junos':
play_context.connection = 'netconf'
play_context.port = int(self.provider['port'] or self._play_context.port or 830)
else:
play_context.connection = 'network_cli'
play_context.port = int(self.provider['port'] or self._play_context.port or 22)
play_context.remote_addr = self.provider['host'] or self._play_context.remote_addr
play_context.remote_user = self.provider['username'] or self._play_context.connection_user
play_context.password = self.provider['password'] or self._play_context.password
play_context.private_key_file = self.provider['ssh_keyfile'] or self._play_context.private_key_file
play_context.timeout = int(self.provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
if 'authorize' in self.provider.keys():
play_context.become = self.provider['authorize'] or False
play_context.become_pass = self.provider['auth_pass']
socket_path = self._start_connection(play_context)
task_vars['ansible_socket'] = socket_path
if 'fail_on_missing_module' not in self._task.args:
self._task.args['fail_on_missing_module'] = False
result = super(ActionModule, self).run(tmp, task_vars)
module = self._get_implementation_module(play_context.network_os, self._task.action)
if not module:
if self._task.args['fail_on_missing_module']:
result['failed'] = True
else:
result['failed'] = False
result['msg'] = ('Could not find implementation module %s for %s' %
(self._task.action, play_context.network_os))
else:
new_module_args = self._task.args.copy()
# perhaps delete the provider argument here as well since the
# module code doesn't need the information, the connection is
# already started
if 'network_os' in new_module_args:
del new_module_args['network_os']
del new_module_args['fail_on_missing_module']
display.vvvv('Running implementation module %s' % module)
result.update(self._execute_module(module_name=module,
module_args=new_module_args, task_vars=task_vars,
wrap_async=self._task.async))
display.vvvv('Caching network OS %s in facts' % play_context.network_os)
result['ansible_facts'] = {'network_os': play_context.network_os}
return result
def _start_connection(self, play_context):
display.vvv('using connection plugin %s' % play_context.connection, play_context.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent',
play_context, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, play_context.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
# enable mode and not config mode
rc, out, err = connection.exec_command('prompt()')
if str(out).strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
return socket_path
def _get_network_os(self, task_vars):
if 'network_os' in self._task.args and self._task.args['network_os']:
display.vvvv('Getting network OS from task argument')
network_os = self._task.args['network_os']
elif self._play_context.network_os:
display.vvvv('Getting network OS from inventory')
network_os = self._play_context.network_os
elif 'network_os' in task_vars.get('ansible_facts', {}) and task_vars['ansible_facts']['network_os']:
display.vvvv('Getting network OS from fact')
network_os = task_vars['ansible_facts']['network_os']
else:
raise AnsibleError('ansible_network_os must be specified on this host to use platform agnostic modules')
return network_os
def _get_implementation_module(self, network_os, platform_agnostic_module):
implementation_module = network_os + '_' + platform_agnostic_module.partition('_')[2]
if implementation_module not in self._shared_loader_obj.module_loader:
implementation_module = None
return implementation_module
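# Mapping sketch (names are illustrative): with network_os='ios' and the
# platform-agnostic action 'net_interface', this resolves to 'ios_interface'
# when such a module exists in the module loader, and to None otherwise.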
| gpl-3.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/utils/six.py | 45 | 30101 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
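# Hedged sketch (not part of six): print_ accepts the Python 3 keyword
# arguments (sep, end, file and, via the shim above, flush) on every
# supported interpreter.  _example_print is a hypothetical name.
def _example_print(stream):
    print_("x", "y", sep=", ", end=".\n", file=stream, flush=True)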
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
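# Hedged sketch (not part of six): the shim above back-ports Python 3.4's
# behaviour of setting __wrapped__ on the wrapper.  _noisy and _quiet are
# hypothetical names.
def _noisy(func):
    @wraps(func)
    def _quiet(*args, **kwargs):
        return func(*args, **kwargs)
    return _quiet  # _quiet.__wrapped__ is func on all supported versions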
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
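# Hedged sketch (not part of six): with_metaclass builds a throwaway base so
# one class statement works under both the Python 2 and Python 3 metaclass
# syntaxes.  _Meta and _Widget are hypothetical names.
class _Meta(type):
    pass
class _Widget(with_metaclass(_Meta, object)):
    pass  # type(_Widget) is _Meta on both major versions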
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
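# Hedged sketch (not part of six): the decorator form rebuilds the class via
# the metaclass, which is why __slots__ entries are popped from the copied
# class dict above.  _SlotMeta and _Record are hypothetical names.
class _SlotMeta(type):
    pass
@add_metaclass(_SlotMeta)
class _Record(object):
    __slots__ = ('value',)  # recreated cleanly on the new class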
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
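# Hedged sketch (not part of six): define a text-returning __str__ and let
# the decorator supply __unicode__ plus a UTF-8 __str__ on Python 2.
# _Point is a hypothetical name.
@python_2_unicode_compatible
class _Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __str__(self):
        return u("(%d, %d)") % (self.x, self.y)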
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
### Additional customizations for Django ###
if PY3:
memoryview = memoryview
else:
# memoryview and buffer are not strictly equivalent, but should be fine for
# django core usage (mainly BinaryField). However, Jython doesn't support
# buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
if sys.platform.startswith('java'):
memoryview = memoryview
else:
memoryview = buffer
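# Hedged sketch (not part of the shim): whichever object ``memoryview`` was
# bound to above, it wraps an existing byte buffer without copying it.
# _example_wrap is a hypothetical name.
def _example_wrap(raw):
    return memoryview(raw)  # ``buffer(raw)`` on CPython 2, memoryview elsewhere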
| apache-2.0 |
sephalon/python-ivi | ivi/agilent/agilent2000A.py | 5 | 16833 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBaseInfiniiVision import *
from .. import ivi
from .. import fgen
ScreenshotImageFormatMapping = {
'bmp': 'bmp',
'bmp24': 'bmp',
'bmp8': 'bmp8bit',
'png': 'png',
'png24': 'png'}
OutputMode = set(['function'])
OperationMode = set(['continuous', 'burst'])
StandardWaveformMapping = {
'sine': 'sin',
'square': 'squ',
#'triangle': 'tri',
'ramp_up': 'ramp',
    #'ramp_down',
'pulse': 'puls',
'noise': 'nois',
'dc': 'dc'
}
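# Hedged sketch (not part of the driver): the mapping above translates IVI
# waveform names to the instrument's SCPI tokens; reversing it decodes query
# replies, as _get_output_standard_waveform_waveform does below.
# _example_decode is a hypothetical name.
def _example_decode(scpi_token):
    reverse = dict((v, k) for k, v in StandardWaveformMapping.items())
    return reverse.get(scpi_token.lower())  # e.g. 'squ' -> 'square'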
class agilent2000A(agilentBaseInfiniiVision, fgen.Base, fgen.StdFunc, fgen.ModulateAM, fgen.ModulateFM):
"Agilent InfiniiVision 2000A series IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilent2000A, self).__init__(*args, **kwargs)
self._analog_channel_name = list()
self._analog_channel_count = 4
self._digital_channel_name = list()
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 200e6
self._horizontal_divisions = 10
self._vertical_divisions = 8
self._display_screenshot_image_format_mapping = ScreenshotImageFormatMapping
# wavegen option
self._output_mode_list = OutputMode
self._operation_mode_list = OperationMode
self._output_count = 1
self._output_standard_waveform_mapping = StandardWaveformMapping
self._identity_description = "Agilent InfiniiVision 2000A X-series IVI oscilloscope driver"
self._identity_supported_instrument_models = ['DSOX2002A','DSOX2004A','DSOX2012A',
'DSOX2014A','DSOX2022A','DSOX2024A','MSOX2002A','MSOX2004A','MSOX2012A','MSOX2014A',
'MSOX2022A','MSOX2024A']
self._init_outputs()
self._init_channels()
def _init_outputs(self):
try:
super(agilent2000A, self)._init_outputs()
except AttributeError:
pass
self._output_name = list()
self._output_operation_mode = list()
self._output_enabled = list()
self._output_impedance = list()
self._output_mode = list()
self._output_reference_clock_source = list()
for i in range(self._output_count):
if self._output_count == 1:
self._output_name.append("wgen")
else:
self._output_name.append("wgen%d" % (i+1))
self._output_operation_mode.append('continuous')
self._output_enabled.append(False)
self._output_impedance.append(50)
self._output_mode.append('function')
self._output_reference_clock_source.append('')
self.outputs._set_list(self._output_name)
# wavegen option
def _get_output_operation_mode(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_operation_mode[index]
def _set_output_operation_mode(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in self._operation_mode_list:
raise ivi.ValueNotSupportedException()
self._output_operation_mode[index] = value
def _get_output_enabled(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:output?" % self._output_name[index])
self._output_enabled[index] = bool(int(resp))
self._set_cache_valid(index=index)
return self._output_enabled[index]
def _set_output_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
if not self._driver_operation_simulate:
self._write(":%s:output %d" % (self._output_name[index], value))
self._output_enabled[index] = value
self._set_cache_valid(index=index)
def _get_output_impedance(self, index):
        # resolve against the output list (the original looked up the analog
        # channel list here by mistake)
        index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
val = self._ask(":%s:output:load?" % self._output_name[index])
if val == 'ONEM':
self._output_impedance[index] = 1000000
elif val == 'FIFT':
self._output_impedance[index] = 50
self._set_cache_valid(index=index)
return self._output_impedance[index]
def _set_output_impedance(self, index, value):
value = float(value)
        # resolve against the output list, as in the getter above
        index = ivi.get_index(self._output_name, index)
if value != 50 and value != 1000000:
raise Exception('Invalid impedance selection')
if not self._driver_operation_simulate:
if value == 1000000:
self._write(":%s:output:load onemeg" % self._output_name[index])
elif value == 50:
self._write(":%s:output:load fifty" % self._output_name[index])
self._output_impedance[index] = value
self._set_cache_valid(index=index)
self._set_cache_valid(False, 'output_standard_waveform_amplitude', index)
def _get_output_mode(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_mode[index]
def _set_output_mode(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in self._output_mode_list:
raise ivi.ValueNotSupportedException()
self._output_mode[index] = value
def _get_output_reference_clock_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_reference_clock_source[index]
    def _set_output_reference_clock_source(self, index, value):
        index = ivi.get_index(self._output_name, index)
        # the built-in wavegen only has an internal reference, so any
        # requested source is coerced to 'internal'
        value = 'internal'
        self._output_reference_clock_source[index] = value
    def abort_generation(self):
        # the scope's wavegen runs continuously; there is nothing to abort
        pass
    def initiate_generation(self):
        # generation starts as soon as the output is enabled
        pass
def _get_output_standard_waveform_amplitude(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:voltage?" % self._output_name[index])
self._output_standard_waveform_amplitude[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_amplitude[index]
def _set_output_standard_waveform_amplitude(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:voltage %e" % (self._output_name[index], value))
self._output_standard_waveform_amplitude[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_dc_offset(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:voltage:offset?" % self._output_name[index])
self._output_standard_waveform_dc_offset[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_dc_offset[index]
def _set_output_standard_waveform_dc_offset(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:voltage:offset %e" % (self._output_name[index], value))
self._output_standard_waveform_dc_offset[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_duty_cycle_high(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:function:square:dcycle?" % self._output_name[index])
self._output_standard_waveform_duty_cycle_high[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_duty_cycle_high[index]
def _set_output_standard_waveform_duty_cycle_high(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 20.0 or value > 80.0:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write(":%s:function:square:dcycle %e" % (self._output_name[index], value))
self._output_standard_waveform_duty_cycle_high[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_start_phase(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_standard_waveform_start_phase[index]
def _set_output_standard_waveform_start_phase(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_standard_waveform_start_phase[index] = value
def _get_output_standard_waveform_frequency(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:frequency?" % self._output_name[index])
self._output_standard_waveform_frequency[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_frequency[index]
def _set_output_standard_waveform_frequency(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:frequency %e" % (self._output_name[index], value))
self._output_standard_waveform_frequency[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_waveform(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:function?" % self._output_name[index])
value = resp.lower()
value = [k for k,v in self._output_standard_waveform_mapping.items() if v==value][0]
self._output_standard_waveform_waveform[index] = value
self._set_cache_valid(index=index)
return self._output_standard_waveform_waveform[index]
def _set_output_standard_waveform_waveform(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in self._output_standard_waveform_mapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":%s:function %s" % (self._output_name[index], self._output_standard_waveform_mapping[value]))
self._output_standard_waveform_waveform[index] = value
self._set_cache_valid(index=index)
def _get_output_am_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_am_enabled[index]
def _set_output_am_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_am_enabled[index] = value
def _get_output_am_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_am_source[index]
    def _set_output_am_source(self, index, value):
        index = ivi.get_index(self._output_name, index)
        # only the internal modulation source is supported
        value = 'internal'
        self._output_am_source[index] = value
    def _get_am_internal_depth(self):
        # single-output instrument: address output 0 explicitly (the original
        # referenced an undefined ``index`` here, a NameError at runtime)
        if not self._driver_operation_simulate and not self._get_cache_valid():
            resp = self._ask(":%s:modulation:am:depth?" % self._output_name[0])
            self._am_internal_depth = float(resp)
            self._set_cache_valid()
        return self._am_internal_depth
    def _set_am_internal_depth(self, value):
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":%s:modulation:am:depth %e" % (self._output_name[0], value))
        self._am_internal_depth = value
        self._set_cache_valid()
    def _get_am_internal_frequency(self):
        if not self._driver_operation_simulate and not self._get_cache_valid():
            resp = self._ask(":%s:modulation:am:frequency?" % self._output_name[0])
            self._am_internal_frequency = float(resp)
            self._set_cache_valid()
        return self._am_internal_frequency
    def _set_am_internal_frequency(self, value):
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":%s:modulation:am:frequency %e" % (self._output_name[0], value))
        self._am_internal_frequency = value
        self._set_cache_valid()
def _get_am_internal_waveform(self):
return self._am_internal_waveform
    def _set_am_internal_waveform(self, value):
        # waveform selectors are string names, not numbers; the original
        # coerced with float(), which would raise on e.g. 'sine'
        value = str(value)
        self._am_internal_waveform = value
def _get_output_fm_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_fm_enabled[index]
def _set_output_fm_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_fm_enabled[index] = value
def _get_output_fm_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_fm_source[index]
def _set_output_fm_source(self, index, value):
index = ivi.get_index(self._output_name, index)
value = 'internal'
self._output_fm_source[index] = value
    def _get_fm_internal_deviation(self):
        # as with the AM methods above, address output 0 explicitly (the
        # original referenced an undefined ``index`` here)
        if not self._driver_operation_simulate and not self._get_cache_valid():
            resp = self._ask(":%s:modulation:fm:deviation?" % self._output_name[0])
            self._fm_internal_deviation = float(resp)
            self._set_cache_valid()
        return self._fm_internal_deviation
    def _set_fm_internal_deviation(self, value):
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":%s:modulation:fm:deviation %e" % (self._output_name[0], value))
        self._fm_internal_deviation = value
        self._set_cache_valid()
    def _get_fm_internal_frequency(self):
        if not self._driver_operation_simulate and not self._get_cache_valid():
            resp = self._ask(":%s:modulation:fm:frequency?" % self._output_name[0])
            self._fm_internal_frequency = float(resp)
            self._set_cache_valid()
        return self._fm_internal_frequency
    def _set_fm_internal_frequency(self, value):
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":%s:modulation:fm:frequency %e" % (self._output_name[0], value))
        self._fm_internal_frequency = value
        self._set_cache_valid()
def _get_fm_internal_waveform(self):
return self._fm_internal_waveform
    def _set_fm_internal_waveform(self, value):
        # string waveform name, as in _set_am_internal_waveform above
        value = str(value)
        self._fm_internal_waveform = value
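# Hedged usage sketch (not part of the driver): assumes python-ivi exposes
# this class as ivi.agilent.agilent2000A and that a scope answers at the
# VISA resource string below, which is hypothetical.
def _example_wavegen():
    import ivi
    scope = ivi.agilent.agilent2000A("TCPIP0::192.0.2.10::INSTR")
    wgen = scope.outputs[0]                  # the single 'wgen' output
    wgen.standard_waveform.waveform = 'square'
    wgen.standard_waveform.frequency = 1e3   # 1 kHz
    wgen.standard_waveform.amplitude = 1.0   # 1 Vpp
    wgen.enabled = True
    scope.close()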
| mit |