repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
khertan/KhtNotes
khtnotes/docutils/transforms/parts.py
187
6980
# $Id: parts.py 6073 2009-08-06 12:21:10Z milde $
# Authors: David Goodger <goodger@python.org>; Ueli Schlaepfer; Dmitry Jemerov
# Copyright: This module has been placed in the public domain.

"""
Transforms related to document parts.
"""

__docformat__ = 'reStructuredText'


import re
import sys
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform


class SectNum(Transform):

    """
    Automatically assigns numbers to the titles of document sections.

    It is possible to limit the maximum section level for which the numbers
    are added.  For those sections that are auto-numbered, the "autonum"
    attribute is set, informing the contents table generator that a
    different form of the TOC should be used.
    """

    default_priority = 710
    """Should be applied before `Contents`."""

    def apply(self):
        # Read numbering options from the pending node, then remove it from
        # the tree (it only exists to carry these details to the transform).
        self.maxdepth = self.startnode.details.get('depth', None)
        self.startvalue = self.startnode.details.get('start', 1)
        self.prefix = self.startnode.details.get('prefix', '')
        self.suffix = self.startnode.details.get('suffix', '')
        self.startnode.parent.remove(self.startnode)
        if self.document.settings.sectnum_xform:
            if self.maxdepth is None:
                # No explicit depth: number all levels.
                # NOTE(review): sys.maxint is Python 2 only — confirm this
                # module is never imported under Python 3.
                self.maxdepth = sys.maxint
            self.update_section_numbers(self.document)
        else: # store details for eventual section numbering by the writer
            self.document.settings.sectnum_depth = self.maxdepth
            self.document.settings.sectnum_start = self.startvalue
            self.document.settings.sectnum_prefix = self.prefix
            self.document.settings.sectnum_suffix = self.suffix

    def update_section_numbers(self, node, prefix=(), depth=0):
        """Recursively prepend "1.2.3"-style numbers to section titles.

        `prefix` is the tuple of ancestor numbers (empty at the top level,
        where numbering starts from the configured start value).
        """
        depth += 1
        if prefix:
            sectnum = 1
        else:
            sectnum = self.startvalue
        for child in node:
            if isinstance(child, nodes.section):
                numbers = prefix + (str(sectnum),)
                title = child[0]
                # Use &nbsp; for spacing:
                generated = nodes.generated(
                    '', (self.prefix + '.'.join(numbers) + self.suffix
                         + u'\u00a0' * 3),
                    classes=['sectnum'])
                title.insert(0, generated)
                # Flag the title so the TOC generator knows it is
                # auto-numbered.
                title['auto'] = 1
                if depth < self.maxdepth:
                    self.update_section_numbers(child, numbers, depth)
                sectnum += 1


class Contents(Transform):

    """
    This transform generates a table of contents from the entire document
    tree or from a single branch.  It locates "section" elements and builds
    them into a nested bullet list, which is placed within a "topic" created
    by the contents directive.  A title is either explicitly specified,
    taken from the appropriate language module, or omitted (local table of
    contents).  The depth may be specified.  Two-way references between the
    table of contents and section titles are generated (requires Writer
    support).

    This transform requires a startnode, which contains generation options
    and provides the location for the generated table of contents (the
    startnode is replaced by the table of contents "topic").
    """

    default_priority = 720

    def apply(self):
        try:  # let the writer (or output software) build the contents list?
            toc_by_writer = self.document.settings.use_latex_toc
        except AttributeError:
            toc_by_writer = False
        details = self.startnode.details
        if 'local' in details:
            startnode = self.startnode.parent.parent
            while not (isinstance(startnode, nodes.section)
                       or isinstance(startnode, nodes.document)):
                # find the ToC root: a direct ancestor of startnode
                startnode = startnode.parent
        else:
            startnode = self.document

        self.toc_id = self.startnode.parent['ids'][0]
        if 'backlinks' in details:
            self.backlinks = details['backlinks']
        else:
            self.backlinks = self.document.settings.toc_backlinks

        if toc_by_writer:
            # move customization settings to the parent node
            self.startnode.parent.attributes.update(details)
            self.startnode.parent.remove(self.startnode)
        else:
            contents = self.build_contents(startnode)
            if len(contents):
                self.startnode.replace_self(contents)
            else:
                # Empty ToC: remove the enclosing topic entirely.
                self.startnode.parent.parent.remove(self.startnode.parent)

    def build_contents(self, node, level=0):
        """Build a nested bullet list of ToC entries for `node`'s sections.

        Returns a `bullet_list` node, or an empty list when `node` has no
        section children (callers test truthiness/len).
        """
        level += 1
        sections = [sect for sect in node if isinstance(sect, nodes.section)]
        entries = []
        # NOTE(review): `autonum` is assigned but never used below; only
        # `auto` (from the last iterated title) drives the 'auto-toc' class.
        autonum = 0
        depth = self.startnode.details.get('depth', sys.maxint)
        for section in sections:
            title = section[0]
            auto = title.get('auto')    # May be set by SectNum.
            entrytext = self.copy_and_filter(title)
            reference = nodes.reference('', '', refid=section['ids'][0],
                                        *entrytext)
            ref_id = self.document.set_id(reference)
            entry = nodes.paragraph('', '', reference)
            item = nodes.list_item('', entry)
            # Only add a backlink if the title does not already contain a
            # reference of its own.
            if (self.backlinks in ('entry', 'top')
                and title.next_node(nodes.reference) is None):
                if self.backlinks == 'entry':
                    title['refid'] = ref_id
                elif self.backlinks == 'top':
                    title['refid'] = self.toc_id
            if level < depth:
                subsects = self.build_contents(section, level)
                item += subsects
            entries.append(item)
        if entries:
            contents = nodes.bullet_list('', *entries)
            if auto:
                contents['classes'].append('auto-toc')
            return contents
        else:
            return []

    def copy_and_filter(self, node):
        """Return a copy of a title, with references, images, etc. removed."""
        visitor = ContentsFilter(self.document)
        node.walkabout(visitor)
        return visitor.get_entry_text()


class ContentsFilter(nodes.TreeCopyVisitor):

    """Copies a title subtree, dropping nodes unsuitable for a ToC entry."""

    def get_entry_text(self):
        return self.get_tree_copy().children

    def visit_citation_reference(self, node):
        raise nodes.SkipNode

    def visit_footnote_reference(self, node):
        raise nodes.SkipNode

    def visit_image(self, node):
        # Keep the alt text of an image, drop the image itself.
        if node.hasattr('alt'):
            self.parent.append(nodes.Text(node['alt']))
        raise nodes.SkipNode

    def ignore_node_but_process_children(self, node):
        raise nodes.SkipDeparture

    visit_interpreted = ignore_node_but_process_children
    visit_problematic = ignore_node_but_process_children
    visit_reference = ignore_node_but_process_children
    visit_target = ignore_node_but_process_children
gpl-3.0
chokribr/inveniotest
modules/miscutil/lib/upgrades/invenio_2013_03_28_bibindex_bibrank_type_index.py
17
1813
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

from invenio.dbquery import \
    run_sql, \
    CFG_DATABASE_NAME

depends_on = ['invenio_release_1_1_0']


def info():
    """Return the one-line upgrade description shown to the administrator."""
    return "Change of rnk*R and idx*R tables to add type index"


def do_upgrade():
    """Add an index on the `type` column to every idx*R / rnk*R table.

    Tables that already have a `type` key (detected via SHOW CREATE TABLE)
    are skipped, which makes the upgrade idempotent.
    """
    all_tables = [t[0] for t in run_sql("SHOW TABLES LIKE 'idx%R'")] + \
                 [t[0] for t in run_sql("SHOW TABLES LIKE 'rnk%R'")]
    for table in all_tables:
        # Table names come straight from SHOW TABLES above, so string
        # interpolation is safe here (identifiers cannot be bound as
        # query parameters).
        create_statement = run_sql('SHOW CREATE TABLE %s' % table)[0][1]
        if 'KEY `type`' not in create_statement:
            run_sql("ALTER TABLE %s ADD INDEX type (type)" % (table,))


def estimate():
    """ Estimate running time of upgrade in seconds (optional). """
    count_rows = run_sql("SELECT SUM(TABLE_ROWS) FROM INFORMATION_SCHEMA.TABLES "
                         "WHERE TABLE_SCHEMA = '%s' "
                         "AND (TABLE_NAME like 'idx%%R' or TABLE_NAME like 'rnk%%R')"
                         % (CFG_DATABASE_NAME,))[0][0]
    # Bug fix: SUM() yields NULL (None in Python) when no idx*R/rnk*R
    # tables exist, which previously raised a TypeError on the division.
    if count_rows is None:
        return 0
    return count_rows / 1000


def pre_upgrade():
    """No pre-upgrade checks are needed for this upgrade."""
    pass


def post_upgrade():
    """No post-upgrade actions are needed for this upgrade."""
    pass
gpl-2.0
ageis/securedrop
securedrop/store.py
1
6181
# -*- coding: utf-8 -*-
import os
import re
import config
import zipfile
import crypto_util
import uuid
import tempfile
import subprocess
from cStringIO import StringIO
import gzip
from werkzeug import secure_filename

from secure_tempfile import SecureTemporaryFile

import logging
log = logging.getLogger(__name__)

# Matcher for stored submission filenames, e.g. "3-name-msg.gpg" or
# "3-name-doc.gz.gpg"; captures the numeric index and the file type.
VALIDATE_FILENAME = re.compile(
    "^(?P<index>\d+)\-[a-z0-9-_]*(?P<file_type>msg|doc\.(gz|zip)|reply)\.gpg$").match


class PathException(Exception):

    """An exception raised by `util.verify` when it encounters a bad path. A path
    can be bad when it is not absolute or not normalized.
    """
    pass


def verify(p):
    """Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and
    matches the filename format.

    Raises `PathException` on any violation; returns True only for the
    special '_FLAG' file, otherwise returns None on success.
    """
    if not os.path.isabs(config.STORE_DIR):
        raise PathException("config.STORE_DIR(%s) is not absolute" % (
            config.STORE_DIR, ))

    # os.path.abspath makes the path absolute and normalizes '/foo/../bar' to
    # '/bar', etc. We have to check that the path is normalized before checking
    # that it starts with the `config.STORE_DIR` or else a malicious actor could
    # append a bunch of '../../..' to access files outside of the store.
    if not p == os.path.abspath(p):
        raise PathException("The path is not absolute and/or normalized")

    # Check that the path p is in config.STORE_DIR
    if os.path.relpath(p, config.STORE_DIR).startswith('..'):
        raise PathException("Invalid directory %s" % (p, ))

    if os.path.isfile(p):
        filename = os.path.basename(p)
        ext = os.path.splitext(filename)[-1]

        if filename == '_FLAG':
            return True
        if ext != '.gpg':
            # if there's an extension, verify it's a GPG
            raise PathException("Invalid file extension %s" % (ext, ))
        if not VALIDATE_FILENAME(filename):
            raise PathException("Invalid filename %s" % (filename, ))


def path(*s):
    """Get the normalized, absolute file path, within `config.STORE_DIR`."""
    joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)
    absolute = os.path.abspath(joined)
    verify(absolute)
    return absolute


def get_bulk_archive(selected_submissions, zip_directory=''):
    """Generate a zip file from the selected submissions"""
    # delete=False: the caller is responsible for removing the temp file
    # after serving it.
    zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_',
                                           dir=config.TEMP_DIR,
                                           delete=False)
    sources = set([i.source.journalist_designation
                   for i in selected_submissions])
    # The below nested for-loops are there to create a more usable
    # folder structure per #383
    with zipfile.ZipFile(zip_file, 'w') as zip:
        for source in sources:
            submissions = [s for s in selected_submissions
                           if s.source.journalist_designation == source]
            for submission in submissions:
                filename = path(submission.source.filesystem_id,
                                submission.filename)
                verify(filename)
                document_number = submission.filename.split('-')[0]
                # Archive layout: <zip_directory>/<source>/<num>_<date>/<file>
                zip.write(filename, arcname=os.path.join(
                    zip_directory,
                    source,
                    "%s_%s" % (document_number,
                               submission.source.last_updated.date()),
                    os.path.basename(filename)
                ))
    return zip_file


def save_file_submission(sid, count, journalist_filename, filename, stream):
    """Gzip, encrypt and store an uploaded file; return the stored name."""
    sanitized_filename = secure_filename(filename)

    # We store file submissions in a .gz file for two reasons:
    #
    # 1. Downloading large files over Tor is very slow. If we can
    # compress the file, we can speed up future downloads.
    #
    # 2. We want to record the original filename because it might be
    # useful, either for context about the content of the submission
    # or for figuring out which application should be used to open
    # it. However, we'd like to encrypt that info and have the
    # decrypted file automatically have the name of the original
    # file. Given various usability constraints in GPG and Tails, this
    # is the most user-friendly way we have found to do this.
    encrypted_file_name = "{0}-{1}-doc.gz.gpg".format(
        count,
        journalist_filename)
    encrypted_file_path = path(sid, encrypted_file_name)
    with SecureTemporaryFile("/tmp") as stf:
        with gzip.GzipFile(filename=sanitized_filename, mode='wb',
                           fileobj=stf) as gzf:
            # Buffer the stream into the gzip file to avoid excessive
            # memory consumption
            while True:
                buf = stream.read(1024 * 8)
                if not buf:
                    break
                gzf.write(buf)

        crypto_util.encrypt(stf, config.JOURNALIST_KEY, encrypted_file_path)

    return encrypted_file_name


def save_message_submission(sid, count, journalist_filename, message):
    """Encrypt and store a text message submission; return its filename."""
    filename = "{0}-{1}-msg.gpg".format(count, journalist_filename)
    msg_loc = path(sid, filename)
    crypto_util.encrypt(message, config.JOURNALIST_KEY, msg_loc)
    return filename


def rename_submission(sid, orig_filename, journalist_filename):
    """Rename a stored submission to use `journalist_filename`.

    Returns the new filename on success; on any failure (bad original
    name or OS error) returns `orig_filename` unchanged.
    """
    check_submission_name = VALIDATE_FILENAME(orig_filename)
    if check_submission_name:
        parsed_filename = check_submission_name.groupdict()
        if parsed_filename.get('file_type'):
            new_filename = "{}-{}-{}.gpg".format(
                parsed_filename['index'], journalist_filename,
                parsed_filename['file_type'])
            try:
                os.rename(path(sid, orig_filename),
                          path(sid, new_filename))
            except OSError:
                pass
            else:
                return new_filename  # Only return new filename if successful
    return orig_filename


def secure_unlink(fn, recursive=False):
    """Securely delete `fn` (which must verify) using the `srm` tool."""
    verify(fn)
    command = ['srm']
    if recursive:
        command.append('-r')
    command.append(fn)
    subprocess.check_call(command)
    return "success"


def delete_source_directory(source_id):
    """Securely delete a source's entire storage directory."""
    secure_unlink(path(source_id), recursive=True)
    return "success"
agpl-3.0
duguxy/pyqtgraph
pyqtgraph/python2_3.py
35
1500
"""
Helper functions that smooth out the differences between python 2 and 3.
"""
import sys


def asUnicode(x):
    """Coerce *x* to unicode text on python 2, or to ``str`` on python 3."""
    if sys.version_info[0] != 2:
        return str(x)
    # Python 2: pass unicode through, decode byte strings, convert the rest.
    if isinstance(x, unicode):
        return x
    if isinstance(x, str):
        return x.decode('UTF-8')
    return unicode(x)


def cmpToKey(mycmp):
    'Convert a cmp= function into a key= function'
    class _CmpKey(object):
        # Wrap one value; rich comparisons delegate to the cmp function.
        def __init__(self, obj, *args):
            self.obj = obj

        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0

        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0

        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0

        def __ne__(self, other):
            return mycmp(self.obj, other.obj) != 0

        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0

        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0

    return _CmpKey


def sortList(l, cmpFunc):
    """Sort list *l* in place using comparison function *cmpFunc*."""
    if sys.version_info[0] >= 3:
        # Python 3 dropped the cmp= argument of list.sort.
        l.sort(key=cmpToKey(cmpFunc))
    else:
        l.sort(cmpFunc)


if sys.version_info[0] == 3:
    basestring = str

    def cmp(a, b):
        """Three-way comparison, replacing the builtin removed in python 3."""
        if a > b:
            return 1
        if b > a:
            return -1
        return 0

    xrange = range
else:
    import __builtin__
    basestring = __builtin__.basestring
    cmp = __builtin__.cmp
    xrange = __builtin__.xrange
mit
jprawiharjo/Nerddit
Storm/Streaming/Push_to_Cassandra_Bolt.py
1
3976
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 13:37:20 2016

@author: jprawiharjo
"""

from cassandra.cluster import Cluster
import cassandra
from collections import namedtuple
from pyleus.storm import SimpleBolt
from Streaming.Doc_Processor import DataFrame

import logging

log = logging.getLogger('cassandra_bolt')

# create CassandraCluster
CassandraCluster = Cluster(["ec2-52-27-157-187.us-west-2.compute.amazonaws.com",
                            "ec2-52-34-178-13.us-west-2.compute.amazonaws.com",
                            "ec2-52-35-186-215.us-west-2.compute.amazonaws.com",
                            'ec2-52-10-19-240.us-west-2.compute.amazonaws.com'])

keyspace = 'wikidata'
tablename = "titlelinks"


class Push_to_Cassandra(SimpleBolt):

    """Storm bolt that batches wiki page/link tuples and flushes them to
    Cassandra on each tick, maintaining forward links and reverse
    ("referredby") links in the titlelinks table."""

    def initialize(self):
        # One session per bolt instance; ALL consistency so LWT reads below
        # see the freshest state.
        self.session = CassandraCluster.connect(keyspace)
        self.session.default_consistency_level = cassandra.ConsistencyLevel.ALL
        #self.session.encoder.mapping[tuple] = self.session.encoder.cql_encode_set_collection

        # Prepared statements; all use lightweight transactions
        # (IF [NOT] EXISTS) so insert-vs-update can be decided from the
        # 'applied' result column.
        queryAddNew1 = "INSERT INTO {} (id, title, linksto) VALUES (?, ?, ?) IF NOT EXISTS".format(tablename)
        self.preparedAddNew1 = self.session.prepare(queryAddNew1)

        queryAddNew2 = "INSERT INTO {} (id, title, linksto, referredby) VALUES (?, ?, ?, ?) IF NOT EXISTS".format(tablename)
        self.preparedAddNew2 = self.session.prepare(queryAddNew2)

        queryUpdateReferredbyTitle = "UPDATE {} SET id = ?, linksto = ? WHERE title = ? IF EXISTS".format(tablename)
        self.preparedReferredbyTitle = self.session.prepare(queryUpdateReferredbyTitle)

        queryUpdateReferredbyOnly = "UPDATE {} SET referredby = referredby + ? WHERE title = ? IF EXISTS".format(tablename)
        self.preparedReferredbyOnly = self.session.prepare(queryUpdateReferredbyOnly)

        queryAddNewReferredBy = "INSERT INTO {} (title, referredby) VALUES (?, ?) IF NOT EXISTS".format(tablename)
        self.preparedAddNewReferredBy = self.session.prepare(queryAddNewReferredBy)

        # Buffer of DataFrame tuples accumulated between ticks.
        self.bulk_data = []
        log.debug("Initialized")

    def process_tick(self):
        """Flush the buffered rows: write each page's links, then merge the
        inverted (target -> referrers) mapping into `referredby` sets."""
        log.debug("Process Tick")
        log.debug(len(self.bulk_data))

        # Invert the batch: linkage maps each link target to the set of
        # buffered titles that point at it.
        linkage = {}
        for row in self.bulk_data:
            if len(row.Links) > 0:
                log.debug('Processing Links')
                for link in row.Links:
                    if link in linkage.keys():
                        linkage[link].add(row.Title)
                    else:
                        linkage[link] = set([row.Title])

        for row in self.bulk_data:
            log.debug(row.Title)
            # If this title is itself linked to within the batch, insert it
            # together with its referredby set; otherwise links only.
            if row.Title in linkage.keys():
                bound1 = self.preparedAddNew2.bind((str(row.Id), str(row.Title), row.Links, linkage[row.Title]))
            else:
                bound1 = self.preparedAddNew1.bind((str(row.Id), str(row.Title), row.Links))
            res = self.session.execute(bound1)
            res = res.current_rows[0].applied
            #log.debug("Insertion Result = " + str(res))
            if not(res):
                # Row already existed: fall back to updating id/linksto.
                bound2 = self.preparedReferredbyTitle.bind((str(row.Id), row.Links, str(row.Title)))
                self.session.execute_async(bound2)

        #Inserting into database
        for k,v in linkage.iteritems():
            log.debug(k)
            log.debug(v)
            # Try to merge referrers into an existing row first; if the
            # target row does not exist yet, create a stub with only the
            # referredby set.
            bound3 = self.preparedReferredbyOnly.bind((v, k))
            res = self.session.execute(bound3)
            res = res.current_rows[0].applied
            if not(res):
                bound4 = self.preparedAddNewReferredBy.bind((k, v))
                res = self.session.execute_async(bound4)

        # Clear the buffer for the next tick interval.
        self.bulk_data = []

    def process_tuple(self, tup):
        # NOTE(review): assumes tup.values matches the DataFrame namedtuple
        # fields (Id, Title, Links at minimum) — verify against the emitter.
        result = DataFrame(*tup.values)
        self.bulk_data.append(result)


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG,
        filename='/tmp/cassandra_bolt.log',
        filemode='a',
    )

    Push_to_Cassandra().run()
gpl-3.0
seasidesun/shadowsocks-bak
shadowsocks/local.py
1015
2248
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2012-2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import sys import os import logging import signal sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../')) from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns def main(): shell.check_python() # fix py2exe if hasattr(sys, "frozen") and sys.frozen in \ ("windows_exe", "console_exe"): p = os.path.dirname(os.path.abspath(sys.executable)) os.chdir(p) config = shell.get_config(True) daemon.daemon_exec(config) try: logging.info("starting local at %s:%d" % (config['local_address'], config['local_port'])) dns_resolver = asyncdns.DNSResolver() tcp_server = tcprelay.TCPRelay(config, dns_resolver, True) udp_server = udprelay.UDPRelay(config, dns_resolver, True) loop = eventloop.EventLoop() dns_resolver.add_to_loop(loop) tcp_server.add_to_loop(loop) udp_server.add_to_loop(loop) def handler(signum, _): logging.warn('received SIGQUIT, doing graceful shutting down..') tcp_server.close(next_tick=True) udp_server.close(next_tick=True) signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler) def int_handler(signum, _): sys.exit(1) signal.signal(signal.SIGINT, int_handler) daemon.set_user(config.get('user', None)) loop.run() except Exception as e: shell.print_exception(e) sys.exit(1) if __name__ == '__main__': main()
apache-2.0
noironetworks/neutron
neutron/tests/unit/db/test_db_base_plugin_common.py
37
3192
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.db import db_base_plugin_common
from neutron.tests import base


class DummyObject(object):
    """Minimal object whose ``to_dict`` simply echoes its constructor
    keyword arguments; used to exercise the conversion decorator."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def to_dict(self):
        return self.kwargs


class ConvertToDictTestCase(base.BaseTestCase):
    """Tests for the ``convert_result_to_dict`` decorator: it should call
    ``to_dict`` on a single result object or on each element of a list."""

    @db_base_plugin_common.convert_result_to_dict
    def method_dict(self, fields=None):
        return DummyObject(one=1, two=2, three=3)

    @db_base_plugin_common.convert_result_to_dict
    def method_list(self):
        return [DummyObject(one=1, two=2, three=3)] * 3

    def test_simple_object(self):
        expected = {'one': 1, 'two': 2, 'three': 3}
        observed = self.method_dict()
        self.assertEqual(expected, observed)

    def test_list_of_objects(self):
        expected = [{'one': 1, 'two': 2, 'three': 3}] * 3
        observed = self.method_list()
        self.assertEqual(expected, observed)


class FilterFieldsTestCase(base.BaseTestCase):
    """Tests for the ``filter_fields`` decorator: it should project the
    returned dict (or each dict in a list) onto the requested ``fields``,
    whether ``fields`` is passed positionally or by keyword."""

    @db_base_plugin_common.filter_fields
    def method_dict(self, fields=None):
        return {'one': 1, 'two': 2, 'three': 3}

    @db_base_plugin_common.filter_fields
    def method_list(self, fields=None):
        return [self.method_dict() for _ in range(3)]

    @db_base_plugin_common.filter_fields
    def method_multiple_arguments(self, not_used, fields=None,
                                  also_not_used=None):
        # Extra parameters verify the decorator finds `fields` regardless
        # of the surrounding signature.
        return {'one': 1, 'two': 2, 'three': 3}

    def test_no_fields(self):
        # Without fields, the result passes through unfiltered.
        expected = {'one': 1, 'two': 2, 'three': 3}
        observed = self.method_dict()
        self.assertEqual(expected, observed)

    def test_dict(self):
        expected = {'two': 2}
        observed = self.method_dict(['two'])
        self.assertEqual(expected, observed)

    def test_list(self):
        expected = [{'two': 2}, {'two': 2}, {'two': 2}]
        observed = self.method_list(['two'])
        self.assertEqual(expected, observed)

    def test_multiple_arguments_positional(self):
        expected = {'two': 2}
        observed = self.method_multiple_arguments(list(), ['two'])
        self.assertEqual(expected, observed)

    def test_multiple_arguments_positional_and_keywords(self):
        expected = {'two': 2}
        observed = self.method_multiple_arguments(fields=['two'],
                                                  not_used=None)
        self.assertEqual(expected, observed)

    def test_multiple_arguments_keyword(self):
        expected = {'two': 2}
        observed = self.method_multiple_arguments(list(), fields=['two'])
        self.assertEqual(expected, observed)
apache-2.0
repotvsupertuga/tvsupertuga.repository
instal/script.module.liveresolver/lib/liveresolver/modules/f4mproxy/flvlib/constants.py
99
3998
"""
The constants used in FLV files and their meanings.
"""

# Tag type
(TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18)

# Sound format
# Values 12-13 are skipped below — presumably reserved in the FLV
# specification; confirm against the spec before filling the gap.
(SOUND_FORMAT_PCM_PLATFORM_ENDIAN,
 SOUND_FORMAT_ADPCM,
 SOUND_FORMAT_MP3,
 SOUND_FORMAT_PCM_LITTLE_ENDIAN,
 SOUND_FORMAT_NELLYMOSER_16KHZ,
 SOUND_FORMAT_NELLYMOSER_8KHZ,
 SOUND_FORMAT_NELLYMOSER,
 SOUND_FORMAT_G711_A_LAW,
 SOUND_FORMAT_G711_MU_LAW) = range(9)

(SOUND_FORMAT_AAC,
 SOUND_FORMAT_SPEEX) = range(10, 12)

(SOUND_FORMAT_MP3_8KHZ,
 SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16)

sound_format_to_string = {
    SOUND_FORMAT_PCM_PLATFORM_ENDIAN: "Linear PCM, platform endian",
    SOUND_FORMAT_ADPCM: "ADPCM",
    SOUND_FORMAT_MP3: "MP3",
    SOUND_FORMAT_PCM_LITTLE_ENDIAN: "Linear PCM, little endian",
    SOUND_FORMAT_NELLYMOSER_16KHZ: "Nellymoser 16-kHz mono",
    SOUND_FORMAT_NELLYMOSER_8KHZ: "Nellymoser 8-kHz mono",
    SOUND_FORMAT_NELLYMOSER: "Nellymoser",
    SOUND_FORMAT_G711_A_LAW: "G.711 A-law logarithmic PCM",
    SOUND_FORMAT_G711_MU_LAW: "G.711 mu-law logarithmic PCM",
    SOUND_FORMAT_AAC: "AAC",
    SOUND_FORMAT_SPEEX: "Speex",
    SOUND_FORMAT_MP3_8KHZ: "MP3 8-kHz",
    SOUND_FORMAT_DEVICE_SPECIFIC: "Device-specific sound"
}

# Sound rate
(SOUND_RATE_5_5_KHZ,
 SOUND_RATE_11_KHZ,
 SOUND_RATE_22_KHZ,
 SOUND_RATE_44_KHZ) = range(4)

sound_rate_to_string = {
    SOUND_RATE_5_5_KHZ: "5.5-kHz",
    SOUND_RATE_11_KHZ: "11-kHz",
    SOUND_RATE_22_KHZ: "22-kHz",
    SOUND_RATE_44_KHZ: "44-kHz"
}

# Sound size
(SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2)

sound_size_to_string = {
    SOUND_SIZE_8_BIT: "snd8Bit",
    SOUND_SIZE_16_BIT: "snd16Bit"
}

# Sound type
(SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2)

sound_type_to_string = {
    SOUND_TYPE_MONO: "sndMono",
    SOUND_TYPE_STEREO: "sndStereo"
}

# AAC packet type
(AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2)

aac_packet_type_to_string = {
    AAC_PACKET_TYPE_SEQUENCE_HEADER: "sequence header",
    AAC_PACKET_TYPE_RAW: "raw"
}

# Codec ID (video); numbering starts at 1.
(CODEC_ID_JPEG,
 CODEC_ID_H263,
 CODEC_ID_SCREEN_VIDEO,
 CODEC_ID_VP6,
 CODEC_ID_VP6_WITH_ALPHA,
 CODEC_ID_SCREEN_VIDEO_V2,
 CODEC_ID_H264) = range(1, 8)

codec_id_to_string = {
    CODEC_ID_JPEG: "JPEG",
    CODEC_ID_H263: "Sorenson H.263",
    CODEC_ID_SCREEN_VIDEO: "Screen video",
    CODEC_ID_VP6: "On2 VP6",
    CODEC_ID_VP6_WITH_ALPHA: "On2 VP6 with alpha channel",
    CODEC_ID_SCREEN_VIDEO_V2: "Screen video version 2",
    CODEC_ID_H264: "H.264"
}

# Frame type; numbering starts at 1.
(FRAME_TYPE_KEYFRAME,
 FRAME_TYPE_INTERFRAME,
 FRAME_TYPE_DISPOSABLE_INTERFRAME,
 FRAME_TYPE_GENERATED_KEYFRAME,
 FRAME_TYPE_INFO_FRAME) = range(1, 6)

frame_type_to_string = {
    FRAME_TYPE_KEYFRAME: "keyframe",
    FRAME_TYPE_INTERFRAME: "interframe",
    FRAME_TYPE_DISPOSABLE_INTERFRAME: "disposable interframe",
    FRAME_TYPE_GENERATED_KEYFRAME: "generated keyframe",
    FRAME_TYPE_INFO_FRAME: "video info/command frame"
}

# H.264 packet type
(H264_PACKET_TYPE_SEQUENCE_HEADER,
 H264_PACKET_TYPE_NALU,
 H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3)

h264_packet_type_to_string = {
    H264_PACKET_TYPE_SEQUENCE_HEADER: "sequence header",
    H264_PACKET_TYPE_NALU: "NAL unit",
    H264_PACKET_TYPE_END_OF_SEQUENCE: "sequence end"
}

# Value type (script data / AMF0); value 9 is skipped below — presumably
# a reserved marker; confirm against the AMF0 specification.
(VALUE_TYPE_NUMBER,
 VALUE_TYPE_BOOLEAN,
 VALUE_TYPE_STRING,
 VALUE_TYPE_OBJECT,
 VALUE_TYPE_MOVIECLIP,
 VALUE_TYPE_NULL,
 VALUE_TYPE_UNDEFINED,
 VALUE_TYPE_REFERENCE,
 VALUE_TYPE_ECMA_ARRAY) = range(9)

(VALUE_TYPE_STRICT_ARRAY,
 VALUE_TYPE_DATE,
 VALUE_TYPE_LONGSTRING) = range(10, 13)

value_type_to_string = {
    VALUE_TYPE_NUMBER: 'Number',
    VALUE_TYPE_BOOLEAN: 'Boolean',
    VALUE_TYPE_STRING: 'String',
    VALUE_TYPE_OBJECT: 'Object',
    VALUE_TYPE_MOVIECLIP: 'MovieClip',
    VALUE_TYPE_NULL: 'Null',
    VALUE_TYPE_UNDEFINED: 'Undefined',
    VALUE_TYPE_REFERENCE: 'Reference',
    VALUE_TYPE_ECMA_ARRAY: 'ECMA Array',
    VALUE_TYPE_STRICT_ARRAY: 'Strict Array',
    VALUE_TYPE_DATE: 'Date',
    VALUE_TYPE_LONGSTRING: 'Longstring'
}
gpl-2.0
VRciF/springy
src/node_modules/node-ninja/gyp/pylib/gyp/generator/android.py
542
45270
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Notes: # # This generates makefiles suitable for inclusion into the Android build system # via an Android.mk file. It is based on make.py, the standard makefile # generator. # # The code below generates a separate .mk file for each target, but # all are sourced by the top-level GypAndroid.mk. This means that all # variables in .mk-files clobber one another, and furthermore that any # variables set potentially clash with other Android build system variables. # Try to avoid setting global variables where possible. import gyp import gyp.common import gyp.generator.make as make # Reuse global functions from make backend. import os import re import subprocess generator_default_variables = { 'OS': 'android', 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'SHARED_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_SUFFIX': '.so', 'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)', 'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)', 'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)', 'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)', 'LIB_DIR': '$(obj).$(TOOLSET)', 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python. 'RULE_INPUT_PATH': '$(RULE_SOURCES)', 'RULE_INPUT_EXT': '$(suffix $<)', 'RULE_INPUT_NAME': '$(notdir $<)', 'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)', } # Make supports multiple toolsets generator_supports_multiple_toolsets = True # Generator-specific gyp specs. generator_additional_non_configuration_keys = [ # Boolean to declare that this target does not want its name mangled. 'android_unmangled_name', # Map of android build system variables to set. 
'aosp_build_settings', ] generator_additional_path_sections = [] generator_extra_sources_for_rules = [] ALL_MODULES_FOOTER = """\ # "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from # all the included sub-makefiles. This is just here to clarify. gyp_all_modules: """ header = """\ # This file is generated by gyp; do not edit. """ # Map gyp target types to Android module classes. MODULE_CLASSES = { 'static_library': 'STATIC_LIBRARIES', 'shared_library': 'SHARED_LIBRARIES', 'executable': 'EXECUTABLES', } def IsCPPExtension(ext): return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx' def Sourceify(path): """Convert a path to its source directory form. The Android backend does not support options.generator_output, so this function is a noop.""" return path # Map from qualified target to path to output. # For Android, the target of these maps is a tuple ('static', 'modulename'), # ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string, # since we link by module. target_outputs = {} # Map from qualified target to any linkable output. A subset # of target_outputs. E.g. when mybinary depends on liba, we want to # include liba in the linker line; when otherbinary depends on # mybinary, we just want to build mybinary first. target_link_deps = {} class AndroidMkWriter(object): """AndroidMkWriter packages up the writing of one target-specific Android.mk. Its only real entry point is Write(), and is mostly used for namespacing. """ def __init__(self, android_top_dir): self.android_top_dir = android_top_dir def Write(self, qualified_target, relative_target, base_path, output_filename, spec, configs, part_of_all, write_alias_target, sdk_version): """The main entry point: writes a .mk file for a single target. 
Arguments: qualified_target: target we're generating relative_target: qualified target name relative to the root base_path: path relative to source root we're building in, used to resolve target-relative paths output_filename: output .mk file name to write spec, configs: gyp info part_of_all: flag indicating this target is part of 'all' write_alias_target: flag indicating whether to create short aliases for this target sdk_version: what to emit for LOCAL_SDK_VERSION in output """ gyp.common.EnsureDirExists(output_filename) self.fp = open(output_filename, 'w') self.fp.write(header) self.qualified_target = qualified_target self.relative_target = relative_target self.path = base_path self.target = spec['target_name'] self.type = spec['type'] self.toolset = spec['toolset'] deps, link_deps = self.ComputeDeps(spec) # Some of the generation below can add extra output, sources, or # link dependencies. All of the out params of the functions that # follow use names like extra_foo. extra_outputs = [] extra_sources = [] self.android_class = MODULE_CLASSES.get(self.type, 'GYP') self.android_module = self.ComputeAndroidModule(spec) (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec) self.output = self.output_binary = self.ComputeOutput(spec) # Standard header. self.WriteLn('include $(CLEAR_VARS)\n') # Module class and name. self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class) self.WriteLn('LOCAL_MODULE := ' + self.android_module) # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE. # The library module classes fail if the stem is set. ComputeOutputParts # makes sure that stem == modulename in these cases. 
if self.android_stem != self.android_module: self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem) self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix) if self.toolset == 'host': self.WriteLn('LOCAL_IS_HOST_MODULE := true') self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)') else: self.WriteLn('LOCAL_MODULE_TARGET_ARCH := ' '$(TARGET_$(GYP_VAR_PREFIX)ARCH)') self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version) # Grab output directories; needed for Actions and Rules. if self.toolset == 'host': self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))') else: self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))') self.WriteLn('gyp_shared_intermediate_dir := ' '$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))') self.WriteLn() # List files this target depends on so that actions/rules/copies/sources # can depend on the list. # TODO: doesn't pull in things through transitive link deps; needed? target_dependencies = [x[1] for x in deps if x[0] == 'path'] self.WriteLn('# Make sure our deps are built first.') self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES', local_pathify=True) # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: self.WriteActions(spec['actions'], extra_sources, extra_outputs) # Rules must be early like actions. if 'rules' in spec: self.WriteRules(spec['rules'], extra_sources, extra_outputs) if 'copies' in spec: self.WriteCopies(spec['copies'], extra_outputs) # GYP generated outputs. self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True) # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend # on both our dependency targets and our generated files. self.WriteLn('# Make sure our deps and generated files are built first.') self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) ' '$(GYP_GENERATED_OUTPUTS)') self.WriteLn() # Sources. 
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)

    self.WriteTarget(spec, configs, deps, link_deps,
                     part_of_all, write_alias_target)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)

    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)

    self.fp.close()
    return self.android_module

  def WriteActions(self, actions, extra_sources, extra_outputs):
    """Write Makefile code for any 'actions' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    """
    for action in actions:
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        # Outputs should land in a make-variable-rooted location (e.g.
        # $(gyp_intermediate_dir)); warn if they point into the source tree.
        if not out.startswith('$'):
          print ('WARNING: Action for target "%s" writes output to local path '
                 '"%s".' % (self.target, out))
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs

      # Prepare the actual command.
      command = gyp.common.EncodePOSIXShellList(action['action'])
      if 'message' in action:
        quiet_cmd = 'Gyp action: %s ($@)' % action['message']
      else:
        quiet_cmd = 'Gyp action: %s ($@)' % name
      # Create any needed output directories before running the command.
      if len(dirs) > 0:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir.  This replaces the gyp_*
      # variables for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the gyp_* rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(abspath $(gyp_intermediate_dir))' % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

      # Android's envsetup.sh adds a number of directories to the path including
      # the built host binary directory. This causes actions/rules invoked by
      # gyp to sometimes use these instead of system versions, e.g. bison.
      # The built host binaries may not be suitable, and can cause errors.
      # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
      # set by envsetup.
      self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
                   % main_output)

      # Don't allow spaces in input/output filenames, but make an exception for
      # filenames which start with '$(' since it's okay for there to be spaces
      # inside of make function/macro invocations.
      for input in inputs:
        if not input.startswith('$(') and ' ' in input:
          raise gyp.common.GypError(
              'Action input filename "%s" in target %s contains a space' %
              (input, self.target))
      for output in outputs:
        if not output.startswith('$(') and ' ' in output:
          raise gyp.common.GypError(
              'Action output filename "%s" in target %s contains a space' %
              (output, self.target))

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, ' '.join(map(self.LocalPathify, inputs))))
      self.WriteLn('\t@echo "%s"' % quiet_cmd)
      self.WriteLn('\t$(hide)%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

      extra_outputs += outputs
      self.WriteLn()

    self.WriteLn()

  def WriteRules(self, rules, extra_sources, extra_outputs):
    """Write Makefile code for any 'rules' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    """
    if len(rules) == 0:
      return

    for rule in rules:
      if len(rule.get('rule_sources', [])) == 0:
        continue
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      rule['rule_name']))
      self.WriteLn('\n### Generated for rule "%s":' % name)
      self.WriteLn('# "%s":' % rule)

      inputs = rule.get('inputs')
      for rule_source in rule.get('rule_sources', []):
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        # Expand %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders per source.
        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        dirs = set()
        for out in outputs:
          if not out.startswith('$'):
            print ('WARNING: Rule for target %s writes output to local path %s'
                   % (self.target, out))
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        extra_outputs += outputs
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.extend(outputs)

        components = []
        for component in rule['action']:
          component = self.ExpandInputRoot(component, rule_source_root,
                                           rule_source_dirname)
          if '$(RULE_SOURCES)' in component:
            component = component.replace('$(RULE_SOURCES)',
                                          rule_source)
          components.append(component)

        command = gyp.common.EncodePOSIXShellList(components)
        cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
        command = cd_action + command
        if dirs:
          command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

        # We set up a rule to build the first output, and then set up
        # a rule for each additional output to depend on the first.
        outputs = map(self.LocalPathify, outputs)
        main_output = outputs[0]
        self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
        self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
        self.WriteLn('%s: gyp_intermediate_dir := '
                     '$(abspath $(gyp_intermediate_dir))' % main_output)
        self.WriteLn('%s: gyp_shared_intermediate_dir := '
                     '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

        # See explanation in WriteActions.
        self.WriteLn('%s: export PATH := '
                     '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)

        main_output_deps = self.LocalPathify(rule_source)
        if inputs:
          main_output_deps += ' '
          main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                     (main_output, main_output_deps))
        self.WriteLn('\t%s\n' % command)
        for output in outputs[1:]:
          # Make each output depend on the main output, with an empty command
          # to force make to notice that the mtime has changed.
          self.WriteLn('%s: %s ;' % (output, main_output))
        self.WriteLn()

    self.WriteLn()

  def WriteCopies(self, copies, extra_outputs):
    """Write Makefile code for any 'copies' from the gyp input.

    extra_outputs: a list that will be filled in with any outputs of this
                   action (used to make other pieces dependent on this action)
    """
    self.WriteLn('### Generated for copy rule.')

    variable = make.StringToMakefileVariable(self.relative_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # The Android build system does not allow generation of files into the
        # source tree. The destination should start with a variable, which will
        # typically be $(gyp_intermediate_dir) or
        # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
        # because some of the gyp tests depend on this.
        if not copy['destination'].startswith('$'):
          print ('WARNING: Copy rule for target %s writes output to '
                 'local path %s' % (self.target, copy['destination']))

        # LocalPathify() calls normpath, stripping trailing slashes.
        path = Sourceify(self.LocalPathify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                          filename)))

        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                     (output, path))
        self.WriteLn('\t@echo Copying: $@')
        self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
        self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
        self.WriteLn()
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable,
                              ' '.join(map(make.QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()

  def WriteSourceFlags(self, spec, configs):
    """Write out the flags and include paths used to compile source files for
    the current target.

    Args:
      spec, configs: input from gyp.
    """
    for configname, config in sorted(configs.iteritems()):
      extracted_includes = []

      self.WriteLn('\n# Flags passed to both C and C++ files.')
      cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
          config.get('cflags', []) + config.get('cflags_c', []))
      extracted_includes.extend(includes_from_cflags)
      self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)

      self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
                     prefix='-D', quoter=make.EscapeCppDefine)

      self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
      includes = list(config.get('include_dirs', []))
      includes.extend(extracted_includes)
      includes = map(Sourceify, map(self.LocalPathify, includes))
      includes = self.NormalizeIncludePaths(includes)
      self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)

      self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
      self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)

    self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
                 '$(MY_DEFS_$(GYP_CONFIGURATION))')
    # Undefine ANDROID for host modules
    # TODO: the source code should not use macro ANDROID to tell if it's host
    # or target module.
    if self.toolset == 'host':
      self.WriteLn('# Undefine ANDROID for host modules')
      self.WriteLn('LOCAL_CFLAGS += -UANDROID')
    self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
                 '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
    self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
    # Android uses separate flags for assembly file invocations, but gyp expects
    # the same CFLAGS to be applied:
    self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')

  def WriteSources(self, spec, configs, extra_sources):
    """Write Makefile code for any 'sources' from the gyp input.
    These are source files necessary to build the current target.
    We need to handle shared_intermediate directory source files as
    a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
    rules won't pick them up.

    Args:
      spec, configs: input from gyp.
      extra_sources: Sources generated from Actions or Rules.
    """
    sources = filter(make.Compilable, spec.get('sources', []))
    generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
    extra_sources = filter(make.Compilable, extra_sources)

    # Determine and output the C++ extension used by these sources.
    # We simply find the first C++ file and use that extension.
    all_sources = sources + extra_sources
    local_cpp_extension = '.cpp'
    for source in all_sources:
      (root, ext) = os.path.splitext(source)
      if IsCPPExtension(ext):
        local_cpp_extension = ext
        break
    if local_cpp_extension != '.cpp':
      self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

    # We need to move any non-generated sources that are coming from the
    # shared intermediate directory out of LOCAL_SRC_FILES and put them
    # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
    # that don't match our local_cpp_extension, since Android will only
    # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
    local_files = []
    for source in sources:
      (root, ext) = os.path.splitext(source)
      if '$(gyp_shared_intermediate_dir)' in source:
        extra_sources.append(source)
      elif '$(gyp_intermediate_dir)' in source:
        extra_sources.append(source)
      elif IsCPPExtension(ext) and ext != local_cpp_extension:
        extra_sources.append(source)
      else:
        local_files.append(os.path.normpath(os.path.join(self.path, source)))

    # For any generated source, if it is coming from the shared intermediate
    # directory then we add a Make rule to copy them to the local intermediate
    # directory first. This is because the Android LOCAL_GENERATED_SOURCES
    # must be in the local module intermediate directory for the compile rules
    # to work properly. If the file has the wrong C++ extension, then we add
    # a rule to copy that to intermediates and use the new version.
    final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
    # directory as header search path, for GCC searches headers in the
    # directory that contains the source file by default.
    origin_src_dirs = []
    for source in extra_sources:
      local_file = source
      if not '$(gyp_intermediate_dir)/' in local_file:
        basename = os.path.basename(local_file)
        local_file = '$(gyp_intermediate_dir)/' + basename
      (root, ext) = os.path.splitext(local_file)
      if IsCPPExtension(ext) and ext != local_cpp_extension:
        local_file = root + local_cpp_extension
      if local_file != source:
        # Emit a copy rule that moves the file into the local intermediates.
        self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
        self.WriteLn('\tmkdir -p $(@D); cp $< $@')
        origin_src_dirs.append(os.path.dirname(source))
      final_generated_sources.append(local_file)

    # We add back in all of the non-compilable stuff to make sure that the
    # make rules have dependencies on them.
    final_generated_sources.extend(generated_not_sources)
    self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

    origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
    origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
    self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

    self.WriteList(local_files, 'LOCAL_SRC_FILES')

    # Write out the flags used to compile the source; this must be done last
    # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
    self.WriteSourceFlags(spec, configs)

  def ComputeAndroidModule(self, spec):
    """Return the Android module name used for a gyp spec.

    We use the complete qualified target name to avoid collisions between
    duplicate targets in different directories. We also add a suffix to
    distinguish gyp-generated module names.
    """

    if int(spec.get('android_unmangled_name', 0)):
      assert self.type != 'shared_library' or self.target.startswith('lib')
      return self.target

    if self.type == 'shared_library':
      # For reasons of convention, the Android build system requires that all
      # shared library modules are named 'libfoo' when generating -l flags.
      prefix = 'lib_'
    else:
      prefix = ''

    if spec['toolset'] == 'host':
      suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
    else:
      suffix = '_gyp'

    if self.path:
      middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
    else:
      middle = make.StringToMakefileVariable(self.target)

    return ''.join([prefix, middle, suffix])

  def ComputeOutputParts(self, spec):
    """Return the 'output basename' of a gyp spec, split into filename + ext.

    Android libraries must be named the same thing as their module name,
    otherwise the linker can't find them, so product_name and so on must be
    ignored if we are building a library, and the "lib" prepending is
    not done for Android.
    """
    assert self.type != 'loadable_module' # TODO: not supported?

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    if self.type == 'static_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.a'
    elif self.type == 'shared_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.so'
    elif self.type == 'none':
      target_ext = '.stamp'
    elif self.type != 'executable':
      # NOTE(review): under Python 2 this prints the tuple repr; presumably
      # acceptable for a diagnostic message — confirm before reformatting.
      print ("ERROR: What output file should be generated?",
             "type", self.type, "target", target)

    if self.type != 'static_library' and self.type != 'shared_library':
      target_prefix = spec.get('product_prefix', target_prefix)
      target = spec.get('product_name', target)
      product_ext = spec.get('product_extension')
      if product_ext:
        target_ext = '.' + product_ext

    target_stem = target_prefix + target
    return (target_stem, target_ext)

  def ComputeOutputBasename(self, spec):
    """Return the 'output basename' of a gyp spec.
    E.g., the loadable module 'foobar' in directory 'baz' will produce
      'libfoobar.so'
    """
    return ''.join(self.ComputeOutputParts(spec))

  def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      '$(obj)/baz/libfoobar.so'
    """
    if self.type == 'executable':
      # We install host executables into shared_intermediate_dir so they can be
      # run by gyp rules that refer to PRODUCT_DIR.
      path = '$(gyp_shared_intermediate_dir)'
    elif self.type == 'shared_library':
      if self.toolset == 'host':
        path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
      else:
        path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
    else:
      # Other targets just get built into their intermediate dir.
      if self.toolset == 'host':
        path = ('$(call intermediates-dir-for,%s,%s,true,,'
                '$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
                                             self.android_module))
      else:
        path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
                % (self.android_class, self.android_module))

    assert spec.get('product_dir') is None # TODO: not supported?
    return os.path.join(path, self.ComputeOutputBasename(spec))

  def NormalizeIncludePaths(self, include_paths):
    """ Normalize include_paths.
    Convert absolute paths to relative to the Android top directory.

    Args:
      include_paths: A list of unprocessed include paths.
    Returns:
      A list of normalized include paths.
    """
    normalized = []
    for path in include_paths:
      if path[0] == '/':
        path = gyp.common.RelativePath(path, self.android_top_dir)
      normalized.append(path)
    return normalized

  def ExtractIncludesFromCFlags(self, cflags):
    """Extract includes "-I..." out from cflags

    Args:
      cflags: A list of compiler flags, which may be mixed with "-I.."
    Returns:
      A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
    """
    clean_cflags = []
    include_paths = []
    for flag in cflags:
      if flag.startswith('-I'):
        include_paths.append(flag[2:])
      else:
        clean_cflags.append(flag)

    return (clean_cflags, include_paths)

  def FilterLibraries(self, libraries):
    """Filter the 'libraries' key to separate things that shouldn't be ldflags.

    Library entries that look like filenames should be converted to android
    module names instead of being passed to the linker as flags.

    Args:
      libraries: the value of spec.get('libraries')
    Returns:
      A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
    """
    static_lib_modules = []
    dynamic_lib_modules = []
    ldflags = []
    for libs in libraries:
      # Libs can have multiple words.
      for lib in libs.split():
        # Filter the system libraries, which are added by default by the
        # Android build system.
        if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
            lib.endswith('libgcc.a')):
          continue
        match = re.search(r'([^/]+)\.a$', lib)
        if match:
          static_lib_modules.append(match.group(1))
          continue
        match = re.search(r'([^/]+)\.so$', lib)
        if match:
          dynamic_lib_modules.append(match.group(1))
          continue
        if lib.startswith('-l'):
          ldflags.append(lib)
    return (static_lib_modules, dynamic_lib_modules, ldflags)

  def ComputeDeps(self, spec):
    """Compute the dependencies of a gyp spec.

    Returns a tuple (deps, link_deps), where each is a list of
    filenames that will need to be put in front of make for either
    building (deps) or linking (link_deps).
    """
    deps = []
    link_deps = []
    if 'dependencies' in spec:
      deps.extend([target_outputs[dep] for dep in spec['dependencies']
                   if target_outputs[dep]])
      for dep in spec['dependencies']:
        if dep in target_link_deps:
          link_deps.append(target_link_deps[dep])
      deps.extend(link_deps)
    return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))

  def WriteTargetFlags(self, spec, configs, link_deps):
    """Write Makefile code to specify the link flags and library dependencies.

    spec, configs: input from gyp.
    link_deps: link dependency list; see ComputeDeps()
    """
    # Libraries (i.e. -lfoo)
    # These must be included even for static libraries as some of them provide
    # implicit include paths through the build system.
    libraries = gyp.common.uniquer(spec.get('libraries', []))
    static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)

    if self.type != 'static_library':
      for configname, config in sorted(configs.iteritems()):
        ldflags = list(config.get('ldflags', []))
        self.WriteLn('')
        self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
      self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
      self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
                   '$(LOCAL_GYP_LIBS)')

    # Link dependencies (i.e. other gyp targets this target depends on)
    # These need not be included for static libraries as within the gyp build
    # we do not use the implicit include path mechanism.
    if self.type != 'static_library':
      static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
      shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
    else:
      static_link_deps = []
      shared_link_deps = []

    # Only write the lists if they are non-empty.
    if static_libs or static_link_deps:
      self.WriteLn('')
      self.WriteList(static_libs + static_link_deps,
                     'LOCAL_STATIC_LIBRARIES')
      self.WriteLn('# Enable grouping to fix circular references')
      self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
    if dynamic_libs or shared_link_deps:
      self.WriteLn('')
      self.WriteList(dynamic_libs + shared_link_deps,
                     'LOCAL_SHARED_LIBRARIES')

  def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
                  write_alias_target):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    part_of_all: flag indicating this target is part of 'all'
    write_alias_target: flag indicating whether to create short aliases for
                        this target
    """
    self.WriteLn('### Rules for final target.')

    if self.type != 'none':
      self.WriteTargetFlags(spec, configs, link_deps)

    settings = spec.get('aosp_build_settings', {})
    if settings:
      self.WriteLn('### Set directly by aosp_build_settings.')
      for k, v in settings.iteritems():
        if isinstance(v, list):
          self.WriteList(v, k)
        else:
          self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
      self.WriteLn('')

    # Add to the set of targets which represent the gyp 'all' target. We use the
    # name 'gyp_all_modules' as the Android build system doesn't allow the use
    # of the Make target 'all' and because 'all_modules' is the equivalent of
    # the Make target 'all' on Android.
    if part_of_all and write_alias_target:
      self.WriteLn('# Add target alias to "gyp_all_modules" target.')
      self.WriteLn('.PHONY: gyp_all_modules')
      self.WriteLn('gyp_all_modules: %s' % self.android_module)
      self.WriteLn('')

    # Add an alias from the gyp target name to the Android module name. This
    # simplifies manual builds of the target, and is required by the test
    # framework.
    if self.target != self.android_module and write_alias_target:
      self.WriteLn('# Alias gyp target name.')
      self.WriteLn('.PHONY: %s' % self.target)
      self.WriteLn('%s: %s' % (self.target, self.android_module))
      self.WriteLn('')

    # Add the command to trigger build of the target type depending
    # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
    # NOTE: This has to come last!
    modifier = ''
    if self.toolset == 'host':
      modifier = 'HOST_'
    if self.type == 'static_library':
      self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
    elif self.type == 'shared_library':
      self.WriteLn('LOCAL_PRELINK_MODULE := false')
      self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
    elif self.type == 'executable':
      # Executables are for build and test purposes only, so they're installed
      # to a directory that doesn't get included in the system image.
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
      self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
    else:
      # 'none' (and any other) targets get a hand-rolled stamp rule via
      # base_rules.mk instead of one of the BUILD_* templates.
      self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      if self.toolset == 'target':
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
      else:
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')

      self.WriteLn()
      self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
      self.WriteLn()
      self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
      self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) touch $@')
      self.WriteLn()
      self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')

  def WriteList(self, value_list, variable=None, prefix='',
                quoter=make.QuoteIfNecessary, local_pathify=False):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo = blaha blahb
    but in a pretty-printed style.
    """
    values = ''
    if value_list:
      value_list = [quoter(prefix + l) for l in value_list]
      if local_pathify:
        value_list = [self.LocalPathify(l) for l in value_list]
      values = ' \\\n\t' + ' \\\n\t'.join(value_list)
    self.fp.write('%s :=%s\n\n' % (variable, values))

  def WriteLn(self, text=''):
    """Write a single line (plus newline) to the output makefile."""
    self.fp.write(text + '\n')

  def LocalPathify(self, path):
    """Convert a subdirectory-relative path into a normalized path which starts
    with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
    Absolute paths, or paths that contain variables, are just normalized."""
    if '$(' in path or os.path.isabs(path):
      # path is not a file in the project tree in this case, but calling
      # normpath is still important for trimming trailing slashes.
      return os.path.normpath(path)
    local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
    local_path = os.path.normpath(local_path)
    # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
    # - i.e. that the resulting path is still inside the project tree. The
    # path may legitimately have ended up containing just $(LOCAL_PATH), though,
    # so we don't look for a slash.
    assert local_path.startswith('$(LOCAL_PATH)'), (
           'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
    return local_path

  def ExpandInputRoot(self, template, expansion, dirname):
    """Expand %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in a rule
    template; templates without placeholders are returned unchanged."""
    if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
      return template
    path = template % {
        'INPUT_ROOT': expansion,
        'INPUT_DIRNAME': dirname,
        }
    return os.path.normpath(path)


def PerformBuild(data, configurations, params):
  # The android backend only supports the default configuration.
  options = params['options']
  makefile = os.path.abspath(os.path.join(options.toplevel_dir,
                                          'GypAndroid.mk'))
  env = dict(os.environ)
  env['ONE_SHOT_MAKEFILE'] = makefile
  arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'],
               'gyp_all_modules']
  print 'Building: %s' % arguments
  subprocess.check_call(arguments, env=env)


def GenerateOutput(target_list, target_dicts, data, params):
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  # NOTE(review): builddir_name, toolsets and srcdir (below) appear unused in
  # the visible body — confirm against the rest of the file before removing.
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  write_alias_targets = generator_flags.get('write_alias_targets', True)
  sdk_version = generator_flags.get('aosp_sdk_version', 19)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile.  Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO:  search for the first non-'Default' target.  This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'GypAndroid' + options.suffix + '.mk'
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')

  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set() for build_file in params['build_files']: for target in gyp.common.AllTargets(target_list, target_dicts, build_file): needed_targets.add(target) build_files = set() include_list = set() android_modules = {} for qualified_target in target_list: build_file, target, toolset = gyp.common.ParseQualifiedTarget( qualified_target) relative_build_file = gyp.common.RelativePath(build_file, options.toplevel_dir) build_files.add(relative_build_file) included_files = data[build_file]['included_files'] for included_file in included_files: # The included_files entries are relative to the dir of the build file # that included them, so we have to undo that and then make them relative # to the root dir. relative_include_file = gyp.common.RelativePath( gyp.common.UnrelativePath(included_file, build_file), options.toplevel_dir) abs_include_file = os.path.abspath(relative_include_file) # If the include file is from the ~/.gyp dir, we should use absolute path # so that relocating the src dir doesn't break the path. if (params['home_dot_gyp'] and abs_include_file.startswith(params['home_dot_gyp'])): build_files.add(abs_include_file) else: build_files.add(relative_include_file) base_path, output_file = CalculateMakefilePath(build_file, target + '.' + toolset + options.suffix + '.mk') spec = target_dicts[qualified_target] configs = spec['configurations'] part_of_all = qualified_target in needed_targets if limit_to_target_all and not part_of_all: continue relative_target = gyp.common.QualifiedTarget(relative_build_file, target, toolset) writer = AndroidMkWriter(android_top_dir) android_module = writer.Write(qualified_target, relative_target, base_path, output_file, spec, configs, part_of_all=part_of_all, write_alias_target=write_alias_targets, sdk_version=sdk_version) if android_module in android_modules: print ('ERROR: Android module names must be unique. 
The following ' 'targets both generate Android module name %s.\n %s\n %s' % (android_module, android_modules[android_module], qualified_target)) return android_modules[android_module] = qualified_target # Our root_makefile lives at the source root. Compute the relative path # from there to the output_file for including. mkfile_rel_path = gyp.common.RelativePath(output_file, os.path.dirname(makefile_path)) include_list.add(mkfile_rel_path) root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration) root_makefile.write('GYP_VAR_PREFIX ?=\n') root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n') root_makefile.write('GYP_HOST_MULTILIB ?=\n') # Write out the sorted list of includes. root_makefile.write('\n') for include_file in sorted(include_list): root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n') root_makefile.write('\n') if write_alias_targets: root_makefile.write(ALL_MODULES_FOOTER) root_makefile.close()
gpl-3.0
Mellthas/quodlibet
quodlibet/quodlibet/mmkeys/__init__.py
2
3760
# Copyright 2014 Christoph Reiter
#           2018 Ludovic Druette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

from quodlibet import config
from quodlibet.util import print_d

from ._base import MMKeysAction, MMKeysImportError


def iter_backends():
    """Yield every multimedia-key backend class importable on this system.

    Yields nothing when the "disable_mmkeys" setting is enabled.  The yield
    order matters: find_active_backend() takes the first active backend, so
    earlier entries have priority.
    """
    if config.getboolean("settings", "disable_mmkeys"):
        return

    # Each backend module raises MMKeysImportError at import time when its
    # platform dependencies are missing; that simply means "not available
    # here" and is not an error.
    try:
        from .gnome import GnomeBackend, GnomeBackendOldName, MateBackend
    except MMKeysImportError:
        pass
    else:
        yield GnomeBackend
        yield GnomeBackendOldName
        yield MateBackend

    try:
        from .keybinder import KeybinderBackend
    except MMKeysImportError:
        pass
    else:
        yield KeybinderBackend

    try:
        from .winhook import WinHookBackend
    except MMKeysImportError:
        pass
    else:
        yield WinHookBackend

    try:
        from .osx import OSXBackend
    except MMKeysImportError:
        pass
    else:
        yield OSXBackend


def find_active_backend():
    """Return the first backend class whose is_active() is true, or None."""
    print_d("Trying to find a mmkeys backend")
    for backend in iter_backends():
        if backend.is_active():
            print_d("Found %r" % backend.__name__)
            return backend


class MMKeysHandler(object):
    """Manages multiple keybinding backends and translates the generated
    events to actions on the player backend.
    """

    def __init__(self, app):
        # Backend instance; created lazily in start(), None until then.
        self._backend = None
        self._window = app.window
        self._player = app.player
        self._player_options = app.player_options
        self._app_name = app.name

    def start(self):
        """Pick a backend and begin listening for multimedia keys.

        A no-op when no backend is available on this system.
        """
        kind = find_active_backend()
        if not kind:
            return
        self._backend = kind(self._app_name, self._callback)
        # grab on start for cases when the window is hidden on start
        self._backend.grab()
        self._window.connect("notify::is-active", self._focus_event)

    def quit(self):
        """Cancel the backend and drop references to the app objects."""
        if self._backend:
            self._backend.cancel()
            self._backend = None
            self._window = None
            self._player = None

    def _focus_event(self, window, param):
        # Re-grab the keys whenever the main window becomes active again.
        if window.get_property(param.name) and self._backend:
            self._backend.grab()

    def _callback(self, action):
        """Translate an MMKeysAction into a call on the player backend."""
        print_d("Event %r from %r" % (action, type(self._backend).__name__))

        def seek_relative(seconds):
            # Clamp the new position to [0, song length) in milliseconds.
            current = player.get_position()
            current += seconds * 1000
            current = min(player.song("~#length") * 1000 - 1, current)
            current = max(0, current)
            player.seek(current)

        player = self._player
        player_options = self._player_options
        if action == MMKeysAction.PREV:
            player.previous(force=True)
        elif action == MMKeysAction.NEXT:
            player.next()
        elif action == MMKeysAction.STOP:
            player.stop()
        elif action == MMKeysAction.PLAY:
            player.play()
        elif action == MMKeysAction.PLAYPAUSE:
            player.playpause()
        elif action == MMKeysAction.PAUSE:
            player.paused = True
        elif action == MMKeysAction.FORWARD:
            # Seeking only makes sense while a song is loaded.
            if player.song:
                seek_relative(10)
        elif action == MMKeysAction.REWIND:
            if player.song:
                seek_relative(-10)
        elif action == MMKeysAction.REPEAT:
            player_options.repeat = not player_options.repeat
        elif action == MMKeysAction.SHUFFLE:
            player_options.shuffle = not player_options.shuffle
        else:
            assert 0, "unhandled event"
gpl-2.0
codrut3/tensorflow
tensorflow/python/framework/op_def_registry.py
196
1428
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global registry for OpDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.framework import op_def_pb2

# Process-wide mapping of op name -> op_def_pb2.OpDef.
_registered_ops = {}


def register_op_list(op_list):
  """Register all the ops in an op_def_pb2.OpList."""
  if not isinstance(op_list, op_def_pb2.OpList):
    raise TypeError("%s is %s, not an op_def_pb2.OpList" %
                    (op_list, type(op_list)))
  for op_def in op_list.op:
    if op_def.name not in _registered_ops:
      _registered_ops[op_def.name] = op_def
    else:
      # Re-registering an op is tolerated, but only with an identical
      # definition to the one already recorded.
      assert _registered_ops[op_def.name] == op_def


def get_registered_ops():
  """Returns a dictionary mapping names to OpDefs."""
  return _registered_ops
apache-2.0
danakj/chromium
third_party/closure_compiler/processor_test.py
56
3825
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Test resources processing, i.e. <if> and <include> tag handling."""

import unittest

from processor import FileCache, Processor, LineNumber


class ProcessorTest(unittest.TestCase):
  """Test <include> tag processing logic."""

  def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    self.maxDiff = None  # Show full diffs on multi-line mismatches.

  def setUp(self):
    # Fixtures are registered straight into the FileCache so the tests
    # never touch the real file system.
    FileCache._cache["/debug.js"] = """
// Copyright 2002 Older Chromium Author dudes.
function debug(msg) { if (window.DEBUG) alert(msg); }
""".strip()

    FileCache._cache["/global.js"] = """
// Copyright 2014 Old Chromium Author dudes.
<include src="/debug.js">
var global = 'type checking!';
""".strip()

    FileCache._cache["/checked.js"] = """
// Copyright 2028 Future Chromium Author dudes.
/**
 * @fileoverview Coolest app ever.
 * @author Douglas Crockford (douglas@crockford.com)
 */
<include src="/global.js">
debug(global);
// Here continues checked.js, a swell file.
""".strip()

    FileCache._cache["/double-debug.js"] = """
<include src="/debug.js">
<include src="/debug.js">
""".strip()

    self._processor = Processor("/checked.js")

  def testInline(self):
    """Verify <include> tags are replaced with the included file contents."""
    self.assertMultiLineEqual("""
// Copyright 2028 Future Chromium Author dudes.
/**
 * @fileoverview Coolest app ever.
 * @author Douglas Crockford (douglas@crockford.com)
 */
// Copyright 2014 Old Chromium Author dudes.
// Copyright 2002 Older Chromium Author dudes.
function debug(msg) { if (window.DEBUG) alert(msg); }
var global = 'type checking!';
debug(global);
// Here continues checked.js, a swell file.
""".strip(), self._processor.contents)

  def assertLineNumber(self, abs_line, expected_line):
    """Assert |abs_line| of the inlined output maps back to |expected_line|."""
    actual_line = self._processor.get_file_from_line(abs_line)
    self.assertEqual(expected_line.file, actual_line.file)
    self.assertEqual(expected_line.line_number, actual_line.line_number)

  def testGetFileFromLine(self):
    """Verify that inlined files retain their original line info."""
    self.assertLineNumber(1, LineNumber("/checked.js", 1))
    self.assertLineNumber(5, LineNumber("/checked.js", 5))
    self.assertLineNumber(6, LineNumber("/global.js", 1))
    self.assertLineNumber(7, LineNumber("/debug.js", 1))
    self.assertLineNumber(8, LineNumber("/debug.js", 2))
    self.assertLineNumber(9, LineNumber("/global.js", 3))
    self.assertLineNumber(10, LineNumber("/checked.js", 7))
    self.assertLineNumber(11, LineNumber("/checked.js", 8))

  def testIncludedFiles(self):
    """Verify that files are tracked correctly as they're inlined."""
    # assertEqual, not the assertEquals alias: the alias was deprecated
    # and is removed in Python 3.12.
    self.assertEqual(set(["/global.js", "/debug.js"]),
                     self._processor.included_files)

  def testDoubleIncludedSkipped(self):
    """Verify that doubly included files are skipped."""
    processor = Processor("/double-debug.js")
    self.assertEqual(set(["/debug.js"]), processor.included_files)
    self.assertEqual(FileCache.read("/debug.js") + "\n", processor.contents)


class IfStrippingTest(unittest.TestCase):
  """Test that the contents of XML <if> blocks are stripped."""

  def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    self.maxDiff = None

  def setUp(self):
    FileCache._cache["/century.js"] = """
function getCurrentCentury() {
<if expr="netscape_os">
  alert("Oh wow!");
  return "XX";
</if>
  return "XXI";
}
""".strip()

    # Named _processor for consistency with ProcessorTest above.
    self._processor = Processor("/century.js")

  def testIfStripping(self):
    """Verify the <if>/</if> lines vanish while their contents remain."""
    self.assertMultiLineEqual("""
function getCurrentCentury() {
  alert("Oh wow!");
  return "XX";
  return "XXI";
}
""".strip(), self._processor.contents)


if __name__ == '__main__':
  unittest.main()
bsd-3-clause
googleapis/googleapis-gen
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/enums/types/placeholder_type.py
1
1630
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package='google.ads.googleads.v7.enums', marshal='google.ads.googleads.v7', manifest={ 'PlaceholderTypeEnum', }, ) class PlaceholderTypeEnum(proto.Message): r"""Container for enum describing possible placeholder types for a feed mapping. """ class PlaceholderType(proto.Enum): r"""Possible placeholder types for a feed mapping.""" UNSPECIFIED = 0 UNKNOWN = 1 SITELINK = 2 CALL = 3 APP = 4 LOCATION = 5 AFFILIATE_LOCATION = 6 CALLOUT = 7 STRUCTURED_SNIPPET = 8 MESSAGE = 9 PRICE = 10 PROMOTION = 11 AD_CUSTOMIZER = 12 DYNAMIC_EDUCATION = 13 DYNAMIC_FLIGHT = 14 DYNAMIC_CUSTOM = 15 DYNAMIC_HOTEL = 16 DYNAMIC_REAL_ESTATE = 17 DYNAMIC_TRAVEL = 18 DYNAMIC_LOCAL = 19 DYNAMIC_JOB = 20 IMAGE = 21 __all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
amchoukir/YouCompleteMe
python/ycm/client/omni_completion_request.py
48
1204
#!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe.  If not, see <http://www.gnu.org/licenses/>.

from ycm.client.completion_request import CompletionRequest


class OmniCompletionRequest( CompletionRequest ):
  """A CompletionRequest served in-process by an omnifunc completer.

  No ycmd round-trip is involved: candidates are computed synchronously
  inside Start(), so the request is finished as soon as Start() returns.
  """

  def __init__( self, omni_completer, request_data ):
    super( OmniCompletionRequest, self ).__init__( request_data )
    self._completer = omni_completer

  def Start( self ):
    # Synchronous computation; the candidates are ready when this returns.
    self._candidates = self._completer.ComputeCandidates( self.request_data )

  def Done( self ):
    # Start() does all the work synchronously, so we are always done.
    return True

  def Response( self ):
    return self._candidates
gpl-3.0
GehenHe/Recognize-Face-on-Android
tensorflow/contrib/training/python/training/failure_tolerator.py
72
4450
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A retry helper for tolerating transient failures in distributed training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import contextlib
import time

from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging


class FailureTolerator(object):
  """Helper for tolerating certain exceptions.

  When encountering a handled exception inside tolerator.forgive(), it
  is suppressed (but logged). A subsequent call to tolerator.forgive()
  will sleep for a period of time before continuing, with exponential
  backoff on multiple exceptions. (The delay avoids retrying too
  quickly -- a subsequent attempt will often only succeed after a
  transient failure has resolved itself.)

  If more than `limit` exceptions have been encountered, the error will
  not be suppressed.

  Exceptions occurring more than `forgive_after_seconds` ago (excluding
  time spent waiting between retries) are forgiven and no longer count
  towards the limit.

  An example loop using FailureTolerator to retry until a successful
  `session.run(...)` would look like:
  ```
  failure_tolerator = FailureTolerator()
  while True:
    with failure_tolerator.forgive():
      session = make_session_somehow()
      while not should_stop():
        session.run(...)
      break  # session.run was successful
  ```

  By using FailureTolerator, failures are logged, there are delays between
  retries, and there's a ceiling on the maximum number of retries available.
  (In the case of persistent errors, the task won't just loop forever!)
  """

  def __init__(self, limit=5, init_delay=5.0, backoff_factor=2.0,
               forgive_after_seconds=6000, handled_exceptions=None):
    """Creates a FailureTolerator.

    The result will pause for `init_delay *
    (backoff_factor^(failure_count-1))` when re-entering `forgive()` after
    a failure.

    Args:
      limit: The maximum number of suppressed, unforgiven, failures.
      init_delay: How long to pause once the first failure is encountered.
        Defaults to five seconds.
      backoff_factor: Each subsequent failure grows the pause by this factor.
      forgive_after_seconds: Failures older than this are forgiven.
      handled_exceptions: The exceptions to forgive. Defaults to
        `(errors.AbortedError,)`.
    """
    self.limit = limit
    self.backoff = backoff_factor
    self.delay = init_delay
    self.forgive_after = forgive_after_seconds
    # Timestamps (on the delay-adjusted clock, see _adjusted_now) of the
    # unforgiven failures seen so far.
    self.exceptions = []
    # Total seconds spent sleeping between retries; subtracted from
    # time.time() so waiting doesn't age failures towards forgiveness.
    self.time_in_delay = 0.0
    if handled_exceptions is None:
      self.handled = (errors.AbortedError,)
    else:
      self.handled = tuple(handled_exceptions)

  def _adjusted_now(self):
    """Returns what the current time would be if no delays had occurred."""
    return time.time() - self.time_in_delay

  def _forgive_old(self):
    # Drop failure timestamps older than `forgive_after` on the adjusted
    # clock, so they no longer count towards `limit`.
    adjusted_now = self._adjusted_now()
    self.exceptions = [t for t in self.exceptions
                       if (adjusted_now - t) < self.forgive_after]

  def _handle_error(self, e):
    # Returns True when the caller must re-raise `e`: either it is not a
    # handled exception type, or the unforgiven-failure limit is reached.
    if not isinstance(e, self.handled):
      return True
    self._forgive_old()
    self.exceptions.append(self._adjusted_now())
    return len(self.exceptions) >= self.limit

  # pylint: disable=broad-except
  @contextlib.contextmanager
  def forgive(self):
    """Context manager that suppresses handled exceptions with backoff."""
    self._forgive_old()
    if self.exceptions:
      # Exponential backoff: first retry waits `delay`, each further
      # unforgiven failure multiplies the wait by `backoff`.
      delay = self.delay * (self.backoff ** (len(self.exceptions) - 1))
      logging.warning('Sleeping for %f seconds before resuming' % delay)
      time.sleep(delay)
      self.time_in_delay += delay

    try:
      yield
    except Exception as e:
      if self._handle_error(e):
        raise
      else:
        logging.warning('Forgiving an exception', exc_info=True)
apache-2.0
phammin1/QaManagement
QaManagement/env/Lib/site-packages/django/views/decorators/debug.py
712
2627
import functools

from django.http import HttpRequest


def sensitive_variables(*variables):
    """
    Mark local variables of the decorated function as sensitive, so that
    they can later receive special treatment -- e.g. being hidden when an
    unhandled exception is logged.

    Two forms are accepted:

    * with specified variable names:

        @sensitive_variables('user', 'password', 'credit_card')
        def my_function(user):
            password = user.pass_word
            credit_card = user.credit_card_number
            ...

    * without any names, meaning every local variable is sensitive:

        @sensitive_variables()
        def my_function()
            ...
    """
    def decorator(func):
        @functools.wraps(func)
        def sensitive_variables_wrapper(*func_args, **func_kwargs):
            # An empty `variables` tuple is falsy, so this collapses the
            # original if/else into one expression: explicit names, or the
            # '__ALL__' marker meaning "treat everything as sensitive".
            sensitive_variables_wrapper.sensitive_variables = (
                variables or '__ALL__')
            return func(*func_args, **func_kwargs)
        return sensitive_variables_wrapper
    return decorator


def sensitive_post_parameters(*parameters):
    """
    Mark POST parameters used by the decorated view as sensitive, so that
    they can later receive special treatment -- e.g. being hidden when an
    unhandled exception is logged.

    Two forms are accepted:

    * with specified parameter names:

        @sensitive_post_parameters('password', 'credit_card')
        def my_view(request):
            pw = request.POST['password']
            cc = request.POST['credit_card']
            ...

    * without any names, meaning every POST parameter is sensitive:

        @sensitive_post_parameters()
        def my_view(request)
            ...
    """
    def decorator(view):
        @functools.wraps(view)
        def sensitive_post_parameters_wrapper(request, *args, **kwargs):
            assert isinstance(request, HttpRequest), (
                "sensitive_post_parameters didn't receive an HttpRequest. "
                "If you are decorating a classmethod, be sure to use "
                "@method_decorator."
            )
            # Record the sensitive names on the request itself; as above,
            # no names means "all parameters are sensitive".
            request.sensitive_post_parameters = parameters or '__ALL__'
            return view(request, *args, **kwargs)
        return sensitive_post_parameters_wrapper
    return decorator
mit
DJMuggs/ansible-modules-extras
network/f5/bigip_facts.py
8
61784
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Matt Hite <mhite@hotmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: bigip_facts short_description: "Collect facts from F5 BIG-IP devices" description: - "Collect facts from F5 BIG-IP devices via iControl SOAP API" version_added: "1.6" author: '"Matt Hite (@mhite)" <mhite@hotmail.com>' notes: - "Requires BIG-IP software version >= 11.4" - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - "Best run as a local_action in your playbook" - "Tested with manager and above account privilege level" requirements: - bigsuds options: server: description: - BIG-IP host required: true default: null choices: [] aliases: [] user: description: - BIG-IP username required: true default: null choices: [] aliases: [] password: description: - BIG-IP password required: true default: null choices: [] aliases: [] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] version_added: 2.0 session: description: - BIG-IP session support; may be useful to avoid concurrency issues in certain circumstances. 
required: false default: true choices: [] aliases: [] include: description: - Fact category or list of categories to collect required: true default: null choices: ['address_class', 'certificate', 'client_ssl_profile', 'device', 'device_group', 'interface', 'key', 'node', 'pool', 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan'] aliases: [] filter: description: - Shell-style glob matching string used to filter fact keys. Not applicable for software and system_info fact categories. required: false default: null choices: [] aliases: [] ''' EXAMPLES = ''' ## playbook task examples: --- # file bigip-test.yml # ... - hosts: bigip-test tasks: - name: Collect BIG-IP facts local_action: > bigip_facts server=lb.mydomain.com user=admin password=mysecret include=interface,vlan ''' try: import bigsuds from suds import MethodNotFound, WebFault except ImportError: bigsuds_found = False else: bigsuds_found = True import fnmatch import traceback import re # =========================================== # bigip_facts module specific support methods. # class F5(object): """F5 iControl class. F5 BIG-IP iControl API class. Attributes: api: iControl API instance. 
""" def __init__(self, host, user, password, session=False): self.api = bigsuds.BIGIP(hostname=host, username=user, password=password) if session: self.start_session() def start_session(self): self.api = self.api.with_session_id() def get_api(self): return self.api def set_recursive_query_state(self, state): self.api.System.Session.set_recursive_query_state(state) def get_recursive_query_state(self): return self.api.System.Session.get_recursive_query_state() def enable_recursive_query_state(self): self.set_recursive_query_state('STATE_ENABLED') def disable_recursive_query_state(self): self.set_recursive_query_state('STATE_DISABLED') def set_active_folder(self, folder): self.api.System.Session.set_active_folder(folder=folder) def get_active_folder(self): return self.api.System.Session.get_active_folder() class Interfaces(object): """Interfaces class. F5 BIG-IP interfaces class. Attributes: api: iControl API instance. interfaces: A list of BIG-IP interface names. """ def __init__(self, api, regex=None): self.api = api self.interfaces = api.Networking.Interfaces.get_list() if regex: re_filter = re.compile(regex) self.interfaces = filter(re_filter.search, self.interfaces) def get_list(self): return self.interfaces def get_active_media(self): return self.api.Networking.Interfaces.get_active_media(self.interfaces) def get_actual_flow_control(self): return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces) def get_bundle_state(self): return self.api.Networking.Interfaces.get_bundle_state(self.interfaces) def get_description(self): return self.api.Networking.Interfaces.get_description(self.interfaces) def get_dual_media_state(self): return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces) def get_enabled_state(self): return self.api.Networking.Interfaces.get_enabled_state(self.interfaces) def get_if_index(self): return self.api.Networking.Interfaces.get_if_index(self.interfaces) def get_learning_mode(self): return 
self.api.Networking.Interfaces.get_learning_mode(self.interfaces) def get_lldp_admin_status(self): return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces) def get_lldp_tlvmap(self): return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces) def get_mac_address(self): return self.api.Networking.Interfaces.get_mac_address(self.interfaces) def get_media(self): return self.api.Networking.Interfaces.get_media(self.interfaces) def get_media_option(self): return self.api.Networking.Interfaces.get_media_option(self.interfaces) def get_media_option_sfp(self): return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces) def get_media_sfp(self): return self.api.Networking.Interfaces.get_media_sfp(self.interfaces) def get_media_speed(self): return self.api.Networking.Interfaces.get_media_speed(self.interfaces) def get_media_status(self): return self.api.Networking.Interfaces.get_media_status(self.interfaces) def get_mtu(self): return self.api.Networking.Interfaces.get_mtu(self.interfaces) def get_phy_master_slave_mode(self): return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces) def get_prefer_sfp_state(self): return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces) def get_flow_control(self): return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces) def get_sflow_poll_interval(self): return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces) def get_sflow_poll_interval_global(self): return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces) def get_sfp_media_state(self): return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces) def get_stp_active_edge_port_state(self): return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces) def get_stp_enabled_state(self): return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces) def get_stp_link_type(self): return 
self.api.Networking.Interfaces.get_stp_link_type(self.interfaces) def get_stp_protocol_detection_reset_state(self): return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces) class SelfIPs(object): """Self IPs class. F5 BIG-IP Self IPs class. Attributes: api: iControl API instance. self_ips: List of self IPs. """ def __init__(self, api, regex=None): self.api = api self.self_ips = api.Networking.SelfIPV2.get_list() if regex: re_filter = re.compile(regex) self.self_ips = filter(re_filter.search, self.self_ips) def get_list(self): return self.self_ips def get_address(self): return self.api.Networking.SelfIPV2.get_address(self.self_ips) def get_allow_access_list(self): return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips) def get_description(self): return self.api.Networking.SelfIPV2.get_description(self.self_ips) def get_enforced_firewall_policy(self): return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips) def get_floating_state(self): return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips) def get_fw_rule(self): return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips) def get_netmask(self): return self.api.Networking.SelfIPV2.get_netmask(self.self_ips) def get_staged_firewall_policy(self): return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips) def get_traffic_group(self): return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips) def get_vlan(self): return self.api.Networking.SelfIPV2.get_vlan(self.self_ips) def get_is_traffic_group_inherited(self): return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips) class Trunks(object): """Trunks class. F5 BIG-IP trunks class. Attributes: api: iControl API instance. trunks: List of trunks. 
""" def __init__(self, api, regex=None): self.api = api self.trunks = api.Networking.Trunk.get_list() if regex: re_filter = re.compile(regex) self.trunks = filter(re_filter.search, self.trunks) def get_list(self): return self.trunks def get_active_lacp_state(self): return self.api.Networking.Trunk.get_active_lacp_state(self.trunks) def get_configured_member_count(self): return self.api.Networking.Trunk.get_configured_member_count(self.trunks) def get_description(self): return self.api.Networking.Trunk.get_description(self.trunks) def get_distribution_hash_option(self): return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks) def get_interface(self): return self.api.Networking.Trunk.get_interface(self.trunks) def get_lacp_enabled_state(self): return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks) def get_lacp_timeout_option(self): return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks) def get_link_selection_policy(self): return self.api.Networking.Trunk.get_link_selection_policy(self.trunks) def get_media_speed(self): return self.api.Networking.Trunk.get_media_speed(self.trunks) def get_media_status(self): return self.api.Networking.Trunk.get_media_status(self.trunks) def get_operational_member_count(self): return self.api.Networking.Trunk.get_operational_member_count(self.trunks) def get_stp_enabled_state(self): return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks) def get_stp_protocol_detection_reset_state(self): return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks) class Vlans(object): """Vlans class. F5 BIG-IP Vlans class. Attributes: api: iControl API instance. vlans: List of VLANs. 
""" def __init__(self, api, regex=None): self.api = api self.vlans = api.Networking.VLAN.get_list() if regex: re_filter = re.compile(regex) self.vlans = filter(re_filter.search, self.vlans) def get_list(self): return self.vlans def get_auto_lasthop(self): return self.api.Networking.VLAN.get_auto_lasthop(self.vlans) def get_cmp_hash_algorithm(self): return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans) def get_description(self): return self.api.Networking.VLAN.get_description(self.vlans) def get_dynamic_forwarding(self): return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans) def get_failsafe_action(self): return self.api.Networking.VLAN.get_failsafe_action(self.vlans) def get_failsafe_state(self): return self.api.Networking.VLAN.get_failsafe_state(self.vlans) def get_failsafe_timeout(self): return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans) def get_if_index(self): return self.api.Networking.VLAN.get_if_index(self.vlans) def get_learning_mode(self): return self.api.Networking.VLAN.get_learning_mode(self.vlans) def get_mac_masquerade_address(self): return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans) def get_member(self): return self.api.Networking.VLAN.get_member(self.vlans) def get_mtu(self): return self.api.Networking.VLAN.get_mtu(self.vlans) def get_sflow_poll_interval(self): return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans) def get_sflow_poll_interval_global(self): return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans) def get_sflow_sampling_rate(self): return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans) def get_sflow_sampling_rate_global(self): return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans) def get_source_check_state(self): return self.api.Networking.VLAN.get_source_check_state(self.vlans) def get_true_mac_address(self): return self.api.Networking.VLAN.get_true_mac_address(self.vlans) def get_vlan_id(self): return 
self.api.Networking.VLAN.get_vlan_id(self.vlans) class Software(object): """Software class. F5 BIG-IP software class. Attributes: api: iControl API instance. """ def __init__(self, api): self.api = api def get_all_software_status(self): return self.api.System.SoftwareManagement.get_all_software_status() class VirtualServers(object): """Virtual servers class. F5 BIG-IP virtual servers class. Attributes: api: iControl API instance. virtual_servers: List of virtual servers. """ def __init__(self, api, regex=None): self.api = api self.virtual_servers = api.LocalLB.VirtualServer.get_list() if regex: re_filter = re.compile(regex) self.virtual_servers = filter(re_filter.search, self.virtual_servers) def get_list(self): return self.virtual_servers def get_actual_hardware_acceleration(self): return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers) def get_authentication_profile(self): return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers) def get_auto_lasthop(self): return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers) def get_bw_controller_policy(self): return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers) def get_clone_pool(self): return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers) def get_cmp_enable_mode(self): return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers) def get_connection_limit(self): return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers) def get_connection_mirror_state(self): return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers) def get_default_pool_name(self): return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers) def get_description(self): return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers) def get_destination(self): return 
self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers) def get_enabled_state(self): return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers) def get_enforced_firewall_policy(self): return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers) def get_fallback_persistence_profile(self): return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers) def get_fw_rule(self): return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers) def get_gtm_score(self): return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers) def get_last_hop_pool(self): return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers) def get_nat64_state(self): return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers) def get_object_status(self): return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers) def get_persistence_profile(self): return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers) def get_profile(self): return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers) def get_protocol(self): return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers) def get_rate_class(self): return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers) def get_rate_limit(self): return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers) def get_rate_limit_destination_mask(self): return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers) def get_rate_limit_mode(self): return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers) def get_rate_limit_source_mask(self): return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers) def get_related_rule(self): return self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers) def get_rule(self): return 
self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers) def get_security_log_profile(self): return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers) def get_snat_pool(self): return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers) def get_snat_type(self): return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers) def get_source_address(self): return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers) def get_source_address_translation_lsn_pool(self): return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers) def get_source_address_translation_snat_pool(self): return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers) def get_source_address_translation_type(self): return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers) def get_source_port_behavior(self): return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers) def get_staged_firewall_policy(self): return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers) def get_translate_address_state(self): return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers) def get_translate_port_state(self): return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers) def get_type(self): return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers) def get_vlan(self): return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers) def get_wildmask(self): return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers) class Pools(object): """Pools class. F5 BIG-IP pools class. Attributes: api: iControl API instance. pool_names: List of pool names. 
""" def __init__(self, api, regex=None): self.api = api self.pool_names = api.LocalLB.Pool.get_list() if regex: re_filter = re.compile(regex) self.pool_names = filter(re_filter.search, self.pool_names) def get_list(self): return self.pool_names def get_action_on_service_down(self): return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names) def get_active_member_count(self): return self.api.LocalLB.Pool.get_active_member_count(self.pool_names) def get_aggregate_dynamic_ratio(self): return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names) def get_allow_nat_state(self): return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names) def get_allow_snat_state(self): return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names) def get_client_ip_tos(self): return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names) def get_client_link_qos(self): return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names) def get_description(self): return self.api.LocalLB.Pool.get_description(self.pool_names) def get_gateway_failsafe_device(self): return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names) def get_ignore_persisted_weight_state(self): return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names) def get_lb_method(self): return self.api.LocalLB.Pool.get_lb_method(self.pool_names) def get_member(self): return self.api.LocalLB.Pool.get_member_v2(self.pool_names) def get_minimum_active_member(self): return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names) def get_minimum_up_member(self): return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names) def get_minimum_up_member_action(self): return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names) def get_minimum_up_member_enabled_state(self): return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names) def get_monitor_association(self): return 
self.api.LocalLB.Pool.get_monitor_association(self.pool_names) def get_monitor_instance(self): return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names) def get_object_status(self): return self.api.LocalLB.Pool.get_object_status(self.pool_names) def get_profile(self): return self.api.LocalLB.Pool.get_profile(self.pool_names) def get_queue_depth_limit(self): return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names) def get_queue_on_connection_limit_state(self): return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names) def get_queue_time_limit(self): return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names) def get_reselect_tries(self): return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names) def get_server_ip_tos(self): return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names) def get_server_link_qos(self): return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names) def get_simple_timeout(self): return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names) def get_slow_ramp_time(self): return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names) class Devices(object): """Devices class. F5 BIG-IP devices class. Attributes: api: iControl API instance. devices: List of devices. 
""" def __init__(self, api, regex=None): self.api = api self.devices = api.Management.Device.get_list() if regex: re_filter = re.compile(regex) self.devices = filter(re_filter.search, self.devices) def get_list(self): return self.devices def get_active_modules(self): return self.api.Management.Device.get_active_modules(self.devices) def get_base_mac_address(self): return self.api.Management.Device.get_base_mac_address(self.devices) def get_blade_addresses(self): return self.api.Management.Device.get_blade_addresses(self.devices) def get_build(self): return self.api.Management.Device.get_build(self.devices) def get_chassis_id(self): return self.api.Management.Device.get_chassis_id(self.devices) def get_chassis_type(self): return self.api.Management.Device.get_chassis_type(self.devices) def get_comment(self): return self.api.Management.Device.get_comment(self.devices) def get_configsync_address(self): return self.api.Management.Device.get_configsync_address(self.devices) def get_contact(self): return self.api.Management.Device.get_contact(self.devices) def get_description(self): return self.api.Management.Device.get_description(self.devices) def get_edition(self): return self.api.Management.Device.get_edition(self.devices) def get_failover_state(self): return self.api.Management.Device.get_failover_state(self.devices) def get_local_device(self): return self.api.Management.Device.get_local_device() def get_hostname(self): return self.api.Management.Device.get_hostname(self.devices) def get_inactive_modules(self): return self.api.Management.Device.get_inactive_modules(self.devices) def get_location(self): return self.api.Management.Device.get_location(self.devices) def get_management_address(self): return self.api.Management.Device.get_management_address(self.devices) def get_marketing_name(self): return self.api.Management.Device.get_marketing_name(self.devices) def get_multicast_address(self): return self.api.Management.Device.get_multicast_address(self.devices) def 
get_optional_modules(self): return self.api.Management.Device.get_optional_modules(self.devices) def get_platform_id(self): return self.api.Management.Device.get_platform_id(self.devices) def get_primary_mirror_address(self): return self.api.Management.Device.get_primary_mirror_address(self.devices) def get_product(self): return self.api.Management.Device.get_product(self.devices) def get_secondary_mirror_address(self): return self.api.Management.Device.get_secondary_mirror_address(self.devices) def get_software_version(self): return self.api.Management.Device.get_software_version(self.devices) def get_timelimited_modules(self): return self.api.Management.Device.get_timelimited_modules(self.devices) def get_timezone(self): return self.api.Management.Device.get_timezone(self.devices) def get_unicast_addresses(self): return self.api.Management.Device.get_unicast_addresses(self.devices) class DeviceGroups(object): """Device groups class. F5 BIG-IP device groups class. Attributes: api: iControl API instance. device_groups: List of device groups. 
""" def __init__(self, api, regex=None): self.api = api self.device_groups = api.Management.DeviceGroup.get_list() if regex: re_filter = re.compile(regex) self.device_groups = filter(re_filter.search, self.device_groups) def get_list(self): return self.device_groups def get_all_preferred_active(self): return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups) def get_autosync_enabled_state(self): return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups) def get_description(self): return self.api.Management.DeviceGroup.get_description(self.device_groups) def get_device(self): return self.api.Management.DeviceGroup.get_device(self.device_groups) def get_full_load_on_sync_state(self): return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups) def get_incremental_config_sync_size_maximum(self): return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups) def get_network_failover_enabled_state(self): return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups) def get_sync_status(self): return self.api.Management.DeviceGroup.get_sync_status(self.device_groups) def get_type(self): return self.api.Management.DeviceGroup.get_type(self.device_groups) class TrafficGroups(object): """Traffic groups class. F5 BIG-IP traffic groups class. Attributes: api: iControl API instance. traffic_groups: List of traffic groups. 
""" def __init__(self, api, regex=None): self.api = api self.traffic_groups = api.Management.TrafficGroup.get_list() if regex: re_filter = re.compile(regex) self.traffic_groups = filter(re_filter.search, self.traffic_groups) def get_list(self): return self.traffic_groups def get_auto_failback_enabled_state(self): return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups) def get_auto_failback_time(self): return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups) def get_default_device(self): return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups) def get_description(self): return self.api.Management.TrafficGroup.get_description(self.traffic_groups) def get_ha_load_factor(self): return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups) def get_ha_order(self): return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups) def get_is_floating(self): return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups) def get_mac_masquerade_address(self): return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups) def get_unit_id(self): return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups) class Rules(object): """Rules class. F5 BIG-IP iRules class. Attributes: api: iControl API instance. rules: List of iRules. 
""" def __init__(self, api, regex=None): self.api = api self.rules = api.LocalLB.Rule.get_list() if regex: re_filter = re.compile(regex) self.traffic_groups = filter(re_filter.search, self.rules) def get_list(self): return self.rules def get_description(self): return self.api.LocalLB.Rule.get_description(rule_names=self.rules) def get_ignore_vertification(self): return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules) def get_verification_status(self): return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules) def get_definition(self): return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)] class Nodes(object): """Nodes class. F5 BIG-IP nodes class. Attributes: api: iControl API instance. nodes: List of nodes. """ def __init__(self, api, regex=None): self.api = api self.nodes = api.LocalLB.NodeAddressV2.get_list() if regex: re_filter = re.compile(regex) self.nodes = filter(re_filter.search, self.nodes) def get_list(self): return self.nodes def get_address(self): return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes) def get_connection_limit(self): return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes) def get_description(self): return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes) def get_dynamic_ratio(self): return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes) def get_monitor_instance(self): return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes) def get_monitor_rule(self): return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes) def get_monitor_status(self): return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes) def get_object_status(self): return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes) def get_rate_limit(self): return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes) def get_ratio(self): return 
self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes) def get_session_status(self): return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes) class VirtualAddresses(object): """Virtual addresses class. F5 BIG-IP virtual addresses class. Attributes: api: iControl API instance. virtual_addresses: List of virtual addresses. """ def __init__(self, api, regex=None): self.api = api self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list() if regex: re_filter = re.compile(regex) self.virtual_addresses = filter(re_filter.search, self.virtual_addresses) def get_list(self): return self.virtual_addresses def get_address(self): return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses) def get_arp_state(self): return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses) def get_auto_delete_state(self): return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses) def get_connection_limit(self): return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses) def get_description(self): return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses) def get_enabled_state(self): return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses) def get_icmp_echo_state(self): return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses) def get_is_floating_state(self): return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses) def get_netmask(self): return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses) def get_object_status(self): return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses) def get_route_advertisement_state(self): return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses) def get_traffic_group(self): return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses) class 
AddressClasses(object): """Address group/class class. F5 BIG-IP address group/class class. Attributes: api: iControl API instance. address_classes: List of address classes. """ def __init__(self, api, regex=None): self.api = api self.address_classes = api.LocalLB.Class.get_address_class_list() if regex: re_filter = re.compile(regex) self.address_classes = filter(re_filter.search, self.address_classes) def get_list(self): return self.address_classes def get_address_class(self): key = self.api.LocalLB.Class.get_address_class(self.address_classes) value = self.api.LocalLB.Class.get_address_class_member_data_value(key) result = map(zip, [x['members'] for x in key], value) return result def get_description(self): return self.api.LocalLB.Class.get_description(self.address_classes) class Certificates(object): """Certificates class. F5 BIG-IP certificates class. Attributes: api: iControl API instance. certificates: List of certificate identifiers. certificate_list: List of certificate information structures. """ def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): self.api = api self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode) self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list] if regex: re_filter = re.compile(regex) self.certificates = filter(re_filter.search, self.certificates) self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates] def get_list(self): return self.certificates def get_certificate_list(self): return self.certificate_list class Keys(object): """Keys class. F5 BIG-IP keys class. Attributes: api: iControl API instance. keys: List of key identifiers. key_list: List of key information structures. 
""" def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): self.api = api self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode) self.keys = [x['key_info']['id'] for x in self.key_list] if regex: re_filter = re.compile(regex) self.keys = filter(re_filter.search, self.keys) self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys] def get_list(self): return self.keys def get_key_list(self): return self.key_list class ProfileClientSSL(object): """Client SSL profiles class. F5 BIG-IP client SSL profiles class. Attributes: api: iControl API instance. profiles: List of client SSL profiles. """ def __init__(self, api, regex=None): self.api = api self.profiles = api.LocalLB.ProfileClientSSL.get_list() if regex: re_filter = re.compile(regex) self.profiles = filter(re_filter.search, self.profiles) def get_list(self): return self.profiles def get_alert_timeout(self): return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles) def get_allow_nonssl_state(self): return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles) def get_authenticate_depth(self): return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles) def get_authenticate_once_state(self): return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles) def get_ca_file(self): return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles) def get_cache_size(self): return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles) def get_cache_timeout(self): return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles) def get_certificate_file(self): return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles) def get_chain_file(self): return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles) def get_cipher_list(self): return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles) def get_client_certificate_ca_file(self): return 
self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles) def get_crl_file(self): return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles) def get_default_profile(self): return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles) def get_description(self): return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles) def get_forward_proxy_ca_certificate_file(self): return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles) def get_forward_proxy_ca_key_file(self): return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles) def get_forward_proxy_ca_passphrase(self): return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles) def get_forward_proxy_certificate_extension_include(self): return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles) def get_forward_proxy_certificate_lifespan(self): return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles) def get_forward_proxy_enabled_state(self): return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles) def get_forward_proxy_lookup_by_ipaddr_port_state(self): return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles) def get_handshake_timeout(self): return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles) def get_key_file(self): return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles) def get_modssl_emulation_state(self): return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles) def get_passphrase(self): return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles) def get_peer_certification_mode(self): return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles) def get_profile_mode(self): return 
self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles) def get_renegotiation_maximum_record_delay(self): return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles) def get_renegotiation_period(self): return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles) def get_renegotiation_state(self): return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles) def get_renegotiation_throughput(self): return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles) def get_retain_certificate_state(self): return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles) def get_secure_renegotiation_mode(self): return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles) def get_server_name(self): return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles) def get_session_ticket_state(self): return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles) def get_sni_default_state(self): return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles) def get_sni_require_state(self): return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles) def get_ssl_option(self): return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles) def get_strict_resume_state(self): return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles) def get_unclean_shutdown_state(self): return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles) def get_is_base_profile(self): return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles) def get_is_system_profile(self): return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles) class SystemInfo(object): """System information class. F5 BIG-IP system information class. Attributes: api: iControl API instance. 
""" def __init__(self, api): self.api = api def get_base_mac_address(self): return self.api.System.SystemInfo.get_base_mac_address() def get_blade_temperature(self): return self.api.System.SystemInfo.get_blade_temperature() def get_chassis_slot_information(self): return self.api.System.SystemInfo.get_chassis_slot_information() def get_globally_unique_identifier(self): return self.api.System.SystemInfo.get_globally_unique_identifier() def get_group_id(self): return self.api.System.SystemInfo.get_group_id() def get_hardware_information(self): return self.api.System.SystemInfo.get_hardware_information() def get_marketing_name(self): return self.api.System.SystemInfo.get_marketing_name() def get_product_information(self): return self.api.System.SystemInfo.get_product_information() def get_pva_version(self): return self.api.System.SystemInfo.get_pva_version() def get_system_id(self): return self.api.System.SystemInfo.get_system_id() def get_system_information(self): return self.api.System.SystemInfo.get_system_information() def get_time(self): return self.api.System.SystemInfo.get_time() def get_time_zone(self): return self.api.System.SystemInfo.get_time_zone() def get_uptime(self): return self.api.System.SystemInfo.get_uptime() def generate_dict(api_obj, fields): result_dict = {} lists = [] supported_fields = [] if api_obj.get_list(): for field in fields: try: api_response = getattr(api_obj, "get_" + field)() except (MethodNotFound, WebFault): pass else: lists.append(api_response) supported_fields.append(field) for i, j in enumerate(api_obj.get_list()): temp = {} temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)]) result_dict[j] = temp return result_dict def generate_simple_dict(api_obj, fields): result_dict = {} for field in fields: try: api_response = getattr(api_obj, "get_" + field)() except (MethodNotFound, WebFault): pass else: result_dict[field] = api_response return result_dict def generate_interface_dict(f5, regex): interfaces = 
Interfaces(f5.get_api(), regex) fields = ['active_media', 'actual_flow_control', 'bundle_state', 'description', 'dual_media_state', 'enabled_state', 'if_index', 'learning_mode', 'lldp_admin_status', 'lldp_tlvmap', 'mac_address', 'media', 'media_option', 'media_option_sfp', 'media_sfp', 'media_speed', 'media_status', 'mtu', 'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control', 'sflow_poll_interval', 'sflow_poll_interval_global', 'sfp_media_state', 'stp_active_edge_port_state', 'stp_enabled_state', 'stp_link_type', 'stp_protocol_detection_reset_state'] return generate_dict(interfaces, fields) def generate_self_ip_dict(f5, regex): self_ips = SelfIPs(f5.get_api(), regex) fields = ['address', 'allow_access_list', 'description', 'enforced_firewall_policy', 'floating_state', 'fw_rule', 'netmask', 'staged_firewall_policy', 'traffic_group', 'vlan', 'is_traffic_group_inherited'] return generate_dict(self_ips, fields) def generate_trunk_dict(f5, regex): trunks = Trunks(f5.get_api(), regex) fields = ['active_lacp_state', 'configured_member_count', 'description', 'distribution_hash_option', 'interface', 'lacp_enabled_state', 'lacp_timeout_option', 'link_selection_policy', 'media_speed', 'media_status', 'operational_member_count', 'stp_enabled_state', 'stp_protocol_detection_reset_state'] return generate_dict(trunks, fields) def generate_vlan_dict(f5, regex): vlans = Vlans(f5.get_api(), regex) fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description', 'dynamic_forwarding', 'failsafe_action', 'failsafe_state', 'failsafe_timeout', 'if_index', 'learning_mode', 'mac_masquerade_address', 'member', 'mtu', 'sflow_poll_interval', 'sflow_poll_interval_global', 'sflow_sampling_rate', 'sflow_sampling_rate_global', 'source_check_state', 'true_mac_address', 'vlan_id'] return generate_dict(vlans, fields) def generate_vs_dict(f5, regex): virtual_servers = VirtualServers(f5.get_api(), regex) fields = ['actual_hardware_acceleration', 'authentication_profile', 'auto_lasthop', 
'bw_controller_policy', 'clone_pool', 'cmp_enable_mode', 'connection_limit', 'connection_mirror_state', 'default_pool_name', 'description', 'destination', 'enabled_state', 'enforced_firewall_policy', 'fallback_persistence_profile', 'fw_rule', 'gtm_score', 'last_hop_pool', 'nat64_state', 'object_status', 'persistence_profile', 'profile', 'protocol', 'rate_class', 'rate_limit', 'rate_limit_destination_mask', 'rate_limit_mode', 'rate_limit_source_mask', 'related_rule', 'rule', 'security_log_profile', 'snat_pool', 'snat_type', 'source_address', 'source_address_translation_lsn_pool', 'source_address_translation_snat_pool', 'source_address_translation_type', 'source_port_behavior', 'staged_firewall_policy', 'translate_address_state', 'translate_port_state', 'type', 'vlan', 'wildmask'] return generate_dict(virtual_servers, fields) def generate_pool_dict(f5, regex): pools = Pools(f5.get_api(), regex) fields = ['action_on_service_down', 'active_member_count', 'aggregate_dynamic_ratio', 'allow_nat_state', 'allow_snat_state', 'client_ip_tos', 'client_link_qos', 'description', 'gateway_failsafe_device', 'ignore_persisted_weight_state', 'lb_method', 'member', 'minimum_active_member', 'minimum_up_member', 'minimum_up_member_action', 'minimum_up_member_enabled_state', 'monitor_association', 'monitor_instance', 'object_status', 'profile', 'queue_depth_limit', 'queue_on_connection_limit_state', 'queue_time_limit', 'reselect_tries', 'server_ip_tos', 'server_link_qos', 'simple_timeout', 'slow_ramp_time'] return generate_dict(pools, fields) def generate_device_dict(f5, regex): devices = Devices(f5.get_api(), regex) fields = ['active_modules', 'base_mac_address', 'blade_addresses', 'build', 'chassis_id', 'chassis_type', 'comment', 'configsync_address', 'contact', 'description', 'edition', 'failover_state', 'hostname', 'inactive_modules', 'location', 'management_address', 'marketing_name', 'multicast_address', 'optional_modules', 'platform_id', 'primary_mirror_address', 'product', 
'secondary_mirror_address', 'software_version', 'timelimited_modules', 'timezone', 'unicast_addresses'] return generate_dict(devices, fields) def generate_device_group_dict(f5, regex): device_groups = DeviceGroups(f5.get_api(), regex) fields = ['all_preferred_active', 'autosync_enabled_state','description', 'device', 'full_load_on_sync_state', 'incremental_config_sync_size_maximum', 'network_failover_enabled_state', 'sync_status', 'type'] return generate_dict(device_groups, fields) def generate_traffic_group_dict(f5, regex): traffic_groups = TrafficGroups(f5.get_api(), regex) fields = ['auto_failback_enabled_state', 'auto_failback_time', 'default_device', 'description', 'ha_load_factor', 'ha_order', 'is_floating', 'mac_masquerade_address', 'unit_id'] return generate_dict(traffic_groups, fields) def generate_rule_dict(f5, regex): rules = Rules(f5.get_api(), regex) fields = ['definition', 'description', 'ignore_vertification', 'verification_status'] return generate_dict(rules, fields) def generate_node_dict(f5, regex): nodes = Nodes(f5.get_api(), regex) fields = ['address', 'connection_limit', 'description', 'dynamic_ratio', 'monitor_instance', 'monitor_rule', 'monitor_status', 'object_status', 'rate_limit', 'ratio', 'session_status'] return generate_dict(nodes, fields) def generate_virtual_address_dict(f5, regex): virtual_addresses = VirtualAddresses(f5.get_api(), regex) fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit', 'description', 'enabled_state', 'icmp_echo_state', 'is_floating_state', 'netmask', 'object_status', 'route_advertisement_state', 'traffic_group'] return generate_dict(virtual_addresses, fields) def generate_address_class_dict(f5, regex): address_classes = AddressClasses(f5.get_api(), regex) fields = ['address_class', 'description'] return generate_dict(address_classes, fields) def generate_certificate_dict(f5, regex): certificates = Certificates(f5.get_api(), regex) return dict(zip(certificates.get_list(), 
certificates.get_certificate_list())) def generate_key_dict(f5, regex): keys = Keys(f5.get_api(), regex) return dict(zip(keys.get_list(), keys.get_key_list())) def generate_client_ssl_profile_dict(f5, regex): profiles = ProfileClientSSL(f5.get_api(), regex) fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth', 'authenticate_once_state', 'ca_file', 'cache_size', 'cache_timeout', 'certificate_file', 'chain_file', 'cipher_list', 'client_certificate_ca_file', 'crl_file', 'default_profile', 'description', 'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file', 'forward_proxy_ca_passphrase', 'forward_proxy_certificate_extension_include', 'forward_proxy_certificate_lifespan', 'forward_proxy_enabled_state', 'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout', 'key_file', 'modssl_emulation_state', 'passphrase', 'peer_certification_mode', 'profile_mode', 'renegotiation_maximum_record_delay', 'renegotiation_period', 'renegotiation_state', 'renegotiation_throughput', 'retain_certificate_state', 'secure_renegotiation_mode', 'server_name', 'session_ticket_state', 'sni_default_state', 'sni_require_state', 'ssl_option', 'strict_resume_state', 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile'] return generate_dict(profiles, fields) def generate_system_info_dict(f5): system_info = SystemInfo(f5.get_api()) fields = ['base_mac_address', 'blade_temperature', 'chassis_slot_information', 'globally_unique_identifier', 'group_id', 'hardware_information', 'marketing_name', 'product_information', 'pva_version', 'system_id', 'system_information', 'time', 'time_zone', 'uptime'] return generate_simple_dict(system_info, fields) def generate_software_list(f5): software = Software(f5.get_api()) software_list = software.get_all_software_status() return software_list def disable_ssl_cert_validation(): # You probably only want to do this for testing and never in production. 
# From https://www.python.org/dev/peps/pep-0476/#id29 import ssl ssl._create_default_https_context = ssl._create_unverified_context def main(): module = AnsibleModule( argument_spec = dict( server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), validate_certs = dict(default='yes', type='bool'), session = dict(type='bool', default=False), include = dict(type='list', required=True), filter = dict(type='str', required=False), ) ) if not bigsuds_found: module.fail_json(msg="the python suds and bigsuds modules is required") server = module.params['server'] user = module.params['user'] password = module.params['password'] validate_certs = module.params['validate_certs'] session = module.params['session'] fact_filter = module.params['filter'] if fact_filter: regex = fnmatch.translate(fact_filter) else: regex = None include = map(lambda x: x.lower(), module.params['include']) valid_includes = ('address_class', 'certificate', 'client_ssl_profile', 'device', 'device_group', 'interface', 'key', 'node', 'pool', 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan') include_test = map(lambda x: x in valid_includes, include) if not all(include_test): module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) if not validate_certs: disable_ssl_cert_validation() try: facts = {} if len(include) > 0: f5 = F5(server, user, password, session) saved_active_folder = f5.get_active_folder() saved_recursive_query_state = f5.get_recursive_query_state() if saved_active_folder != "/": f5.set_active_folder("/") if saved_recursive_query_state != "STATE_ENABLED": f5.enable_recursive_query_state() if 'interface' in include: facts['interface'] = generate_interface_dict(f5, regex) if 'self_ip' in include: facts['self_ip'] = generate_self_ip_dict(f5, regex) if 'trunk' in include: facts['trunk'] = 
generate_trunk_dict(f5, regex) if 'vlan' in include: facts['vlan'] = generate_vlan_dict(f5, regex) if 'virtual_server' in include: facts['virtual_server'] = generate_vs_dict(f5, regex) if 'pool' in include: facts['pool'] = generate_pool_dict(f5, regex) if 'device' in include: facts['device'] = generate_device_dict(f5, regex) if 'device_group' in include: facts['device_group'] = generate_device_group_dict(f5, regex) if 'traffic_group' in include: facts['traffic_group'] = generate_traffic_group_dict(f5, regex) if 'rule' in include: facts['rule'] = generate_rule_dict(f5, regex) if 'node' in include: facts['node'] = generate_node_dict(f5, regex) if 'virtual_address' in include: facts['virtual_address'] = generate_virtual_address_dict(f5, regex) if 'address_class' in include: facts['address_class'] = generate_address_class_dict(f5, regex) if 'software' in include: facts['software'] = generate_software_list(f5) if 'certificate' in include: facts['certificate'] = generate_certificate_dict(f5, regex) if 'key' in include: facts['key'] = generate_key_dict(f5, regex) if 'client_ssl_profile' in include: facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex) if 'system_info' in include: facts['system_info'] = generate_system_info_dict(f5) # restore saved state if saved_active_folder and saved_active_folder != "/": f5.set_active_folder(saved_active_folder) if saved_recursive_query_state and \ saved_recursive_query_state != "STATE_ENABLED": f5.set_recursive_query_state(saved_recursive_query_state) result = {'ansible_facts': facts} except Exception, e: module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc())) module.exit_json(**result) # include magic from lib/ansible/module_common.py from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
smmribeiro/intellij-community
python/helpers/python-skeletons/numpy/core/__init__.py
23
149797
from . import multiarray __all__ = [] # Generated with generator3. class generic(object): """ Base class for numpy scalar types. Class from which most (all?) numpy scalar types are derived. For consistency, exposes the same API as `ndarray`, despite many consequent attributes being either "get-only," or completely irrelevant. This is the class from which it is strongly suggested users should derive custom scalar types. """ def all(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def any(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def argmax(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def argmin(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def argsort(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def astype(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def byteswap(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def choose(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def clip(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def compress(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def conj(self, *args, **kwargs): # real signature unknown pass def conjugate(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def copy(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def cumprod(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def cumsum(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def diagonal(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def dump(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def dumps(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def fill(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def flatten(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def getfield(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def item(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def itemset(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def max(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def mean(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def min(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def newbyteorder(self, new_order='S'): # real signature unknown; restored from __doc__ """ newbyteorder(new_order='S') Return a new `dtype` with a different byte order. Changes are also made in all fields and sub-arrays of the data type. The `new_order` code can be any from the following: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) Parameters ---------- new_order : str, optional Byte order to force; a value from the byte order specifications above. The default value ('S') results in swapping the current byte order. The code does a case-insensitive check on the first letter of `new_order` for the alternatives above. For example, any of 'B' or 'b' or 'biggish' are valid to specify big-endian. Returns ------- new_dtype : dtype New `dtype` object with the given change to the byte order. """ pass def nonzero(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def prod(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. 
See Also -------- The corresponding attribute of the derived class of interest. """ pass def ptp(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def put(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def ravel(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def repeat(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def reshape(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def resize(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def round(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def searchsorted(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def setfield(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def setflags(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def sort(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def squeeze(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def std(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def sum(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def swapaxes(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def take(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def tobytes(self, *args, **kwargs): # real signature unknown pass def tofile(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def tolist(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def tostring(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def trace(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""" pass def transpose(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def var(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def view(self, *args, **kwargs): # real signature unknown """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """ pass def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __array_wrap__(self, obj): # real signature unknown; restored from __doc__ """ sc.__array_wrap__(obj) return scalar from array """ pass def __array__(self, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__ """ sc.__array__(|type) return 0-dim array """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __copy__(self, *args, **kwargs): # real signature unknown pass def __deepcopy__(self, *args, **kwargs): # real signature unknown pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). 
""" pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __format__(self, *args, **kwargs): # real signature unknown """ NumPy array scalar formatter """ pass def __getitem__(self, *args, **kwargs): # real signature unknown """ Return self[key]. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. 
""" pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __reduce__(self, *args, **kwargs): # real signature unknown pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __round__(self, *args, **kwargs): # real signature unknown pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __setstate__(self, *args, **kwargs): # real signature unknown pass def __sizeof__(self, *args, **kwargs): # real signature unknown pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. 
""" pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass base = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """base object""" data = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """pointer to start of data""" dtype = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """get array data-descriptor""" flags = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """integer value of flags""" flat = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """a 1-d view of scalar""" imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """imaginary part of scalar""" itemsize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """length of one element in bytes""" nbytes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """length of item in bytes""" ndim = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """number of array dimensions""" real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """real part of scalar""" shape = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """tuple of array dimensions""" size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """number of elements in the gentype""" strides = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """tuple of bytes steps in each dimension""" T = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """transpose""" __array_interface__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Array protocol: Python side""" __array_priority__ = property(lambda self: object(), lambda 
self, v: None, lambda self: None) # default """Array priority.""" __array_struct__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Array protocol: struct""" __hash__ = None class bool_(__numpy.generic): """ NumPy's Boolean type. Character code: ``?``. Alias: bool8 """ def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. 
""" pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass bool8 = bool_ class number(__numpy.generic): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass class integer(__numpy.number): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """denominator of value (1)""" numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """numerator of value (the value itself)""" class signedinteger(__numpy.integer): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass class int8(__numpy.signedinteger): """ 8-bit integer. Character code ``b``. C char compatible. """ def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. 
""" pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. 
""" pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass byte = int8 class flexible(__numpy.generic): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass class character(__numpy.flexible): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass class string_(bytes, __numpy.character): # no doc def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. 
""" pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass bytes_ = string_ bytes0 = string_ class inexact(number): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass class complexfloating(inexact): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass class complex_(complexfloating, complex): """ Composed of two 64 bit floats """ def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). 
""" pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. 
""" pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass complex128 = complex_ cfloat = complex_ cdouble = complex_ class longcomplex(complexfloating): """ Composed of two 128 bit floats """ def __complex__(self, *args, **kwargs): # real signature unknown pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). 
""" pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass complex256 = longcomplex clongfloat = longcomplex clongdouble = longcomplex class singlecomplex(complexfloating): """ Composed of two 32 bit floats """ def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __complex__(self, *args, **kwargs): # real signature unknown pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. 
""" pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. 
""" pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass csingle = singlecomplex complex64 = singlecomplex class datetime64(generic): # no doc def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. 
""" pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass class floating(inexact): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass class float_(floating, float): """ 64-bit floating-point number. Character code 'd'. Python float compatible. """ def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). 
""" pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. 
""" pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass float64 = float_ double = float_ class longfloat(floating): """ 128-bit floating-point number. Character code: 'g'. C long float compatible. """ def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). 
""" pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass longdouble = longfloat float128 = longfloat class half(floating): # no doc def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). 
""" pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. 
""" pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass float16 = half class single(floating): """ 32-bit floating-point number. Character code 'f'. C float compatible. """ def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. 
""" pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. 
""" pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass float32 = single class long(signedinteger): """ 64-bit integer. Character code 'l'. Python int compatible. """ def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. 
""" pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. 
""" pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. 
""" pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass int_ = long intp = long int64 = long int0 = long class short(signedinteger): """ 16-bit integer. Character code ``h``. C short compatible. """ def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. 
""" pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. 
""" pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass int16 = short class intc(signedinteger): """ 32-bit integer. Character code 'i'. C int compatible. """ def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. 
""" pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. 
""" pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass int32 = intc class longlong(signedinteger): # no doc def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. 
""" pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). 
""" pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass class object_(generic): """ Any Python object. Character code: 'O'. """ def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __call__(self, *args, **kwargs): # real signature unknown """ Call self as a function. """ pass def __contains__(self, *args, **kwargs): # real signature unknown """ Return key in self. """ pass def __delattr__(self, *args, **kwargs): # real signature unknown """ Implement delattr(self, name). """ pass def __delitem__(self, *args, **kwargs): # real signature unknown """ Delete self[key]. 
""" pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __getattribute__(self, *args, **kwargs): # real signature unknown """ Return getattr(self, name). """ pass def __getitem__(self, *args, **kwargs): # real signature unknown """ Return self[key]. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __iadd__(self, *args, **kwargs): # real signature unknown """ Implement self+=value. """ pass def __imul__(self, *args, **kwargs): # real signature unknown """ Implement self*=value. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __len__(self, *args, **kwargs): # real signature unknown """ Return len(self). """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value.n """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __setattr__(self, *args, **kwargs): # real signature unknown """ Implement setattr(self, name, value). """ pass def __setitem__(self, *args, **kwargs): # real signature unknown """ Set self[key] to value. 
""" pass object0 = object_ class void0(flexible): # no doc def getfield(self, *args, **kwargs): # real signature unknown pass def setfield(self, *args, **kwargs): # real signature unknown pass def __delitem__(self, *args, **kwargs): # real signature unknown """ Delete self[key]. """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __getitem__(self, *args, **kwargs): # real signature unknown """ Return self[key]. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __len__(self, *args, **kwargs): # real signature unknown """ Return len(self). """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __setitem__(self, *args, **kwargs): # real signature unknown """ Set self[key] to value. """ pass base = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """base object""" dtype = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """dtype object""" flags = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """integer value of flags""" void = void0 class unicode_(str, character): # no doc def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. 
""" pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass unicode = unicode_ str_ = unicode_ str0 = unicode_ class timedelta64(signedinteger): # no doc def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. 
""" pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass class unsignedinteger(integer): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass class uint8(unsignedinteger): # no doc def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. 
""" pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. 
""" pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass ubyte = uint8 class ufunc(object): """ Functions that operate element by element on whole arrays. To see the documentation for a specific ufunc, use `info`. For example, ``np.info(np.sin)``. Because ufuncs are written in C (for speed) and linked into Python with NumPy's ufunc facility, Python's help() function finds this page whenever help() is called on a ufunc. A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. Calling ufuncs: =============== op(*x[, out], where=True, **kwargs) Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. The broadcasting rules are: * Dimensions of length 1 may be prepended to either array. * Arrays may be repeated along dimensions of length 1. Parameters ---------- *x : array_like Input arrays. out : ndarray, None, or tuple of ndarray and None, optional Alternate array object(s) in which to put the result; if provided, it must have a shape that the inputs broadcast to. A tuple of arrays (possible only as a keyword argument) must have length equal to the number of outputs; use `None` for outputs to be allocated by the ufunc. where : array_like, optional Values of True indicate to calculate the ufunc at that position, values of False indicate to leave the value in the output alone. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`. 
Returns ------- r : ndarray or tuple of ndarray `r` will have the shape that the arrays in `x` broadcast to; if `out` is provided, `r` will be equal to `out`. If the function has more than one output, then the result will be a tuple of arrays. """ def accumulate(self, array, axis=0, dtype=None, out=None, keepdims=None): # real signature unknown; restored from __doc__ """ accumulate(array, axis=0, dtype=None, out=None, keepdims=None) Accumulate the result of applying the operator to all elements. For a one-dimensional array, accumulate produces results equivalent to:: r = np.empty(len(A)) t = op.identity # op = the ufunc being applied to A's elements for i in range(len(A)): t = op(t, A[i]) r[i] = t return r For example, add.accumulate() is equivalent to np.cumsum(). For a multi-dimensional array, accumulate is applied along only one axis (axis zero by default; see Examples below) so repeated use is necessary if one wants to accumulate over multiple axes. Parameters ---------- array : array_like The array to act on. axis : int, optional The axis along which to apply the accumulation; default is zero. dtype : data-type code, optional The data-type used to represent the intermediate results. Defaults to the data-type of the output array if such is provided, or the the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. keepdims : bool Has no effect. Deprecated, and will be removed in future. Returns ------- r : ndarray The accumulated values. If `out` was supplied, `r` is a reference to `out`. 
Examples -------- 1-D array examples: >>> np.add.accumulate([2, 3, 5]) array([ 2, 5, 10]) >>> np.multiply.accumulate([2, 3, 5]) array([ 2, 6, 30]) 2-D array examples: >>> I = np.eye(2) >>> I array([[ 1., 0.], [ 0., 1.]]) Accumulate along axis 0 (rows), down columns: >>> np.add.accumulate(I, 0) array([[ 1., 0.], [ 1., 1.]]) >>> np.add.accumulate(I) # no axis specified = axis zero array([[ 1., 0.], [ 1., 1.]]) Accumulate along axis 1 (columns), through rows: >>> np.add.accumulate(I, 1) array([[ 1., 1.], [ 0., 1.]]) """ pass def at(self, a, indices, b=None): # real signature unknown; restored from __doc__ """ at(a, indices, b=None) Performs unbuffered in place operation on operand 'a' for elements specified by 'indices'. For addition ufunc, this method is equivalent to `a[indices] += b`, except that results are accumulated for elements that are indexed more than once. For example, `a[[0,0]] += 1` will only increment the first element once because of buffering, whereas `add.at(a, [0,0], 1)` will increment the first element twice. .. versionadded:: 1.8.0 Parameters ---------- a : array_like The array to perform in place operation on. indices : array_like or tuple Array like index object or slice object for indexing into first operand. If first operand has multiple dimensions, indices can be a tuple of array like index objects or slice objects. b : array_like Second operand for ufuncs requiring two operands. Operand must be broadcastable over first operand after indexing or slicing. 
Examples -------- Set items 0 and 1 to their negative values: >>> a = np.array([1, 2, 3, 4]) >>> np.negative.at(a, [0, 1]) >>> print(a) array([-1, -2, 3, 4]) :: Increment items 0 and 1, and increment item 2 twice: >>> a = np.array([1, 2, 3, 4]) >>> np.add.at(a, [0, 1, 2, 2], 1) >>> print(a) array([2, 3, 5, 4]) :: Add items 0 and 1 in first array to second array, and store results in first array: >>> a = np.array([1, 2, 3, 4]) >>> b = np.array([1, 2]) >>> np.add.at(a, [0, 1], b) >>> print(a) array([2, 4, 3, 4]) """ pass def outer(self, A, B, **kwargs): # real signature unknown; restored from __doc__ """ outer(A, B, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of ``op.outer(A, B)`` is an array of dimension M + N such that: .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) For `A` and `B` one-dimensional, this is equivalent to:: r = empty(len(A),len(B)) for i in range(len(A)): for j in range(len(B)): r[i,j] = op(A[i], B[j]) # op = ufunc in question Parameters ---------- A : array_like First array B : array_like Second array kwargs : any Arguments to pass on to the ufunc. Typically `dtype` or `out`. 
Returns ------- r : ndarray Output array See Also -------- numpy.outer Examples -------- >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) array([[ 4, 5, 6], [ 8, 10, 12], [12, 15, 18]]) A multi-dimensional example: >>> A = np.array([[1, 2, 3], [4, 5, 6]]) >>> A.shape (2, 3) >>> B = np.array([[1, 2, 3, 4]]) >>> B.shape (1, 4) >>> C = np.multiply.outer(A, B) >>> C.shape; C (2, 3, 1, 4) array([[[[ 1, 2, 3, 4]], [[ 2, 4, 6, 8]], [[ 3, 6, 9, 12]]], [[[ 4, 8, 12, 16]], [[ 5, 10, 15, 20]], [[ 6, 12, 18, 24]]]]) """ pass def reduce(self, a, axis=0, dtype=None, out=None, keepdims=False): # real signature unknown; restored from __doc__ """ reduce(a, axis=0, dtype=None, out=None, keepdims=False) Reduces `a`'s dimension by one, by applying ufunc along one axis. Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = the result of iterating `j` over :math:`range(N_i)`, cumulatively applying ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. For a one-dimensional array, reduce produces results equivalent to: :: r = op.identity # op = ufunc for i in range(len(A)): r = op(r, A[i]) return r For example, add.reduce() is equivalent to sum(). Parameters ---------- a : array_like The array to act on. axis : None or int or tuple of ints, optional Axis or axes along which a reduction is performed. The default (`axis` = 0) is perform a reduction over the first dimension of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is `None`, a reduction is performed over all the axes. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. For operations which are either not commutative or not associative, doing a reduction over multiple axes is not well-defined. The ufuncs do not currently raise an exception in this case, but will likely do so in the future. 
dtype : data-type code, optional The type used to represent the intermediate results. Defaults to the data-type of the output array if this is provided, or the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.7.0 Returns ------- r : ndarray The reduced array. If `out` was supplied, `r` is a reference to it. Examples -------- >>> np.multiply.reduce([2,3,5]) 30 A multi-dimensional array example: >>> X = np.arange(8).reshape((2,2,2)) >>> X array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> np.add.reduce(X, 0) array([[ 4, 6], [ 8, 10]]) >>> np.add.reduce(X) # confirm: default axis value is 0 array([[ 4, 6], [ 8, 10]]) >>> np.add.reduce(X, 1) array([[ 2, 4], [10, 12]]) >>> np.add.reduce(X, 2) array([[ 1, 5], [ 9, 13]]) """ pass def reduceat(self, a, indices, axis=0, dtype=None, out=None): # real signature unknown; restored from __doc__ """ reduceat(a, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis. For i in ``range(len(indices))``, `reduceat` computes ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th generalized "row" parallel to `axis` in the final result (i.e., in a 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if `axis = 1`, it becomes the i-th column). There are three exceptions to this: * when ``i = len(indices) - 1`` (so for the last index), ``indices[i+1] = a.shape[axis]``. 
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is simply ``a[indices[i]]``. * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. The shape of the output depends on the size of `indices`, and may be larger than `a` (this happens if ``len(indices) > a.shape[axis]``). Parameters ---------- a : array_like The array to act on. indices : array_like Paired indices, comma separated (not colon), specifying slices to reduce. axis : int, optional The axis along which to apply the reduceat. dtype : data-type code, optional The type used to represent the intermediate results. Defaults to the data type of the output array if this is provided, or the data type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. Returns ------- r : ndarray The reduced values. If `out` was supplied, `r` is a reference to `out`. Notes ----- A descriptive example: If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as ``ufunc.reduceat(a, indices)[::2]`` where `indices` is ``range(len(array) - 1)`` with a zero placed in every other element: ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. Don't be fooled by this attribute's name: `reduceat(a)` is not necessarily smaller than `a`. 
Examples -------- To take the running sum of four successive values: >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] array([ 6, 10, 14, 18]) A 2-D example: >>> x = np.linspace(0, 15, 16).reshape(4,4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]) :: # reduce such that the result has the following five rows: # [row1 + row2 + row3] # [row4] # [row2] # [row3] # [row1 + row2 + row3 + row4] >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) array([[ 12., 15., 18., 21.], [ 12., 13., 14., 15.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 24., 28., 32., 36.]]) :: # reduce such that result has the following two columns: # [col1 * col2 * col3, col4] >>> np.multiply.reduceat(x, [0, 3], 1) array([[ 0., 3.], [ 120., 7.], [ 720., 11.], [ 2184., 15.]]) """ pass def __call__(self, *args, **kwargs): # real signature unknown """ Call self as a function. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __repr__(self, *args, **kwargs): # real signature unknown """ Return repr(self). """ pass def __str__(self, *args, **kwargs): # real signature unknown """ Return str(self). """ pass identity = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The identity value. Data attribute containing the identity element for the ufunc, if it has one. If it does not, the attribute value is None. Examples -------- >>> np.add.identity 0 >>> np.multiply.identity 1 >>> np.power.identity 1 >>> print(np.exp.identity) None""" nargs = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of arguments. Data attribute containing the number of arguments the ufunc takes, including optional ones. Notes ----- Typically this value will be one more than what you might expect because all ufuncs take the optional "out" argument. 
Examples -------- >>> np.add.nargs 3 >>> np.multiply.nargs 3 >>> np.power.nargs 3 >>> np.exp.nargs 2""" nin = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of inputs. Data attribute containing the number of arguments the ufunc treats as input. Examples -------- >>> np.add.nin 2 >>> np.multiply.nin 2 >>> np.power.nin 2 >>> np.exp.nin 1""" nout = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of outputs. Data attribute containing the number of arguments the ufunc treats as output. Notes ----- Since all ufuncs can take output arguments, this will always be (at least) 1. Examples -------- >>> np.add.nout 1 >>> np.multiply.nout 1 >>> np.power.nout 1 >>> np.exp.nout 1""" ntypes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """The number of types. The number of numerical NumPy types - of which there are 18 total - on which the ufunc can operate. See Also -------- numpy.ufunc.types Examples -------- >>> np.add.ntypes 18 >>> np.multiply.ntypes 18 >>> np.power.ntypes 17 >>> np.exp.ntypes 7 >>> np.remainder.ntypes 14""" signature = property(lambda self: object(), lambda self, v: None, lambda self: None) # default types = property(lambda self: object(), lambda self, v: None, lambda self: None) # default """Returns a list with types grouped input->output. Data attribute listing the data-type "Domain-Range" groupings the ufunc can deliver. The data-types are given using the character codes. 
See Also -------- numpy.ufunc.ntypes Examples -------- >>> np.add.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.multiply.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.power.types ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.exp.types ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] >>> np.remainder.types ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']""" __name__ = 'ufunc' class uintp(unsignedinteger): # no doc def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). 
""" pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. 
""" pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass uint64 = uintp uint0 = uintp uint = uintp class ushort(unsignedinteger): # no doc def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. 
""" pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. 
""" pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass uint16 = ushort class uintc(unsignedinteger): # no doc def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). 
""" pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. """ pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. 
""" pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. """ pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. 
""" pass uint32 = uintc class ulonglong(unsignedinteger): # no doc def __abs__(self, *args, **kwargs): # real signature unknown """ abs(self) """ pass def __add__(self, *args, **kwargs): # real signature unknown """ Return self+value. """ pass def __and__(self, *args, **kwargs): # real signature unknown """ Return self&value. """ pass def __bool__(self, *args, **kwargs): # real signature unknown """ self != 0 """ pass def __divmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(self, value). """ pass def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __float__(self, *args, **kwargs): # real signature unknown """ float(self) """ pass def __floordiv__(self, *args, **kwargs): # real signature unknown """ Return self//value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __index__(self, *args, **kwargs): # real signature unknown """ Return self converted to an integer, if self is suitable for use as an index into a list. """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __int__(self, *args, **kwargs): # real signature unknown """ int(self) """ pass def __invert__(self, *args, **kwargs): # real signature unknown """ ~self """ pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lshift__(self, *args, **kwargs): # real signature unknown """ Return self<<value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self<value. """ pass def __mod__(self, *args, **kwargs): # real signature unknown """ Return self%value. """ pass def __mul__(self, *args, **kwargs): # real signature unknown """ Return self*value. 
""" pass def __neg__(self, *args, **kwargs): # real signature unknown """ -self """ pass @staticmethod # known case of __new__ def __new__(*args, **kwargs): # real signature unknown """ Create and return a new object. See help(type) for accurate signature. """ pass def __ne__(self, *args, **kwargs): # real signature unknown """ Return self!=value. """ pass def __or__(self, *args, **kwargs): # real signature unknown """ Return self|value. """ pass def __pos__(self, *args, **kwargs): # real signature unknown """ +self """ pass def __pow__(self, *args, **kwargs): # real signature unknown """ Return pow(self, value, mod). """ pass def __radd__(self, *args, **kwargs): # real signature unknown """ Return value+self. """ pass def __rand__(self, *args, **kwargs): # real signature unknown """ Return value&self. """ pass def __rdivmod__(self, *args, **kwargs): # real signature unknown """ Return divmod(value, self). """ pass def __rfloordiv__(self, *args, **kwargs): # real signature unknown """ Return value//self. """ pass def __rlshift__(self, *args, **kwargs): # real signature unknown """ Return value<<self. """ pass def __rmod__(self, *args, **kwargs): # real signature unknown """ Return value%self. """ pass def __rmul__(self, *args, **kwargs): # real signature unknown """ Return value*self. """ pass def __ror__(self, *args, **kwargs): # real signature unknown """ Return value|self. """ pass def __rpow__(self, *args, **kwargs): # real signature unknown """ Return pow(value, self, mod). """ pass def __rrshift__(self, *args, **kwargs): # real signature unknown """ Return value>>self. """ pass def __rshift__(self, *args, **kwargs): # real signature unknown """ Return self>>value. """ pass def __rsub__(self, *args, **kwargs): # real signature unknown """ Return value-self. """ pass def __rtruediv__(self, *args, **kwargs): # real signature unknown """ Return value/self. """ pass def __rxor__(self, *args, **kwargs): # real signature unknown """ Return value^self. 
""" pass def __sub__(self, *args, **kwargs): # real signature unknown """ Return self-value. """ pass def __truediv__(self, *args, **kwargs): # real signature unknown """ Return self/value. """ pass def __xor__(self, *args, **kwargs): # real signature unknown """ Return self^value. """ pass
apache-2.0
cloud9UG/odoo
addons/report/models/report.py
168
26320
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import api from openerp import SUPERUSER_ID from openerp.exceptions import AccessError from openerp.osv import osv, fields from openerp.tools import config from openerp.tools.misc import find_in_path from openerp.tools.translate import _ from openerp.addons.web.http import request from openerp.tools.safe_eval import safe_eval as eval import re import time import base64 import logging import tempfile import lxml.html import os import subprocess from contextlib import closing from distutils.version import LooseVersion from functools import partial from pyPdf import PdfFileWriter, PdfFileReader #-------------------------------------------------------------------------- # Helpers #-------------------------------------------------------------------------- _logger = logging.getLogger(__name__) def _get_wkhtmltopdf_bin(): wkhtmltopdf_bin = find_in_path('wkhtmltopdf') if wkhtmltopdf_bin is None: raise IOError return wkhtmltopdf_bin #-------------------------------------------------------------------------- # Check the presence 
of Wkhtmltopdf and return its version at Odoo start-up #-------------------------------------------------------------------------- wkhtmltopdf_state = 'install' try: process = subprocess.Popen( [_get_wkhtmltopdf_bin(), '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) except (OSError, IOError): _logger.info('You need Wkhtmltopdf to print a pdf version of the reports.') else: _logger.info('Will use the Wkhtmltopdf binary at %s' % _get_wkhtmltopdf_bin()) out, err = process.communicate() version = re.search('([0-9.]+)', out).group(0) if LooseVersion(version) < LooseVersion('0.12.0'): _logger.info('Upgrade Wkhtmltopdf to (at least) 0.12.0') wkhtmltopdf_state = 'upgrade' else: wkhtmltopdf_state = 'ok' if config['workers'] == 1: _logger.info('You need to start Odoo with at least two workers to print a pdf version of the reports.') wkhtmltopdf_state = 'workers' class Report(osv.Model): _name = "report" _description = "Report" public_user = None #-------------------------------------------------------------------------- # Extension of ir_ui_view.render with arguments frequently used in reports #-------------------------------------------------------------------------- def translate_doc(self, cr, uid, doc_id, model, lang_field, template, values, context=None): """Helper used when a report should be translated into a specific lang. 
<t t-foreach="doc_ids" t-as="doc_id"> <t t-raw="translate_doc(doc_id, doc_model, 'partner_id.lang', account.report_invoice_document')"/> </t> :param doc_id: id of the record to translate :param model: model of the record to translate :param lang_field': field of the record containing the lang :param template: name of the template to translate into the lang_field """ ctx = context.copy() doc = self.pool[model].browse(cr, uid, doc_id, context=ctx) qcontext = values.copy() # Do not force-translate if we chose to display the report in a specific lang if ctx.get('translatable') is True: qcontext['o'] = doc else: # Reach the lang we want to translate the doc into ctx['lang'] = eval('doc.%s' % lang_field, {'doc': doc}) qcontext['o'] = self.pool[model].browse(cr, uid, doc_id, context=ctx) return self.pool['ir.ui.view'].render(cr, uid, template, qcontext, context=ctx) def render(self, cr, uid, ids, template, values=None, context=None): """Allow to render a QWeb template python-side. This function returns the 'ir.ui.view' render but embellish it with some variables/methods used in reports. 
:param values: additionnal methods/variables used in the rendering :returns: html representation of the template """ if values is None: values = {} if context is None: context = {} context = dict(context, inherit_branding=True) # Tell QWeb to brand the generated html view_obj = self.pool['ir.ui.view'] def translate_doc(doc_id, model, lang_field, template): return self.translate_doc(cr, uid, doc_id, model, lang_field, template, values, context=context) user = self.pool['res.users'].browse(cr, uid, uid) website = None if request and hasattr(request, 'website'): if request.website is not None: website = request.website context = dict(context, translatable=context.get('lang') != request.website.default_lang_code) values.update( time=time, context_timestamp=lambda t: fields.datetime.context_timestamp(cr, uid, t, context), translate_doc=translate_doc, editable=True, user=user, res_company=user.company_id, website=website, ) return view_obj.render(cr, uid, template, values, context=context) #-------------------------------------------------------------------------- # Main report methods #-------------------------------------------------------------------------- @api.v7 def get_html(self, cr, uid, ids, report_name, data=None, context=None): """This method generates and returns html version of a report. """ # If the report is using a custom model to render its html, we must use it. # Otherwise, fallback on the generic html rendering. 
try: report_model_name = 'report.%s' % report_name particularreport_obj = self.pool[report_model_name] return particularreport_obj.render_html(cr, uid, ids, data=data, context=context) except KeyError: report = self._get_report_from_name(cr, uid, report_name) report_obj = self.pool[report.model] docs = report_obj.browse(cr, uid, ids, context=context) docargs = { 'doc_ids': ids, 'doc_model': report.model, 'docs': docs, } return self.render(cr, uid, [], report.report_name, docargs, context=context) @api.v8 def get_html(self, records, report_name, data=None): return self._model.get_html(self._cr, self._uid, records.ids, report_name, data=data, context=self._context) @api.v7 def get_pdf(self, cr, uid, ids, report_name, html=None, data=None, context=None): """This method generates and returns pdf version of a report. """ if context is None: context = {} if html is None: html = self.get_html(cr, uid, ids, report_name, data=data, context=context) html = html.decode('utf-8') # Ensure the current document is utf-8 encoded. # Get the ir.actions.report.xml record we are working on. report = self._get_report_from_name(cr, uid, report_name) # Check if we have to save the report or if we have to get one from the db. save_in_attachment = self._check_attachment_use(cr, uid, ids, report) # Get the paperformat associated to the report, otherwise fallback on the company one. 
if not report.paperformat_id: user = self.pool['res.users'].browse(cr, uid, uid) paperformat = user.company_id.paperformat_id else: paperformat = report.paperformat_id # Preparing the minimal html pages css = '' # Will contain local css headerhtml = [] contenthtml = [] footerhtml = [] irconfig_obj = self.pool['ir.config_parameter'] base_url = irconfig_obj.get_param(cr, SUPERUSER_ID, 'report.url') or irconfig_obj.get_param(cr, SUPERUSER_ID, 'web.base.url') # Minimal page renderer view_obj = self.pool['ir.ui.view'] render_minimal = partial(view_obj.render, cr, uid, 'report.minimal_layout', context=context) # The received html report must be simplified. We convert it in a xml tree # in order to extract headers, bodies and footers. try: root = lxml.html.fromstring(html) match_klass = "//div[contains(concat(' ', normalize-space(@class), ' '), ' {} ')]" for node in root.xpath("//html/head/style"): css += node.text for node in root.xpath(match_klass.format('header')): body = lxml.html.tostring(node) header = render_minimal(dict(css=css, subst=True, body=body, base_url=base_url)) headerhtml.append(header) for node in root.xpath(match_klass.format('footer')): body = lxml.html.tostring(node) footer = render_minimal(dict(css=css, subst=True, body=body, base_url=base_url)) footerhtml.append(footer) for node in root.xpath(match_klass.format('page')): # Previously, we marked some reports to be saved in attachment via their ids, so we # must set a relation between report ids and report's content. 
We use the QWeb # branding in order to do so: searching after a node having a data-oe-model # attribute with the value of the current report model and read its oe-id attribute if ids and len(ids) == 1: reportid = ids[0] else: oemodelnode = node.find(".//*[@data-oe-model='%s']" % report.model) if oemodelnode is not None: reportid = oemodelnode.get('data-oe-id') if reportid: reportid = int(reportid) else: reportid = False # Extract the body body = lxml.html.tostring(node) reportcontent = render_minimal(dict(css=css, subst=False, body=body, base_url=base_url)) contenthtml.append(tuple([reportid, reportcontent])) except lxml.etree.XMLSyntaxError: contenthtml = [] contenthtml.append(html) save_in_attachment = {} # Don't save this potentially malformed document # Get paperformat arguments set in the root html tag. They are prioritized over # paperformat-record arguments. specific_paperformat_args = {} for attribute in root.items(): if attribute[0].startswith('data-report-'): specific_paperformat_args[attribute[0]] = attribute[1] # Run wkhtmltopdf process return self._run_wkhtmltopdf( cr, uid, headerhtml, footerhtml, contenthtml, context.get('landscape'), paperformat, specific_paperformat_args, save_in_attachment ) @api.v8 def get_pdf(self, records, report_name, html=None, data=None): return self._model.get_pdf(self._cr, self._uid, records.ids, report_name, html=html, data=data, context=self._context) @api.v7 def get_action(self, cr, uid, ids, report_name, data=None, context=None): """Return an action of type ir.actions.report.xml. 
:param ids: Ids of the records to print (if not used, pass an empty list) :param report_name: Name of the template to generate an action for """ if ids: if not isinstance(ids, list): ids = [ids] context = dict(context or {}, active_ids=ids) report_obj = self.pool['ir.actions.report.xml'] idreport = report_obj.search(cr, uid, [('report_name', '=', report_name)], context=context) try: report = report_obj.browse(cr, uid, idreport[0], context=context) except IndexError: raise osv.except_osv( _('Bad Report Reference'), _('This report is not loaded into the database: %s.' % report_name) ) return { 'context': context, 'data': data, 'type': 'ir.actions.report.xml', 'report_name': report.report_name, 'report_type': report.report_type, 'report_file': report.report_file, 'context': context, } @api.v8 def get_action(self, records, report_name, data=None): return self._model.get_action(self._cr, self._uid, records.ids, report_name, data=data, context=self._context) #-------------------------------------------------------------------------- # Report generation helpers #-------------------------------------------------------------------------- @api.v7 def _check_attachment_use(self, cr, uid, ids, report): """ Check attachment_use field. If set to true and an existing pdf is already saved, load this one now. Else, mark save it. 
""" save_in_attachment = {} save_in_attachment['model'] = report.model save_in_attachment['loaded_documents'] = {} if report.attachment: for record_id in ids: obj = self.pool[report.model].browse(cr, uid, record_id) filename = eval(report.attachment, {'object': obj, 'time': time}) # If the user has checked 'Reload from Attachment' if report.attachment_use: alreadyindb = [('datas_fname', '=', filename), ('res_model', '=', report.model), ('res_id', '=', record_id)] attach_ids = self.pool['ir.attachment'].search(cr, uid, alreadyindb) if attach_ids: # Add the loaded pdf in the loaded_documents list pdf = self.pool['ir.attachment'].browse(cr, uid, attach_ids[0]).datas pdf = base64.decodestring(pdf) save_in_attachment['loaded_documents'][record_id] = pdf _logger.info('The PDF document %s was loaded from the database' % filename) continue # Do not save this document as we already ignore it # If the user has checked 'Save as Attachment Prefix' if filename is False: # May be false if, for instance, the 'attachment' field contains a condition # preventing to save the file. continue else: save_in_attachment[record_id] = filename # Mark current document to be saved return save_in_attachment @api.v8 def _check_attachment_use(self, records, report): return self._model._check_attachment_use( self._cr, self._uid, records.ids, report, context=self._context) def _check_wkhtmltopdf(self): return wkhtmltopdf_state def _run_wkhtmltopdf(self, cr, uid, headers, footers, bodies, landscape, paperformat, spec_paperformat_args=None, save_in_attachment=None): """Execute wkhtmltopdf as a subprocess in order to convert html given in input into a pdf document. 
:param header: list of string containing the headers :param footer: list of string containing the footers :param bodies: list of string containing the reports :param landscape: boolean to force the pdf to be rendered under a landscape format :param paperformat: ir.actions.report.paperformat to generate the wkhtmltopf arguments :param specific_paperformat_args: dict of prioritized paperformat arguments :param save_in_attachment: dict of reports to save/load in/from the db :returns: Content of the pdf as a string """ command_args = [] # Passing the cookie to wkhtmltopdf in order to resolve internal links. try: if request: command_args.extend(['--cookie', 'session_id', request.session.sid]) except AttributeError: pass # Wkhtmltopdf arguments command_args.extend(['--quiet']) # Less verbose error messages if paperformat: # Convert the paperformat record into arguments command_args.extend(self._build_wkhtmltopdf_args(paperformat, spec_paperformat_args)) # Force the landscape orientation if necessary if landscape and '--orientation' in command_args: command_args_copy = list(command_args) for index, elem in enumerate(command_args_copy): if elem == '--orientation': del command_args[index] del command_args[index] command_args.extend(['--orientation', 'landscape']) elif landscape and not '--orientation' in command_args: command_args.extend(['--orientation', 'landscape']) # Execute WKhtmltopdf pdfdocuments = [] temporary_files = [] for index, reporthtml in enumerate(bodies): local_command_args = [] pdfreport_fd, pdfreport_path = tempfile.mkstemp(suffix='.pdf', prefix='report.tmp.') temporary_files.append(pdfreport_path) # Directly load the document if we already have it if save_in_attachment and save_in_attachment['loaded_documents'].get(reporthtml[0]): with closing(os.fdopen(pdfreport_fd, 'w')) as pdfreport: pdfreport.write(save_in_attachment['loaded_documents'][reporthtml[0]]) pdfdocuments.append(pdfreport_path) continue else: os.close(pdfreport_fd) # Wkhtmltopdf handles 
header/footer as separate pages. Create them if necessary. if headers: head_file_fd, head_file_path = tempfile.mkstemp(suffix='.html', prefix='report.header.tmp.') temporary_files.append(head_file_path) with closing(os.fdopen(head_file_fd, 'w')) as head_file: head_file.write(headers[index]) local_command_args.extend(['--header-html', head_file_path]) if footers: foot_file_fd, foot_file_path = tempfile.mkstemp(suffix='.html', prefix='report.footer.tmp.') temporary_files.append(foot_file_path) with closing(os.fdopen(foot_file_fd, 'w')) as foot_file: foot_file.write(footers[index]) local_command_args.extend(['--footer-html', foot_file_path]) # Body stuff content_file_fd, content_file_path = tempfile.mkstemp(suffix='.html', prefix='report.body.tmp.') temporary_files.append(content_file_path) with closing(os.fdopen(content_file_fd, 'w')) as content_file: content_file.write(reporthtml[1]) try: wkhtmltopdf = [_get_wkhtmltopdf_bin()] + command_args + local_command_args wkhtmltopdf += [content_file_path] + [pdfreport_path] process = subprocess.Popen(wkhtmltopdf, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() if process.returncode not in [0, 1]: raise osv.except_osv(_('Report (PDF)'), _('Wkhtmltopdf failed (error code: %s). 
' 'Message: %s') % (str(process.returncode), err)) # Save the pdf in attachment if marked if reporthtml[0] is not False and save_in_attachment.get(reporthtml[0]): with open(pdfreport_path, 'rb') as pdfreport: attachment = { 'name': save_in_attachment.get(reporthtml[0]), 'datas': base64.encodestring(pdfreport.read()), 'datas_fname': save_in_attachment.get(reporthtml[0]), 'res_model': save_in_attachment.get('model'), 'res_id': reporthtml[0], } try: self.pool['ir.attachment'].create(cr, uid, attachment) except AccessError: _logger.warning("Cannot save PDF report %r as attachment", attachment['name']) else: _logger.info('The PDF document %s is now saved in the database', attachment['name']) pdfdocuments.append(pdfreport_path) except: raise # Return the entire document if len(pdfdocuments) == 1: entire_report_path = pdfdocuments[0] else: entire_report_path = self._merge_pdf(pdfdocuments) temporary_files.append(entire_report_path) with open(entire_report_path, 'rb') as pdfdocument: content = pdfdocument.read() # Manual cleanup of the temporary files for temporary_file in temporary_files: try: os.unlink(temporary_file) except (OSError, IOError): _logger.error('Error when trying to remove file %s' % temporary_file) return content def _get_report_from_name(self, cr, uid, report_name): """Get the first record of ir.actions.report.xml having the ``report_name`` as value for the field report_name. """ report_obj = self.pool['ir.actions.report.xml'] qwebtypes = ['qweb-pdf', 'qweb-html'] conditions = [('report_type', 'in', qwebtypes), ('report_name', '=', report_name)] idreport = report_obj.search(cr, uid, conditions)[0] return report_obj.browse(cr, uid, idreport) def _build_wkhtmltopdf_args(self, paperformat, specific_paperformat_args=None): """Build arguments understandable by wkhtmltopdf from a report.paperformat record. 
:paperformat: report.paperformat record :specific_paperformat_args: a dict containing prioritized wkhtmltopdf arguments :returns: list of string representing the wkhtmltopdf arguments """ command_args = [] if paperformat.format and paperformat.format != 'custom': command_args.extend(['--page-size', paperformat.format]) if paperformat.page_height and paperformat.page_width and paperformat.format == 'custom': command_args.extend(['--page-width', str(paperformat.page_width) + 'mm']) command_args.extend(['--page-height', str(paperformat.page_height) + 'mm']) if specific_paperformat_args and specific_paperformat_args.get('data-report-margin-top'): command_args.extend(['--margin-top', str(specific_paperformat_args['data-report-margin-top'])]) else: command_args.extend(['--margin-top', str(paperformat.margin_top)]) if specific_paperformat_args and specific_paperformat_args.get('data-report-dpi'): command_args.extend(['--dpi', str(specific_paperformat_args['data-report-dpi'])]) elif paperformat.dpi: if os.name == 'nt' and int(paperformat.dpi) <= 95: _logger.info("Generating PDF on Windows platform require DPI >= 96. 
Using 96 instead.") command_args.extend(['--dpi', '96']) else: command_args.extend(['--dpi', str(paperformat.dpi)]) if specific_paperformat_args and specific_paperformat_args.get('data-report-header-spacing'): command_args.extend(['--header-spacing', str(specific_paperformat_args['data-report-header-spacing'])]) elif paperformat.header_spacing: command_args.extend(['--header-spacing', str(paperformat.header_spacing)]) command_args.extend(['--margin-left', str(paperformat.margin_left)]) command_args.extend(['--margin-bottom', str(paperformat.margin_bottom)]) command_args.extend(['--margin-right', str(paperformat.margin_right)]) if paperformat.orientation: command_args.extend(['--orientation', str(paperformat.orientation)]) if paperformat.header_line: command_args.extend(['--header-line']) return command_args def _merge_pdf(self, documents): """Merge PDF files into one. :param documents: list of path of pdf files :returns: path of the merged pdf """ writer = PdfFileWriter() streams = [] # We have to close the streams *after* PdfFilWriter's call to write() for document in documents: pdfreport = file(document, 'rb') streams.append(pdfreport) reader = PdfFileReader(pdfreport) for page in range(0, reader.getNumPages()): writer.addPage(reader.getPage(page)) merged_file_fd, merged_file_path = tempfile.mkstemp(suffix='.html', prefix='report.merged.tmp.') with closing(os.fdopen(merged_file_fd, 'w')) as merged_file: writer.write(merged_file) for stream in streams: stream.close() return merged_file_path
agpl-3.0
matthiasdiener/spack
var/spack/repos/builtin/packages/r-nloptr/package.py
5
2105
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RNloptr(RPackage): """nloptr is an R interface to NLopt. NLopt is a free/open-source library for nonlinear optimization, providing a common interface for a number of different free optimization routines available online as well as original implementations of various other algorithms. See http://ab-initio.mit.edu/wiki/index.php/NLopt _Introduction for more information on the available algorithms. 
During installation on Unix the NLopt code is downloaded and compiled from the NLopt website.""" homepage = "https://cran.r-project.org/package=nloptr" url = "https://cran.rstudio.com/src/contrib/nloptr_1.0.4.tar.gz" list_url = "https://cran.rstudio.com/src/contrib/Archive/nloptr" version('1.0.4', 'f2775dfb4f7f5552d46937a04c062b0d') depends_on('r-testthat', type=('build', 'run'))
lgpl-2.1
julierthanjulie/PedestrianTracking
generate_frames.py
1
3979
""" This code generates frames from CSV values that can be stiched together using FFMPEG to animate pedestrian data. This version produces an animation at 4x speed. """ print "Importing..." # Please ensure the following dependencies are installed before use: import pylab import numpy as np import itertools import sys, getopt import operator import collections drawing_by_frame = [] # def generate_frames(argv): # Some default values if nothing is provided in command line arguments. traces = 'bubble_pop_traces.csv' background = 'trails_480.png' # Get command line arguments. # -f specify a file name. This code expects csv files in the format PedestrianID, X, Y, FrameNum # -b specify a backgroun image. Any format available to pylab is acceptable. try: opts,args = getopt.getopt(argv, "f:b:") except getopt.GetoptError: print "Getopt Error" exit(2) for opt, arg in opts: if opt == "-f": traces = arg elif opt == "-b": background = arg # Name each frame based on the filename figure_name = traces.split("/")[-1].split(".")[-2] # Load up csv file trace = np.loadtxt(traces, comments=';', delimiter=',') traces = itertools.groupby(trace, lambda x:x[0]) # These values should match those in pedestrian_tracking.py w,h=640,360 border=20 # Some values from trail validation valid = 0 avg_length = 0 num_traces = 0 # Load up background image. 
background = pylab.imread(background) pylab.imshow(background) for id,t in traces: pts = np.array(list(t)) invalid = False # Validate Trails if (pts[0,1]>border and pts[0,1]<w-border) and (pts[0,2]>border and pts[0,2]<h-border): invalid = True if (pts[-1,1]>border and pts[-1,1]<w-border) and (pts[-1,2]>border and pts[-1,2]<h-border): invalid = True if len(pts) < 200: invalid = True if ((pts[0,2] > h-border) and (pts[0,1] > w/2-75 and pts[0,1] < w/2+75) or (pts[-1,2] > h-border) and (pts[-1,1] > w/2-75 and pts[-1,1] < w/2+75)): invalid = True # For all valid trails, prepare them for generating animated trails by frame number if not invalid: num_traces += 1 avg_length += len(pts) # Drawing colour for traces given as RGB colour = (0,0,1) for pt in pts: this_frame = [pt[3], pt[1], pt[2], pt[0]] drawing_by_frame.append(this_frame) valid += 1 x = np.clip(pts[:,1],0,w) y = np.clip(pts[:,2],0,h) print "Valid Trails: " , valid, " Average Length:" , avg_length/num_traces drawing_by_frame.sort() last_frame = drawing_by_frame[-1][0] current_frame = drawing_by_frame[0][0] drawing_dict = collections.defaultdict(list) count = 0 while len(drawing_by_frame) > 0: #print "Next Frame, " , current_frame pylab.imshow(background) while drawing_by_frame[0][0] == current_frame: list_one = drawing_by_frame.pop(0) x = drawing_dict[list_one[3]] x.append([list_one[1], list_one[2]]) drawing_dict[list_one[3]] = x # Adjust mod value here to adjust frame drawing frequency # Draw stuff here if (current_frame % 10 ==0): print "Percentage Complete: " , (current_frame/last_frame)*100 draw_dict(drawing_dict, w, h, border, figure_name, current_frame, count) count += 1 pylab.clf() current_frame = drawing_by_frame[0][0] def draw_dict(dict, w, h, border, figure_name, frame, count): for trace in dict: print trace pts = dict[trace] pylab.plot([p[0] for p in pts], [p[1] for p in pts],'-',color=(0,0,1),alpha=0.5, linewidth=2) pylab.xlim(0,w) pylab.ylim(h,0) pylab.axis('off') pylab.subplots_adjust(0,0,1,1,0,0) 
pylab.savefig("Frames/" + figure_name + "_" + str(count).zfill(6) + '.png', dpi=150,bbox_inches='tight', pad_inches=0) #pylab.savefig("Frames/" + 'frame' + str(int(frame)) + '.png', dpi=150,bbox_inches='tight', pad_inches=0) if __name__ == "__main__": print "Starting Frame Generation" generate_frames(sys.argv[1:])
mit
donny/mako-mori
external/boto/ec2/address.py
13
4260
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.ec2.ec2object import EC2Object class Address(EC2Object): """ Represents an EC2 Elastic IP Address :ivar public_ip: The Elastic IP address. :ivar instance_id: The instance the address is associated with (if any). :ivar domain: Indicates whether the address is a EC2 address or a VPC address (standard|vpc). :ivar allocation_id: The allocation ID for the address (VPC addresses only). :ivar association_id: The association ID for the address (VPC addresses only). :ivar network_interface_id: The network interface (if any) that the address is associated with (VPC addresses only). :ivar network_interface_owner_id: The owner IID (VPC addresses only). :ivar private_ip_address: The private IP address associated with the Elastic IP address (VPC addresses only). 
""" def __init__(self, connection=None, public_ip=None, instance_id=None): EC2Object.__init__(self, connection) self.connection = connection self.public_ip = public_ip self.instance_id = instance_id self.domain = None self.allocation_id = None self.association_id = None self.network_interface_id = None self.network_interface_owner_id = None self.private_ip_address = None def __repr__(self): return 'Address:%s' % self.public_ip def endElement(self, name, value, connection): if name == 'publicIp': self.public_ip = value elif name == 'instanceId': self.instance_id = value elif name == 'domain': self.domain = value elif name == 'allocationId': self.allocation_id = value elif name == 'associationId': self.association_id = value elif name == 'networkInterfaceId': self.network_interface_id = value elif name == 'networkInterfaceOwnerId': self.network_interface_owner_id = value elif name == 'privateIpAddress': self.private_ip_address = value else: setattr(self, name, value) def release(self): """ Free up this Elastic IP address. :see: :meth:`boto.ec2.connection.EC2Connection.release_address` """ if self.allocation_id: return self.connection.release_address(None, self.allocation_id) else: return self.connection.release_address(self.public_ip) delete = release def associate(self, instance_id): """ Associate this Elastic IP address with a currently running instance. :see: :meth:`boto.ec2.connection.EC2Connection.associate_address` """ return self.connection.associate_address(instance_id, self.public_ip) def disassociate(self): """ Disassociate this Elastic IP address from a currently running instance. :see: :meth:`boto.ec2.connection.EC2Connection.disassociate_address` """ if self.association_id: return self.connection.disassociate_address(None, self.association_id) else: return self.connection.disassociate_address(self.public_ip)
mit
cristiana214/cristianachavez214-cristianachavez
python-build/python-libs/BeautifulSoup/BeautifulSoup.py
138
78871
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2009, Leonard Richardson All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
""" from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.1.0.1" __copyright__ = "Copyright (c) 2004-2009 Leonard Richardson" __license__ = "New-style BSD" import codecs import markupbase import types import re from HTMLParser import HTMLParser, HTMLParseError try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} try: set except NameError: from sets import Set as set #These hacks make Beautiful Soup able to parse XML with namespaces markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match DEFAULT_OUTPUT_ENCODING = "utf-8" # First, the classes that represent markup elements. def sob(unicode, encoding): """Returns either the given Unicode string or its encoding.""" if encoding is None: return unicode else: return unicode.encode(encoding) class PageElement: """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.contents.index(self) if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent: # We're replacing this element with one of its siblings. index = self.parent.contents.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. 
myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: self.parent.contents.remove(self) except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if (isinstance(newChild, basestring) or isinstance(newChild, unicode)) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent != None: # We're 'inserting' an element that's already one # of this object's children. if newChild.parent == self: index = self.find(newChild) if index and index < position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. 
position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, 
attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. 
r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name else: # Build a SoupStrainer strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i: i = i.next yield i def nextSiblingGenerator(self): i = self while i: i = i.nextSibling yield i def previousGenerator(self): i = self while i: i = i.previous yield i def previousSiblingGenerator(self): i = self while i: i = i.previousSibling yield i def parentGenerator(self): i = self while i: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. 
.""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): return unicode.__new__(cls, value) return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) def __getnewargs__(self): return (unicode(self),) def __getattr__(self, attr): """text.string gives you text. This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def encode(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.decode().encode(encoding) def decodeGivenEventualEncoding(self, eventualEncoding): return self class CData(NavigableString): def decodeGivenEventualEncoding(self, eventualEncoding): return u'<![CDATA[' + self + u']]>' class ProcessingInstruction(NavigableString): def decodeGivenEventualEncoding(self, eventualEncoding): output = self if u'%SOUP-ENCODING%' in output: output = self.substituteEncoding(output, eventualEncoding) return u'<?' + output + u'?>' class Comment(NavigableString): def decodeGivenEventualEncoding(self, eventualEncoding): return u'<!--' + self + u'-->' class Declaration(NavigableString): def decodeGivenEventualEncoding(self, eventualEncoding): return u'<!' + self + u'>' class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" def _invert(h): "Cheap function to invert a hash." 
i = {} for k,v in h.items(): i[v] = k return i XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", "quot" : '"', "amp" : "&", "lt" : "<", "gt" : ">" } XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&amp;%s;' % x else: return u'&%s;' % x def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs == None: attrs = [] self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False self.convertHTMLEntities = parser.convertHTMLEntities self.convertXMLEntities = parser.convertXMLEntities self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities def convert(kval): "Converts HTML, XML and numeric entities in the attribute value." 
k, val = kval if val is None: return kval return (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", self._convertEntities, val)) self.attrs = map(convert, self.attrs) def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. 
tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. Should this be fixed?""" if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.decode(eventualEncoding=encoding) BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + ")") def _sub_entity(self, x): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" def __unicode__(self): return self.decode() def __str__(self): return self.encode() def encode(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): return self.decode(prettyPrint, indentLevel, encoding).encode(encoding) def decode(self, prettyPrint=False, indentLevel=0, eventualEncoding=DEFAULT_OUTPUT_ENCODING): """Returns a string or Unicode representation of this tag and its contents. 
To get Unicode, pass None for encoding.""" attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isString(val): if (self.containsSubstitutions and eventualEncoding is not None and '%SOUP-ENCODING%' in val): val = self.substituteEncoding(val, eventualEncoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" if "'" in val: # TODO: replace with apos when # appropriate. val = val.replace("'", "&squot;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too. val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) if val is None: # Handle boolean attributes. 
decoded = key else: decoded = fmt % (key, val) attrs.append(decoded) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '</%s>' % self.name indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.decodeContents(prettyPrint, indentContents, eventualEncoding) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (self.name, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def decompose(self): """Recursively destroys the contents of this tree.""" contents = [i for i in self.contents] for i in contents: if isinstance(i, Tag): i.decompose() else: i.extract() self.extract() def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.encode(encoding, True) def encodeContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): return self.decodeContents(prettyPrint, indentLevel).encode(encoding) def decodeContents(self, prettyPrint=False, indentLevel=0, eventualEncoding=DEFAULT_OUTPUT_ENCODING): """Renders the contents of this tag as a string in the given encoding. 
If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.decodeGivenEventualEncoding(eventualEncoding) elif isinstance(c, Tag): s.append(c.decode(prettyPrint, indentLevel, eventualEncoding)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods. Will go away in 4.0. first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) # 3.x compatibility methods. Will go away in 4.0. 
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): if encoding is None: return self.decodeContents(prettyPrint, indentLevel, encoding) else: return self.encodeContents(encoding, prettyPrint, indentLevel) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def recursiveChildGenerator(self): if not len(self.contents): raise StopIteration stopNode = self._lastRecursiveChild().next current = self.contents[0] while current is not stopNode: yield current current = current.next def childGenerator(self): if not len(self.contents): raise StopIteration current = self.contents[0] while current: yield current current = current.nextSibling raise StopIteration # Next, a couple classes to represent queries and their results. class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isString(attrs): kwargs['class'] = attrs attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): 
if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if isList(markup) and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. elif isinstance(markup, NavigableString) or \ isString(markup): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst == True and type(matchAgainst) == types.BooleanType: result = markup != None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup is not None and not isString(markup): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. 
result = markup and matchAgainst.search(markup) elif (isList(matchAgainst) and (markup is not None or not isString(matchAgainst))): result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isString(markup): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def isList(l): """Convenience method that works with all 2.x versions of Python to determine whether or not something is listlike.""" return ((hasattr(l, '__iter__') and not isString(l)) or (type(l) in (types.ListType, types.TupleType))) def isString(s): """Convenience method that works with all 2.x versions of Python to determine whether or not something is stringlike.""" try: return isinstance(s, unicode) or isinstance(s, basestring) except NameError: return isinstance(s, str) def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif isList(portion) and not isString(portion): #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built # Now, the parser classes. class HTMLParserBuilder(HTMLParser): def __init__(self, soup): HTMLParser.__init__(self) self.soup = soup # We inherit feed() and reset(). 
def handle_starttag(self, name, attrs): if name == 'meta': self.soup.extractCharsetFromMeta(attrs) else: self.soup.unknown_starttag(name, attrs) def handle_endtag(self, name): self.soup.unknown_endtag(name) def handle_data(self, content): self.soup.handle_data(content) def _toStringSubclass(self, text, subclass): """Adds a certain piece of text to the tree as a NavigableString subclass.""" self.soup.endData() self.handle_data(text) self.soup.endData(subclass) def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction) def handle_comment(self, text): "Handle comments as Comment objects." self._toStringSubclass(text, Comment) def handle_charref(self, ref): "Handle character references as data." if self.soup.convertEntities: data = unichr(int(ref)) else: data = '&#%s;' % ref self.handle_data(data) def handle_entityref(self, ref): """Handle entity references as data, possibly converting known HTML and/or XML entity references to the corresponding Unicode characters.""" data = None if self.soup.convertHTMLEntities: try: data = unichr(name2codepoint[ref]) except KeyError: pass if not data and self.soup.convertXMLEntities: data = self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) if not data and self.soup.convertHTMLEntities and \ not self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): # TODO: We've got a problem here. We're told this is # an entity reference, but it's not an XML entity # reference or an HTML entity reference. Nonetheless, # the logical thing to do is to pass it through as an # unrecognized entity reference. # # Except: when the input is "&carol;" this function # will be called with input "carol". When the input is # "AT&T", this function will be called with input # "T". 
We have no way of knowing whether a semicolon # was present originally, so we don't know whether # this is an unknown entity or just a misplaced # ampersand. # # The more common case is a misplaced ampersand, so I # escape the ampersand and omit the trailing semicolon. data = "&amp;%s" % ref if not data: # This case is different from the one above, because we # haven't already gone through a supposedly comprehensive # mapping of entities to Unicode characters. We might not # have gone through any mapping at all. So the chances are # very high that this is a real entity, and not a # misplaced ampersand. data = "&%s;" % ref self.handle_data(data) def handle_decl(self, data): "Handle DOCTYPEs and the like as Declaration objects." self._toStringSubclass(data, Declaration) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = HTMLParser.parse_declaration(self, i) except HTMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulStoneSoup(Tag): """This class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] 
This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} RESET_NESTING_TAGS = {} QUOTE_TAGS = {} PRESERVE_WHITESPACE_TAGS = [] MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda x: '<!' + x.group(1) + '>') ] ROOT_TAG_NAME = u'[document]' HTML_ENTITIES = "html" XML_ENTITIES = "xml" XHTML_ENTITIES = "xhtml" # TODO: This only exists for backwards-compatibility ALL_ENTITIES = XHTML_ENTITIES # Used when determining whether a text node is all whitespace and # can be replaced with a single space. A text node that contains # fancy Unicode spaces (usually non-breaking) should be left # alone. STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, markupMassage=True, smartQuotesTo=XML_ENTITIES, convertEntities=None, selfClosingTags=None, isHTML=False, builder=HTMLParserBuilder): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. HTMLParser will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills HTMLParser, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke HTMLParser: <br/> (No space between name of closing tag and tag close) <! 
--Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" self.parseOnlyThese = parseOnlyThese self.fromEncoding = fromEncoding self.smartQuotesTo = smartQuotesTo self.convertEntities = convertEntities # Set the rules for how we'll deal with the entities we # encounter if self.convertEntities: # It doesn't make sense to convert encoded characters to # entities even while you're converting entities to Unicode. # Just convert it all to Unicode. self.smartQuotesTo = None if convertEntities == self.HTML_ENTITIES: self.convertXMLEntities = False self.convertHTMLEntities = True self.escapeUnrecognizedEntities = True elif convertEntities == self.XHTML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = True self.escapeUnrecognizedEntities = False elif convertEntities == self.XML_ENTITIES: self.convertXMLEntities = True self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False else: self.convertXMLEntities = False self.convertHTMLEntities = False self.escapeUnrecognizedEntities = False self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) self.builder = builder(self) self.reset() if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() self.markup = markup self.markupMassage = markupMassage try: self._feed(isHTML=isHTML) except StopParsing: pass self.markup = None # The markup can now be GCed. self.builder = None # So can the builder. def _feed(self, inDocumentEncoding=None, isHTML=False): # Convert the document to Unicode. 
markup = self.markup if isinstance(markup, unicode): if not hasattr(self, 'originalEncoding'): self.originalEncoding = None else: dammit = UnicodeDammit\ (markup, [self.fromEncoding, inDocumentEncoding], smartQuotesTo=self.smartQuotesTo, isHTML=isHTML) markup = dammit.unicode self.originalEncoding = dammit.originalEncoding self.declaredHTMLEncoding = dammit.declaredHTMLEncoding if markup: if self.markupMassage: if not isList(self.markupMassage): self.markupMassage = self.MARKUP_MASSAGE for fix, m in self.markupMassage: markup = fix.sub(m, markup) # TODO: We get rid of markupMassage so that the # soup object can be deepcopied later on. Some # Python installations can't copy regexes. If anyone # was relying on the existence of markupMassage, this # might cause problems. del(self.markupMassage) self.builder.reset() self.builder.feed(markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def isSelfClosingTag(self, name): """Returns true iff the given string is the name of a self-closing tag according to this parser.""" return self.SELF_CLOSING_TAGS.has_key(name) \ or self.instanceSelfClosingTags.has_key(name) def reset(self): Tag.__init__(self, self, self.ROOT_TAG_NAME) self.hidden = 1 self.builder.reset() self.currentData = [] self.currentTag = None self.tagStack = [] self.quoteStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() # Tags with just one string-owning child get the child as a # 'string' property, so that soup.tag.string is shorthand for # soup.tag.contents[0] if len(self.currentTag.contents) == 1 and \ isinstance(self.currentTag.contents[0], NavigableString): self.currentTag.string = self.currentTag.contents[0] #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) 
self.currentTag = self.tagStack[-1] def endData(self, containerClass=NavigableString): if self.currentData: currentData = u''.join(self.currentData) if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and not set([tag.name for tag in self.tagStack]).intersection( self.PRESERVE_WHITESPACE_TAGS)): if '\n' in currentData: currentData = '\n' else: currentData = ' ' self.currentData = [] if self.parseOnlyThese and len(self.tagStack) <= 1 and \ (not self.parseOnlyThese.text or \ not self.parseOnlyThese.search(currentData)): return o = containerClass(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: return numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. 
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers != None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurance. popTo = name break if (nestingResetTriggers != None and p.name in nestingResetTriggers) \ or (nestingResetTriggers == None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): #print "Start tag %s: %s" % (name, attrs) if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs)) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not self.isSelfClosingTag(name) and not selfClosing: self._smartPop(name) if self.parseOnlyThese and len(self.tagStack) <= 1 \ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): return tag = Tag(self, name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or self.isSelfClosingTag(name): self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) self.literal = 1 return tag def unknown_endtag(self, name): #print "End tag %s" % name if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print "</%s> is not real!" 
% name self.handle_data('</%s>' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() self.literal = (len(self.quoteStack) > 0) def handle_data(self, data): self.currentData.append(data) def extractCharsetFromMeta(self, attrs): self.unknown_starttag('meta', attrs) class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (ie. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurance of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurance of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class. 
If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.""" def __init__(self, *args, **kwargs): if not kwargs.has_key('smartQuotesTo'): kwargs['smartQuotesTo'] = self.HTML_ENTITIES kwargs['isHTML'] = True BeautifulStoneSoup.__init__(self, *args, **kwargs) SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base']) PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) QUOTE_TAGS = {'script' : None, 'textarea' : None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center'] #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del'] #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : [], 'tr' : ['table', 'tbody', 'tfoot', 'thead'], 'td' : ['tr'], 'th' : ['tr'], 'thead' : ['table'], 'tbody' : ['table'], 'tfoot' : ['table'], } NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre'] #If one of these tags is encountered, all tags up to the next tag of #this type are popped. 
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) # Used to detect the charset in a META tag; see start_meta CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) def extractCharsetFromMeta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if (self.declaredHTMLEncoding is not None or self.originalEncoding == self.fromEncoding): # An HTML encoding was sniffed while converting # the document to Unicode, or an HTML encoding was # sniffed during a previous pass through the # document, or an encoding was specified # explicitly and it worked. Rewrite the meta tag. def rewrite(match): return match.group(1) + "%SOUP-ENCODING%" newAttr = self.CHARSET_RE.sub(rewrite, contentType) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the encoding information. 
newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing pass tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True class StopParsing(Exception): pass class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big'] I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. 
It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.""" RESET_NESTING_TAGS = buildTagMap('noscript') NESTABLE_TAGS = {} class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names! It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! 
For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisiness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except ImportError: chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. # Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. try: import cjkcodecs.aliases except ImportError: pass try: import iconv_codec except ImportError: pass class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. 
CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml', isHTML=False): self.declaredHTMLEncoding = None self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup, isHTML) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, match): """Changes a MS smart quote character to an XML or HTML entity.""" orig = match.group(1) sub = self.MS_CHARS.get(orig) if type(sub) == types.TupleType: if self.smartQuotesTo == 'xml': sub = '&#x'.encode() + sub[1].encode() + ';'.encode() else: sub = '&'.encode() + sub[0].encode() + ';'.encode() else: sub = sub.encode() return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. 
if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): smart_quotes_re = "([\x80-\x9f])" smart_quotes_compiled = re.compile(smart_quotes_re) markup = smart_quotes_compiled.sub(self._subMSChar, markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data, isHTML=False): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and 
(xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass except: xml_encoding_match = None xml_encoding_re = '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode() xml_encoding_match = re.compile(xml_encoding_re).match(xml_data) if not xml_encoding_match and isHTML: meta_re = '<\s*meta[^>]+charset=([^>]*?)[;\'">]'.encode() regexp = re.compile(meta_re, re.I) xml_encoding_match = regexp.search(xml_data) if xml_encoding_match is not None: xml_encoding = xml_encoding_match.groups()[0].decode( 'ascii').lower() if isHTML: self.declaredHTMLEncoding = xml_encoding if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and self._codec(charset.replace("-", "_"))) \ or charset def _codec(self, charset): if not charset: 
return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, 201,202,106,107,108,109,110,111,112,113,114,203,204,205, 206,207,208,209,126,115,116,117,118,119,120,121,122,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, 250,251,252,253,254,255) import string c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(c.EBCDIC_TO_ASCII_MAP) MS_CHARS = { '\x80' : ('euro', '20AC'), '\x81' : ' ', '\x82' : ('sbquo', '201A'), '\x83' : ('fnof', '192'), '\x84' : ('bdquo', '201E'), '\x85' : ('hellip', '2026'), '\x86' : ('dagger', '2020'), '\x87' : ('Dagger', '2021'), '\x88' : ('circ', '2C6'), '\x89' : ('permil', '2030'), '\x8A' : ('Scaron', '160'), '\x8B' : ('lsaquo', '2039'), '\x8C' : ('OElig', '152'), '\x8D' : '?', '\x8E' : ('#x17D', '17D'), '\x8F' : '?', '\x90' : '?', '\x91' : ('lsquo', '2018'), '\x92' : ('rsquo', '2019'), '\x93' : ('ldquo', '201C'), '\x94' : ('rdquo', '201D'), '\x95' : ('bull', '2022'), '\x96' : ('ndash', '2013'), '\x97' : ('mdash', '2014'), '\x98' : ('tilde', '2DC'), '\x99' : ('trade', 
'2122'), '\x9a' : ('scaron', '161'), '\x9b' : ('rsaquo', '203A'), '\x9c' : ('oelig', '153'), '\x9d' : '?', '\x9e' : ('#x17E', '17E'), '\x9f' : ('Yuml', ''),} ####################################################################### #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) print soup.prettify()
apache-2.0
sameerparekh/pants
tests/python/pants_test/tasks/test_builddict.py
15
1523
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) from pants.backend.core.register import build_file_aliases as register_core from pants.backend.core.tasks import builddictionary, reflect from pants.backend.jvm.register import build_file_aliases as register_jvm from pants.backend.python.register import build_file_aliases as register_python from pants_test.base_test import BaseTest class ExtractedContentSanityTests(BaseTest): @property def alias_groups(self): return register_core().merge(register_jvm().merge(register_python())) def setUp(self): super(ExtractedContentSanityTests, self).setUp() self._syms = reflect.assemble_buildsyms(build_file_parser=self.build_file_parser) def test_sub_tocls(self): python_symbols = builddictionary.python_sub_tocl(self._syms).e # python_requirements goes through build_file_aliases.curry_context. # It's in the "Python" sub_tocl, but tenuously self.assertTrue('python_requirements' in python_symbols) # Some less-tenuous sanity checks for sym in ['python_library', 'python_tests']: self.assertTrue(sym in python_symbols) jvm_symbols = builddictionary.jvm_sub_tocl(self._syms).e for sym in ['java_library', 'scala_library']: self.assertTrue(sym in jvm_symbols)
apache-2.0
alexgorban/models
research/inception/inception/slim/ops.py
20
18781
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.

Additionally it maintains a collection with update_ops that need to be
updated after the ops have been computed, for example to update moving means
and moving variances of batch_norm.

Ops that have different behavior during training or eval have an is_training
parameter. Additionally Ops that contain variables.variable have a trainable
parameter, which control if the ops variables are trainable or not.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


import tensorflow as tf

from tensorflow.python.training import moving_averages

from inception.slim import losses
from inception.slim import scopes
from inception.slim import variables

# Used to keep the update ops done by batch_norm.
UPDATE_OPS_COLLECTION = '_update_ops_'


@scopes.add_arg_scope
def batch_norm(inputs,
               decay=0.999,
               center=True,
               scale=False,
               epsilon=0.001,
               moving_vars='moving_vars',
               activation=None,
               is_training=True,
               trainable=True,
               restore=True,
               scope=None,
               reuse=None):
  """Adds a Batch Normalization layer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels]
            or [batch_size, channels].
    decay: decay for the moving average.
    center: If True, subtract beta. If False, beta is not created and ignored.
    scale: If True, multiply by gamma. If False, gamma is
      not used. When the next layer is linear (also e.g. ReLU), this can be
      disabled since the scaling can be done by the next layer.
    epsilon: small float added to variance to avoid dividing by zero.
    moving_vars: collection to store the moving_mean and moving_variance.
    activation: activation function.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable or not.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.

  Returns:
    a tensor representing the output of the operation.
  """
  inputs_shape = inputs.get_shape()
  with tf.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse):
    # Normalize over all axes but the last (the channels axis).
    axis = list(range(len(inputs_shape) - 1))
    params_shape = inputs_shape[-1:]
    # Allocate parameters for the beta and gamma of the normalization.
    beta, gamma = None, None
    if center:
      beta = variables.variable('beta',
                                params_shape,
                                initializer=tf.zeros_initializer(),
                                trainable=trainable,
                                restore=restore)
    if scale:
      gamma = variables.variable('gamma',
                                 params_shape,
                                 initializer=tf.ones_initializer(),
                                 trainable=trainable,
                                 restore=restore)
    # Create moving_mean and moving_variance add them to
    # GraphKeys.MOVING_AVERAGE_VARIABLES collections.
    moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
    moving_mean = variables.variable('moving_mean',
                                     params_shape,
                                     initializer=tf.zeros_initializer(),
                                     trainable=False,
                                     restore=restore,
                                     collections=moving_collections)
    moving_variance = variables.variable('moving_variance',
                                         params_shape,
                                         initializer=tf.ones_initializer(),
                                         trainable=False,
                                         restore=restore,
                                         collections=moving_collections)
    if is_training:
      # Calculate the moments based on the individual batch.
      mean, variance = tf.nn.moments(inputs, axis)

      # The moving-average assignments are NOT part of the forward graph;
      # they are collected so the training loop can run them explicitly.
      update_moving_mean = moving_averages.assign_moving_average(
          moving_mean, mean, decay)
      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
      update_moving_variance = moving_averages.assign_moving_average(
          moving_variance, variance, decay)
      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    else:
      # Just use the moving_mean and moving_variance.
      mean = moving_mean
      variance = moving_variance
    # Normalize the activations.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, beta, gamma, epsilon)
    outputs.set_shape(inputs.get_shape())
    if activation:
      outputs = activation(outputs)
    return outputs


def _two_element_tuple(int_or_tuple):
  """Converts `int_or_tuple` to height, width.

  Several of the functions that follow accept arguments as either
  a tuple of 2 integers or a single integer.  A single integer
  indicates that the 2 values of the tuple are the same.

  This functions normalizes the input value by always returning a tuple.

  Args:
    int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.

  Returns:
    A tuple with 2 values.

  Raises:
    ValueError: If `int_or_tuple` it not well formed.
  """
  if isinstance(int_or_tuple, (list, tuple)):
    if len(int_or_tuple) != 2:
      raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
    return int(int_or_tuple[0]), int(int_or_tuple[1])
  if isinstance(int_or_tuple, int):
    return int(int_or_tuple), int(int_or_tuple)
  if isinstance(int_or_tuple, tf.TensorShape):
    if len(int_or_tuple) == 2:
      return int_or_tuple[0], int_or_tuple[1]
  raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
                   'length 2')


@scopes.add_arg_scope
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.

  conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the convolution
  operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      of the filters. Can be an int if both values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: one of 'VALID' or 'SAME'.
    activation: activation function.
    stddev: standard deviation of the truncated guassian weight distribution.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If is None don't use it.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable or not.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.

  Returns:
    a tensor representing the output of the operation.
  """
  with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    num_filters_in = inputs.get_shape()[-1]
    weights_shape = [kernel_h, kernel_w,
                     num_filters_in, num_filters_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
                        padding=padding)
    if batch_norm_params is not None:
      # Batch norm subsumes the bias term, so no 'biases' variable here.
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(conv, **batch_norm_params)
    else:
      bias_shape = [num_filters_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.bias_add(conv, biases)
    if activation:
      outputs = activation(outputs)
    return outputs


@scopes.add_arg_scope
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None,
       reuse=None):
  """Adds a fully connected layer followed by an optional batch_norm layer.

  FC creates a variable called 'weights', representing the fully connected
  weight matrix, that is multiplied by the input. If `batch_norm` is None, a
  second variable called 'biases' is added to the result of the initial
  vector-matrix multiplication.

  Args:
    inputs: a [B x N] tensor where B is the batch size and N is the number of
            input units in the layer.
    num_units_out: the number of output units in the layer.
    activation: activation function.
    stddev: the standard deviation for the weights.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If is None don't use it.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable or not.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.

  Returns:
     the tensor variable representing the result of the series of operations.
  """
  with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse):
    num_units_in = inputs.get_shape()[1]
    weights_shape = [num_units_in, num_units_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    if batch_norm_params is not None:
      outputs = tf.matmul(inputs, weights)
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(outputs, **batch_norm_params)
    else:
      bias_shape = [num_units_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.xw_plus_b(inputs, weights, biases)
    if activation:
      outputs = activation(outputs)
    return outputs


def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for name_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.name_scope(scope, 'OneHotEncoding', [labels]):
    batch_size = labels.get_shape()[0]
    # Pair each row index with its label, then scatter 1.0 at those
    # (row, label) coordinates of a dense [batch_size, num_classes] matrix.
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels


@scopes.add_arg_scope
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
  """Adds a Max Pooling layer.

  It is assumed by the wrapper that the pooling is only done per image and not
  in depth or batch.

  Args:
    inputs: a tensor of size [batch_size, height, width, depth].
    kernel_size: a list of length 2: [kernel_height, kernel_width] of the
      pooling kernel over which the op is computed. Can be an int if both
      values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same.  Note that presently
      both strides must have the same value.
    padding: the padding method, either 'VALID' or 'SAME'.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the results of the pooling operation.
  Raises:
    ValueError: if 'kernel_size' is not a 2-D list
  """
  with tf.name_scope(scope, 'MaxPool', [inputs]):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    return tf.nn.max_pool(inputs,
                          ksize=[1, kernel_h, kernel_w, 1],
                          strides=[1, stride_h, stride_w, 1],
                          padding=padding)


@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
  """Adds a Avg Pooling layer.

  It is assumed by the wrapper that the pooling is only done per image and not
  in depth or batch.

  Args:
    inputs: a tensor of size [batch_size, height, width, depth].
    kernel_size: a list of length 2: [kernel_height, kernel_width] of the
      pooling kernel over which the op is computed. Can be an int if both
      values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: the padding method, either 'VALID' or 'SAME'.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the results of the pooling operation.
  """
  with tf.name_scope(scope, 'AvgPool', [inputs]):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    return tf.nn.avg_pool(inputs,
                          ksize=[1, kernel_h, kernel_w, 1],
                          strides=[1, stride_h, stride_w, 1],
                          padding=padding)


@scopes.add_arg_scope
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
  """Returns a dropout layer applied to the input.

  Args:
    inputs: the tensor to pass to the Dropout layer.
    keep_prob: the probability of keeping each input unit.
    is_training: whether or not the model is in training mode. If so, dropout
      is applied and values scaled. Otherwise, inputs is returned.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  # At eval time dropout is the identity; only apply it during training.
  if is_training and keep_prob > 0:
    with tf.name_scope(scope, 'Dropout', [inputs]):
      return tf.nn.dropout(inputs, keep_prob)
  else:
    return inputs


def flatten(inputs, scope=None):
  """Flattens the input while maintaining the batch_size.

    Assumes that the first dimension represents the batch.

  Args:
    inputs: a tensor of size [batch_size, ...].
    scope: Optional scope for name_scope.

  Returns:
    a flattened tensor with shape [batch_size, k].
  Raises:
    ValueError: if inputs.shape is wrong.
  """
  if len(inputs.get_shape()) < 2:
    raise ValueError('Inputs must be have a least 2 dimensions')
  dims = inputs.get_shape()[1:]
  # k is the product of all non-batch dimensions; must be statically known.
  k = dims.num_elements()
  with tf.name_scope(scope, 'Flatten', [inputs]):
    return tf.reshape(inputs, [-1, k])


def repeat_op(repetitions, inputs, op, *args, **kwargs):
  """Build a sequential Tower starting from inputs by using an op repeatedly.

  It creates new scopes for each operation by increasing the counter.
  Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    it will repeat the given op under the following variable_scopes:
      conv1/Conv
      conv1/Conv_1
      conv1/Conv_2

  Args:
    repetitions: number or repetitions.
    inputs: a tensor of size [batch_size, height, width, channels].
    op: an operation.
    *args: args for the op.
    **kwargs: kwargs for the op.

  Returns:
    a tensor result of applying the operation op, num times.
  Raises:
    ValueError: if the op is unknown or wrong.
  """
  # 'scope' is popped so the repeated op auto-numbers its own sub-scopes.
  scope = kwargs.pop('scope', None)
  with tf.variable_scope(scope, 'RepeatOp', [inputs]):
    tower = inputs
    for _ in range(repetitions):
      tower = op(tower, *args, **kwargs)
    return tower
apache-2.0
by46/simplekit
simplekit/email/__init__.py
1
4151
import httplib
import os.path

import requests
import six

from simplekit import settings
from simplekit.exceptions import MailException

# Mail priority levels understood by the mail API.
PRIORITY_NORMAL = 0
PRIORITY_LOW = 1
PRIORITY_HIGH = 2

# Body content types.
CONTENT_TYPE_HTML = 0
CONTENT_TYPE_TEXT = 1

# Subject/body encodings.
ENCODING_UTF8 = 0
ENCODING_ASCII = 1
ENCODING_UTF32 = 2
ENCODING_UNICODE = 3

# Attachment media types.
MEDIA_TYPE_GIF = 0
MEDIA_TYPE_JPEG = 1
MEDIA_TYPE_TIFF = 2
MEDIA_TYPE_PDF = 3
MEDIA_TYPE_RTF = 4
MEDIA_TYPE_SOAP = 5
MEDIA_TYPE_ZIP = 6
MEDIA_TYPE_OTHER = 7

# Mail transport types.
MAIL_TYPE_SMTP = 1
MAIL_TYPE_LONDON2 = 0


class SmtpSetting(dict):
    """SMTP transport settings payload: encodings plus optional attachments.

    Subclasses ``dict`` so it serializes directly as JSON; ``self.__dict__ =
    self`` additionally allows attribute-style access to the keys.
    """

    def __init__(self, subject_encoding, body_encoding, attachments=None):
        kwargs = dict(SubjectEncoding=subject_encoding,
                      BodyEncoding=body_encoding,
                      Attachments=attachments)
        super(SmtpSetting, self).__init__(**kwargs)
        self.__dict__ = self


class MailAttachment(dict):
    """A single mail attachment: file name, base64 content and media type."""

    def __init__(self, filename, file_content, media_type=MEDIA_TYPE_OTHER):
        kwargs = dict(FileName=filename,
                      FileContent=file_content,
                      MediaType=media_type)
        super(MailAttachment, self).__init__(**kwargs)
        self.__dict__ = self


class LondonIISetting(dict):
    """Template-based ("London II") transport settings payload."""

    def __init__(self, company_code, country_code, language_code, system_id, template_id,
                 mail_template_variables):
        kwargs = dict(CompanyCode=company_code,
                      CountryCode=country_code,
                      LanguageCode=language_code,
                      SystemID=system_id,
                      TemplateID=template_id,
                      MailTemplateVariables=mail_template_variables)
        super(LondonIISetting, self).__init__(**kwargs)
        self.__dict__ = self


class MailTemplateVariable(dict):
    """A key/value substitution entry for a London II mail template."""

    def __init__(self, key, value):
        kwargs = dict(Key=key, Value=value)
        super(MailTemplateVariable, self).__init__(**kwargs)


def send_email_inner(sender, to, subject, body, cc=None, bcc=None, priority=PRIORITY_NORMAL,
                     content_type=CONTENT_TYPE_TEXT, mail_type=None, smtp_setting=None,
                     london_2_setting=None):
    """POST a mail request to the mail API endpoint.

    :param sender: from-address.
    :param to: recipient address or a list/tuple of addresses (joined with ';').
    :param subject: mail subject.
    :param body: mail body text/html.
    :param cc: optional CC address(es).
    :param bcc: optional BCC address(es).
    :param priority: one of the ``PRIORITY_*`` constants.
    :param content_type: one of the ``CONTENT_TYPE_*`` constants.
    :param mail_type: one of the ``MAIL_TYPE_*`` constants.
    :param smtp_setting: :class:`SmtpSetting` when ``mail_type`` is SMTP.
    :param london_2_setting: :class:`LondonIISetting` for template mail.
    :raises MailException: if the API does not answer 200 OK.
    """
    if isinstance(to, (list, tuple)):
        to = ';'.join(to)
    body = dict(From=sender, To=to, CC=cc, BCC=bcc, Subject=subject, Body=body,
                Priority=priority, ContentType=content_type, MailType=mail_type,
                SmtpSetting=smtp_setting, LondonIISetting=london_2_setting)
    response = requests.post(settings.URL_EMAIL, json=body,
                             headers={'Content-Type': 'Application/json',
                                      'accept': 'application/json'})
    if response.status_code != httplib.OK:
        # Drop the (potentially huge, base64) attachments before logging.
        del body['SmtpSetting']
        raise MailException("Send mail use api {0} status code: {1}\n body : {2}\n response content : {3}".format(
            settings.URL_EMAIL, response.status_code, body, response.content))


def send_email(sender, to, subject, body, cc=None, bcc=None, priority=PRIORITY_NORMAL,
               content_type=CONTENT_TYPE_TEXT, files=None):
    """Send an SMTP mail, optionally attaching files.

    :param files: iterable of file paths (read and base64-encoded) and/or
        ready-made :class:`MailAttachment` objects.

    Other parameters are as for :func:`send_email_inner`.
    """
    attachments = []
    import base64
    if files:
        for item in files:
            if isinstance(item, six.string_types):
                filename = os.path.basename(item)
                # BUGFIX: use a context manager so the file handle is always
                # closed (the original leaked it via open(...).read()).
                with open(item, 'rb') as attachment_file:
                    file_content = base64.b64encode(attachment_file.read())
                media_type = MEDIA_TYPE_OTHER
                attachment = MailAttachment(filename, file_content, media_type)
                attachments.append(attachment)
            else:
                attachments.append(item)

    smtp_setting = SmtpSetting(ENCODING_UTF8, ENCODING_UTF8, attachments)
    send_email_inner(sender, to, subject, body, cc, bcc, priority, content_type,
                     MAIL_TYPE_SMTP, smtp_setting)


if __name__ == '__main__':
    send_email('benjamin.c.yan@newegg.com', 'benjamin.c.yan@newegg.com',
               '(info) testing', 'testing body', files=['__init__.py'])
mit
allmende/synnefo
snf-cyclades-app/synnefo/logic/networks.py
6
5966
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from functools import wraps
from synnefo.db import transaction

from django.conf import settings
from snf_django.lib.api import faults
from synnefo.api import util
from synnefo import quotas
from synnefo.db.models import Network, Backend
from synnefo.db.utils import validate_mac
from synnefo.db.pools import EmptyPool
from synnefo.logic import backend as backend_mod
from synnefo.logic import utils

from logging import getLogger
log = getLogger(__name__)


def validate_network_action(network, action):
    """Check that `action` may be applied to `network`; raise otherwise."""
    if network.deleted:
        raise faults.BadRequest("Network has been deleted.")


def network_command(action):
    """Decorator: validate the action and wrap the command in a transaction."""
    def decorator(func):
        @wraps(func)
        @transaction.commit_on_success()
        def wrapper(network, *args, **kwargs):
            validate_network_action(network, action)
            return func(network, *args, **kwargs)
        return wrapper
    return decorator


@transaction.commit_on_success
def create(userid, name, flavor, link=None, mac_prefix=None, mode=None,
           floating_ip_pool=False, tags=None, public=False, drained=False,
           project=None):
    """Create a new Network of the given flavor for `userid`.

    Raises faults.BadRequest on invalid parameters and
    faults.ServiceUnavailable when flavor resources are exhausted.
    Returns the created Network object.
    """
    if flavor is None:
        raise faults.BadRequest("Missing request parameter 'type'")
    elif flavor not in Network.FLAVORS.keys():
        raise faults.BadRequest("Invalid network type '%s'" % flavor)

    # MAC_FILTERED/PHYSICAL_VLAN allocate their identifier from a pool;
    # user-supplied overrides would collide with the pool bookkeeping.
    if mac_prefix is not None and flavor == "MAC_FILTERED":
        raise faults.BadRequest("Cannot override MAC_FILTERED mac-prefix")
    if link is not None and flavor == "PHYSICAL_VLAN":
        raise faults.BadRequest("Cannot override PHYSICAL_VLAN link")

    utils.check_name_length(name, Network.NETWORK_NAME_LENGTH, "Network name "
                            "is too long")

    try:
        fmode, flink, fmac_prefix, ftags = util.values_from_flavor(flavor)
    except EmptyPool:
        log.error("Failed to allocate resources for network of type: %s",
                  flavor)
        msg = "Failed to allocate resources for network."
        raise faults.ServiceUnavailable(msg)

    mode = mode or fmode
    link = link or flink
    mac_prefix = mac_prefix or fmac_prefix
    tags = tags or ftags

    validate_mac(mac_prefix + "0:00:00:00")

    # Check that given link is unique!
    if (link is not None and flavor == "IP_LESS_ROUTED" and
       Network.objects.filter(deleted=False, mode=mode, link=link).exists()):
        msg = "Link '%s' is already used." % link
        raise faults.BadRequest(msg)

    if project is None:
        project = userid

    network = Network.objects.create(
        name=name,
        userid=userid,
        project=project,
        flavor=flavor,
        mode=mode,
        link=link,
        mac_prefix=mac_prefix,
        tags=tags,
        public=public,
        external_router=public,
        floating_ip_pool=floating_ip_pool,
        action='CREATE',
        state='ACTIVE',
        drained=drained)

    if link is None:
        # The auto-generated link embeds the DB id, which only exists
        # after the initial save above.
        network.link = "%slink-%d" % (settings.BACKEND_PREFIX_ID, network.id)
        network.save()

    # Issue commission to Quotaholder and accept it since at the end of
    # this transaction the Network object will be created in the DB.
    # Note: the following call does a commit!
    if not public:
        quotas.issue_and_accept_commission(network)

    return network


def create_network_in_backends(network):
    """Create `network` in every online backend; return the Ganeti job ids."""
    job_ids = []
    for bend in Backend.objects.filter(offline=False):
        network.create_backend_network(bend)
        jobs = backend_mod.create_network(network=network, backend=bend,
                                          connect=True)
        job_ids.extend(jobs)
    return job_ids


@network_command("RENAME")
def rename(network, name):
    """Rename `network` after validating the new name's length."""
    utils.check_name_length(name, Network.NETWORK_NAME_LENGTH, "Network name "
                            "is too long")
    network.name = name
    network.save()
    return network


@network_command("DESTROY")
def delete(network):
    """Mark `network` for destruction and issue deletion jobs to backends.

    Raises faults.Conflict if ports or floating IPs are still attached.
    """
    if network.nics.exists():
        # BUGFIX: error message had a typo and a duplicated word
        # ("on network network", "netowrk").
        raise faults.Conflict("Cannot delete network. There are ports still"
                              " configured on network %s" % network.id)
    if network.ips.filter(deleted=False, floating_ip=True).exists():
        msg = "Cannot delete network. Network has allocated floating IPs."
        raise faults.Conflict(msg)

    network.action = "DESTROY"
    # Mark network as drained to prevent automatic allocation of
    # public/floating IPs while the network is being deleted
    if network.public:
        network.drained = True
    network.save()

    # BUGFIX: the original used `for ... else:` with no `break`, so the
    # `else` branch ran unconditionally and update_network_state() was
    # called even while backend deletion jobs were still pending.  Use an
    # explicit emptiness check so the direct state update only happens
    # when the network exists in no backend (as the comment intended).
    backend_networks = network.backend_networks.exclude(operstate="DELETED")
    if backend_networks:
        # Delete network from all backends where it still exists.
        for bnet in backend_networks:
            backend_mod.delete_network(network, bnet.backend)
    else:
        # If network does not exist in any backend, update the network state
        backend_mod.update_network_state(network)
    return network


@network_command("REASSIGN")
def reassign(network, project):
    """Move `network` to another `project` and issue the quota commission."""
    action_fields = {"to_project": project, "from_project": network.project}
    log.info("Reassigning network %s from project %s to %s",
             network, network.project, project)
    network.project = project
    network.save()
    quotas.issue_and_accept_commission(network, action="REASSIGN",
                                       action_fields=action_fields)
    return network
gpl-3.0
bixbydev/Bixby_v3
database/mysql/base.py
1
2501
#!/usr/bin/env python # Filename: base.py #=====================================================================# # Copyright (c) 2015 Bradley Hilton <bradleyhilton@bradleyhilton.com> # # Distributed under the terms of the GNU GENERAL PUBLIC LICENSE V3. # #=====================================================================# import os import sys import time from config import config from logger.log import log log.debug('mysql.base Loaded') print config.MYSQL_BACKUPPATH try: import MySQLdb except ImportError, e: 'The Module MySQLdb is not installed' log.critical('Failed to load MySQLdb Module: '+str(e)) sys.exit(1) def backup_mysql(): """Backups the DB until things get very large I am going to do this every time. Or until I am sure my code is good.""" dnsdt = str(time.strftime('%Y%m%d%H%M%S', time.localtime())) dump_file = os.path.join(config.MYSQL_BACKUPPATH , config.MYSQL_DB+'_bak_'+dnsdt+'.sql') log.info("""Creating mysqldump: '%s'""" %dump_file) os.system("""mysqldump -h '%s' -u '%s' -p'%s' '%s' > '%s'""" \ %(config.MYSQL_HOST, config.MYSQL_USER, config.MYSQL_PASSWORD, config.MYSQL_DB, dump_file)) def restore_mysql(db, sqlfile): if not os.path.exists(sqlfile): raise TypeError("""This is totally the wrong error because I don't know the right error""") log.info("Restoring DB: %s from File: %s" %(db, sqlfile)) os.system("""mysql -h '%s' -u '%s' -p'%s' '%s' < %s""" \ %(config.MYSQL_HOST, config.MYSQL_USER, config.MYSQL_PASSWORD, db, sqlfile)) class CursorWrapper(object): """Wrapper to open a MySQL Connection and creates a Cursor""" def __init__(self, host=config.MYSQL_HOST, user=config.MYSQL_USER, passwd=config.MYSQL_PASSWORD, db=config.MYSQL_DB): self.example = 'Testing' self.connection = MySQLdb.connect (host = host, user = user, passwd = passwd, db = db) self.cursor = self.connection.cursor() log.info("""Setting autocommit = \"True\"""") self.connection.autocommit(True) log.info("Connected to MySQL Host: %s Database: %s" % (host, db)) def close(self): 
self.cursor.close() log.info('MySQL Cursor Closed') # self.connection.commit() self.connection.close() log.info('MySQL Connection Closed')
gpl-3.0
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
python-packages/keras-0.2.0/keras/datasets/imdb.py
41
1850
from __future__ import absolute_import
from six.moves import cPickle
import gzip
from .data_utils import get_file
import random
from six.moves import zip
import numpy as np


def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113,
              start_char=1, oov_char=2, index_from=3):
    """Load the IMDB sentiment dataset as (X_train, y_train), (X_test, y_test).

    Args:
        path: local filename for the pickled dataset (downloaded if missing).
        nb_words: keep only the `nb_words` most frequent words; rarer words
            are replaced by `oov_char` (or dropped if `oov_char` is None).
        skip_top: also replace/drop the `skip_top` most frequent words.
        maxlen: drop every sequence of length >= maxlen.
        seed: RNG seed so X and labels are shuffled in the same order.
        start_char: prepended to each sequence (None to disable).
        oov_char: replacement index for out-of-vocabulary words.
        index_from: offset added to every word index.

    Returns:
        Two (data, labels) tuples split according to `test_split`.

    Raises:
        Exception: if `maxlen` filters out every sequence.
    """
    path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/imdb.pkl")

    if path.endswith(".gz"):
        f = gzip.open(path, 'rb')
    else:
        f = open(path, 'rb')

    # BUGFIX: close the file even if unpickling raises.
    try:
        X, labels = cPickle.load(f)
    finally:
        f.close()

    # Shuffle data and labels with the same seed so pairs stay aligned.
    np.random.seed(seed)
    np.random.shuffle(X)
    np.random.seed(seed)
    np.random.shuffle(labels)

    if start_char is not None:
        X = [[start_char] + [w + index_from for w in x] for x in X]
    elif index_from:
        X = [[w + index_from for w in x] for x in X]

    if maxlen:
        new_X = []
        new_labels = []
        for x, y in zip(X, labels):
            if len(x) < maxlen:
                new_X.append(x)
                new_labels.append(y)
        X = new_X
        labels = new_labels
        # BUGFIX: without this check, an over-aggressive maxlen later fails
        # with an opaque "max() arg is an empty sequence" ValueError.
        if not X:
            raise Exception('After filtering for sequences shorter than maxlen=%s, '
                            'no sequence was kept. Increase maxlen.' % str(maxlen))

    if not nb_words:
        nb_words = max([max(x) for x in X])

    # by convention, use 2 as OOV word
    # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
    if oov_char is not None:
        X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
    else:
        nX = []
        for x in X:
            nx = []
            for w in x:
                if (w >= nb_words or w < skip_top):
                    nx.append(w)
            nX.append(nx)
        X = nX

    X_train = X[:int(len(X) * (1 - test_split))]
    y_train = labels[:int(len(X) * (1 - test_split))]

    X_test = X[int(len(X) * (1 - test_split)):]
    y_test = labels[int(len(X) * (1 - test_split)):]

    return (X_train, y_train), (X_test, y_test)
bsd-3-clause
shsingh/ansible
test/units/modules/network/fortios/test_fortios_vpn_certificate_remote.py
21
8513
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <https://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler

try:
    from ansible.modules.network.fortios import fortios_vpn_certificate_remote
except ImportError:
    pytest.skip("Could not load required modules for testing", allow_module_level=True)


@pytest.fixture(autouse=True)
def connection_mock(mocker):
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_vpn_certificate_remote.Connection')
    return connection_class_mock


fos_instance = FortiOSHandler(connection_mock)

# The vpn.certificate.remote attributes shared by every scenario below.
_CERT_FIELDS = {
    'name': 'default_name_3',
    'range': 'global',
    'remote': 'test_value_5',
    'source': 'factory'
}


def _certificate_config(extra=None):
    """Return a fresh config dict, optionally with extra (foreign) keys."""
    config = dict(_CERT_FIELDS)
    if extra:
        config.update(extra)
    return config


def _run_module(state, certificate_config):
    """Invoke the module entry point with the standard input envelope."""
    input_data = {
        'username': 'admin',
        'state': state,
        'vpn_certificate_remote': certificate_config,
        'vdom': 'root'}
    return fortios_vpn_certificate_remote.fortios_vpn_certificate(input_data, fos_instance)


def test_vpn_certificate_remote_creation(mocker):
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                                   return_value=set_method_result)

    is_error, changed, response = _run_module('present', _certificate_config())

    set_method_mock.assert_called_with('vpn.certificate', 'remote',
                                       data=dict(_CERT_FIELDS), vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200


def test_vpn_certificate_remote_creation_fails(mocker):
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                                   return_value=set_method_result)

    is_error, changed, response = _run_module('present', _certificate_config())

    set_method_mock.assert_called_with('vpn.certificate', 'remote',
                                       data=dict(_CERT_FIELDS), vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500


def test_vpn_certificate_remote_removal(mocker):
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
                                      return_value=delete_method_result)

    is_error, changed, response = _run_module('absent', _certificate_config())

    delete_method_mock.assert_called_with('vpn.certificate', 'remote', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200


def test_vpn_certificate_remote_deletion_fails(mocker):
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
                                      return_value=delete_method_result)

    is_error, changed, response = _run_module('absent', _certificate_config())

    delete_method_mock.assert_called_with('vpn.certificate', 'remote', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500


def test_vpn_certificate_remote_idempotent(mocker):
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                                   return_value=set_method_result)

    is_error, changed, response = _run_module('present', _certificate_config())

    set_method_mock.assert_called_with('vpn.certificate', 'remote',
                                       data=dict(_CERT_FIELDS), vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404


def test_vpn_certificate_remote_filter_foreign_attributes(mocker):
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
                                   return_value=set_method_result)

    # The module must strip attributes that are not part of the schema.
    is_error, changed, response = _run_module(
        'present', _certificate_config(extra={'random_attribute_not_valid': 'tag'}))

    set_method_mock.assert_called_with('vpn.certificate', 'remote',
                                       data=dict(_CERT_FIELDS), vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
gpl-3.0
hongliang5623/sentry
src/sentry/db/models/base.py
27
2948
""" sentry.db.models ~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import logging from django.db import models from django.db.models import signals from .fields.bounded import BoundedBigAutoField from .manager import BaseManager from .query import update __all__ = ('BaseModel', 'Model', 'sane_repr') UNSAVED = object() def sane_repr(*attrs): if 'id' not in attrs and 'pk' not in attrs: attrs = ('id',) + attrs def _repr(self): cls = type(self).__name__ pairs = ( '%s=%s' % (a, repr(getattr(self, a, None))) for a in attrs) return u'<%s at 0x%x: %s>' % (cls, id(self), ', '.join(pairs)) return _repr class BaseModel(models.Model): class Meta: abstract = True objects = BaseManager() update = update def __init__(self, *args, **kwargs): super(BaseModel, self).__init__(*args, **kwargs) self._update_tracked_data() def __getstate__(self): d = self.__dict__.copy() # we cant serialize weakrefs d.pop('_Model__data', None) return d def __reduce__(self): (model_unpickle, stuff, _) = super(BaseModel, self).__reduce__() return (model_unpickle, stuff, self.__getstate__()) def __setstate__(self, state): self.__dict__.update(state) self._update_tracked_data() def __get_field_value(self, field): if isinstance(field, models.ForeignKey): return getattr(self, field.column) return getattr(self, field.name) def _update_tracked_data(self): "Updates a local copy of attributes values" if self.id: data = {} for f in self._meta.fields: try: data[f.column] = self.__get_field_value(f) except AttributeError as e: # this case can come up from pickling logging.exception(unicode(e)) self.__data = data else: self.__data = UNSAVED def has_changed(self, field_name): "Returns ``True`` if ``field`` has changed since initialization." 
if self.__data is UNSAVED: return False field = self._meta.get_field(field_name) return self.__data.get(field_name) != self.__get_field_value(field) def old_value(self, field_name): "Returns the previous value of ``field``" if self.__data is UNSAVED: return None return self.__data.get(field_name) def __model_post_save(instance, **kwargs): if not isinstance(instance, BaseModel): return instance._update_tracked_data() class Model(BaseModel): id = BoundedBigAutoField(primary_key=True) class Meta: abstract = True signals.post_save.connect(__model_post_save)
bsd-3-clause
alxgu/ansible
lib/ansible/modules/network/aci/mso_schema_template_externalepg.py
21
6089
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: mso_schema_template_externalepg
short_description: Manage external EPGs in schema templates
description:
- Manage external EPGs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
  schema:
    description:
    - The name of the schema.
    type: str
    required: yes
  template:
    description:
    - The name of the template.
    type: str
    required: yes
  externalepg:
    description:
    - The name of the external EPG to manage.
    type: str
    aliases: [ name ]
  display_name:
    description:
    - The name as displayed on the MSO web interface.
    type: str
  vrf:
    description:
    - The VRF associated to this ANP.
    type: dict
  state:
    description:
    - Use C(present) or C(absent) for adding or removing.
    - Use C(query) for listing an object or multiple objects.
    type: str
    choices: [ absent, present, query ]
    default: present
extends_documentation_fragment: mso
'''

EXAMPLES = r'''
- name: Add a new external EPG
  mso_schema_template_externalepg:
    host: mso_host
    username: admin
    password: SomeSecretPassword
    schema: Schema 1
    template: Template 1
    externalepg: External EPG 1
    state: present
  delegate_to: localhost

- name: Remove an external EPG
  mso_schema_template_externalepg:
    host: mso_host
    username: admin
    password: SomeSecretPassword
    schema: Schema 1
    template: Template 1
    externalepg: external EPG1
    state: absent
  delegate_to: localhost

- name: Query a specific external EPGs
  mso_schema_template_externalepg:
    host: mso_host
    username: admin
    password: SomeSecretPassword
    schema: Schema 1
    template: Template 1
    externalepg: external EPG1
    state: query
  delegate_to: localhost
  register: query_result

- name: Query all external EPGs
  mso_schema_template_externalepg:
    host: mso_host
    username: admin
    password: SomeSecretPassword
    schema: Schema 1
    template: Template 1
    state: query
  delegate_to: localhost
  register: query_result
'''

RETURN = r'''
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, issubset


def main():
    """Ensure the requested external EPG state on the Multi-Site Orchestrator."""
    argument_spec = mso_argument_spec()
    argument_spec.update(
        schema=dict(type='str', required=True),
        template=dict(type='str', required=True),
        externalepg=dict(type='str', aliases=['name']),  # This parameter is not required for querying all objects
        display_name=dict(type='str'),
        vrf=dict(type='dict', options=mso_reference_spec()),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['externalepg']],
            ['state', 'present', ['externalepg', 'vrf']],
        ],
    )

    schema = module.params['schema']
    template = module.params['template']
    externalepg = module.params['externalepg']
    display_name = module.params['display_name']
    vrf = module.params['vrf']
    state = module.params['state']

    mso = MSOModule(module)

    # Resolve the schema name to its id; fail early if it does not exist.
    schema_obj = mso.get_obj('schemas', displayName=schema)
    if schema_obj:
        schema_id = schema_obj['id']
    else:
        mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))

    schema_path = 'schemas/{id}'.format(**schema_obj)

    # Validate the template exists within the schema.
    templates = [t['name'] for t in schema_obj['templates']]
    if template not in templates:
        mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
    template_idx = templates.index(template)

    # Get external EPGs.  Use .get() so a template that has no external EPGs
    # yet yields an empty list instead of a raw KeyError traceback.
    externalepgs = [e['name'] for e in schema_obj['templates'][template_idx].get('externalEpgs', [])]

    if externalepg is not None and externalepg in externalepgs:
        externalepg_idx = externalepgs.index(externalepg)
        mso.existing = schema_obj['templates'][template_idx]['externalEpgs'][externalepg_idx]

    if state == 'query':
        if externalepg is None:
            mso.existing = schema_obj['templates'][template_idx].get('externalEpgs', [])
        elif not mso.existing:
            mso.fail_json(msg="External EPG '{externalepg}' not found".format(externalepg=externalepg))
        mso.exit_json()

    eepgs_path = '/templates/{0}/externalEpgs'.format(template)
    eepg_path = '/templates/{0}/externalEpgs/{1}'.format(template, externalepg)
    ops = []

    mso.previous = mso.existing
    if state == 'absent':
        if mso.existing:
            mso.sent = mso.existing = {}
            ops.append(dict(op='remove', path=eepg_path))

    elif state == 'present':
        vrf_ref = mso.make_reference(vrf, 'vrf', schema_id, template)

        # Default the display name to the EPG name on first creation only.
        if display_name is None and not mso.existing:
            display_name = externalepg

        payload = dict(
            name=externalepg,
            displayName=display_name,
            vrfRef=vrf_ref,
            # FIXME
            subnets=[],
            contractRelationships=[],
        )

        mso.sanitize(payload, collate=True)

        if mso.existing:
            ops.append(dict(op='replace', path=eepg_path, value=mso.sent))
        else:
            ops.append(dict(op='add', path=eepgs_path + '/-', value=mso.sent))

        mso.existing = mso.proposed

    if not module.check_mode:
        mso.request(schema_path, method='PATCH', data=ops)

    mso.exit_json()


if __name__ == "__main__":
    main()
gpl-3.0
ZihengJiang/mxnet
python/mxnet/ndarray/utils.py
5
9264
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# coding: utf-8
"""Utility functions for NDArray and BaseSparseNDArray."""
import ctypes

from ..base import _LIB, check_call, py_str, c_str, string_types, mx_uint, NDArrayHandle, c_array
from .ndarray import NDArray
from .ndarray import array as _array
from .ndarray import empty as _empty_ndarray
from .ndarray import zeros as _zeros_ndarray
from .sparse import zeros as _zeros_sparse_ndarray
from .sparse import empty as _empty_sparse_ndarray
from .sparse import array as _sparse_array
from .sparse import _ndarray_cls

# SciPy is optional; sparse interop is only enabled when it is installed.
try:
    import scipy.sparse as spsp
except ImportError:
    spsp = None


def zeros(shape, ctx=None, dtype=None, stype=None, aux_types=None, **kwargs):
    """Return a new array of given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array
    ctx : Context, optional
        An optional device context (default is the current default context)
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`)
    stype: string, optional
        The storage type of the empty array, such as 'row_sparse', 'csr', etc.
    aux_types: list of numpy.dtype, optional
        An optional list of types of the aux data for RowSparseNDArray or CSRNDArray.
        The default value for CSRNDArray is [`int64`, `int64`] for `indptr` and `indices`.
        The default value for RowSparseNDArray is [`int64`] for `indices`.

    Returns
    -------
    NDArray, CSRNDArray or RowSparseNDArray
        A created array

    Examples
    --------
    >>> mx.nd.zeros((1,2), mx.cpu(), stype='csr')
    <CSRNDArray 1x2 @cpu(0)>
    >>> mx.nd.zeros((1,2), mx.cpu(), 'float16', stype='row_sparse').asnumpy()
    array([[ 0.,  0.]], dtype=float16)
    """
    # Delegate to the dense or sparse implementation based on storage type.
    if stype is None or stype == 'default':
        return _zeros_ndarray(shape, ctx, dtype, **kwargs)
    else:
        return _zeros_sparse_ndarray(stype, shape, ctx, dtype, aux_types, **kwargs)


def empty(shape, ctx=None, dtype=None, stype=None, aux_types=None):
    """Returns a new array of given shape and type, without initializing entries.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    stype : str, optional
        An optional storage type (default is `default`).
    aux_types: list of numpy.dtype, optional
        An optional list of types of the aux data for RowSparseNDArray or CSRNDArray.
        The default value for CSRNDArray is [`int64`, `int64`] for `indptr` and `indices`.
        The default value for RowSparseNDArray is [`int64`] for `indices`.

    Returns
    -------
    NDArray, CSRNDArray or RowSparseNDArray
        A created array.

    Examples
    --------
    >>> mx.nd.empty(1)
    <NDArray 1 @cpu(0)>
    >>> mx.nd.empty((1,2), mx.gpu(0), 'float16')
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.empty((1,2), stype='csr')
    <CSRNDArray 1x2 @cpu(0)>
    """
    if stype is None or stype == 'default':
        return _empty_ndarray(shape, ctx, dtype)
    else:
        return _empty_sparse_ndarray(stype, shape, ctx, dtype, aux_types)


def array(source_array, ctx=None, dtype=None, aux_types=None):
    """Creates an array from any object exposing the array interface.

    Parameters
    ----------
    source_array : array_like
        An object exposing the array interface, an object whose `__array__`
        method returns an array, or any (nested) sequence.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array. The default dtype is ``source_array.dtype``
        if `source_array` is an `NDArray`, `float32` otherwise.
    aux_types: list of numpy.dtype, optional
        An optional list of types of the aux data for RowSparseNDArray or CSRNDArray.
        The default value for CSRNDArray is [`int64`, `int64`] for `indptr` and `indices`.
        The default value for RowSparseNDArray is [`int64`] for `indices`.

    Returns
    -------
    NDArray, RowSparseNDArray or CSRNDArray
        An array with the same contents as the `source_array`.

    Examples
    --------
    >>> import numpy as np
    >>> mx.nd.array([[1, 2], [3, 4]])
    <NDArray 2x2 @cpu(0)>
    >>> mx.nd.array(np.zeros((3, 2)))
    <NDArray 3x2 @cpu(0)>
    >>> mx.nd.array(mx.nd.zeros((3, 2), stype='row_sparse'))
    <RowSparseNDArray 3x2 @cpu(0)>
    """
    # Use the *public* scipy name (spsp.csr_matrix); the private submodule
    # path spsp.csr.csr_matrix is deprecated/removed in modern SciPy.
    if spsp is not None and isinstance(source_array, spsp.csr_matrix):
        return _sparse_array(source_array, ctx=ctx, dtype=dtype, aux_types=aux_types)
    elif isinstance(source_array, NDArray) and source_array.stype != 'default':
        return _sparse_array(source_array, ctx=ctx, dtype=dtype, aux_types=aux_types)
    else:
        return _array(source_array, ctx=ctx, dtype=dtype)


def load(fname):
    """Loads an array from file.

    See more details in ``save``.

    Parameters
    ----------
    fname : str
        The filename.

    Returns
    -------
    list of NDArray, RowSparseNDArray or CSRNDArray, or \
    dict of str to NDArray, RowSparseNDArray or CSRNDArray
        Loaded data.
    """
    if not isinstance(fname, string_types):
        raise TypeError('fname required to be a string')
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoad(c_str(fname),
                                  ctypes.byref(out_size),
                                  ctypes.byref(handles),
                                  ctypes.byref(out_name_size),
                                  ctypes.byref(names)))
    if out_name_size.value == 0:
        # Unnamed save: return a plain list, in saved order.
        return [_ndarray_cls(NDArrayHandle(handles[i]))
                for i in range(out_size.value)]
    else:
        assert out_name_size.value == out_size.value
        return dict(
            (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
            for i in range(out_size.value))


def save(fname, data):
    """Saves a list of arrays or a dict of str->array to file.

    Examples of filenames:

    - ``/path/to/file``
    - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
    - ``hdfs://path/to/file`` (if compiled with HDFS supports)

    Parameters
    ----------
    fname : str
        The filename.
    data : NDArray, RowSparseNDArray or CSRNDArray, \
           or list of NDArray, RowSparseNDArray or CSRNDArray, \
           or dict of str to NDArray, RowSparseNDArray or CSRNDArray
        The data to save.

    Examples
    --------
    >>> x = mx.nd.zeros((2,3))
    >>> y = mx.nd.ones((1,4))
    >>> mx.nd.save('my_list', [x,y])
    >>> mx.nd.save('my_dict', {'x':x, 'y':y})
    >>> mx.nd.load('my_list')
    [<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
    >>> mx.nd.load('my_dict')
    {'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
    """
    # Normalize a single array to a one-element list.
    if isinstance(data, NDArray):
        data = [data]
    handles = []
    if isinstance(data, dict):
        keys = []
        for key, val in data.items():
            if not isinstance(key, string_types):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            if not isinstance(val, NDArray):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            keys.append(c_str(key))
            handles.append(val.handle)
        keys = c_array(ctypes.c_char_p, keys)
    elif isinstance(data, list):
        for val in data:
            if not isinstance(val, NDArray):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            handles.append(val.handle)
        keys = None
    else:
        raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
                         "or a list of NDarrays.")
    check_call(_LIB.MXNDArraySave(c_str(fname),
                                  mx_uint(len(handles)),
                                  c_array(NDArrayHandle, handles),
                                  keys))
apache-2.0
Tahsin-Mayeesha/Udacity-Machine-Learning-Nanodegree
projects/titanic_survival_exploration/titanic_visualizations.py
24
5425
import numpy as np import pandas as pd import matplotlib.pyplot as plt def filter_data(data, condition): """ Remove elements that do not match the condition provided. Takes a data list as input and returns a filtered list. Conditions should be a list of strings of the following format: '<field> <op> <value>' where the following operations are valid: >, <, >=, <=, ==, != Example: ["Sex == 'male'", 'Age < 18'] """ field, op, value = condition.split(" ") # convert value into number or strip excess quotes if string try: value = float(value) except: value = value.strip("\'\"") # get booleans for filtering if op == ">": matches = data[field] > value elif op == "<": matches = data[field] < value elif op == ">=": matches = data[field] >= value elif op == "<=": matches = data[field] <= value elif op == "==": matches = data[field] == value elif op == "!=": matches = data[field] != value else: # catch invalid operation codes raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.") # filter data and outcomes data = data[matches].reset_index(drop = True) return data def survival_stats(data, outcomes, key, filters = []): """ Print out selected statistics regarding survival, given a feature of interest and any number of filters (including no filters) """ # Check that the key exists if key not in data.columns.values : print "'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key) return False # Return the function before visualizing if 'Cabin' or 'Ticket' # is selected: too many unique categories to display if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'): print "'{}' has too many unique categories to display! 
Try a different feature.".format(key) return False # Merge data and outcomes into single dataframe all_data = pd.concat([data, outcomes], axis = 1) # Apply filters to data for condition in filters: all_data = filter_data(all_data, condition) # Create outcomes DataFrame all_data = all_data[[key, 'Survived']] # Create plotting figure plt.figure(figsize=(8,6)) # 'Numerical' features if(key == 'Age' or key == 'Fare'): # Remove NaN values from Age data all_data = all_data[~np.isnan(all_data[key])] # Divide the range of data into bins and count survival rates min_value = all_data[key].min() max_value = all_data[key].max() value_range = max_value - min_value # 'Fares' has larger range of values than 'Age' so create more bins if(key == 'Fare'): bins = np.arange(0, all_data['Fare'].max() + 20, 20) if(key == 'Age'): bins = np.arange(0, all_data['Age'].max() + 10, 10) # Overlay each bin's survival rates nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True) surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True) plt.hist(nonsurv_vals, bins = bins, alpha = 0.6, color = 'red', label = 'Did not survive') plt.hist(surv_vals, bins = bins, alpha = 0.6, color = 'green', label = 'Survived') # Add legend to plot plt.xlim(0, bins.max()) plt.legend(framealpha = 0.8) # 'Categorical' features else: # Set the various categories if(key == 'Pclass'): values = np.arange(1,4) if(key == 'Parch' or key == 'SibSp'): values = np.arange(0,np.max(data[key]) + 1) if(key == 'Embarked'): values = ['C', 'Q', 'S'] if(key == 'Sex'): values = ['male', 'female'] # Create DataFrame containing categories and count of each frame = pd.DataFrame(index = np.arange(len(values)), columns=(key,'Survived','NSurvived')) for i, value in enumerate(values): frame.loc[i] = [value, \ len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \ len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])] # Set the width of each bar bar_width = 0.4 # 
Display each category's survival rates for i in np.arange(len(frame)): nonsurv_bar = plt.bar(i-bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r') surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g') plt.xticks(np.arange(len(frame)), values) plt.legend((nonsurv_bar[0], surv_bar[0]),('Did not survive', 'Survived'), framealpha = 0.8) # Common attributes for plot formatting plt.xlabel(key) plt.ylabel('Number of Passengers') plt.title('Passenger Survival Statistics With \'%s\' Feature'%(key)) plt.show() # Report number of passengers with missing values if sum(pd.isnull(all_data[key])): nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived'] print "Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format( \ key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0))
mit
mrquim/repository.mrquim
script.module.youtube.dl/lib/youtube_dl/extractor/motorsport.py
73
1804
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_urlparse,
)


class MotorsportIE(InfoExtractor):
    """Extractor resolving motorsport.com video pages to their embedded
    YouTube videos (delegates the actual download to the Youtube IE)."""
    IE_DESC = 'motorsport.com'
    _VALID_URL = r'https?://(?:www\.)?motorsport\.com/[^/?#]+/video/(?:[^/?#]+/)(?P<id>[^/]+)/?(?:$|[?#])'
    _TEST = {
        'url': 'http://www.motorsport.com/f1/video/main-gallery/red-bull-racing-2014-rules-explained/',
        'info_dict': {
            'id': '2-T3WuR-KMM',
            'ext': 'mp4',
            'title': 'Red Bull Racing: 2014 Rules Explained',
            'duration': 208,
            'description': 'A new clip from Red Bull sees Daniel Ricciardo and Sebastian Vettel explain the 2014 Formula One regulations – which are arguably the most complex the sport has ever seen.',
            'uploader': 'mcomstaff',
            'uploader_id': 'UC334JIYKkVnyFoNCclfZtHQ',
            'upload_date': '20140903',
            'thumbnail': r're:^https?://.+\.jpg$'
        },
        'add_ie': ['Youtube'],
        'params': {
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The video lives in a player iframe; fetch it to find the YouTube id.
        iframe_path = self._html_search_regex(
            r'<iframe id="player_iframe"[^>]+src="([^"]+)"', webpage,
            'iframe path')
        iframe = self._download_webpage(
            compat_urlparse.urljoin(url, iframe_path), display_id,
            'Downloading iframe')
        # Dots escaped so only a literal 'www.youtube.com' host matches
        # (unescaped '.' would match any character in those positions).
        youtube_id = self._search_regex(
            r'www\.youtube\.com/embed/(.{11})', iframe, 'youtube id')

        return {
            '_type': 'url_transparent',
            'display_id': display_id,
            'url': 'https://youtube.com/watch?v=%s' % youtube_id,
        }
gpl-2.0
kernel-sanders/arsenic-mobile
Dependencies/Twisted-13.0.0/twisted/runner/test/test_procmon.py
49
16794
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.runner.procmon}. """ from twisted.trial import unittest from twisted.runner.procmon import LoggingProtocol, ProcessMonitor from twisted.internet.error import (ProcessDone, ProcessTerminated, ProcessExitedAlready) from twisted.internet.task import Clock from twisted.python.failure import Failure from twisted.test.proto_helpers import MemoryReactor class DummyProcess(object): """ An incomplete and fake L{IProcessTransport} implementation for testing how L{ProcessMonitor} behaves when its monitored processes exit. @ivar _terminationDelay: the delay in seconds after which the DummyProcess will appear to exit when it receives a TERM signal """ pid = 1 proto = None _terminationDelay = 1 def __init__(self, reactor, executable, args, environment, path, proto, uid=None, gid=None, usePTY=0, childFDs=None): self.proto = proto self._reactor = reactor self._executable = executable self._args = args self._environment = environment self._path = path self._uid = uid self._gid = gid self._usePTY = usePTY self._childFDs = childFDs def signalProcess(self, signalID): """ A partial implementation of signalProcess which can only handle TERM and KILL signals. - When a TERM signal is given, the dummy process will appear to exit after L{DummyProcess._terminationDelay} seconds with exit code 0 - When a KILL signal is given, the dummy process will appear to exit immediately with exit code 1. @param signalID: The signal name or number to be issued to the process. @type signalID: C{str} """ params = { "TERM": (self._terminationDelay, 0), "KILL": (0, 1) } if self.pid is None: raise ProcessExitedAlready() if signalID in params: delay, status = params[signalID] self._signalHandler = self._reactor.callLater( delay, self.processEnded, status) def processEnded(self, status): """ Deliver the process ended event to C{self.proto}. 
""" self.pid = None statusMap = { 0: ProcessDone, 1: ProcessTerminated, } self.proto.processEnded(Failure(statusMap[status](status))) class DummyProcessReactor(MemoryReactor, Clock): """ @ivar spawnedProcesses: a list that keeps track of the fake process instances built by C{spawnProcess}. @type spawnedProcesses: C{list} """ def __init__(self): MemoryReactor.__init__(self) Clock.__init__(self) self.spawnedProcesses = [] def spawnProcess(self, processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): """ Fake L{reactor.spawnProcess}, that logs all the process arguments and returns a L{DummyProcess}. """ proc = DummyProcess(self, executable, args, env, path, processProtocol, uid, gid, usePTY, childFDs) processProtocol.makeConnection(proc) self.spawnedProcesses.append(proc) return proc class ProcmonTests(unittest.TestCase): """ Tests for L{ProcessMonitor}. """ def setUp(self): """ Create an L{ProcessMonitor} wrapped around a fake reactor. """ self.reactor = DummyProcessReactor() self.pm = ProcessMonitor(reactor=self.reactor) self.pm.minRestartDelay = 2 self.pm.maxRestartDelay = 10 self.pm.threshold = 10 def test_getStateIncludesProcesses(self): """ The list of monitored processes must be included in the pickle state. """ self.pm.addProcess("foo", ["arg1", "arg2"], uid=1, gid=2, env={}) self.assertEqual(self.pm.__getstate__()['processes'], {'foo': (['arg1', 'arg2'], 1, 2, {})}) def test_getStateExcludesReactor(self): """ The private L{ProcessMonitor._reactor} instance variable should not be included in the pickle state. """ self.assertNotIn('_reactor', self.pm.__getstate__()) def test_addProcess(self): """ L{ProcessMonitor.addProcess} only starts the named program if L{ProcessMonitor.startService} has been called. 
""" self.pm.addProcess("foo", ["arg1", "arg2"], uid=1, gid=2, env={}) self.assertEqual(self.pm.protocols, {}) self.assertEqual(self.pm.processes, {"foo": (["arg1", "arg2"], 1, 2, {})}) self.pm.startService() self.reactor.advance(0) self.assertEqual(self.pm.protocols.keys(), ["foo"]) def test_addProcessDuplicateKeyError(self): """ L{ProcessMonitor.addProcess} raises a C{KeyError} if a process with the given name already exists. """ self.pm.addProcess("foo", ["arg1", "arg2"], uid=1, gid=2, env={}) self.assertRaises(KeyError, self.pm.addProcess, "foo", ["arg1", "arg2"], uid=1, gid=2, env={}) def test_addProcessEnv(self): """ L{ProcessMonitor.addProcess} takes an C{env} parameter that is passed to L{IReactorProcess.spawnProcess}. """ fakeEnv = {"KEY": "value"} self.pm.startService() self.pm.addProcess("foo", ["foo"], uid=1, gid=2, env=fakeEnv) self.reactor.advance(0) self.assertEqual( self.reactor.spawnedProcesses[0]._environment, fakeEnv) def test_removeProcess(self): """ L{ProcessMonitor.removeProcess} removes the process from the public processes list. """ self.pm.startService() self.pm.addProcess("foo", ["foo"]) self.assertEqual(len(self.pm.processes), 1) self.pm.removeProcess("foo") self.assertEqual(len(self.pm.processes), 0) def test_removeProcessUnknownKeyError(self): """ L{ProcessMonitor.removeProcess} raises a C{KeyError} if the given process name isn't recognised. """ self.pm.startService() self.assertRaises(KeyError, self.pm.removeProcess, "foo") def test_startProcess(self): """ When a process has been started, an instance of L{LoggingProtocol} will be added to the L{ProcessMonitor.protocols} dict and the start time of the process will be recorded in the L{ProcessMonitor.timeStarted} dictionary. 
""" self.pm.addProcess("foo", ["foo"]) self.pm.startProcess("foo") self.assertIsInstance(self.pm.protocols["foo"], LoggingProtocol) self.assertIn("foo", self.pm.timeStarted.keys()) def test_startProcessAlreadyStarted(self): """ L{ProcessMonitor.startProcess} silently returns if the named process is already started. """ self.pm.addProcess("foo", ["foo"]) self.pm.startProcess("foo") self.assertIdentical(None, self.pm.startProcess("foo")) def test_startProcessUnknownKeyError(self): """ L{ProcessMonitor.startProcess} raises a C{KeyError} if the given process name isn't recognised. """ self.assertRaises(KeyError, self.pm.startProcess, "foo") def test_stopProcessNaturalTermination(self): """ L{ProcessMonitor.stopProcess} immediately sends a TERM signal to the named process. """ self.pm.startService() self.pm.addProcess("foo", ["foo"]) self.assertIn("foo", self.pm.protocols) # Configure fake process to die 1 second after receiving term signal timeToDie = self.pm.protocols["foo"].transport._terminationDelay = 1 # Advance the reactor to just before the short lived process threshold # and leave enough time for the process to die self.reactor.advance(self.pm.threshold) # Then signal the process to stop self.pm.stopProcess("foo") # Advance the reactor just enough to give the process time to die and # verify that the process restarts self.reactor.advance(timeToDie) # We expect it to be restarted immediately self.assertEqual(self.reactor.seconds(), self.pm.timeStarted["foo"]) def test_stopProcessForcedKill(self): """ L{ProcessMonitor.stopProcess} kills a process which fails to terminate naturally within L{ProcessMonitor.killTime} seconds. 
""" self.pm.startService() self.pm.addProcess("foo", ["foo"]) self.assertIn("foo", self.pm.protocols) self.reactor.advance(self.pm.threshold) proc = self.pm.protocols["foo"].transport # Arrange for the fake process to live longer than the killTime proc._terminationDelay = self.pm.killTime + 1 self.pm.stopProcess("foo") # If process doesn't die before the killTime, procmon should # terminate it self.reactor.advance(self.pm.killTime - 1) self.assertEqual(0.0, self.pm.timeStarted["foo"]) self.reactor.advance(1) # We expect it to be immediately restarted self.assertEqual(self.reactor.seconds(), self.pm.timeStarted["foo"]) def test_stopProcessUnknownKeyError(self): """ L{ProcessMonitor.stopProcess} raises a C{KeyError} if the given process name isn't recognised. """ self.assertRaises(KeyError, self.pm.stopProcess, "foo") def test_stopProcessAlreadyStopped(self): """ L{ProcessMonitor.stopProcess} silently returns if the named process is already stopped. eg Process has crashed and a restart has been rescheduled, but in the meantime, the service is stopped. """ self.pm.addProcess("foo", ["foo"]) self.assertIdentical(None, self.pm.stopProcess("foo")) def test_connectionLostLongLivedProcess(self): """ L{ProcessMonitor.connectionLost} should immediately restart a process if it has been running longer than L{ProcessMonitor.threshold} seconds. 
""" self.pm.addProcess("foo", ["foo"]) # Schedule the process to start self.pm.startService() # advance the reactor to start the process self.reactor.advance(0) self.assertIn("foo", self.pm.protocols) # Long time passes self.reactor.advance(self.pm.threshold) # Process dies after threshold self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0))) self.assertNotIn("foo", self.pm.protocols) # Process should be restarted immediately self.reactor.advance(0) self.assertIn("foo", self.pm.protocols) def test_connectionLostMurderCancel(self): """ L{ProcessMonitor.connectionLost} cancels a scheduled process killer and deletes the DelayedCall from the L{ProcessMonitor.murder} list. """ self.pm.addProcess("foo", ["foo"]) # Schedule the process to start self.pm.startService() # Advance 1s to start the process then ask ProcMon to stop it self.reactor.advance(1) self.pm.stopProcess("foo") # A process killer has been scheduled, delayedCall is active self.assertIn("foo", self.pm.murder) delayedCall = self.pm.murder["foo"] self.assertTrue(delayedCall.active()) # Advance to the point at which the dummy process exits self.reactor.advance( self.pm.protocols["foo"].transport._terminationDelay) # Now the delayedCall has been cancelled and deleted self.assertFalse(delayedCall.active()) self.assertNotIn("foo", self.pm.murder) def test_connectionLostProtocolDeletion(self): """ L{ProcessMonitor.connectionLost} removes the corresponding ProcessProtocol instance from the L{ProcessMonitor.protocols} list. 
""" self.pm.startService() self.pm.addProcess("foo", ["foo"]) self.assertIn("foo", self.pm.protocols) self.pm.protocols["foo"].transport.signalProcess("KILL") self.reactor.advance( self.pm.protocols["foo"].transport._terminationDelay) self.assertNotIn("foo", self.pm.protocols) def test_connectionLostMinMaxRestartDelay(self): """ L{ProcessMonitor.connectionLost} will wait at least minRestartDelay s and at most maxRestartDelay s """ self.pm.minRestartDelay = 2 self.pm.maxRestartDelay = 3 self.pm.startService() self.pm.addProcess("foo", ["foo"]) self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay) self.reactor.advance(self.pm.threshold - 1) self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0))) self.assertEqual(self.pm.delay["foo"], self.pm.maxRestartDelay) def test_connectionLostBackoffDelayDoubles(self): """ L{ProcessMonitor.connectionLost} doubles the restart delay each time the process dies too quickly. """ self.pm.startService() self.pm.addProcess("foo", ["foo"]) self.reactor.advance(self.pm.threshold - 1) #9s self.assertIn("foo", self.pm.protocols) self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay) # process dies within the threshold and should not restart immediately self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0))) self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay * 2) def test_startService(self): """ L{ProcessMonitor.startService} starts all monitored processes. """ self.pm.addProcess("foo", ["foo"]) # Schedule the process to start self.pm.startService() # advance the reactor to start the process self.reactor.advance(0) self.assertTrue("foo" in self.pm.protocols) def test_stopService(self): """ L{ProcessMonitor.stopService} should stop all monitored processes. 
""" self.pm.addProcess("foo", ["foo"]) self.pm.addProcess("bar", ["bar"]) # Schedule the process to start self.pm.startService() # advance the reactor to start the processes self.reactor.advance(self.pm.threshold) self.assertIn("foo", self.pm.protocols) self.assertIn("bar", self.pm.protocols) self.reactor.advance(1) self.pm.stopService() # Advance to beyond the killTime - all monitored processes # should have exited self.reactor.advance(self.pm.killTime + 1) # The processes shouldn't be restarted self.assertEqual({}, self.pm.protocols) def test_stopServiceCancelRestarts(self): """ L{ProcessMonitor.stopService} should cancel any scheduled process restarts. """ self.pm.addProcess("foo", ["foo"]) # Schedule the process to start self.pm.startService() # advance the reactor to start the processes self.reactor.advance(self.pm.threshold) self.assertIn("foo", self.pm.protocols) self.reactor.advance(1) # Kill the process early self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0))) self.assertTrue(self.pm.restart['foo'].active()) self.pm.stopService() # Scheduled restart should have been cancelled self.assertFalse(self.pm.restart['foo'].active()) def test_stopServiceCleanupScheduledRestarts(self): """ L{ProcessMonitor.stopService} should cancel all scheduled process restarts. """ self.pm.threshold = 5 self.pm.minRestartDelay = 5 # Start service and add a process (started immediately) self.pm.startService() self.pm.addProcess("foo", ["foo"]) # Stop the process after 1s self.reactor.advance(1) self.pm.stopProcess("foo") # Wait 1s for it to exit it will be scheduled to restart 5s later self.reactor.advance(1) # Meanwhile stop the service self.pm.stopService() # Advance to beyond the process restart time self.reactor.advance(6) # The process shouldn't have restarted because stopService has cancelled # all pending process restarts. self.assertEqual(self.pm.protocols, {})
gpl-3.0
emuikernel/Coolpad5860E-kernel
arch/ia64/scripts/unwcheck.py
13143
1714
#!/usr/bin/python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
gpl-2.0
yvaucher/bank-payment
__unported__/account_banking_fi_patu/__openerp__.py
3
1826
############################################################################## # # Copyright (C) 2010 Sami Haahtinen (<http://ressukka.net>). # Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>). # All Rights Reserved # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsability of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # garantees and support are strongly adviced to contract EduSense BV # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Account Banking PATU module', 'version': '0.62', 'license': 'AGPL-3', 'author': 'Sami Haahtinen', 'website': 'http://ressukka.net', 'category': 'Account Banking', 'depends': ['account_banking'], 'description': ''' Module to import Finnish PATU format transation files. This modules contains no logic, just an import filter for account_banking. ''', 'active': False, 'installable': False, }
agpl-3.0
mmcdermo/helpinghand
server/venv/lib/python2.7/site-packages/django/contrib/gis/forms/fields.py
7
4693
from __future__ import unicode_literals import warnings from django import forms from django.utils import six from django.utils.translation import ugettext_lazy as _ # While this couples the geographic forms to the GEOS library, # it decouples from database (by not importing SpatialBackend). from django.contrib.gis.geos import GEOSException, GEOSGeometry, fromstr from .widgets import OpenLayersWidget class GeometryField(forms.Field): """ This is the basic form field for a Geometry. Any textual input that is accepted by GEOSGeometry is accepted by this form. By default, this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON. """ widget = OpenLayersWidget geom_type = 'GEOMETRY' default_error_messages = { 'required' : _('No geometry value provided.'), 'invalid_geom' : _('Invalid geometry value.'), 'invalid_geom_type' : _('Invalid geometry type.'), 'transform_error' : _('An error occurred when transforming the geometry ' 'to the SRID of the geometry form field.'), } def __init__(self, **kwargs): # Pop out attributes from the database field, or use sensible # defaults (e.g., allow None). self.srid = kwargs.pop('srid', None) self.geom_type = kwargs.pop('geom_type', self.geom_type) if 'null' in kwargs: kwargs.pop('null', True) warnings.warn("Passing 'null' keyword argument to GeometryField is deprecated.", DeprecationWarning, stacklevel=2) super(GeometryField, self).__init__(**kwargs) self.widget.attrs['geom_type'] = self.geom_type def to_python(self, value): """ Transforms the value to a Geometry object. """ if value in self.empty_values: return None if not isinstance(value, GEOSGeometry): try: value = GEOSGeometry(value) if not value.srid: value.srid = self.widget.map_srid except (GEOSException, ValueError, TypeError): raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom') return value def clean(self, value): """ Validates that the input value can be converted to a Geometry object (which is returned). 
A ValidationError is raised if the value cannot be instantiated as a Geometry. """ geom = super(GeometryField, self).clean(value) if geom is None: return geom # Ensuring that the geometry is of the correct type (indicated # using the OGC string label). if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY': raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type') # Transforming the geometry if the SRID was set. if self.srid: if not geom.srid: # Should match that of the field if not given. geom.srid = self.srid elif self.srid != -1 and self.srid != geom.srid: try: geom.transform(self.srid) except: raise forms.ValidationError(self.error_messages['transform_error'], code='transform_error') return geom def _has_changed(self, initial, data): """ Compare geographic value of data with its initial value. """ try: data = self.to_python(data) initial = self.to_python(initial) except ValidationError: return True # Only do a geographic comparison if both values are available if initial and data: data.transform(initial.srid) # If the initial value was not added by the browser, the geometry # provided may be slightly different, the first time it is saved. # The comparison is done with a very low tolerance. return not initial.equals_exact(data, tolerance=0.000001) else: # Check for change of state of existence return bool(initial) != bool(data) class GeometryCollectionField(GeometryField): geom_type = 'GEOMETRYCOLLECTION' class PointField(GeometryField): geom_type = 'POINT' class MultiPointField(GeometryField): geom_type = 'MULTIPOINT' class LineStringField(GeometryField): geom_type = 'LINESTRING' class MultiLineStringField(GeometryField): geom_type = 'MULTILINESTRING' class PolygonField(GeometryField): geom_type = 'POLYGON' class MultiPolygonField(GeometryField): geom_type = 'MULTIPOLYGON'
mit
pentestfail/TA-FireEye_TAP
bin/input_module_fireeye_tap_incidents.py
1
4568
# encoding = utf-8 import os import sys import time import datetime import json def validate_input(helper, definition): api_env = definition.parameters.get('api_env', None) instanceid = definition.parameters.get('instance_id', None) apikey = definition.parameters.get('apikey', None) api_limit = definition.parameters.get('api_limit', None) api_timeout = definition.parameters.get('api_timeout', None) pass def collect_events(helper, ew): # Retrieve runtime variables opt_environment = helper.get_arg('api_env') opt_instanceid = helper.get_arg('instance_id') opt_apikey = helper.get_arg('apikey') opt_limit = helper.get_arg('api_limit') opt_timeout = float(helper.get_arg('api_timeout')) # Create checkpoint key opt_checkpoint = "incidents_" + opt_environment + "_" + opt_instanceid #Create last status entry for storage as checkpoint current_status = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) #Check for last query execution data in kvstore & generate if not present try: last_status = helper.get_check_point(opt_checkpoint) or time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(0)) helper.log_debug("[" + opt_instanceid + "] TAP Incidents - Last successful checkpoint time: " + str(last_status)) except Exception as e: helper.log_error("[" + opt_instanceid + "] TAP Incidents - Unable to retrieve last execution checkpoint!") raise e # use simple rest call to load the events header = {} data = {} parameter = {} parameter['limit'] = opt_limit parameter['sort'] = "-createDate" parameter['withCount'] = "1" parameter['includes'] = "revisions._updatedBy" parameter['query'] = str('{"updateDate":{"$gte":"' + last_status + '"}}') url = "https://" + opt_environment + ".fireeye.com/tap/id/" + opt_instanceid + "/api/v1/incidents" method = 'GET' header['x-mansfield-key'] = opt_apikey try: # Leverage helper function to send http request response = helper.send_http_request(url, method, parameters=parameter, payload=None, headers=header, cookies=None, verify=True, cert=None, 
timeout=opt_timeout, use_proxy=True) # Return API response code r_status = response.status_code # Return API request status_code if r_status is not 200: helper.log_error("[" + opt_instanceid + "] Incidents API unsuccessful status_code=" + str(r_status)) response.raise_for_status() # Return API request as JSON obj = response.json() if obj is None: helper.log_info("[" + opt_instanceid + "] No new incidents retrieved from TAP.") # Iterate over incidents in array & index i=0 for incident in obj.get("incidents"): singleIncident = (obj.get("incidents")[i]) singleIncident['tap_instance'] = opt_instanceid singleIncident['tap_environment'] = opt_environment # Rename underscore fields so Splunk will index values singleIncident['_alert'] = singleIncident['_alert'] singleIncident['updatedBy'] = singleIncident['_updatedBy'] singleIncident['createdBy'] = singleIncident['_createdBy'] singleIncident['assignedTo'] = singleIncident['_assignedTo'] # Remove underscore fieldnames and values del singleIncident['_alert'] del singleIncident['_updatedBy'] del singleIncident['_createdBy'] del singleIncident['_assignedTo'] event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=json.dumps(singleIncident)) try: ew.write_event(event) helper.log_debug("[" + opt_instanceid + "] Added incident:" + str(singleIncident['id'])) except Exception as error: helper.log_error("[" + opt_instanceid + "] Unable to add incident:" + str(singleIncident['id'])) i = i + 1 #Update last completed execution time helper.save_check_point(opt_checkpoint, current_status) helper.log_info("[" + opt_instanceid + "] Incidents collection complete. Records added: " + str(i)) helper.log_debug("[" + opt_instanceid + "] TAP Incidents - Storing checkpoint time: " + current_status) except Exception as error: helper.log_error("[" + opt_instanceid + "] TAP Incidents - An unknown error occurred!") raise error
mit
Kiiv/Sick-Beard
lib/hachoir_parser/container/mkv.py
90
20373
# # Matroska parser # Author Julien Muchembled <jm AT jm10.no-ip.com> # Created: 8 june 2006 # from lib.hachoir_parser import Parser from lib.hachoir_core.field import (FieldSet, Link, MissingField, ParserError, Enum as _Enum, String as _String, Float32, Float64, NullBits, Bits, Bit, RawBytes, Bytes, Int16, GenericInteger) from lib.hachoir_core.endian import BIG_ENDIAN from lib.hachoir_core.iso639 import ISO639_2 from lib.hachoir_core.tools import humanDatetime from lib.hachoir_core.text_handler import textHandler, hexadecimal from lib.hachoir_parser.container.ogg import XiphInt from datetime import datetime, timedelta class RawInt(GenericInteger): """ Raw integer: have to be used in BIG_ENDIAN! """ def __init__(self, parent, name, description=None): GenericInteger.__init__(self, parent, name, False, 8, description) i = GenericInteger.createValue(self) if i == 0: raise ParserError('Invalid integer length!') while i < 0x80: self._size += 8 i <<= 1 class Unsigned(RawInt): def __init__(self, parent, name, description=None): RawInt.__init__(self, parent, name, description) def hasValue(self): return True def createValue(self): header = 1 << self._size / 8 * 7 value = RawInt.createValue(self) - header if value + 1 == header: return None return value class Signed(Unsigned): def createValue(self): header = 1 << self._size / 8 * 7 - 1 value = RawInt.createValue(self) - 3 * header + 1 if value == header: return None return value def Enum(parent, enum): return _Enum(GenericInteger(parent, 'enum', False, parent['size'].value*8), enum) def Bool(parent): return textHandler(GenericInteger(parent, 'bool', False, parent['size'].value*8), lambda chunk: str(chunk.value != 0)) def UInt(parent): return GenericInteger(parent, 'unsigned', False, parent['size'].value*8) def SInt(parent): return GenericInteger(parent, 'signed', True, parent['size'].value*8) def String(parent): return _String(parent, 'string', parent['size'].value, charset="ASCII") def EnumString(parent, enum): return 
_Enum(String(parent), enum) def Binary(parent): return RawBytes(parent, 'binary', parent['size'].value) class AttachedFile(Bytes): def __init__(self, parent): Bytes.__init__(self, parent, 'file', parent['size'].value, None) def _getFilename(self): if not hasattr(self, "_filename"): try: self._filename = self["../../FileName/unicode"].value except MissingField: self._filename = None return self._filename def createDescription(self): filename = self._getFilename() if filename: return 'File "%s"' % filename return "('Filename' entry not found)" def _createInputStream(self, **args): tags = args.setdefault("tags",[]) try: tags.append(("mime", self["../../FileMimeType/string"].value)) except MissingField: pass filename = self._getFilename() if filename: tags.append(("filename", filename)) return Bytes._createInputStream(self, **args) def UTF8(parent): return _String(parent,'unicode', parent['size'].value, charset='UTF-8') def Float(parent): size = parent['size'].value if size == 4: return Float32(parent, 'float') elif size == 8: return Float64(parent, 'double') else: return RawBytes(parent, 'INVALID_FLOAT', size) TIMESTAMP_T0 = datetime(2001, 1, 1) def dateToDatetime(value): return TIMESTAMP_T0 + timedelta(microseconds=value//1000) def dateToString(field): return humanDatetime(dateToDatetime(field.value)) def Date(parent): return textHandler(GenericInteger(parent, 'date', True, parent['size'].value*8), dateToString) def SeekID(parent): return textHandler(GenericInteger(parent, 'binary', False, parent['size'].value*8), lambda chunk: segment.get(chunk.value, (hexadecimal(chunk),))[0]) def CueClusterPosition(parent): class Cluster(Link): def createValue(self): parent = self.parent segment = parent['.....'] pos = parent['unsigned'].value * 8 + segment[2].address return segment.getFieldByAddress(pos, feed=False) return Cluster(parent, 'cluster') def CueTrackPositions(parent): class Block(Link): def createValue(self): parent = self.parent time = 
parent['../CueTime/unsigned'].value track = parent['CueTrack/unsigned'].value cluster = parent['CueClusterPosition/cluster'].value time -= cluster['Timecode/unsigned'].value for field in cluster: if field.name.startswith('BlockGroup['): for path in 'Block/block', 'SimpleBlock': try: block = field[path] if block['track'].value == track and \ block['timecode'].value == time: return field except MissingField: pass parent.error('Cue point not found') return self return Block(parent, 'block') class Lace(FieldSet): def __init__(self, parent, lacing, size): self.n_frames = parent['n_frames'].value self.createFields = ( self.parseXiph, self.parseFixed, self.parseEBML )[lacing] FieldSet.__init__(self, parent, 'Lace', size=size * 8) def parseXiph(self): for i in xrange(self.n_frames): yield XiphInt(self, 'size[]') for i in xrange(self.n_frames): yield RawBytes(self, 'frame[]', self['size['+str(i)+']'].value) yield RawBytes(self,'frame[]', (self._size - self.current_size) / 8) def parseEBML(self): yield Unsigned(self, 'size') for i in xrange(1, self.n_frames): yield Signed(self, 'dsize[]') size = self['size'].value yield RawBytes(self, 'frame[]', size) for i in xrange(self.n_frames-1): size += self['dsize['+str(i)+']'].value yield RawBytes(self, 'frame[]', size) yield RawBytes(self,'frame[]', (self._size - self.current_size) / 8) def parseFixed(self): n = self.n_frames + 1 size = self._size / 8 / n for i in xrange(n): yield RawBytes(self, 'frame[]', size) class Block(FieldSet): def __init__(self, parent): FieldSet.__init__(self, parent, 'block') self._size = 8 * parent['size'].value def lacing(self): return _Enum(Bits(self, 'lacing', 2), [ 'none', 'Xiph', 'fixed', 'EBML' ]) def createFields(self): yield Unsigned(self, 'track') yield Int16(self, 'timecode') if self.parent._name == 'Block': yield NullBits(self, 'reserved[]', 4) yield Bit(self, 'invisible') yield self.lacing() yield NullBits(self, 'reserved[]', 1) elif self.parent._name == 'SimpleBlock[]': yield Bit(self, 
'keyframe') yield NullBits(self, 'reserved', 3) yield Bit(self, 'invisible') yield self.lacing() yield Bit(self, 'discardable') else: yield NullBits(self, 'reserved', 8) return size = (self._size - self.current_size) / 8 lacing = self['lacing'].value if lacing: yield textHandler(GenericInteger(self, 'n_frames', False, 8), lambda chunk: str(chunk.value+1)) yield Lace(self, lacing - 1, size - 1) else: yield RawBytes(self,'frame', size) ebml = { 0x1A45DFA3: ('EBML[]', { 0x4286: ('EBMLVersion',UInt), 0x42F7: ('EBMLReadVersion',UInt), 0x42F2: ('EBMLMaxIDLength',UInt), 0x42F3: ('EBMLMaxSizeLength',UInt), 0x4282: ('DocType',String), 0x4287: ('DocTypeVersion',UInt), 0x4285: ('DocTypeReadVersion',UInt) }) } signature = { 0x7E8A: ('SignatureAlgo', UInt), 0x7E9A: ('SignatureHash', UInt), 0x7EA5: ('SignaturePublicKey', Binary), 0x7EB5: ('Signature', Binary), 0x7E5B: ('SignatureElements', { 0x7E7B: ('SignatureElementList[]', { 0x6532: ('SignedElement[]', Binary) }) }) } chapter_atom = { 0x73C4: ('ChapterUID', UInt), 0x91: ('ChapterTimeStart', UInt), 0x92: ('ChapterTimeEnd', UInt), 0x98: ('ChapterFlagHidden', Bool), 0x4598: ('ChapterFlagEnabled', Bool), 0x6E67: ('ChapterSegmentUID', Binary), 0x6EBC: ('ChapterSegmentEditionUID', Binary), 0x63C3: ('ChapterPhysicalEquiv', UInt), 0x8F: ('ChapterTrack', { 0x89: ('ChapterTrackNumber[]', UInt) }), 0x80: ('ChapterDisplay[]', { 0x85: ('ChapString', UTF8), 0x437C: ('ChapLanguage[]', String), 0x437E: ('ChapCountry[]', String) }), 0x6944: ('ChapProcess[]', { 0x6955: ('ChapProcessCodecID', UInt), 0x450D: ('ChapProcessPrivate', Binary), 0x6911: ('ChapProcessCommand[]', { 0x6922: ('ChapProcessTime', UInt), 0x6933: ('ChapProcessData', Binary) }) }) } simple_tag = { 0x45A3: ('TagName', UTF8), 0x447A: ('TagLanguage', String), 0x44B4: ('TagDefault', Bool), # 0x4484 0x4487: ('TagString', UTF8), 0x4485: ('TagBinary', Binary) } segment_seek = { 0x4DBB: ('Seek[]', { 0x53AB: ('SeekID', SeekID), 0x53AC: ('SeekPosition', UInt) }) } segment_info = { 
0x73A4: ('SegmentUID', Binary), 0x7384: ('SegmentFilename', UTF8), 0x3CB923: ('PrevUID', Binary), 0x3C83AB: ('PrevFilename', UTF8), 0x3EB923: ('NextUID', Binary), 0x3E83BB: ('NextFilename', UTF8), 0x4444: ('SegmentFamily[]', Binary), 0x6924: ('ChapterTranslate[]', { 0x69FC: ('ChapterTranslateEditionUID[]', UInt), 0x69BF: ('ChapterTranslateCodec', UInt), 0x69A5: ('ChapterTranslateID', Binary) }), 0x2AD7B1: ('TimecodeScale', UInt), 0x4489: ('Duration', Float), 0x4461: ('DateUTC', Date), 0x7BA9: ('Title', UTF8), 0x4D80: ('MuxingApp', UTF8), 0x5741: ('WritingApp', UTF8) } segment_clusters = { 0xE7: ('Timecode', UInt), 0x5854: ('SilentTracks', { 0x58D7: ('SilentTrackNumber[]', UInt) }), 0xA7: ('Position', UInt), 0xAB: ('PrevSize', UInt), 0xA0: ('BlockGroup[]', { 0xA1: ('Block', Block), 0xA2: ('BlockVirtual[]', Block), 0x75A1: ('BlockAdditions', { 0xA6: ('BlockMore[]', { 0xEE: ('BlockAddID', UInt), 0xA5: ('BlockAdditional', Binary) }) }), 0x9B: ('BlockDuration', UInt), 0xFA: ('ReferencePriority', UInt), 0xFB: ('ReferenceBlock[]', SInt), 0xFD: ('ReferenceVirtual', SInt), 0xA4: ('CodecState', Binary), 0x8E: ('Slices[]', { 0xE8: ('TimeSlice[]', { 0xCC: ('LaceNumber', UInt), 0xCD: ('FrameNumber', UInt), 0xCB: ('BlockAdditionID', UInt), 0xCE: ('Delay', UInt), 0xCF: ('Duration', UInt) }) }) }), 0xA3: ('SimpleBlock[]', Block) } tracks_video = { 0x9A: ('FlagInterlaced', Bool), 0x53B8: ('StereoMode', lambda parent: Enum(parent, \ [ 'mono', 'right eye', 'left eye', 'both eyes' ])), 0xB0: ('PixelWidth', UInt), 0xBA: ('PixelHeight', UInt), 0x54AA: ('PixelCropBottom', UInt), 0x54BB: ('PixelCropTop', UInt), 0x54CC: ('PixelCropLeft', UInt), 0x54DD: ('PixelCropRight', UInt), 0x54B0: ('DisplayWidth', UInt), 0x54BA: ('DisplayHeight', UInt), 0x54B2: ('DisplayUnit', lambda parent: Enum(parent, \ [ 'pixels', 'centimeters', 'inches' ])), 0x54B3: ('AspectRatioType', lambda parent: Enum(parent, \ [ 'free resizing', 'keep aspect ratio', 'fixed' ])), 0x2EB524: ('ColourSpace', Binary), 0x2FB523: 
('GammaValue', Float) } tracks_audio = { 0xB5: ('SamplingFrequency', Float), 0x78B5: ('OutputSamplingFrequency', Float), 0x9F: ('Channels', UInt), 0x7D7B: ('ChannelPositions', Binary), 0x6264: ('BitDepth', UInt) } tracks_content_encodings = { 0x6240: ('ContentEncoding[]', { 0x5031: ('ContentEncodingOrder', UInt), 0x5032: ('ContentEncodingScope', UInt), 0x5033: ('ContentEncodingType', UInt), 0x5034: ('ContentCompression', { 0x4254: ('ContentCompAlgo', UInt), 0x4255: ('ContentCompSettings', Binary) }), 0x5035: ('ContentEncryption', { 0x47e1: ('ContentEncAlgo', UInt), 0x47e2: ('ContentEncKeyID', Binary), 0x47e3: ('ContentSignature', Binary), 0x47e4: ('ContentSigKeyID', Binary), 0x47e5: ('ContentSigAlgo', UInt), 0x47e6: ('ContentSigHashAlgo', UInt), }) }) } segment_tracks = { 0xAE: ('TrackEntry[]', { 0xD7: ('TrackNumber', UInt), 0x73C5: ('TrackUID', UInt), 0x83: ('TrackType', lambda parent: Enum(parent, { 0x01: 'video', 0x02: 'audio', 0x03: 'complex', 0x10: 'logo', 0x11: 'subtitle', 0x12: 'buttons', 0x20: 'control' })), 0xB9: ('FlagEnabled', Bool), 0x88: ('FlagDefault', Bool), 0x55AA: ('FlagForced[]', Bool), 0x9C: ('FlagLacing', Bool), 0x6DE7: ('MinCache', UInt), 0x6DF8: ('MaxCache', UInt), 0x23E383: ('DefaultDuration', UInt), 0x23314F: ('TrackTimecodeScale', Float), 0x537F: ('TrackOffset', SInt), 0x55EE: ('MaxBlockAdditionID', UInt), 0x536E: ('Name', UTF8), 0x22B59C: ('Language', lambda parent: EnumString(parent, ISO639_2)), 0x86: ('CodecID', String), 0x63A2: ('CodecPrivate', Binary), 0x258688: ('CodecName', UTF8), 0x7446: ('AttachmentLink', UInt), 0x3A9697: ('CodecSettings', UTF8), 0x3B4040: ('CodecInfoURL[]', String), 0x26B240: ('CodecDownloadURL[]', String), 0xAA: ('CodecDecodeAll', Bool), 0x6FAB: ('TrackOverlay[]', UInt), 0x6624: ('TrackTranslate[]', { 0x66FC: ('TrackTranslateEditionUID[]', UInt), 0x66BF: ('TrackTranslateCodec', UInt), 0x66A5: ('TrackTranslateTrackID', Binary) }), 0xE0: ('Video', tracks_video), 0xE1: ('Audio', tracks_audio), 0x6d80: 
('ContentEncodings', tracks_content_encodings) }) } segment_cues = { 0xBB: ('CuePoint[]', { 0xB3: ('CueTime', UInt), 0xB7: ('CueTrackPositions[]', CueTrackPositions, { 0xF7: ('CueTrack', UInt), 0xF1: ('CueClusterPosition', CueClusterPosition, UInt), 0x5378: ('CueBlockNumber', UInt), 0xEA: ('CueCodecState', UInt), 0xDB: ('CueReference[]', { 0x96: ('CueRefTime', UInt), 0x97: ('CueRefCluster', UInt), 0x535F: ('CueRefNumber', UInt), 0xEB: ('CueRefCodecState', UInt) }) }) }) } segment_attachments = { 0x61A7: ('AttachedFile[]', { 0x467E: ('FileDescription', UTF8), 0x466E: ('FileName', UTF8), 0x4660: ('FileMimeType', String), 0x465C: ('FileData', AttachedFile), 0x46AE: ('FileUID', UInt), 0x4675: ('FileReferral', Binary) }) } segment_chapters = { 0x45B9: ('EditionEntry[]', { 0x45BC: ('EditionUID', UInt), 0x45BD: ('EditionFlagHidden', Bool), 0x45DB: ('EditionFlagDefault', Bool), 0x45DD: ('EditionFlagOrdered', Bool), 0xB6: ('ChapterAtom[]', chapter_atom) }) } segment_tags = { 0x7373: ('Tag[]', { 0x63C0: ('Targets', { 0x68CA: ('TargetTypeValue', UInt), 0x63CA: ('TargetType', String), 0x63C5: ('TrackUID[]', UInt), 0x63C9: ('EditionUID[]', UInt), 0x63C4: ('ChapterUID[]', UInt), 0x63C6: ('AttachmentUID[]', UInt) }), 0x67C8: ('SimpleTag[]', simple_tag) }) } segment = { 0x114D9B74: ('SeekHead[]', segment_seek), 0x1549A966: ('Info[]', segment_info), 0x1F43B675: ('Cluster[]', segment_clusters), 0x1654AE6B: ('Tracks[]', segment_tracks), 0x1C53BB6B: ('Cues', segment_cues), 0x1941A469: ('Attachments', segment_attachments), 0x1043A770: ('Chapters', segment_chapters), 0x1254C367: ('Tags[]', segment_tags) } class EBML(FieldSet): def __init__(self, parent, ids): FieldSet.__init__(self, parent, "?[]") # Set name id = self['id'].value self.val = ids.get(id) if not self.val: if id == 0xBF: self.val = 'CRC-32[]', Binary elif id == 0xEC: self.val = 'Void[]', Binary elif id == 0x1B538667: self.val = 'SignatureSlot[]', signature else: self.val = 'Unknown[]', Binary self._name = self.val[0] # 
Compute size size = self['size'] if size.value is not None: self._size = size.address + size.size + size.value * 8 elif self._parent._parent: raise ParserError("Unknown length (only allowed for the last Level 0 element)") elif self._parent._size is not None: self._size = self._parent._size - self.address def createFields(self): yield RawInt(self, 'id') yield Unsigned(self, 'size') for val in self.val[1:]: if callable(val): yield val(self) else: while not self.eof: yield EBML(self, val) class MkvFile(Parser): EBML_SIGNATURE = 0x1A45DFA3 PARSER_TAGS = { "id": "matroska", "category": "container", "file_ext": ("mka", "mkv", "webm"), "mime": ( u"video/x-matroska", u"audio/x-matroska", u"video/webm", u"audio/webm"), "min_size": 5*8, "magic": (("\x1A\x45\xDF\xA3", 0),), "description": "Matroska multimedia container" } endian = BIG_ENDIAN def _getDoctype(self): return self[0]['DocType/string'].value def validate(self): if self.stream.readBits(0, 32, self.endian) != self.EBML_SIGNATURE: return False try: first = self[0] except ParserError: return False if None < self._size < first._size: return "First chunk size is invalid" if self._getDoctype() not in ('matroska', 'webm'): return "Stream isn't a matroska document." return True def createFields(self): hdr = EBML(self, ebml) yield hdr while not self.eof: yield EBML(self, { 0x18538067: ('Segment[]', segment) }) def createContentSize(self): field = self["Segment[0]/size"] return field.absolute_address + field.value * 8 + field.size def createDescription(self): if self._getDoctype() == 'webm': return 'WebM video' else: return 'Matroska video' def createMimeType(self): if self._getDoctype() == 'webm': return u"video/webm" else: return u"video/x-matroska"
gpl-3.0
basicthinker/THNVM
tests/quick/fs/10.linux-boot/test.py
180
1636
# Copyright (c) 2006 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Steve Reinhardt root.system.readfile = os.path.join(tests_root, 'halt.sh')
bsd-3-clause
vinayan3/clpricehistory
django/contrib/comments/moderation.py
246
13528
""" A generic comment-moderation system which allows configuration of moderation options on a per-model basis. To use, do two things: 1. Create or import a subclass of ``CommentModerator`` defining the options you want. 2. Import ``moderator`` from this module and register one or more models, passing the models and the ``CommentModerator`` options class you want to use. Example ------- First, we define a simple model class which might represent entries in a Weblog:: from django.db import models class Entry(models.Model): title = models.CharField(maxlength=250) body = models.TextField() pub_date = models.DateField() enable_comments = models.BooleanField() Then we create a ``CommentModerator`` subclass specifying some moderation options:: from django.contrib.comments.moderation import CommentModerator, moderator class EntryModerator(CommentModerator): email_notification = True enable_field = 'enable_comments' And finally register it for moderation:: moderator.register(Entry, EntryModerator) This sample class would apply two moderation steps to each new comment submitted on an Entry: * If the entry's ``enable_comments`` field is set to ``False``, the comment will be rejected (immediately deleted). * If the comment is successfully posted, an email notification of the comment will be sent to site staff. For a full list of built-in moderation options and other configurability, see the documentation for the ``CommentModerator`` class. """ import datetime from django.conf import settings from django.core.mail import send_mail from django.contrib.comments import signals from django.db.models.base import ModelBase from django.template import Context, loader from django.contrib import comments from django.contrib.sites.models import Site class AlreadyModerated(Exception): """ Raised when a model which is already registered for moderation is attempting to be registered again. 
""" pass class NotModerated(Exception): """ Raised when a model which is not registered for moderation is attempting to be unregistered. """ pass class CommentModerator(object): """ Encapsulates comment-moderation options for a given model. This class is not designed to be used directly, since it doesn't enable any of the available moderation options. Instead, subclass it and override attributes to enable different options:: ``auto_close_field`` If this is set to the name of a ``DateField`` or ``DateTimeField`` on the model for which comments are being moderated, new comments for objects of that model will be disallowed (immediately deleted) when a certain number of days have passed after the date specified in that field. Must be used in conjunction with ``close_after``, which specifies the number of days past which comments should be disallowed. Default value is ``None``. ``auto_moderate_field`` Like ``auto_close_field``, but instead of outright deleting new comments when the requisite number of days have elapsed, it will simply set the ``is_public`` field of new comments to ``False`` before saving them. Must be used in conjunction with ``moderate_after``, which specifies the number of days past which comments should be moderated. Default value is ``None``. ``close_after`` If ``auto_close_field`` is used, this must specify the number of days past the value of the field specified by ``auto_close_field`` after which new comments for an object should be disallowed. Default value is ``None``. ``email_notification`` If ``True``, any new comment on an object of this model which survives moderation will generate an email to site staff. Default value is ``False``. ``enable_field`` If this is set to the name of a ``BooleanField`` on the model for which comments are being moderated, new comments on objects of that model will be disallowed (immediately deleted) whenever the value of that field is ``False`` on the object the comment would be attached to. 
Default value is ``None``. ``moderate_after`` If ``auto_moderate_field`` is used, this must specify the number of days past the value of the field specified by ``auto_moderate_field`` after which new comments for an object should be marked non-public. Default value is ``None``. Most common moderation needs can be covered by changing these attributes, but further customization can be obtained by subclassing and overriding the following methods. Each method will be called with three arguments: ``comment``, which is the comment being submitted, ``content_object``, which is the object the comment will be attached to, and ``request``, which is the ``HttpRequest`` in which the comment is being submitted:: ``allow`` Should return ``True`` if the comment should be allowed to post on the content object, and ``False`` otherwise (in which case the comment will be immediately deleted). ``email`` If email notification of the new comment should be sent to site staff or moderators, this method is responsible for sending the email. ``moderate`` Should return ``True`` if the comment should be moderated (in which case its ``is_public`` field will be set to ``False`` before saving), and ``False`` otherwise (in which case the ``is_public`` field will not be changed). Subclasses which want to introspect the model for which comments are being moderated can do so through the attribute ``_model``, which will be the model class. """ auto_close_field = None auto_moderate_field = None close_after = None email_notification = False enable_field = None moderate_after = None def __init__(self, model): self._model = model def _get_delta(self, now, then): """ Internal helper which will return a ``datetime.timedelta`` representing the time between ``now`` and ``then``. Assumes ``now`` is a ``datetime.date`` or ``datetime.datetime`` later than ``then``. 
If ``now`` and ``then`` are not of the same type due to one of them being a ``datetime.date`` and the other being a ``datetime.datetime``, both will be coerced to ``datetime.date`` before calculating the delta. """ if now.__class__ is not then.__class__: now = datetime.date(now.year, now.month, now.day) then = datetime.date(then.year, then.month, then.day) if now < then: raise ValueError("Cannot determine moderation rules because date field is set to a value in the future") return now - then def allow(self, comment, content_object, request): """ Determine whether a given comment is allowed to be posted on a given object. Return ``True`` if the comment should be allowed, ``False otherwise. """ if self.enable_field: if not getattr(content_object, self.enable_field): return False if self.auto_close_field and self.close_after is not None: close_after_date = getattr(content_object, self.auto_close_field) if close_after_date is not None and self._get_delta(datetime.datetime.now(), close_after_date).days >= self.close_after: return False return True def moderate(self, comment, content_object, request): """ Determine whether a given comment on a given object should be allowed to show up immediately, or should be marked non-public and await approval. Return ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise. """ if self.auto_moderate_field and self.moderate_after is not None: moderate_after_date = getattr(content_object, self.auto_moderate_field) if moderate_after_date is not None and self._get_delta(datetime.datetime.now(), moderate_after_date).days >= self.moderate_after: return True return False def email(self, comment, content_object, request): """ Send email notification of a new comment to site staff when email notifications have been requested. 
""" if not self.email_notification: return recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS] t = loader.get_template('comments/comment_notification_email.txt') c = Context({ 'comment': comment, 'content_object': content_object }) subject = '[%s] New comment posted on "%s"' % (Site.objects.get_current().name, content_object) message = t.render(c) send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True) class Moderator(object): """ Handles moderation of a set of models. An instance of this class will maintain a list of one or more models registered for comment moderation, and their associated moderation classes, and apply moderation to all incoming comments. To register a model, obtain an instance of ``Moderator`` (this module exports one as ``moderator``), and call its ``register`` method, passing the model class and a moderation class (which should be a subclass of ``CommentModerator``). Note that both of these should be the actual classes, not instances of the classes. To cease moderation for a model, call the ``unregister`` method, passing the model class. For convenience, both ``register`` and ``unregister`` can also accept a list of model classes in place of a single model; this allows easier registration of multiple models with the same ``CommentModerator`` class. The actual moderation is applied in two phases: one prior to saving a new comment, and the other immediately after saving. The pre-save moderation may mark a comment as non-public or mark it to be removed; the post-save moderation may delete a comment which was disallowed (there is currently no way to prevent the comment being saved once before removal) and, if the comment is still around, will send any notification emails the comment generated. """ def __init__(self): self._registry = {} self.connect() def connect(self): """ Hook up the moderation methods to pre- and post-save signals from the comment models. 
""" signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model()) signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model()) def register(self, model_or_iterable, moderation_class): """ Register a model or a list of models for comment moderation, using a particular moderation class. Raise ``AlreadyModerated`` if any of the models are already registered. """ if isinstance(model_or_iterable, ModelBase): model_or_iterable = [model_or_iterable] for model in model_or_iterable: if model in self._registry: raise AlreadyModerated("The model '%s' is already being moderated" % model._meta.module_name) self._registry[model] = moderation_class(model) def unregister(self, model_or_iterable): """ Remove a model or a list of models from the list of models whose comments will be moderated. Raise ``NotModerated`` if any of the models are not currently registered for moderation. """ if isinstance(model_or_iterable, ModelBase): model_or_iterable = [model_or_iterable] for model in model_or_iterable: if model not in self._registry: raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name) del self._registry[model] def pre_save_moderation(self, sender, comment, request, **kwargs): """ Apply any necessary pre-save moderation steps to new comments. """ model = comment.content_type.model_class() if model not in self._registry: return content_object = comment.content_object moderation_class = self._registry[model] # Comment will be disallowed outright (HTTP 403 response) if not moderation_class.allow(comment, content_object, request): return False if moderation_class.moderate(comment, content_object, request): comment.is_public = False def post_save_moderation(self, sender, comment, request, **kwargs): """ Apply any necessary post-save moderation steps to new comments. 
""" model = comment.content_type.model_class() if model not in self._registry: return self._registry[model].email(comment, comment.content_object, request) # Import this instance in your own code to use in registering # your models for moderation. moderator = Moderator()
bsd-3-clause
boretom/pyload-apkg
source/py-mods-prebuilt-i386/site-packages/PIL/OleFileIO.py
41
16252
# # THIS IS WORK IN PROGRESS # # The Python Imaging Library # $Id$ # # stuff to deal with OLE2 Structured Storage files. this module is # used by PIL to read Image Composer and FlashPix files, but can also # be used to read other files of this type. # # History: # 1997-01-20 fl Created # 1997-01-22 fl Fixed 64-bit portability quirk # 2003-09-09 fl Fixed typo in OleFileIO.loadfat (noted by Daniel Haertle) # 2004-02-29 fl Changed long hex constants to signed integers # # Notes: # FIXME: sort out sign problem (eliminate long hex constants) # FIXME: change filename to use "a/b/c" instead of ["a", "b", "c"] # FIXME: provide a glob mechanism function (using fnmatchcase) # # Literature: # # "FlashPix Format Specification, Appendix A", Kodak and Microsoft, # September 1996. # # Quotes: # # "If this document and functionality of the Software conflict, # the actual functionality of the Software represents the correct # functionality" -- Microsoft, in the OLE format specification # # Copyright (c) Secret Labs AB 1997. # Copyright (c) Fredrik Lundh 1997. # # See the README file for information on usage and redistribution. 
# import string, StringIO def i16(c, o = 0): return ord(c[o])+(ord(c[o+1])<<8) def i32(c, o = 0): return ord(c[o])+(ord(c[o+1])<<8)+(ord(c[o+2])<<16)+(ord(c[o+3])<<24) MAGIC = '\320\317\021\340\241\261\032\341' # # -------------------------------------------------------------------- # property types VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6; VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11; VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17; VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23; VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28; VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64; VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68; VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72; VT_VECTOR=0x1000; # map property id to name (for debugging purposes) VT = {} for k, v in vars().items(): if k[:3] == "VT_": VT[v] = k # # -------------------------------------------------------------------- # Some common document types (root.clsid fields) WORD_CLSID = "00020900-0000-0000-C000-000000000046" # # -------------------------------------------------------------------- class _OleStream(StringIO.StringIO): """OLE2 Stream Returns a read-only file object which can be used to read the contents of a OLE stream. To open a stream, use the openstream method in the OleFile class. This function can be used with either ordinary streams, or ministreams, depending on the offset, sectorsize, and fat table arguments. """ # FIXME: should store the list of sects obtained by following # the fat chain, and load new sectors on demand instead of # loading it all in one go. 
def __init__(self, fp, sect, size, offset, sectorsize, fat): data = [] while sect != -2: # 0xFFFFFFFEL: fp.seek(offset + sectorsize * sect) data.append(fp.read(sectorsize)) sect = fat[sect] data = string.join(data, "") # print len(data), size StringIO.StringIO.__init__(self, data[:size]) # # -------------------------------------------------------------------- # FIXME: should add a counter in here to avoid looping forever # if the tree is broken. class _OleDirectoryEntry: """OLE2 Directory Entry Encapsulates a stream directory entry. Note that the constructor builds a tree of all subentries, so we only have to call it with the root object. """ def __init__(self, sidlist, sid): # store directory parameters. the caller provides # a complete list of directory entries, as read from # the directory stream. name, type, sect, size, sids, clsid = sidlist[sid] self.sid = sid self.name = name self.type = type # 1=storage 2=stream self.sect = sect self.size = size self.clsid = clsid # process child nodes, if any self.kids = [] sid = sidlist[sid][4][2] if sid != -1: # the directory entries are organized as a red-black tree. 
# the following piece of code does an ordered traversal of # such a tree (at least that's what I hope ;-) stack = [self.sid] # start at leftmost position left, right, child = sidlist[sid][4] while left != -1: # 0xFFFFFFFFL: stack.append(sid) sid = left left, right, child = sidlist[sid][4] while sid != self.sid: self.kids.append(_OleDirectoryEntry(sidlist, sid)) # try to move right left, right, child = sidlist[sid][4] if right != -1: # 0xFFFFFFFFL: # and then back to the left sid = right while 1: left, right, child = sidlist[sid][4] if left == -1: # 0xFFFFFFFFL: break stack.append(sid) sid = left else: # couldn't move right; move up instead while 1: ptr = stack[-1] del stack[-1] left, right, child = sidlist[ptr][4] if right != sid: break sid = right left, right, child = sidlist[sid][4] if right != ptr: sid = ptr # in the OLE file, entries are sorted on (length, name). # for convenience, we sort them on name instead. self.kids.sort() def __cmp__(self, other): "Compare entries by name" return cmp(self.name, other.name) def dump(self, tab = 0): "Dump this entry, and all its subentries (for debug purposes only)" TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)", "(property)", "(root)"] print " "*tab + repr(self.name), TYPES[self.type], if self.type in (2, 5): print self.size, "bytes", print if self.type in (1, 5) and self.clsid: print " "*tab + "{%s}" % self.clsid for kid in self.kids: kid.dump(tab + 2) # # -------------------------------------------------------------------- ## # This class encapsulates the interface to an OLE 2 structured # storage file. Use the {@link listdir} and {@link openstream} # methods to access the contents of this file. class OleFileIO: """OLE container object This class encapsulates the interface to an OLE 2 structured storage file. Use the listdir and openstream methods to access the contents of this file. Object names are given as a list of strings, one for each subentry level. The root entry should be omitted. 
For example, the following code extracts all image streams from a Microsoft Image Composer file: ole = OleFileIO("fan.mic") for entry in ole.listdir(): if entry[1:2] == "Image": fin = ole.openstream(entry) fout = open(entry[0:1], "wb") while 1: s = fin.read(8192) if not s: break fout.write(s) You can use the viewer application provided with the Python Imaging Library to view the resulting files (which happens to be standard TIFF files). """ def __init__(self, filename = None): if filename: self.open(filename) ## # Open an OLE2 file. def open(self, filename): """Open an OLE2 file""" if type(filename) == type(""): self.fp = open(filename, "rb") else: self.fp = filename header = self.fp.read(512) if len(header) != 512 or header[:8] != MAGIC: raise IOError, "not an OLE2 structured storage file" # file clsid (probably never used, so we don't store it) clsid = self._clsid(header[8:24]) # FIXME: could check version and byte order fields self.sectorsize = 1 << i16(header, 30) self.minisectorsize = 1 << i16(header, 32) self.minisectorcutoff = i32(header, 56) # Load file allocation tables self.loadfat(header) # Load direcory. This sets both the sidlist (ordered by id) # and the root (ordered by hierarchy) members. self.loaddirectory(i32(header, 48)) self.ministream = None self.minifatsect = i32(header, 60) def loadfat(self, header): # Load the FAT table. The header contains a sector numbers # for the first 109 FAT sectors. Additional sectors are # described by DIF blocks (FIXME: not yet implemented) sect = header[76:512] fat = [] for i in range(0, len(sect), 4): ix = i32(sect, i) if ix == -2 or ix == -1: # ix == 0xFFFFFFFEL or ix == 0xFFFFFFFFL: break s = self.getsect(ix) fat = fat + map(lambda i, s=s: i32(s, i), range(0, len(s), 4)) self.fat = fat def loadminifat(self): # Load the MINIFAT table. This is stored in a standard sub- # stream, pointed to by a header field. 
s = self._open(self.minifatsect).read() self.minifat = map(lambda i, s=s: i32(s, i), range(0, len(s), 4)) def getsect(self, sect): # Read given sector self.fp.seek(512 + self.sectorsize * sect) return self.fp.read(self.sectorsize) def _unicode(self, s): # Map unicode string to Latin 1 # FIXME: some day, Python will provide an official way to handle # Unicode strings, but until then, this will have to do... return filter(ord, s) def loaddirectory(self, sect): # Load the directory. The directory is stored in a standard # substream, independent of its size. # read directory stream fp = self._open(sect) # create list of sid entries self.sidlist = [] while 1: entry = fp.read(128) if not entry: break type = ord(entry[66]) name = self._unicode(entry[0:0+i16(entry, 64)]) ptrs = i32(entry, 68), i32(entry, 72), i32(entry, 76) sect, size = i32(entry, 116), i32(entry, 120) clsid = self._clsid(entry[80:96]) self.sidlist.append((name, type, sect, size, ptrs, clsid)) # create hierarchical list of directory entries self.root = _OleDirectoryEntry(self.sidlist, 0) def dumpdirectory(self): # Dump directory (for debugging only) self.root.dump() def _clsid(self, clsid): if clsid == "\0" * len(clsid): return "" return (("%08X-%04X-%04X-%02X%02X-" + "%02X" * 6) % ((i32(clsid, 0), i16(clsid, 4), i16(clsid, 6)) + tuple(map(ord, clsid[8:16])))) def _list(self, files, prefix, node): # listdir helper prefix = prefix + [node.name] for entry in node.kids: if entry.kids: self._list(files, prefix, entry) else: files.append(prefix[1:] + [entry.name]) def _find(self, filename): # openstream helper node = self.root for name in filename: for kid in node.kids: if kid.name == name: break else: raise IOError, "file not found" node = kid return node.sid def _open(self, start, size = 0x7FFFFFFF): # openstream helper. 
if size < self.minisectorcutoff: # ministream object if not self.ministream: self.loadminifat() self.ministream = self._open(self.sidlist[0][2]) return _OleStream(self.ministream, start, size, 0, self.minisectorsize, self.minifat) # standard stream return _OleStream(self.fp, start, size, 512, self.sectorsize, self.fat) ## # Returns a list of streams stored in this file. def listdir(self): """Return a list of streams stored in this file""" files = [] self._list(files, [], self.root) return files ## # Opens a stream as a read-only file object. def openstream(self, filename): """Open a stream as a read-only file object""" slot = self._find(filename) name, type, sect, size, sids, clsid = self.sidlist[slot] if type != 2: raise IOError, "this file is not a stream" return self._open(sect, size) ## # Gets a list of properties described in substream. def getproperties(self, filename): """Return properties described in substream""" fp = self.openstream(filename) data = {} # header s = fp.read(28) clsid = self._clsid(s[8:24]) # format id s = fp.read(20) fmtid = self._clsid(s[:16]) fp.seek(i32(s, 16)) # get section s = "****" + fp.read(i32(fp.read(4))-4) for i in range(i32(s, 4)): id = i32(s, 8+i*8) offset = i32(s, 12+i*8) type = i32(s, offset) # test for common types first (should perhaps use # a dictionary instead?) 
if type == VT_I2: value = i16(s, offset+4) if value >= 32768: value = value - 65536 elif type == VT_UI2: value = i16(s, offset+4) elif type in (VT_I4, VT_ERROR): value = i32(s, offset+4) elif type == VT_UI4: value = i32(s, offset+4) # FIXME elif type in (VT_BSTR, VT_LPSTR): count = i32(s, offset+4) value = s[offset+8:offset+8+count-1] elif type == VT_BLOB: count = i32(s, offset+4) value = s[offset+8:offset+8+count] elif type == VT_LPWSTR: count = i32(s, offset+4) value = self._unicode(s[offset+8:offset+8+count*2]) elif type == VT_FILETIME: value = long(i32(s, offset+4)) + (long(i32(s, offset+8))<<32) # FIXME: this is a 64-bit int: "number of 100ns periods # since Jan 1,1601". Should map this to Python time value = value / 10000000L # seconds elif type == VT_UI1: value = ord(s[offset+4]) elif type == VT_CLSID: value = self._clsid(s[offset+4:offset+20]) elif type == VT_CF: count = i32(s, offset+4) value = s[offset+8:offset+8+count] else: value = None # everything else yields "None" # FIXME: add support for VT_VECTOR #print "%08x" % id, repr(value), #print "(%s)" % VT[i32(s, offset) & 0xFFF] data[id] = value return data # # -------------------------------------------------------------------- # This script can be used to dump the directory of any OLE2 structured # storage file. if __name__ == "__main__": import sys for file in sys.argv[1:]: try: ole = OleFileIO(file) print "-" * 68 print file print "-" * 68 ole.dumpdirectory() for file in ole.listdir(): if file[-1][0] == "\005": print file props = ole.getproperties(file) props = props.items() props.sort() for k, v in props: print " ", k, v except IOError, v: print "***", "cannot read", file, "-", v
gpl-3.0
vipul-sharma20/oh-mainline
vendor/packages/html5lib/html5lib/tests/test_whitespace_filter.py
73
5580
import unittest from html5lib.filters.whitespace import Filter from html5lib.constants import spaceCharacters spaceCharacters = u"".join(spaceCharacters) class TestCase(unittest.TestCase): def runTest(self, input, expected): output = list(Filter(input)) errorMsg = "\n".join(["\n\nInput:", str(input), "\nExpected:", str(expected), "\nReceived:", str(output)]) self.assertEquals(output, expected, errorMsg) def runTestUnmodifiedOutput(self, input): self.runTest(input, input) def testPhrasingElements(self): self.runTestUnmodifiedOutput( [{"type": u"Characters", "data": u"This is a " }, {"type": u"StartTag", "name": u"span", "data": [] }, {"type": u"Characters", "data": u"phrase" }, {"type": u"EndTag", "name": u"span", "data": []}, {"type": u"SpaceCharacters", "data": u" " }, {"type": u"Characters", "data": u"with" }, {"type": u"SpaceCharacters", "data": u" " }, {"type": u"StartTag", "name": u"em", "data": [] }, {"type": u"Characters", "data": u"emphasised text" }, {"type": u"EndTag", "name": u"em", "data": []}, {"type": u"Characters", "data": u" and an " }, {"type": u"StartTag", "name": u"img", "data": [[u"alt", u"image"]] }, {"type": u"Characters", "data": u"." 
}]) def testLeadingWhitespace(self): self.runTest( [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"SpaceCharacters", "data": spaceCharacters}, {"type": u"Characters", "data": u"foo"}, {"type": u"EndTag", "name": u"p", "data": []}], [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"SpaceCharacters", "data": u" "}, {"type": u"Characters", "data": u"foo"}, {"type": u"EndTag", "name": u"p", "data": []}]) def testLeadingWhitespaceAsCharacters(self): self.runTest( [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"Characters", "data": spaceCharacters + u"foo"}, {"type": u"EndTag", "name": u"p", "data": []}], [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"Characters", "data": u" foo"}, {"type": u"EndTag", "name": u"p", "data": []}]) def testTrailingWhitespace(self): self.runTest( [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"Characters", "data": u"foo"}, {"type": u"SpaceCharacters", "data": spaceCharacters}, {"type": u"EndTag", "name": u"p", "data": []}], [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"Characters", "data": u"foo"}, {"type": u"SpaceCharacters", "data": u" "}, {"type": u"EndTag", "name": u"p", "data": []}]) def testTrailingWhitespaceAsCharacters(self): self.runTest( [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"Characters", "data": u"foo" + spaceCharacters}, {"type": u"EndTag", "name": u"p", "data": []}], [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"Characters", "data": u"foo "}, {"type": u"EndTag", "name": u"p", "data": []}]) def testWhitespace(self): self.runTest( [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"Characters", "data": u"foo" + spaceCharacters + "bar"}, {"type": u"EndTag", "name": u"p", "data": []}], [{"type": u"StartTag", "name": u"p", "data": []}, {"type": u"Characters", "data": u"foo bar"}, {"type": u"EndTag", "name": u"p", "data": []}]) def testLeadingWhitespaceInPre(self): self.runTestUnmodifiedOutput( [{"type": 
u"StartTag", "name": u"pre", "data": []}, {"type": u"SpaceCharacters", "data": spaceCharacters}, {"type": u"Characters", "data": u"foo"}, {"type": u"EndTag", "name": u"pre", "data": []}]) def testLeadingWhitespaceAsCharactersInPre(self): self.runTestUnmodifiedOutput( [{"type": u"StartTag", "name": u"pre", "data": []}, {"type": u"Characters", "data": spaceCharacters + u"foo"}, {"type": u"EndTag", "name": u"pre", "data": []}]) def testTrailingWhitespaceInPre(self): self.runTestUnmodifiedOutput( [{"type": u"StartTag", "name": u"pre", "data": []}, {"type": u"Characters", "data": u"foo"}, {"type": u"SpaceCharacters", "data": spaceCharacters}, {"type": u"EndTag", "name": u"pre", "data": []}]) def testTrailingWhitespaceAsCharactersInPre(self): self.runTestUnmodifiedOutput( [{"type": u"StartTag", "name": u"pre", "data": []}, {"type": u"Characters", "data": u"foo" + spaceCharacters}, {"type": u"EndTag", "name": u"pre", "data": []}]) def testWhitespaceInPre(self): self.runTestUnmodifiedOutput( [{"type": u"StartTag", "name": u"pre", "data": []}, {"type": u"Characters", "data": u"foo" + spaceCharacters + "bar"}, {"type": u"EndTag", "name": u"pre", "data": []}]) def buildTestSuite(): return unittest.defaultTestLoader.loadTestsFromName(__name__) def main(): buildTestSuite() unittest.main() if __name__ == "__main__": main()
agpl-3.0
ODInfoBiz/csvengine-ui
csvengine/data_cache.py
1
7704
''' Created on Dec 7, 2015 @author: jumbrich ''' import hashlib import requests from StringIO import StringIO import os import urllib import urlnorm from werkzeug.exceptions import RequestEntityTooLarge from pyyacp.yacp import YACParser from csvengine.utils import assure_path_exists import structlog log =structlog.get_logger() class DataCache(object): DB="db" WEB="web" TMP="tmp" def __init__(self, config, max_file_size): self.submit_folder = { DataCache.WEB: assure_path_exists(config['web_submit']), DataCache.DB: assure_path_exists(config['db_submit']), DataCache.TMP: assure_path_exists(config['tmp_submit']) } self.cleaned_folder = { DataCache.WEB: assure_path_exists(config['web_cleaned']), DataCache.DB: assure_path_exists(config['db_cleaned']), DataCache.TMP: assure_path_exists(config['tmp_cleaned']) } self.max_file_size = max_file_size def submitToWeb(self, file=None, url=None, content=None): return self.submit(file=file, url=url, content=content, toFolder=DataCache.WEB) def submitToTmp(self, file=None, url=None, content=None): return self.submit(file=file, url=url, content=content, toFolder=DataCache.TMP) def submitToDB(self, file=None, url=None, content=None): return self.submit(file=file, url=url, content=content, toFolder=DataCache.DB) def submit(self, file=None, url=None, content=None, toFolder=None): """ 1) retrieve and compute hash of original content 2) store submitted content using hash as filename IFF not exist optional URL as symlink :param file: :param url: :param content: :param toFolder: :return: the md5 of the original file """ if toFolder in self.submit_folder and toFolder in self.cleaned_folder: s_folder = self.submit_folder[toFolder] c_folder = self.cleaned_folder[toFolder] else: return None if file: md5 = storeFile(file, s_folder) elif url: md5 = storeURL(url, s_folder, max_file_size=self.max_file_size) elif content: md5 = storeContent(content, s_folder) else: return None # check if cleaned exists submitted_path=os.path.join(s_folder, md5) 
cleaned_path = os.path.join(c_folder, md5) # at first look for stored cleaned version if os.path.exists(cleaned_path): return md5 else: # generate and store cleaned version table = YACParser(filename=submitted_path) cleaned = table.generate() storeContent(cleaned, c_folder, md5=md5) return md5 def getParser(self, fileHash, folder=None, original=False): """ returns a parser and stores cleaned file if not already available """ if folder: file_path = os.path.join(folder, fileHash) if os.path.exists(file_path): if folder in self.cleaned_folder.values(): return YACParser(filename=file_path, skip_guess_encoding=True) else: return YACParser(filename=file_path) else: if not original: for f in self.cleaned_folder: cleaned_path = os.path.join(self.cleaned_folder[f], fileHash) if os.path.exists(cleaned_path): return YACParser(filename=cleaned_path, skip_guess_encoding=True) for f in self.submit_folder: submit_path = os.path.join(self.submit_folder[f], fileHash) cleaned_path = os.path.join(self.cleaned_folder[f], fileHash) if os.path.exists(submit_path): table = YACParser(filename=submit_path) if not os.path.exists(cleaned_path): cleaned = table.generate() storeContent(cleaned, cleaned_path, md5=fileHash) return table return None def exists(self, fileHash): for f in self.cleaned_folder: cleaned_path = os.path.join(self.cleaned_folder[f], fileHash) if os.path.exists(cleaned_path): return True return False def getSubmit(self, fileHash, folder=False): if folder: submit_file = getFileContent(fileHash, self.submit_folder[folder]) if submit_file: return submit_file else: for f in self.submit_folder: submit_file = getFileContent(fileHash, self.submit_folder[f]) if submit_file: return submit_file return None def getFileName(self, url, folder=None): url_norm = urlnorm.norm(url.strip()) url_fname = urllib.quote_plus(url_norm) if folder: submit_path = os.path.join(self.submit_folder[folder], url_fname) if os.path.exists(submit_path): return os.readlink(submit_path) else: for f in 
self.submit_folder: submit_path = os.path.join(self.submit_folder[f], url_fname) if os.path.exists(submit_path): return os.readlink(submit_path) return None def getFileContent(fileID, path=None): if path: fileID = os.path.join(path,fileID) if not os.path.exists(fileID): return None with open(fileID) as f: return f.read() def getFileName(url, path): url_norm = urlnorm.norm(url.strip()) url_fname = urllib.quote_plus(url_norm) f=os.path.join(path,url_fname) return os.readlink(f) def getURLContent(url, path): with open(getFileName(url, path)) as f: return f.read() def storeFile(f, path): c = f.read() md5 = hashlib.md5(c).hexdigest() fpath = os.path.join(path, md5) log.debug("storing file", file=fpath) with open(fpath,'w') as f: f.write(c) log.info("file stored", file=fpath) return md5 def storeContent(content, path, md5=None): if not md5: md5 = hashlib.md5(content).hexdigest() fpath = os.path.join(path, md5) log.debug("storing content", file=fpath) with open(fpath,'w') as f: f.write(content) log.info("content stored", file=fpath) return md5 def storeURL(url, path, max_file_size): #download URL and send fileID log.debug("downloading url", url=url, max_file_size=max_file_size ) try: r = requests.get(url, stream=True) size = 0 ctt = StringIO() sig = hashlib.md5() for chunk in r.iter_content(2048): size += len(chunk) ctt.write(chunk) sig.update(chunk) if size > max_file_size: r.close() raise RequestEntityTooLarge() md5 = sig.hexdigest() ctt.seek(0) fpath=os.path.join(path, md5) if os.path.exists(fpath): print 'file exists', fpath return md5 log.debug("storing url", url=url, file=fpath) with open (fpath,'w') as fd: t = ctt.read(1048576) while t: fd.write(t) t = ctt.read(1048576) url_norm = urlnorm.norm(url.strip()) url_fname = urllib.quote_plus(url_norm) f = os.path.join(path, url_fname) os.symlink(fpath,f) log.debug("url stored", url=url, file=fpath) return md5 except Exception as e: raise e
gpl-3.0
qbit/psutil
examples/pidof.py
15
1172
#!/usr/bin/env python # Copyright (c) 2009, Giampaolo Rodola', karthikrev. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ A clone of 'pidof' cmdline utility. $ pidof python 1140 1138 1136 1134 1133 1129 1127 1125 1121 1120 1119 """ from __future__ import print_function import psutil import sys def pidof(pgname): pids = [] for proc in psutil.process_iter(): # search for matches in the process name and cmdline try: name = proc.name() except psutil.Error: pass else: if name == pgname: pids.append(str(proc.pid)) continue try: cmdline = proc.cmdline() except psutil.Error: pass else: if cmdline and cmdline[0] == pgname: pids.append(str(proc.pid)) return pids def main(): if len(sys.argv) != 2: sys.exit('usage: %s pgname' % __file__) else: pgname = sys.argv[1] pids = pidof(pgname) if pids: print(" ".join(pids)) if __name__ == '__main__': main()
bsd-3-clause
turtlewit/GSHS_RPG
AdventureEngine/CoreEngine/input.py
2
3088
#------------------------------------------------------------------------------# # Copyright 2016-2017 Golden Sierra Game Development Class # # This file is part of Verloren (GSHS_RPG). # # # # Verloren (GSHS_RPG) is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # Verloren (GSHS_RPG) is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with Verloren (GSHS_RPG). If not, see <http://www.gnu.org/licenses/>. # #------------------------------------------------------------------------------# import sys import curses class Input: #renderer = None commandHistory = [] command = None unf_command = "" cheese = "cheese" takeTextInput = False char = None def Update(self, renderer): Input.command = None Input.char = None if renderer: currentCharacter = renderer.m_screen.getch() if currentCharacter != -1: if currentCharacter != curses.KEY_RESIZE: Input.char = currentCharacter if Input.takeTextInput: if currentCharacter == ord('\n'): if len(Input.unf_command.split()) > 0: Input.commandHistory.insert(0,Input.command) Input.command = Input.unf_command else: Input.command = 10 renderer.m_cmd = "" Input.unf_command = "" if sys.platform == 'linux' \ or sys.platform == 'linux2' \ or sys.platform == 'linux-armv7l': if currentCharacter == 127 \ or currentCharacter == curses.KEY_BACKSPACE: renderer.m_cmd = renderer.m_cmd[:-1] Input.unf_command = Input.unf_command[:-1] else: if currentCharacter == 8: renderer.m_cmd = renderer.m_cmd[:-1] Input.unf_command = Input.unf_command[:-1] if currentCharacter >=32 and currentCharacter <= 126: if 
renderer.m_vorCmd: if len(Input.unf_command) \ < renderer.BUFFER_X \ - len(renderer.m_vorCmd) \ - 1: renderer.m_cmd += chr(currentCharacter) Input.unf_command += chr(currentCharacter) if currentCharacter in [ curses.KEY_UP, curses.KEY_DOWN, curses.KEY_LEFT, curses.KEY_RIGHT, 27 ]: Input.command = currentCharacter
gpl-3.0
talishte/ctigre
env/lib/python2.7/site-packages/mezzanine/blog/migrations/0003_categories.py
8
7742
# encoding: utf-8 import datetime from south.db import db from south.v2 import DataMigration from django.db import models try: from django.contrib.auth import get_user_model except ImportError: # django < 1.5 from django.contrib.auth.models import User else: User = get_user_model() user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name) user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name) class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." for post in orm.BlogPost.objects.all(): if post.category: post.categories.add(post.category) def backwards(self, orm): "Write your backwards methods here." models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, user_model_label: { 'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 
'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'blog.blogcategory': { 'Meta': {'object_name': 'BlogCategory'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'blog.blogpost': { 'Meta': {'object_name': 'BlogPost'}, '_keywords': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['blog.BlogCategory']", 'symmetrical': 'False', 'blank': 'True'}), 'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'blogposts'", 'null': 'True', 'to': "orm['blog.BlogCategory']"}), 'content': ('mezzanine.core.fields.HtmlField', [], {}), 'description': ('mezzanine.core.fields.HtmlField', [], {'blank': 'True'}), 'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Keyword']", 'symmetrical': 'False', 'blank': 'True'}), 'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blogposts'", 'to': "orm['%s']" % user_orm_label}) }, 'blog.comment': { 'Meta': {'object_name': 'Comment'}, 'approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'blog_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['blog.BlogPost']"}), 'body': ('django.db.models.fields.TextField', [], {}), 'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['blog.Comment']"}), 'time_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, 'contenttypes.contenttype': { 
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'core.keyword': { 'Meta': {'object_name': 'Keyword'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['blog']
bsd-2-clause
vitan/hue
desktop/core/ext-py/Django-1.6.10/tests/test_utils/doctest_output.py
74
2387
from django.utils import six __test__ = {"API_TEST": r""" # Some checks of the doctest output normalizer. # Standard doctests do fairly >>> import json >>> from django.utils.xmlutils import SimplerXMLGenerator >>> from django.utils.six import StringIO >>> def produce_json(): ... return json.dumps(['foo', {'bar': ('baz', None, 1.0, 2), 'whiz': 42}]) >>> def produce_xml(): ... stream = StringIO() ... xml = SimplerXMLGenerator(stream, encoding='utf-8') ... xml.startDocument() ... xml.startElement("foo", {"aaa" : "1.0", "bbb": "2.0"}) ... xml.startElement("bar", {"ccc" : "3.0"}) ... xml.characters("Hello") ... xml.endElement("bar") ... xml.startElement("whiz", {}) ... xml.characters("Goodbye") ... xml.endElement("whiz") ... xml.endElement("foo") ... xml.endDocument() ... return stream.getvalue() >>> def produce_xml_fragment(): ... stream = StringIO() ... xml = SimplerXMLGenerator(stream, encoding='utf-8') ... xml.startElement("foo", {"aaa": "1.0", "bbb": "2.0"}) ... xml.characters("Hello") ... xml.endElement("foo") ... xml.startElement("bar", {"ccc": "3.0", "ddd": "4.0"}) ... xml.endElement("bar") ... 
return stream.getvalue() # JSON output is normalized for field order, so it doesn't matter # which order json dictionary attributes are listed in output >>> produce_json() '["foo", {"bar": ["baz", null, 1.0, 2], "whiz": 42}]' >>> produce_json() '["foo", {"whiz": 42, "bar": ["baz", null, 1.0, 2]}]' # XML output is normalized for attribute order, so it doesn't matter # which order XML element attributes are listed in output >>> produce_xml() '<?xml version="1.0" encoding="UTF-8"?>\n<foo aaa="1.0" bbb="2.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>' >>> produce_xml() '<?xml version="1.0" encoding="UTF-8"?>\n<foo bbb="2.0" aaa="1.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>' >>> produce_xml_fragment() '<foo aaa="1.0" bbb="2.0">Hello</foo><bar ccc="3.0" ddd="4.0"></bar>' >>> produce_xml_fragment() '<foo bbb="2.0" aaa="1.0">Hello</foo><bar ddd="4.0" ccc="3.0"></bar>' """} if six.PY2: __test__["API_TEST"] += """ >>> def produce_long(): ... return 42L >>> def produce_int(): ... return 42 # Long values are normalized and are comparable to normal integers ... >>> produce_long() 42 # ... and vice versa >>> produce_int() 42L """
apache-2.0
tomchristie/django
tests/forms_tests/widget_tests/test_checkboxinput.py
64
3460
from django.forms import CheckboxInput from .base import WidgetTest class CheckboxInputTest(WidgetTest): widget = CheckboxInput() def test_render_empty(self): self.check_html(self.widget, 'is_cool', '', html='<input type="checkbox" name="is_cool" />') def test_render_none(self): self.check_html(self.widget, 'is_cool', None, html='<input type="checkbox" name="is_cool" />') def test_render_false(self): self.check_html(self.widget, 'is_cool', False, html='<input type="checkbox" name="is_cool" />') def test_render_true(self): self.check_html( self.widget, 'is_cool', True, html='<input checked type="checkbox" name="is_cool" />' ) def test_render_value(self): """ Using any value that's not in ('', None, False, True) will check the checkbox and set the 'value' attribute. """ self.check_html( self.widget, 'is_cool', 'foo', html='<input checked type="checkbox" name="is_cool" value="foo" />', ) def test_render_int(self): """ Integers are handled by value, not as booleans (#17114). """ self.check_html( self.widget, 'is_cool', 0, html='<input checked type="checkbox" name="is_cool" value="0" />', ) self.check_html( self.widget, 'is_cool', 1, html='<input checked type="checkbox" name="is_cool" value="1" />', ) def test_render_check_test(self): """ You can pass 'check_test' to the constructor. This is a callable that takes the value and returns True if the box should be checked. 
""" widget = CheckboxInput(check_test=lambda value: value.startswith('hello')) self.check_html(widget, 'greeting', '', html=( '<input type="checkbox" name="greeting" />' )) self.check_html(widget, 'greeting', 'hello', html=( '<input checked type="checkbox" name="greeting" value="hello" />' )) self.check_html(widget, 'greeting', 'hello there', html=( '<input checked type="checkbox" name="greeting" value="hello there" />' )) self.check_html(widget, 'greeting', 'hello & goodbye', html=( '<input checked type="checkbox" name="greeting" value="hello &amp; goodbye" />' )) def test_render_check_exception(self): """ Calling check_test() shouldn't swallow exceptions (#17888). """ widget = CheckboxInput( check_test=lambda value: value.startswith('hello'), ) with self.assertRaises(AttributeError): widget.render('greeting', True) def test_value_from_datadict(self): """ The CheckboxInput widget will return False if the key is not found in the data dictionary (because HTML form submission doesn't send any result for unchecked checkboxes). """ self.assertFalse(self.widget.value_from_datadict({}, {}, 'testing')) def test_value_from_datadict_string_int(self): value = self.widget.value_from_datadict({'testing': '0'}, {}, 'testing') self.assertIs(value, True) def test_value_omitted_from_data(self): self.assertIs(self.widget.value_omitted_from_data({'field': 'value'}, {}, 'field'), False) self.assertIs(self.widget.value_omitted_from_data({}, {}, 'field'), False)
bsd-3-clause
axsauze/eventsfinder
dbindexer/resolver.py
55
1476
from django.conf import settings from django.utils.importlib import import_module from django.core.exceptions import ImproperlyConfigured class Resolver(object): def __init__(self): self.backends = [] self.load_backends(getattr(settings, 'DBINDEXER_BACKENDS', ('dbindexer.backends.BaseResolver', 'dbindexer.backends.FKNullFix'))) def load_backends(self, backend_paths): for backend in backend_paths: self.backends.append(self.load_backend(backend)) def load_backend(self, path): module_name, attr_name = path.rsplit('.', 1) try: mod = import_module(module_name) except (ImportError, ValueError), e: raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e)) try: return getattr(mod, attr_name)() except AttributeError: raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name)) def convert_filters(self, query): for backend in self.backends: backend.convert_filters(query) def create_index(self, lookup): for backend in self.backends: backend.create_index(lookup) def convert_insert_query(self, query): for backend in self.backends: backend.convert_insert_query(query) resolver = Resolver()
bsd-3-clause
lschumm/homebrew-fTerm
lib/directory.py
2
4509
""" [fTerm] directory.py This module defines all of the standard directory operations of fTerm. """ # NOTE: this is extraneous # pylint: disable=C0103,C0303 # NOTE: no effect statement required # pylint: disable=C0301 # NOTE: unused variable 'dn' required in directory traversal # pylint: disable=W0612 # for running shell operations import subprocess # for sort import re # for reading files import os synonyms = { "tempfile":"temp", "files":"list", "contents":"list", "index":"list", "manifest":"list", "menu":"list", "directory":"list", "switch":"swap", "trade":"swap", "interchange":"swap", "change":"swap", "swop":"swap", "exchange":"swap", "remove":"delete", "annul":"delete", "wipe":"delete", "relocate":"move", "displace":"move", "duplicate":"copy", "xerox":"copy", "replicate":"copy", "organize":"sort", "organise":"sort", "reorganize":"sort", "reorganise":"sort", "dir":"where", "folder":"where", "locate":"find", } def raw_temp(): """(only for other functions) Generate a temporary file.""" tempfile = subprocess.Popen(["mktemp", "-d"], stdout=subprocess.PIPE) return tempfile.communicate()[0].replace("\n", "") def temp(): """Generate a temporary file.""" return "echo %s;" % (raw_temp()) def List(*dirs, **keywords): # name capitalised for no name conflict """List the files in a directory.""" adj_prefix = "" if ["long"] in keywords.values(): adj_prefix += "-l " return "ls %s %s;" % (adj_prefix, ' '.join(dirs)) def swap(file1, file2): """A function that swaps the names of two files.""" call = "" # make a temporary file tempfile = raw_temp() # move 1 to temp call += "mv %s %s;" % (file1, tempfile) # move 2 to 1 call += "mv %s %s;" % (file2, file1) # move temp to 1 call += "mv %s/$(basename %s) %s;" % (tempfile, file1, file2) return call def delete(*files): """Delete a file or directory.""" return 'rm -rf %s;' * len(files) % tuple(files) def move(path1, path2): """Move the file or folder at *path1* to *path2*.""" return "mv %s %s;" % (path1, path2) def copy(path1, path2): 
"""Copy the file or folder at *path1* to *path2*.""" return "cp %s %s;" % (path1, path2) def sort(directory, exp): """Takes a directory *directory* and a regular expression *exp*. Sorts each file into a folder with name equal to the match of *exp* in its filename.""" call = "" # files to sort files = os.listdir(directory) files_index = enumerate(files) # make folders to sort folders = [re.search(exp, x).group(0) for x in files] # in case a directory name is the same as the name of a file tempfiles = [raw_temp() for i in files] call1, call3 = "", "" call2 = [] # no identical mkdir, throws warning for i, item in files_index: call1 += "mv %s %s/%s;" % (files[i], tempfiles[i], files[i]) call2.append("mkdir %s;" % (re.search(exp, files[i]).group(0))) call3 += "mv %s/%s %s/$(basename %s);" % (tempfiles[i], files[i], folders[i], files[i]) call = call1 + "".join(set(call2)) + call3 return call def where(): """(For shells that do not have a path string) show the current directory.""" # Might be useful in a possible electron version? Scripting also. idk i was bored. return "echo 'You are in '; pwd;" def raw_find(directory): """(dev only) Recursively find all files within a directory, return as Python list.""" return [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(directory)) for f in fn] def find(directory, exp=r"[\s\S]*", *funcs): """Find all files in *directory* that match (python) regular expression *exp*. If specified, runs *func* on these files.""" call = "echo -e '" pattern = re.compile(exp) # thanks to John La Rooy (stackoverflow.com/users/174728/john-la-rooy) for x in raw_find(directory): try: # throws an AttributeError if there isn't a match pattern.match(os.path.basename(x)).group() call += x + "\\n" # in case there isn't a match except AttributeError: continue if call == "echo -e '": return ":;" # remove last newline call = call[:-2] if len(funcs) != 0: return call + "' | xargs %s;" % (" ".join(funcs)) else: return call + "';"
gpl-3.0
tima/beets
beetsplug/fromfilename.py
25
5733
# This file is part of beets. # Copyright 2015, Jan-Erik Dahlin # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """If the title is empty, try to extract track and title from the filename. """ from __future__ import (division, absolute_import, print_function, unicode_literals) from beets import plugins from beets.util import displayable_path import os import re # Filename field extraction patterns. PATTERNS = [ # "01 - Track 01" and "01": do nothing r'^(\d+)\s*-\s*track\s*\d$', r'^\d+$', # Useful patterns. r'^(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$', r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$', r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$', r'^(?P<artist>.+)-(?P<title>.+)$', r'^(?P<track>\d+)\.\s*(?P<artist>.+)-(?P<title>.+)$', r'^(?P<track>\d+)\s*-\s*(?P<artist>.+)-(?P<title>.+)$', r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)$', r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)$', r'^(?P<title>.+)$', r'^(?P<track>\d+)\.\s*(?P<title>.+)$', r'^(?P<track>\d+)\s*-\s*(?P<title>.+)$', r'^(?P<track>\d+)\s(?P<title>.+)$', r'^(?P<title>.+) by (?P<artist>.+)$', ] # Titles considered "empty" and in need of replacement. BAD_TITLE_PATTERNS = [ r'^$', r'\d+?\s?-?\s*track\s*\d+', ] def equal(seq): """Determine whether a sequence holds identical elements. """ return len(set(seq)) <= 1 def equal_fields(matchdict, field): """Do all items in `matchdict`, whose values are dictionaries, have the same value for `field`? 
(If they do, the field is probably not the title.) """ return equal(m[field] for m in matchdict.values()) def all_matches(names, pattern): """If all the filenames in the item/filename mapping match the pattern, return a dictionary mapping the items to dictionaries giving the value for each named subpattern in the match. Otherwise, return None. """ matches = {} for item, name in names.items(): m = re.match(pattern, name, re.IGNORECASE) if m and m.groupdict(): # Only yield a match when the regex applies *and* has # capture groups. Otherwise, no information can be extracted # from the filename. matches[item] = m.groupdict() else: return None return matches def bad_title(title): """Determine whether a given title is "bad" (empty or otherwise meaningless) and in need of replacement. """ for pat in BAD_TITLE_PATTERNS: if re.match(pat, title, re.IGNORECASE): return True return False def apply_matches(d): """Given a mapping from items to field dicts, apply the fields to the objects. """ some_map = d.values()[0] keys = some_map.keys() # Only proceed if the "tag" field is equal across all filenames. if 'tag' in keys and not equal_fields(d, 'tag'): return # Given both an "artist" and "title" field, assume that one is # *actually* the artist, which must be uniform, and use the other # for the title. This, of course, won't work for VA albums. if 'artist' in keys: if equal_fields(d, 'artist'): artist = some_map['artist'] title_field = 'title' elif equal_fields(d, 'title'): artist = some_map['title'] title_field = 'artist' else: # Both vary. Abort. return for item in d: if not item.artist: item.artist = artist # No artist field: remaining field is the title. else: title_field = 'title' # Apply the title and track. for item in d: if bad_title(item.title): item.title = unicode(d[item][title_field]) if 'track' in d[item] and item.track == 0: item.track = int(d[item]['track']) # Plugin structure and hook into import process. 
class FromFilenamePlugin(plugins.BeetsPlugin): def __init__(self): super(FromFilenamePlugin, self).__init__() self.register_listener('import_task_start', filename_task) def filename_task(task, session): """Examine each item in the task to see if we can extract a title from the filename. Try to match all filenames to a number of regexps, starting with the most complex patterns and successively trying less complex patterns. As soon as all filenames match the same regex we can make an educated guess of which part of the regex that contains the title. """ items = task.items if task.is_album else [task.item] # Look for suspicious (empty or meaningless) titles. missing_titles = sum(bad_title(i.title) for i in items) if missing_titles: # Get the base filenames (no path or extension). names = {} for item in items: path = displayable_path(item.path) name, _ = os.path.splitext(os.path.basename(path)) names[item] = name # Look for useful information in the filenames. for pattern in PATTERNS: d = all_matches(names, pattern) if d: apply_matches(d)
mit
sahiljain/catapult
telemetry/telemetry/internal/browser/possible_browser.py
7
1414
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.internal.app import possible_app class PossibleBrowser(possible_app.PossibleApp): """A browser that can be controlled. Call Create() to launch the browser and begin manipulating it.. """ def __init__(self, browser_type, target_os, supports_tab_control): super(PossibleBrowser, self).__init__(app_type=browser_type, target_os=target_os) self._supports_tab_control = supports_tab_control self._credentials_path = None def __repr__(self): return 'PossibleBrowser(app_type=%s)' % self.app_type @property def browser_type(self): return self.app_type @property def supports_tab_control(self): return self._supports_tab_control def _InitPlatformIfNeeded(self): raise NotImplementedError() def Create(self, finder_options): raise NotImplementedError() def SupportsOptions(self, browser_options): """Tests for extension support.""" raise NotImplementedError() def IsRemote(self): return False def RunRemote(self): pass def UpdateExecutableIfNeeded(self): pass def last_modification_time(self): return -1 def SetCredentialsPath(self, credentials_path): self._credentials_path = credentials_path
bsd-3-clause
TheOriginalBDM/Lazy-Cleaner-9000
code/clean_sweep_vision.py
1
6258
#!/usr/bin/env python from picamera.array import PiRGBArray from picamera import PiCamera import cv2 import time from colormath.color_diff import delta_e_cie2000 from colormath.color_objects import LabColor, sRGBColor from colormath.color_conversions import convert_color def nothing(*arg): pass def is_allowed_color(cur_int, avg_int, m_val): b = abs(cur_int[0] - avg_int[0]) g = abs(cur_int[1] - avg_int[1]) r = abs(cur_int[2] - avg_int[2]) if (b > m_val or g > m_val or r > m_val): return True else: return False def make_gt_val(val, min_val): if val < min_val: val = min_val return val def make_odd(val): if val % 2 == 0: val += 1 return val def get_avg_bgr(in_img, in_cntrs): ttlA = 0 sum_roiA_mean = (0, 0, 0) avg_roiA_mean = (0, 0, 0) ttlA = len(in_cntrs) for cnt2 in in_cntrs: x2, y2, w2, h2 = cv2.boundingRect(cnt2) roiA = in_img[y:y2+w2, x:x2+h2] roiA_mean = cv2.mean(roiA) int_roiA_mean = (int(roiA_mean[0]), int(roiA_mean[1]), int(roiA_mean[2])) sum_roiA_mean = (int_roiA_mean[0] + sum_roiA_mean[0], int_roiA_mean[1] + sum_roiA_mean[1], int_roiA_mean[2] + sum_roiA_mean[2]) if ttlA > 0: avg_roiA_mean = (sum_roiA_mean[0]/ttlA, sum_roiA_mean[1]/ttlA, sum_roiA_mean[2]/ttlA) return avg_roiA_mean window_nm = 'img_cntrls' cam_res_w = 640 cam_res_h = 480 cam_fr_rt = 32 cv2.namedWindow(window_nm) cv2.createTrackbar('blur_size', window_nm, 7 , 21, nothing) cv2.createTrackbar('canny_min', window_nm, 156, 255, nothing) cv2.createTrackbar('thresh_min', window_nm, 7 , 255, nothing) cv2.createTrackbar('min_area', window_nm, 5 , 2000, nothing) cv2.createTrackbar('max_area', window_nm, 40000 , 90000, nothing) cv2.createTrackbar('max_delta', window_nm, 20 , 100, nothing) cv2.createTrackbar('get_avg', window_nm, 0 , 1, nothing) cv2.createTrackbar('get_mode', window_nm, 0, 7, nothing) camera = PiCamera() camera.resolution = (cam_res_w, cam_res_h) camera.framerate = cam_fr_rt rawCapture = PiRGBArray(camera, size=(cam_res_w, cam_res_h)) time.sleep(0.2) avg_roi_mean = (0, 0, 0) #b, g, r 
delta_color = 000.0000 for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True): ############################################# ### GET THE CURRENT FRAME FROM THE CAMERA ### ############################################# im = frame.array im_raw = im #keep a copy in case we want to look at it later #################### ### GET SETTINGS ### #################### s = cv2.getTrackbarPos('get_avg', window_nm) blur_size = cv2.getTrackbarPos('blur_size',window_nm) canny_min = cv2.getTrackbarPos('canny_min',window_nm) thresh_min = cv2.getTrackbarPos('thresh_min',window_nm) min_area = cv2.getTrackbarPos('min_area',window_nm) max_area = cv2.getTrackbarPos('max_area',window_nm) max_delta = cv2.getTrackbarPos('max_delta',window_nm) mode = cv2.getTrackbarPos('get_mode', window_nm) ############################ ### ENSURE CORRECT VALUE ### ############################ blur_size = make_odd(blur_size) blur_size = make_gt_val(blur_size, 0) thresh_min = make_odd(thresh_min) thresh_min = make_gt_val(thresh_min, 0) ######################################################## ### START IMAGE PROCESSING TO FIND OBJECTS IN RANGE ### ######################################################## imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) blur = cv2.blur(imgray, (blur_size, blur_size)) #edged = cv2.Canny(blur, canny_min, 255) ret3, thresh = cv2.threshold(blur, thresh_min, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) ###S = 1 means get an average of the overall RGB picture if s == 1: blur_size == 0 thresh_size = 1 min_area = 0 ovr_avg = get_avg_bgr(im, contours) avg_roi_mean = ovr_avg print avg_roi_mean cv2.setTrackbarPos('get_avg', window_nm, 0) else: ttl_area = 0 ttl_cntrs = len(contours) ttl_color = 0 sum_roi_mean = (0, 0, 0) for cnt in contours: a = cv2.contourArea(cnt) ### DO WE HAVE SOMETHING IN THE RIGHT SIZE (NO NEED TO PICK UP CARS) ### if min_area < a < max_area: ttl_area += 1 x, y, 
h, w = cv2.boundingRect(cnt) roi = im[y:y+h, x:x+w] roi_mean = cv2.mean(roi) int_roi_mean = (int(roi_mean[0]), int(roi_mean[1]), int(roi_mean[2])) b, g, r = avg_roi_mean bckgrnd_lab = convert_color(sRGBColor(r, g, b), LabColor) contColor_lab = convert_color(sRGBColor(roi_mean[2],roi_mean[1], roi_mean[0]), LabColor) delta_color = round(delta_e_cie2000(bckgrnd_lab, contColor_lab),1) if delta_color >= max_delta: # if is_allowed_color(int_roi_mean, avg_roi_mean, max_dev): cv2.rectangle(im, (x, y), (x+h, y+w), int_roi_mean, 2) ttl_color += 1 strLoc = str(x) + ',' + str(y) + ':' + str(delta_color) cv2.putText(im, strLoc, (x,y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0,0,0), 1) strTTL = str(ttl_cntrs) + ' - ' + str(ttl_area) + ' - ' + str(ttl_color) cv2.putText(im, str(strTTL), (20,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 0), 2) cv2.putText(im, str(avg_roi_mean), (20, cam_res_h - 20) ,cv2.FONT_HERSHEY_PLAIN, 2.0, avg_roi_mean, 2) if mode == 0: cv2.imshow('imgview', im_raw) print 'Raw image view' elif mode == 1: cv2.imshow('imgview', imgray) print 'Grayscale view' elif mode == 2: cv2.imshow('imgview', blur) print 'Blur view' elif mode == 3: cv2.imshow('imgview', blur) print 'Blur view' elif mode == 4: cv2.imshow('imgview', thresh) print 'Threshold view' else: cv2.imshow('imgview', im) print 'Contour overlay on raw view' ch = cv2.waitKey(5) rawCapture.truncate(0) if ch == 27: break cv2.destroyAllWindows()
gpl-3.0
MiLk/ansible
lib/ansible/modules/network/aos/aos_logical_device_map.py
78
8922
#!/usr/bin/python # # (c) 2017 Apstra Inc, <community@apstra.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: aos_logical_device_map author: Damien Garros (@dgarros) version_added: "2.3" short_description: Manage AOS Logical Device Map description: - Apstra AOS Logical Device Map module let you manage your Logical Device Map easily. You can create create and delete Logical Device Map by Name, ID or by using a JSON File. This module is idempotent and support the I(check) mode. It's using the AOS REST API. requirements: - "aos-pyez >= 0.6.0" options: session: description: - An existing AOS session as obtained by M(aos_login) module. required: true name: description: - Name of the Logical Device Map to manage. Only one of I(name), I(id) or I(content) can be set. id: description: - AOS Id of the Logical Device Map to manage (can't be used to create a new Logical Device Map), Only one of I(name), I(id) or I(content) can be set. content: description: - Datastructure of the Logical Device Map to manage. The data can be in YAML / JSON or directly a variable. It's the same datastructure that is returned on success in I(value). Only one of I(name), I(id) or I(content) can be set. 
state: description: - Indicate what is the expected state of the Logical Device Map (present or not). default: present choices: ['present', 'absent'] ''' EXAMPLES = ''' - name: "Create an Logical Device Map with one subnet" aos_logical_device_map: session: "{{ aos_session }}" name: "my-logical-device-map" state: present - name: "Create an Logical Device Map with multiple subnets" aos_logical_device_map: session: "{{ aos_session }}" name: "my-other-logical-device-map" state: present - name: "Check if an Logical Device Map exist with same subnets by ID" aos_logical_device_map: session: "{{ aos_session }}" name: "45ab26fc-c2ed-4307-b330-0870488fa13e" state: present - name: "Delete an Logical Device Map by name" aos_logical_device_map: session: "{{ aos_session }}" name: "my-logical-device-map" state: absent - name: "Delete an Logical Device Map by id" aos_logical_device_map: session: "{{ aos_session }}" id: "45ab26fc-c2ed-4307-b330-0870488fa13e" state: absent # Save an Logical Device Map to a file - name: "Access Logical Device Map 1/3" aos_logical_device_map: session: "{{ aos_session }}" name: "my-logical-device-map" state: present register: logical_device_map - name: "Save Logical Device Map into a file in JSON 2/3" copy: content: "{{ logical_device_map.value | to_nice_json }}" dest: logical_device_map_saved.json - name: "Save Logical Device Map into a file in YAML 3/3" copy: content: "{{ logical_device_map.value | to_nice_yaml }}" dest: logical_device_map_saved.yaml - name: "Load Logical Device Map from a JSON file" aos_logical_device_map: session: "{{ aos_session }}" content: "{{ lookup('file', 'resources/logical_device_map_saved.json') }}" state: present - name: "Load Logical Device Map from a YAML file" aos_logical_device_map: session: "{{ aos_session }}" content: "{{ lookup('file', 'resources/logical_device_map_saved.yaml') }}" state: present ''' RETURNS = ''' name: description: Name of the Logical Device Map returned: always type: str sample: Server-IpAddrs id: 
description: AOS unique ID assigned to the Logical Device Map returned: always type: str sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06 value: description: Value of the object as returned by the AOS Server returned: always type: dict sample: {'...'} ''' import json import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict ######################################################### # State Processing ######################################################### def logical_device_map_absent(module, aos, my_log_dev_map): margs = module.params # If the module do not exist, return directly if my_log_dev_map.exists is False: module.exit_json(changed=False, name=margs['name'], id='', value={}) # If not in check mode, delete Logical Device Map if not module.check_mode: try: # Need to wait for 1sec before a delete to workaround a current # limitation in AOS time.sleep(1) my_log_dev_map.delete() except: module.fail_json(msg="An error occurred, while trying to delete the Logical Device Map") module.exit_json( changed=True, name=my_log_dev_map.name, id=my_log_dev_map.id, value={} ) def logical_device_map_present(module, aos, my_log_dev_map): margs = module.params # if content is defined, create object from Content if margs['content'] is not None: if 'display_name' in module.params['content'].keys(): do_load_resource(module, aos.LogicalDeviceMaps, module.params['content']['display_name']) else: module.fail_json(msg="Unable to find display_name in 'content', Mandatory") # if my_log_dev_map doesn't exist already, create a new one if my_log_dev_map.exists is False and 'content' not in margs.keys(): module.fail_json(msg="'Content' is mandatory for module that don't exist currently") module.exit_json( changed=False, name=my_log_dev_map.name, id=my_log_dev_map.id, value=my_log_dev_map.value ) ######################################################### # Main 
Function ######################################################### def logical_device_map(module): margs = module.params try: aos = get_aos_session(module, margs['session']) except: module.fail_json(msg="Unable to login to the AOS server") item_name = False item_id = False if margs['content'] is not None: content = content_to_dict(module, margs['content'] ) if 'display_name' in content.keys(): item_name = content['display_name'] else: module.fail_json(msg="Unable to extract 'display_name' from 'content'") elif margs['name'] is not None: item_name = margs['name'] elif margs['id'] is not None: item_id = margs['id'] #---------------------------------------------------- # Find Object if available based on ID or Name #---------------------------------------------------- try: my_log_dev_map = find_collection_item(aos.LogicalDeviceMaps, item_name=item_name, item_id=item_id) except: module.fail_json(msg="Unable to find the Logical Device Map based on name or ID, something went wrong") #---------------------------------------------------- # Proceed based on State value #---------------------------------------------------- if margs['state'] == 'absent': logical_device_map_absent(module, aos, my_log_dev_map) elif margs['state'] == 'present': logical_device_map_present(module, aos, my_log_dev_map) def main(): module = AnsibleModule( argument_spec=dict( session=dict(required=True, type="dict"), name=dict(required=False ), id=dict(required=False ), content=dict(required=False, type="json"), state=dict( required=False, choices=['present', 'absent'], default="present") ), mutually_exclusive = [('name', 'id', 'content')], required_one_of=[('name', 'id', 'content')], supports_check_mode=True ) # Check if aos-pyez is present and match the minimum version check_aos_version(module, '0.6.0') logical_device_map(module) if __name__ == "__main__": main()
gpl-3.0
52ai/django-ccsds
tests/admin_checks/tests.py
5
23212
from __future__ import unicode_literals from django import forms from django.contrib import admin from django.contrib.contenttypes.admin import GenericStackedInline from django.core import checks from django.core.exceptions import ImproperlyConfigured from django.test import TestCase, ignore_warnings, override_settings from .models import Album, Book, City, Influence, Song, State, TwoAlbumFKAndAnE class SongForm(forms.ModelForm): pass class ValidFields(admin.ModelAdmin): form = SongForm fields = ['title'] class ValidFormFieldsets(admin.ModelAdmin): def get_form(self, request, obj=None, **kwargs): class ExtraFieldForm(SongForm): name = forms.CharField(max_length=50) return ExtraFieldForm fieldsets = ( (None, { 'fields': ('name',), }), ) class MyAdmin(admin.ModelAdmin): @classmethod def check(cls, model, **kwargs): return ['error!'] @override_settings( SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True) INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'admin_checks'] ) class SystemChecksTestCase(TestCase): @override_settings(DEBUG=True) def test_checks_are_performed(self): admin.site.register(Song, MyAdmin) try: errors = checks.run_checks() expected = ['error!'] self.assertEqual(errors, expected) finally: admin.site.unregister(Song) admin.sites.system_check_errors = [] @override_settings(DEBUG=True) def test_custom_adminsite(self): class CustomAdminSite(admin.AdminSite): pass custom_site = CustomAdminSite() custom_site.register(Song, MyAdmin) try: errors = checks.run_checks() expected = ['error!'] self.assertEqual(errors, expected) finally: custom_site.unregister(Song) admin.sites.system_check_errors = [] def test_field_name_not_in_list_display(self): class SongAdmin(admin.ModelAdmin): list_editable = ["original_release"] errors = SongAdmin.check(model=Song) expected = [ checks.Error( "The value of 'list_editable[0]' refers to 'original_release', " "which is not contained in 'list_display'.", hint=None, obj=SongAdmin, 
id='admin.E122', ) ] self.assertEqual(errors, expected) def test_readonly_and_editable(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ["original_release"] list_display = ["pk", "original_release"] list_editable = ["original_release"] fieldsets = [ (None, { "fields": ["title", "original_release"], }), ] errors = SongAdmin.check(model=Song) expected = [ checks.Error( ("The value of 'list_editable[0]' refers to 'original_release', " "which is not editable through the admin."), hint=None, obj=SongAdmin, id='admin.E125', ) ] self.assertEqual(errors, expected) def test_editable(self): class SongAdmin(admin.ModelAdmin): list_display = ["pk", "title"] list_editable = ["title"] fieldsets = [ (None, { "fields": ["title", "original_release"], }), ] errors = SongAdmin.check(model=Song) self.assertEqual(errors, []) def test_custom_modelforms_with_fields_fieldsets(self): """ # Regression test for #8027: custom ModelForms with fields/fieldsets """ errors = ValidFields.check(model=Song) self.assertEqual(errors, []) def test_custom_get_form_with_fieldsets(self): """ Ensure that the fieldsets checks are skipped when the ModelAdmin.get_form() method is overridden. Refs #19445. """ errors = ValidFormFieldsets.check(model=Song) self.assertEqual(errors, []) def test_fieldsets_fields_non_tuple(self): """ Tests for a tuple/list for the first fieldset's fields. """ class NotATupleAdmin(admin.ModelAdmin): list_display = ["pk", "title"] list_editable = ["title"] fieldsets = [ (None, { "fields": "title" # not a tuple }), ] errors = NotATupleAdmin.check(model=Song) expected = [ checks.Error( "The value of 'fieldsets[0][1]['fields']' must be a list or tuple.", hint=None, obj=NotATupleAdmin, id='admin.E008', ) ] self.assertEqual(errors, expected) def test_nonfirst_fieldset(self): """ Tests for a tuple/list for the second fieldset's fields. 
""" class NotATupleAdmin(admin.ModelAdmin): fieldsets = [ (None, { "fields": ("title",) }), ('foo', { "fields": "author" # not a tuple }), ] errors = NotATupleAdmin.check(model=Song) expected = [ checks.Error( "The value of 'fieldsets[1][1]['fields']' must be a list or tuple.", hint=None, obj=NotATupleAdmin, id='admin.E008', ) ] self.assertEqual(errors, expected) def test_exclude_values(self): """ Tests for basic system checks of 'exclude' option values (#12689) """ class ExcludedFields1(admin.ModelAdmin): exclude = 'foo' errors = ExcludedFields1.check(model=Book) expected = [ checks.Error( "The value of 'exclude' must be a list or tuple.", hint=None, obj=ExcludedFields1, id='admin.E014', ) ] self.assertEqual(errors, expected) def test_exclude_duplicate_values(self): class ExcludedFields2(admin.ModelAdmin): exclude = ('name', 'name') errors = ExcludedFields2.check(model=Book) expected = [ checks.Error( "The value of 'exclude' contains duplicate field(s).", hint=None, obj=ExcludedFields2, id='admin.E015', ) ] self.assertEqual(errors, expected) def test_exclude_in_inline(self): class ExcludedFieldsInline(admin.TabularInline): model = Song exclude = 'foo' class ExcludedFieldsAlbumAdmin(admin.ModelAdmin): model = Album inlines = [ExcludedFieldsInline] errors = ExcludedFieldsAlbumAdmin.check(model=Album) expected = [ checks.Error( "The value of 'exclude' must be a list or tuple.", hint=None, obj=ExcludedFieldsInline, id='admin.E014', ) ] self.assertEqual(errors, expected) def test_exclude_inline_model_admin(self): """ Regression test for #9932 - exclude in InlineModelAdmin should not contain the ForeignKey field used in ModelAdmin.model """ class SongInline(admin.StackedInline): model = Song exclude = ['album'] class AlbumAdmin(admin.ModelAdmin): model = Album inlines = [SongInline] errors = AlbumAdmin.check(model=Album) expected = [ checks.Error( ("Cannot exclude the field 'album', because it is the foreign key " "to the parent model 'admin_checks.Album'."), hint=None, 
obj=SongInline, id='admin.E201', ) ] self.assertEqual(errors, expected) def test_valid_generic_inline_model_admin(self): """ Regression test for #22034 - check that generic inlines don't look for normal ForeignKey relations. """ class InfluenceInline(GenericStackedInline): model = Influence class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin.check(model=Song) self.assertEqual(errors, []) def test_generic_inline_model_admin_non_generic_model(self): """ Ensure that a model without a GenericForeignKey raises problems if it's included in an GenericInlineModelAdmin definition. """ class BookInline(GenericStackedInline): model = Book class SongAdmin(admin.ModelAdmin): inlines = [BookInline] errors = SongAdmin.check(model=Song) expected = [ checks.Error( "'admin_checks.Book' has no GenericForeignKey.", hint=None, obj=BookInline, id='admin.E301', ) ] self.assertEqual(errors, expected) def test_generic_inline_model_admin_bad_ct_field(self): "A GenericInlineModelAdmin raises problems if the ct_field points to a non-existent field." class InfluenceInline(GenericStackedInline): model = Influence ct_field = 'nonexistent' class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin.check(model=Song) expected = [ checks.Error( "'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.", hint=None, obj=InfluenceInline, id='admin.E302', ) ] self.assertEqual(errors, expected) def test_generic_inline_model_admin_bad_fk_field(self): "A GenericInlineModelAdmin raises problems if the ct_fk_field points to a non-existent field." 
class InfluenceInline(GenericStackedInline): model = Influence ct_fk_field = 'nonexistent' class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin.check(model=Song) expected = [ checks.Error( "'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.", hint=None, obj=InfluenceInline, id='admin.E303', ) ] self.assertEqual(errors, expected) def test_generic_inline_model_admin_non_gfk_ct_field(self): "A GenericInlineModelAdmin raises problems if the ct_field points to a field that isn't part of a GenericForeignKey" class InfluenceInline(GenericStackedInline): model = Influence ct_field = 'name' class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin.check(model=Song) expected = [ checks.Error( "'admin_checks.Influence' has no GenericForeignKey using content type field 'name' and object ID field 'object_id'.", hint=None, obj=InfluenceInline, id='admin.E304', ) ] self.assertEqual(errors, expected) def test_generic_inline_model_admin_non_gfk_fk_field(self): "A GenericInlineModelAdmin raises problems if the ct_fk_field points to a field that isn't part of a GenericForeignKey" class InfluenceInline(GenericStackedInline): model = Influence ct_fk_field = 'name' class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin.check(model=Song) expected = [ checks.Error( "'admin_checks.Influence' has no GenericForeignKey using content type field 'content_type' and object ID field 'name'.", hint=None, obj=InfluenceInline, id='admin.E304', ) ] self.assertEqual(errors, expected) def test_app_label_in_admin_checks(self): """ Regression test for #15669 - Include app label in admin system check messages """ class RawIdNonexistingAdmin(admin.ModelAdmin): raw_id_fields = ('nonexisting',) errors = RawIdNonexistingAdmin.check(model=Album) expected = [ checks.Error( ("The value of 'raw_id_fields[0]' refers to 'nonexisting', which is " "not an attribute of 'admin_checks.Album'."), 
hint=None, obj=RawIdNonexistingAdmin, id='admin.E002', ) ] self.assertEqual(errors, expected) def test_fk_exclusion(self): """ Regression test for #11709 - when testing for fk excluding (when exclude is given) make sure fk_name is honored or things blow up when there is more than one fk to the parent model. """ class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE exclude = ("e",) fk_name = "album1" class MyAdmin(admin.ModelAdmin): inlines = [TwoAlbumFKAndAnEInline] errors = MyAdmin.check(model=Album) self.assertEqual(errors, []) def test_inline_self_check(self): class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE class MyAdmin(admin.ModelAdmin): inlines = [TwoAlbumFKAndAnEInline] errors = MyAdmin.check(model=Album) expected = [ checks.Error( "'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.", hint=None, obj=TwoAlbumFKAndAnEInline, id='admin.E202', ) ] self.assertEqual(errors, expected) def test_inline_with_specified(self): class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE fk_name = "album1" class MyAdmin(admin.ModelAdmin): inlines = [TwoAlbumFKAndAnEInline] errors = MyAdmin.check(model=Album) self.assertEqual(errors, []) def test_readonly(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("title",) errors = SongAdmin.check(model=Song) self.assertEqual(errors, []) def test_readonly_on_method(self): def my_function(obj): pass class SongAdmin(admin.ModelAdmin): readonly_fields = (my_function,) errors = SongAdmin.check(model=Song) self.assertEqual(errors, []) def test_readonly_on_modeladmin(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("readonly_method_on_modeladmin",) def readonly_method_on_modeladmin(self, obj): pass errors = SongAdmin.check(model=Song) self.assertEqual(errors, []) def test_readonly_method_on_model(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("readonly_method_on_model",) errors = 
SongAdmin.check(model=Song) self.assertEqual(errors, []) def test_nonexistent_field(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("title", "nonexistent") errors = SongAdmin.check(model=Song) expected = [ checks.Error( ("The value of 'readonly_fields[1]' is not a callable, an attribute " "of 'SongAdmin', or an attribute of 'admin_checks.Song'."), hint=None, obj=SongAdmin, id='admin.E035', ) ] self.assertEqual(errors, expected) def test_nonexistent_field_on_inline(self): class CityInline(admin.TabularInline): model = City readonly_fields = ['i_dont_exist'] # Missing attribute errors = CityInline.check(State) expected = [ checks.Error( ("The value of 'readonly_fields[0]' is not a callable, an attribute " "of 'CityInline', or an attribute of 'admin_checks.City'."), hint=None, obj=CityInline, id='admin.E035', ) ] self.assertEqual(errors, expected) def test_extra(self): class SongAdmin(admin.ModelAdmin): def awesome_song(self, instance): if instance.title == "Born to Run": return "Best Ever!" return "Status unknown." errors = SongAdmin.check(model=Song) self.assertEqual(errors, []) def test_readonly_lambda(self): class SongAdmin(admin.ModelAdmin): readonly_fields = (lambda obj: "test",) errors = SongAdmin.check(model=Song) self.assertEqual(errors, []) def test_graceful_m2m_fail(self): """ Regression test for #12203/#12237 - Fail more gracefully when a M2M field that specifies the 'through' option is included in the 'fields' or the 'fieldsets' ModelAdmin options. 
""" class BookAdmin(admin.ModelAdmin): fields = ['authors'] errors = BookAdmin.check(model=Book) expected = [ checks.Error( ("The value of 'fields' cannot include the ManyToManyField 'authors', " "because that field manually specifies a relationship model."), hint=None, obj=BookAdmin, id='admin.E013', ) ] self.assertEqual(errors, expected) def test_cannot_include_through(self): class FieldsetBookAdmin(admin.ModelAdmin): fieldsets = ( ('Header 1', {'fields': ('name',)}), ('Header 2', {'fields': ('authors',)}), ) errors = FieldsetBookAdmin.check(model=Book) expected = [ checks.Error( ("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField " "'authors', because that field manually specifies a relationship model."), hint=None, obj=FieldsetBookAdmin, id='admin.E013', ) ] self.assertEqual(errors, expected) def test_nested_fields(self): class NestedFieldsAdmin(admin.ModelAdmin): fields = ('price', ('name', 'subtitle')) errors = NestedFieldsAdmin.check(model=Book) self.assertEqual(errors, []) def test_nested_fieldsets(self): class NestedFieldsetAdmin(admin.ModelAdmin): fieldsets = ( ('Main', {'fields': ('price', ('name', 'subtitle'))}), ) errors = NestedFieldsetAdmin.check(model=Book) self.assertEqual(errors, []) def test_explicit_through_override(self): """ Regression test for #12209 -- If the explicitly provided through model is specified as a string, the admin should still be able use Model.m2m_field.through """ class AuthorsInline(admin.TabularInline): model = Book.authors.through class BookAdmin(admin.ModelAdmin): inlines = [AuthorsInline] errors = BookAdmin.check(model=Book) self.assertEqual(errors, []) def test_non_model_fields(self): """ Regression for ensuring ModelAdmin.fields can contain non-model fields that broke with r11737 """ class SongForm(forms.ModelForm): extra_data = forms.CharField() class FieldsOnFormOnlyAdmin(admin.ModelAdmin): form = SongForm fields = ['title', 'extra_data'] errors = FieldsOnFormOnlyAdmin.check(model=Song) 
self.assertEqual(errors, []) def test_non_model_first_field(self): """ Regression for ensuring ModelAdmin.field can handle first elem being a non-model field (test fix for UnboundLocalError introduced with r16225). """ class SongForm(forms.ModelForm): extra_data = forms.CharField() class Meta: model = Song fields = '__all__' class FieldsOnFormOnlyAdmin(admin.ModelAdmin): form = SongForm fields = ['extra_data', 'title'] errors = FieldsOnFormOnlyAdmin.check(model=Song) self.assertEqual(errors, []) @ignore_warnings(module='django.contrib.admin.options') def test_validator_compatibility(self): class MyValidator(object): def validate(self, cls, model): raise ImproperlyConfigured("error!") class MyModelAdmin(admin.ModelAdmin): validator_class = MyValidator errors = MyModelAdmin.check(model=Song) expected = [ checks.Error( 'error!', hint=None, obj=MyModelAdmin, ) ] self.assertEqual(errors, expected) def test_check_sublists_for_duplicates(self): class MyModelAdmin(admin.ModelAdmin): fields = ['state', ['state']] errors = MyModelAdmin.check(model=Song) expected = [ checks.Error( "The value of 'fields' contains duplicate field(s).", hint=None, obj=MyModelAdmin, id='admin.E006' ) ] self.assertEqual(errors, expected) def test_check_fieldset_sublists_for_duplicates(self): class MyModelAdmin(admin.ModelAdmin): fieldsets = [ (None, { 'fields': ['title', 'album', ('title', 'album')] }), ] errors = MyModelAdmin.check(model=Song) expected = [ checks.Error( "There are duplicate field(s) in 'fieldsets[0][1]'.", hint=None, obj=MyModelAdmin, id='admin.E012' ) ] self.assertEqual(errors, expected) def test_list_filter_works_on_through_field_even_when_apps_not_ready(self): """ Ensure list_filter can access reverse fields even when the app registry is not ready; refs #24146. """ class BookAdminWithListFilter(admin.ModelAdmin): list_filter = ['authorsbooks__featured'] # Temporarily pretending apps are not ready yet. 
This issue can happen # if the value of 'list_filter' refers to a 'through__field'. Book._meta.apps.ready = False try: errors = BookAdminWithListFilter.check(model=Book) self.assertEqual(errors, []) finally: Book._meta.apps.ready = True
bsd-3-clause
CliffYuan/zookeeper
src/contrib/huebrowser/zkui/src/zkui/settings.py
114
1103
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DJANGO_APPS = [ "zkui" ] NICE_NAME = "ZooKeeper Browser" REQUIRES_HADOOP = False CLUSTERS = [{ 'nice_name': 'Default', 'hostport': 'localhost:2181,localhost:2182,localhost:2183', 'rest_gateway': 'http://localhost:9998' } ] DEPENDER_PACKAGE_YMLS = [ "src/zkui/static/js/package.yml", ]
apache-2.0
airodactyl/qutebrowser
tests/end2end/features/test_private_bdd.py
5
1685
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2017-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. import json import pytest_bdd as bdd bdd.scenarios('private.feature') @bdd.then(bdd.parsers.parse('the cookie {name} should be set to {value}')) def check_cookie(quteproc, name, value): """Check if a given cookie is set correctly. This assumes we're on the server cookies page. """ content = quteproc.get_content() data = json.loads(content) print(data) assert data['cookies'][name] == value @bdd.then(bdd.parsers.parse('the cookie {name} should not be set')) def check_cookie_not_set(quteproc, name): """Check if a given cookie is not set.""" content = quteproc.get_content() data = json.loads(content) print(data) assert name not in data['cookies'] @bdd.then(bdd.parsers.parse('the file {name} should not contain "{text}"')) def check_not_contain(tmpdir, name, text): path = tmpdir / name assert text not in path.read()
gpl-3.0
bhargav2408/python-for-android
python3-alpha/python3-src/Lib/wsgiref/handlers.py
51
20582
"""Base classes for server/gateway implementations""" from .util import FileWrapper, guess_scheme, is_hop_by_hop from .headers import Headers import sys, os, time __all__ = [ 'BaseHandler', 'SimpleHandler', 'BaseCGIHandler', 'CGIHandler', 'IISCGIHandler', 'read_environ' ] # Weekday and month names for HTTP date/time formatting; always English! _weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] _monthname = [None, # Dummy so we can use 1-based month numbers "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] def format_date_time(timestamp): year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( _weekdayname[wd], day, _monthname[month], year, hh, mm, ss ) _is_request = { 'SCRIPT_NAME', 'PATH_INFO', 'QUERY_STRING', 'REQUEST_METHOD', 'AUTH_TYPE', 'CONTENT_TYPE', 'CONTENT_LENGTH', 'HTTPS', 'REMOTE_USER', 'REMOTE_IDENT', }.__contains__ def _needs_transcode(k): return _is_request(k) or k.startswith('HTTP_') or k.startswith('SSL_') \ or (k.startswith('REDIRECT_') and _needs_transcode(k[9:])) def read_environ(): """Read environment, fixing HTTP variables""" enc = sys.getfilesystemencoding() esc = 'surrogateescape' try: ''.encode('utf-8', esc) except LookupError: esc = 'replace' environ = {} # Take the basic environment from native-unicode os.environ. Attempt to # fix up the variables that come from the HTTP request to compensate for # the bytes->unicode decoding step that will already have taken place. for k, v in os.environ.items(): if _needs_transcode(k): # On win32, the os.environ is natively Unicode. Different servers # decode the request bytes using different encodings. if sys.platform == 'win32': software = os.environ.get('SERVER_SOFTWARE', '').lower() # On IIS, the HTTP request will be decoded as UTF-8 as long # as the input is a valid UTF-8 sequence. Otherwise it is # decoded using the system code page (mbcs), with no way to # detect this has happened. 
Because UTF-8 is the more likely # encoding, and mbcs is inherently unreliable (an mbcs string # that happens to be valid UTF-8 will not be decoded as mbcs) # always recreate the original bytes as UTF-8. if software.startswith('microsoft-iis/'): v = v.encode('utf-8').decode('iso-8859-1') # Apache mod_cgi writes bytes-as-unicode (as if ISO-8859-1) direct # to the Unicode environ. No modification needed. elif software.startswith('apache/'): pass # Python 3's http.server.CGIHTTPRequestHandler decodes # using the urllib.unquote default of UTF-8, amongst other # issues. elif ( software.startswith('simplehttp/') and 'python/3' in software ): v = v.encode('utf-8').decode('iso-8859-1') # For other servers, guess that they have written bytes to # the environ using stdio byte-oriented interfaces, ending up # with the system code page. else: v = v.encode(enc, 'replace').decode('iso-8859-1') # Recover bytes from unicode environ, using surrogate escapes # where available (Python 3.1+). else: v = v.encode(enc, esc).decode('iso-8859-1') environ[k] = v return environ class BaseHandler: """Manage the invocation of a WSGI application""" # Configuration parameters; can override per-subclass or per-instance wsgi_version = (1,0) wsgi_multithread = True wsgi_multiprocess = True wsgi_run_once = False origin_server = True # We are transmitting direct to client http_version = "1.0" # Version that should be used for response server_software = None # String name of server software, if any # os_environ is used to supply configuration from the OS environment: # by default it's a copy of 'os.environ' as of import time, but you can # override this in e.g. your __init__ method. 
os_environ= read_environ() # Collaborator classes wsgi_file_wrapper = FileWrapper # set to None to disable headers_class = Headers # must be a Headers-like class # Error handling (also per-subclass or per-instance) traceback_limit = None # Print entire traceback to self.get_stderr() error_status = "500 Internal Server Error" error_headers = [('Content-Type','text/plain')] error_body = b"A server error occurred. Please contact the administrator." # State variables (don't mess with these) status = result = None headers_sent = False headers = None bytes_sent = 0 def run(self, application): """Invoke the application""" # Note to self: don't move the close()! Asynchronous servers shouldn't # call close() from finish_response(), so if you close() anywhere but # the double-error branch here, you'll break asynchronous servers by # prematurely closing. Async servers must return from 'run()' without # closing if there might still be output to iterate over. try: self.setup_environ() self.result = application(self.environ, self.start_response) self.finish_response() except: try: self.handle_error() except: # If we get an error handling an error, just give up already! self.close() raise # ...and let the actual server figure it out. 
def setup_environ(self): """Set up the environment for one request""" env = self.environ = self.os_environ.copy() self.add_cgi_vars() env['wsgi.input'] = self.get_stdin() env['wsgi.errors'] = self.get_stderr() env['wsgi.version'] = self.wsgi_version env['wsgi.run_once'] = self.wsgi_run_once env['wsgi.url_scheme'] = self.get_scheme() env['wsgi.multithread'] = self.wsgi_multithread env['wsgi.multiprocess'] = self.wsgi_multiprocess if self.wsgi_file_wrapper is not None: env['wsgi.file_wrapper'] = self.wsgi_file_wrapper if self.origin_server and self.server_software: env.setdefault('SERVER_SOFTWARE',self.server_software) def finish_response(self): """Send any iterable data, then close self and the iterable Subclasses intended for use in asynchronous servers will want to redefine this method, such that it sets up callbacks in the event loop to iterate over the data, and to call 'self.close()' once the response is finished. """ if not self.result_is_file() or not self.sendfile(): for data in self.result: self.write(data) self.finish_content() self.close() def get_scheme(self): """Return the URL scheme being used""" return guess_scheme(self.environ) def set_content_length(self): """Compute Content-Length or switch to chunked encoding if possible""" try: blocks = len(self.result) except (TypeError,AttributeError,NotImplementedError): pass else: if blocks==1: self.headers['Content-Length'] = str(self.bytes_sent) return # XXX Try for chunked encoding if origin server and client is 1.1 def cleanup_headers(self): """Make any necessary header changes or defaults Subclasses can extend this to add other defaults. 
""" if 'Content-Length' not in self.headers: self.set_content_length() def start_response(self, status, headers,exc_info=None): """'start_response()' callable as specified by PEP 3333""" if exc_info: try: if self.headers_sent: # Re-raise original exception if headers sent raise exc_info[0](exc_info[1]).with_traceback(exc_info[2]) finally: exc_info = None # avoid dangling circular ref elif self.headers is not None: raise AssertionError("Headers already set!") self.status = status self.headers = self.headers_class(headers) status = self._convert_string_type(status, "Status") assert len(status)>=4,"Status must be at least 4 characters" assert int(status[:3]),"Status message must begin w/3-digit code" assert status[3]==" ", "Status message must have a space after code" if __debug__: for name, val in headers: name = self._convert_string_type(name, "Header name") val = self._convert_string_type(val, "Header value") assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed" return self.write def _convert_string_type(self, value, title): """Convert/check value type.""" if type(value) is str: return value raise AssertionError( "{0} must be of type str (got {1})".format(title, repr(value)) ) def send_preamble(self): """Transmit version/status/date/server, via self._write()""" if self.origin_server: if self.client_is_modern(): self._write(('HTTP/%s %s\r\n' % (self.http_version,self.status)).encode('iso-8859-1')) if 'Date' not in self.headers: self._write( ('Date: %s\r\n' % format_date_time(time.time())).encode('iso-8859-1') ) if self.server_software and 'Server' not in self.headers: self._write(('Server: %s\r\n' % self.server_software).encode('iso-8859-1')) else: self._write(('Status: %s\r\n' % self.status).encode('iso-8859-1')) def write(self, data): """'write()' callable as specified by PEP 3333""" assert type(data) is bytes, \ "write() argument must be a bytes instance" if not self.status: raise AssertionError("write() before start_response()") elif not 
self.headers_sent: # Before the first output, send the stored headers self.bytes_sent = len(data) # make sure we know content-length self.send_headers() else: self.bytes_sent += len(data) # XXX check Content-Length and truncate if too many bytes written? self._write(data) self._flush() def sendfile(self): """Platform-specific file transmission Override this method in subclasses to support platform-specific file transmission. It is only called if the application's return iterable ('self.result') is an instance of 'self.wsgi_file_wrapper'. This method should return a true value if it was able to actually transmit the wrapped file-like object using a platform-specific approach. It should return a false value if normal iteration should be used instead. An exception can be raised to indicate that transmission was attempted, but failed. NOTE: this method should call 'self.send_headers()' if 'self.headers_sent' is false and it is going to attempt direct transmission of the file. """ return False # No platform-specific transmission by default def finish_content(self): """Ensure headers and content have both been sent""" if not self.headers_sent: # Only zero Content-Length if not set by the application (so # that HEAD requests can be satisfied properly, see #3839) self.headers.setdefault('Content-Length', "0") self.send_headers() else: pass # XXX check if content-length was too short? def close(self): """Close the iterable (if needed) and reset all instance vars Subclasses may want to also drop the client connection. 
""" try: if hasattr(self.result,'close'): self.result.close() finally: self.result = self.headers = self.status = self.environ = None self.bytes_sent = 0; self.headers_sent = False def send_headers(self): """Transmit headers to the client, via self._write()""" self.cleanup_headers() self.headers_sent = True if not self.origin_server or self.client_is_modern(): self.send_preamble() self._write(bytes(self.headers)) def result_is_file(self): """True if 'self.result' is an instance of 'self.wsgi_file_wrapper'""" wrapper = self.wsgi_file_wrapper return wrapper is not None and isinstance(self.result,wrapper) def client_is_modern(self): """True if client can accept status and headers""" return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9' def log_exception(self,exc_info): """Log the 'exc_info' tuple in the server log Subclasses may override to retarget the output or change its format. """ try: from traceback import print_exception stderr = self.get_stderr() print_exception( exc_info[0], exc_info[1], exc_info[2], self.traceback_limit, stderr ) stderr.flush() finally: exc_info = None def handle_error(self): """Log current error, and send error output to client if possible""" self.log_exception(sys.exc_info()) if not self.headers_sent: self.result = self.error_output(self.environ, self.start_response) self.finish_response() # XXX else: attempt advanced recovery techniques for HTML or text? def error_output(self, environ, start_response): """WSGI mini-app to create error output By default, this just uses the 'error_status', 'error_headers', and 'error_body' attributes to generate an output page. It can be overridden in a subclass to dynamically generate diagnostics, choose an appropriate message for the user's preferred language, etc. Note, however, that it's not recommended from a security perspective to spit out diagnostics to any old user; ideally, you should have to do something special to enable diagnostic output, which is why we don't include any here! 
""" start_response(self.error_status,self.error_headers[:],sys.exc_info()) return [self.error_body] # Pure abstract methods; *must* be overridden in subclasses def _write(self,data): """Override in subclass to buffer data for send to client It's okay if this method actually transmits the data; BaseHandler just separates write and flush operations for greater efficiency when the underlying system actually has such a distinction. """ raise NotImplementedError def _flush(self): """Override in subclass to force sending of recent '_write()' calls It's okay if this method is a no-op (i.e., if '_write()' actually sends the data. """ raise NotImplementedError def get_stdin(self): """Override in subclass to return suitable 'wsgi.input'""" raise NotImplementedError def get_stderr(self): """Override in subclass to return suitable 'wsgi.errors'""" raise NotImplementedError def add_cgi_vars(self): """Override in subclass to insert CGI variables in 'self.environ'""" raise NotImplementedError class SimpleHandler(BaseHandler): """Handler that's just initialized with streams, environment, etc. This handler subclass is intended for synchronous HTTP/1.0 origin servers, and handles sending the entire response output, given the correct inputs. 
Usage:: handler = SimpleHandler( inp,out,err,env, multithread=False, multiprocess=True ) handler.run(app)""" def __init__(self,stdin,stdout,stderr,environ, multithread=True, multiprocess=False ): self.stdin = stdin self.stdout = stdout self.stderr = stderr self.base_env = environ self.wsgi_multithread = multithread self.wsgi_multiprocess = multiprocess def get_stdin(self): return self.stdin def get_stderr(self): return self.stderr def add_cgi_vars(self): self.environ.update(self.base_env) def _write(self,data): self.stdout.write(data) def _flush(self): self.stdout.flush() self._flush = self.stdout.flush class BaseCGIHandler(SimpleHandler): """CGI-like systems using input/output/error streams and environ mapping Usage:: handler = BaseCGIHandler(inp,out,err,env) handler.run(app) This handler class is useful for gateway protocols like ReadyExec and FastCGI, that have usable input/output/error streams and an environment mapping. It's also the base class for CGIHandler, which just uses sys.stdin, os.environ, and so on. The constructor also takes keyword arguments 'multithread' and 'multiprocess' (defaulting to 'True' and 'False' respectively) to control the configuration sent to the application. It sets 'origin_server' to False (to enable CGI-like output), and assumes that 'wsgi.run_once' is False. """ origin_server = False class CGIHandler(BaseCGIHandler): """CGI-based invocation via sys.stdin/stdout/stderr and os.environ Usage:: CGIHandler().run(app) The difference between this class and BaseCGIHandler is that it always uses 'wsgi.run_once' of 'True', 'wsgi.multithread' of 'False', and 'wsgi.multiprocess' of 'True'. It does not take any initialization parameters, but always uses 'sys.stdin', 'os.environ', and friends. If you need to override any of these parameters, use BaseCGIHandler instead. """ wsgi_run_once = True # Do not allow os.environ to leak between requests in Google App Engine # and other multi-run CGI use cases. This is not easily testable. 
# See http://bugs.python.org/issue7250 os_environ = {} def __init__(self): BaseCGIHandler.__init__( self, sys.stdin.buffer, sys.stdout.buffer, sys.stderr, read_environ(), multithread=False, multiprocess=True ) class IISCGIHandler(BaseCGIHandler): """CGI-based invocation with workaround for IIS path bug This handler should be used in preference to CGIHandler when deploying on Microsoft IIS without having set the config allowPathInfo option (IIS>=7) or metabase allowPathInfoForScriptMappings (IIS<7). """ wsgi_run_once = True os_environ = {} # By default, IIS gives a PATH_INFO that duplicates the SCRIPT_NAME at # the front, causing problems for WSGI applications that wish to implement # routing. This handler strips any such duplicated path. # IIS can be configured to pass the correct PATH_INFO, but this causes # another bug where PATH_TRANSLATED is wrong. Luckily this variable is # rarely used and is not guaranteed by WSGI. On IIS<7, though, the # setting can only be made on a vhost level, affecting all other script # mappings, many of which break when exposed to the PATH_TRANSLATED bug. # For this reason IIS<7 is almost never deployed with the fix. (Even IIS7 # rarely uses it because there is still no UI for it.) # There is no way for CGI code to tell whether the option was set, so a # separate handler class is provided. def __init__(self): environ= read_environ() path = environ.get('PATH_INFO', '') script = environ.get('SCRIPT_NAME', '') if (path+'/').startswith(script+'/'): environ['PATH_INFO'] = path[len(script):] BaseCGIHandler.__init__( self, sys.stdin.buffer, sys.stdout.buffer, sys.stderr, environ, multithread=False, multiprocess=True )
apache-2.0
chemistryTools/ChemToolsWebService
chemistry/calcore/matrix/koc.py
2
42477
#coding:utf-8 from numpy import matrix #CAS No. nN ATSC8v SpMaxA_G/D Mor16u nROH O-058 P-117 MLOGP2 Molecular Polarizability kocX = matrix([ [0.000 ,2.487 ,1.032 ,0.672 ,0.000 ,0.000 ,0.000 ,38.020 ,309.680 ], [0.000 ,3.696 ,1.064 ,0.276 ,0.000 ,0.000 ,0.000 ,30.809 ,404.140 ], [2.000 ,3.601 ,1.027 ,0.414 ,0.000 ,1.000 ,0.000 ,12.771 ,310.980 ], [0.000 ,0.000 ,1.042 ,0.041 ,1.000 ,1.000 ,0.000 ,0.239 ,157.770 ], [0.000 ,6.106 ,1.053 ,0.502 ,0.000 ,0.000 ,0.000 ,35.045 ,433.920 ], [1.000 ,3.349 ,1.046 ,0.883 ,0.000 ,1.000 ,0.000 ,11.316 ,275.370 ], [1.000 ,0.000 ,1.000 ,0.252 ,0.000 ,1.000 ,0.000 ,1.674 ,123.040 ], [0.000 ,4.547 ,1.001 ,-0.098 ,0.000 ,0.000 ,0.000 ,6.670 ,257.100 ], [2.000 ,0.000 ,1.016 ,-0.036 ,0.000 ,1.000 ,0.000 ,0.010 ,140.890 ], [1.000 ,7.227 ,0.986 ,0.226 ,0.000 ,0.000 ,0.000 ,5.351 ,257.540 ], [0.000 ,6.886 ,1.054 ,0.410 ,0.000 ,0.000 ,0.000 ,34.194 ,398.490 ], [0.000 ,7.412 ,0.937 ,0.617 ,0.000 ,0.000 ,0.000 ,15.651 ,307.030 ], [2.000 ,0.000 ,0.892 ,0.178 ,0.000 ,1.000 ,0.000 ,3.473 ,40.060 ], [0.000 ,0.000 ,0.926 ,-0.106 ,2.000 ,0.000 ,0.000 ,0.285 ,57.720 ], [0.000 ,7.035 ,0.998 ,0.651 ,1.000 ,0.000 ,0.000 ,16.033 ,312.180 ], [0.000 ,5.788 ,1.025 ,0.505 ,0.000 ,0.000 ,0.000 ,31.636 ,387.290 ], [0.000 ,0.000 ,1.099 ,-0.703 ,0.000 ,0.000 ,0.000 ,16.744 ,190.820 ], [0.000 ,0.000 ,1.120 ,0.003 ,0.000 ,0.000 ,0.000 ,13.085 ,168.200 ], [3.000 ,4.170 ,0.913 ,0.107 ,0.000 ,0.000 ,0.000 ,10.330 ,308.410 ], [0.000 ,0.111 ,0.989 ,0.450 ,1.000 ,0.000 ,0.000 ,3.704 ,128.570 ], [1.000 ,0.000 ,0.913 ,0.118 ,0.000 ,1.000 ,0.000 ,0.628 ,44.700 ], [0.000 ,0.000 ,1.082 ,-0.116 ,0.000 ,0.000 ,0.000 ,19.817 ,277.270 ], [4.000 ,0.000 ,0.975 ,0.536 ,0.000 ,0.000 ,0.000 ,0.054 ,66.130 ], [1.000 ,0.000 ,0.997 ,0.342 ,0.000 ,0.000 ,0.000 ,2.268 ,107.220 ], [2.000 ,0.000 ,0.935 ,0.178 ,0.000 ,0.000 ,0.000 ,2.056 ,67.790 ], [0.000 ,0.000 ,1.020 ,0.127 ,0.000 ,1.000 ,1.000 ,1.553 ,136.500 ], [1.000 ,3.330 ,0.995 ,0.171 ,0.000 ,1.000 ,0.000 ,4.939 ,216.920 ], [2.000 
,0.269 ,0.995 ,0.435 ,0.000 ,1.000 ,0.000 ,0.747 ,138.480 ], [0.000 ,0.000 ,0.904 ,0.062 ,1.000 ,0.000 ,0.000 ,0.030 ,36.570 ], [0.000 ,0.000 ,0.915 ,0.126 ,1.000 ,1.000 ,0.000 ,0.149 ,38.550 ], [3.000 ,0.573 ,0.964 ,-0.300 ,1.000 ,1.000 ,0.000 ,0.082 ,215.090 ], [0.000 ,0.000 ,0.838 ,-0.039 ,1.000 ,0.000 ,0.000 ,0.663 ,21.360 ], [0.000 ,0.000 ,1.165 ,0.036 ,0.000 ,0.000 ,0.000 ,3.301 ,61.020 ], [0.000 ,0.000 ,0.929 ,0.147 ,1.000 ,0.000 ,0.000 ,0.120 ,51.640 ], [0.000 ,0.000 ,0.948 ,0.263 ,1.000 ,0.000 ,0.000 ,0.640 ,66.970 ], [0.000 ,0.000 ,0.962 ,0.383 ,1.000 ,0.000 ,0.000 ,1.463 ,82.400 ], [0.000 ,0.000 ,1.086 ,0.048 ,0.000 ,0.000 ,0.000 ,4.957 ,79.900 ], [0.000 ,0.000 ,1.082 ,-0.111 ,0.000 ,0.000 ,0.000 ,19.817 ,277.270 ], [0.000 ,5.254 ,0.983 ,-0.038 ,0.000 ,0.000 ,0.000 ,18.218 ,323.690 ], [0.000 ,2.483 ,1.029 ,0.668 ,0.000 ,0.000 ,0.000 ,34.268 ,302.150 ], [0.000 ,0.000 ,0.953 ,-0.021 ,0.000 ,0.000 ,0.000 ,1.243 ,38.300 ], [0.000 ,0.000 ,1.046 ,-0.037 ,0.000 ,0.000 ,0.000 ,1.860 ,43.420 ], [0.000 ,0.000 ,1.252 ,-0.168 ,0.000 ,0.000 ,0.000 ,5.850 ,94.410 ], [0.000 ,0.000 ,1.195 ,-0.001 ,0.000 ,0.000 ,0.000 ,4.106 ,71.930 ], [0.000 ,0.000 ,1.021 ,0.050 ,0.000 ,0.000 ,0.000 ,3.301 ,61.900 ], [0.000 ,0.000 ,1.028 ,-0.109 ,1.000 ,0.000 ,0.000 ,1.109 ,82.780 ], [0.000 ,0.000 ,1.028 ,-0.058 ,1.000 ,1.000 ,0.000 ,0.991 ,87.640 ], [0.000 ,0.000 ,1.116 ,0.043 ,1.000 ,1.000 ,0.000 ,0.991 ,87.590 ], [0.000 ,0.000 ,1.132 ,-0.634 ,0.000 ,0.000 ,0.000 ,26.252 ,269.840 ], [0.000 ,11.925 ,1.073 ,0.723 ,0.000 ,1.000 ,1.000 ,11.035 ,327.550 ], [0.000 ,0.000 ,0.973 ,0.258 ,0.000 ,1.000 ,0.000 ,3.789 ,146.680 ], [0.000 ,0.000 ,1.084 ,0.002 ,0.000 ,0.000 ,0.000 ,4.957 ,77.490 ], [0.000 ,0.000 ,1.125 ,0.069 ,0.000 ,0.000 ,0.000 ,4.332 ,76.850 ], [1.000 ,0.000 ,0.941 ,0.051 ,0.000 ,1.000 ,0.000 ,0.145 ,62.030 ], [0.000 ,5.192 ,1.007 ,-0.014 ,0.000 ,0.000 ,0.000 ,27.612 ,373.270 ], [0.000 ,3.587 ,0.972 ,0.511 ,0.000 ,0.000 ,0.000 ,10.928 ,248.130 ], [0.000 ,8.719 ,0.971 ,0.539 
,1.000 ,2.000 ,0.000 ,10.264 ,326.280 ], [1.000 ,0.000 ,1.154 ,-0.028 ,0.000 ,0.000 ,0.000 ,18.405 ,217.780 ], [0.000 ,0.000 ,1.055 ,0.387 ,0.000 ,0.000 ,0.000 ,15.670 ,196.350 ], [0.000 ,11.707 ,0.999 ,0.103 ,0.000 ,1.000 ,0.000 ,3.747 ,400.460 ], [0.000 ,4.343 ,0.975 ,0.951 ,0.000 ,2.000 ,0.000 ,6.641 ,204.320 ], [0.000 ,10.655 ,0.955 ,1.617 ,0.000 ,2.000 ,0.000 ,13.090 ,267.390 ], [0.000 ,5.119 ,0.930 ,0.351 ,0.000 ,3.000 ,0.000 ,5.209 ,241.470 ], [0.000 ,11.435 ,0.939 ,1.448 ,0.000 ,2.000 ,0.000 ,20.781 ,327.630 ], [0.000 ,1.002 ,1.045 ,0.478 ,0.000 ,0.000 ,0.000 ,18.762 ,245.370 ], [1.000 ,0.837 ,1.044 ,0.315 ,0.000 ,0.000 ,0.000 ,9.309 ,236.170 ], [0.000 ,0.266 ,1.056 ,0.255 ,1.000 ,1.000 ,0.000 ,12.121 ,182.800 ], [0.000 ,0.000 ,1.060 ,0.269 ,0.000 ,2.000 ,0.000 ,4.804 ,134.240 ], [0.000 ,9.566 ,0.966 ,0.578 ,0.000 ,2.000 ,0.000 ,16.739 ,312.120 ], [2.000 ,2.529 ,0.989 ,0.772 ,0.000 ,1.000 ,0.000 ,10.385 ,220.570 ], [3.000 ,3.224 ,1.027 ,-0.200 ,0.000 ,1.000 ,0.000 ,0.978 ,305.010 ], [0.000 ,0.520 ,1.058 ,0.781 ,0.000 ,0.000 ,0.000 ,17.204 ,213.540 ], [1.000 ,1.253 ,0.986 ,0.509 ,0.000 ,1.000 ,0.000 ,5.684 ,213.840 ], [0.000 ,0.665 ,1.002 ,0.356 ,1.000 ,1.000 ,0.000 ,7.785 ,206.520 ], [0.000 ,0.000 ,1.105 ,0.075 ,0.000 ,0.000 ,0.000 ,16.507 ,140.890 ], [0.000 ,0.000 ,1.149 ,-0.010 ,0.000 ,0.000 ,0.000 ,15.278 ,187.210 ], [0.000 ,0.000 ,1.090 ,0.119 ,0.000 ,0.000 ,0.000 ,10.982 ,149.520 ], [1.000 ,0.000 ,1.020 ,0.444 ,0.000 ,0.000 ,0.000 ,6.065 ,146.580 ], [0.000 ,0.000 ,1.008 ,0.362 ,2.000 ,2.000 ,0.000 ,1.915 ,140.660 ], [0.000 ,0.000 ,0.978 ,0.046 ,0.000 ,0.000 ,0.000 ,1.553 ,119.740 ], [0.000 ,0.000 ,1.023 ,0.414 ,0.000 ,0.000 ,0.000 ,13.524 ,183.830 ], [2.000 ,4.861 ,0.965 ,-0.327 ,0.000 ,1.000 ,0.000 ,11.688 ,356.220 ], [0.000 ,2.396 ,0.986 ,0.522 ,1.000 ,0.000 ,0.000 ,9.802 ,207.310 ], [0.000 ,0.000 ,0.969 ,-0.158 ,0.000 ,0.000 ,0.000 ,2.496 ,136.980 ], [0.000 ,0.000 ,1.036 ,0.368 ,0.000 ,0.000 ,0.000 ,11.465 ,166.120 ], [1.000 ,0.000 ,1.034 ,0.225 
,0.000 ,0.000 ,0.000 ,4.267 ,157.310 ], [0.000 ,0.447 ,1.031 ,0.208 ,0.000 ,0.000 ,0.000 ,13.524 ,185.240 ], [3.000 ,9.268 ,0.934 ,0.551 ,0.000 ,0.000 ,0.000 ,5.994 ,284.450 ], [2.000 ,2.721 ,1.036 ,-0.124 ,0.000 ,0.000 ,0.000 ,12.856 ,274.710 ], [0.000 ,3.776 ,1.063 ,0.129 ,0.000 ,0.000 ,0.000 ,26.674 ,374.120 ], [2.000 ,1.715 ,1.048 ,0.203 ,0.000 ,0.000 ,0.000 ,3.604 ,246.320 ], [2.000 ,2.487 ,1.015 ,-0.016 ,0.000 ,0.000 ,0.000 ,6.321 ,238.790 ], [0.000 ,2.220 ,1.016 ,0.742 ,0.000 ,1.000 ,0.000 ,12.835 ,250.010 ], [0.000 ,0.428 ,1.001 ,0.300 ,0.000 ,1.000 ,0.000 ,4.135 ,132.480 ], [0.000 ,1.437 ,0.970 ,0.171 ,1.000 ,1.000 ,0.000 ,5.654 ,186.570 ], [1.000 ,0.444 ,0.943 ,-0.427 ,0.000 ,1.000 ,0.000 ,3.123 ,156.820 ], [0.000 ,0.577 ,1.025 ,-0.159 ,1.000 ,1.000 ,0.000 ,8.490 ,189.810 ], [0.000 ,1.571 ,0.992 ,0.425 ,0.000 ,1.000 ,0.000 ,5.526 ,148.020 ], [0.000 ,3.938 ,1.014 ,0.543 ,0.000 ,1.000 ,0.000 ,12.378 ,216.450 ], [0.000 ,0.882 ,1.028 ,0.311 ,0.000 ,0.000 ,0.000 ,4.512 ,162.850 ], [0.000 ,0.980 ,0.998 ,-0.063 ,1.000 ,1.000 ,0.000 ,4.353 ,169.780 ], [0.000 ,0.577 ,1.023 ,0.036 ,1.000 ,1.000 ,0.000 ,5.543 ,171.290 ], [0.000 ,3.462 ,0.990 ,0.146 ,1.000 ,1.000 ,0.000 ,7.069 ,201.490 ], [3.000 ,0.000 ,1.036 ,0.610 ,0.000 ,0.000 ,0.000 ,1.113 ,121.520 ], [0.000 ,0.000 ,1.076 ,0.409 ,0.000 ,0.000 ,0.000 ,8.130 ,154.180 ], [0.000 ,0.000 ,1.071 ,0.124 ,0.000 ,0.000 ,0.000 ,12.094 ,123.050 ], [1.000 ,0.000 ,0.986 ,0.249 ,0.000 ,0.000 ,0.000 ,3.456 ,123.430 ], [0.000 ,0.000 ,1.029 ,0.201 ,0.000 ,0.000 ,0.000 ,4.525 ,112.930 ], [0.000 ,0.000 ,1.062 ,0.119 ,0.000 ,0.000 ,0.000 ,7.446 ,130.290 ], [0.000 ,0.000 ,0.989 ,0.349 ,0.000 ,0.000 ,0.000 ,12.690 ,157.830 ], [0.000 ,0.000 ,1.137 ,-0.095 ,0.000 ,0.000 ,0.000 ,21.476 ,160.770 ], [0.000 ,0.000 ,1.066 ,-0.111 ,0.000 ,0.000 ,0.000 ,8.746 ,120.490 ], [0.000 ,0.000 ,1.043 ,-0.198 ,0.000 ,0.000 ,0.000 ,6.781 ,91.240 ], [1.000 ,0.000 ,1.017 ,0.358 ,0.000 ,0.000 ,0.000 ,6.870 ,124.050 ], [0.000 ,0.000 ,0.999 ,0.336 ,0.000 
,1.000 ,0.000 ,4.420 ,128.770 ], [1.000 ,0.000 ,1.013 ,0.271 ,0.000 ,0.000 ,0.000 ,3.562 ,117.700 ], [2.000 ,0.000 ,1.008 ,0.204 ,0.000 ,0.000 ,0.000 ,1.816 ,137.700 ], [2.000 ,0.000 ,1.027 ,0.033 ,1.000 ,1.000 ,0.000 ,2.963 ,168.170 ], [1.000 ,0.000 ,1.068 ,0.038 ,0.000 ,0.000 ,0.000 ,9.673 ,156.540 ], [1.000 ,1.474 ,0.957 ,0.400 ,0.000 ,1.000 ,0.000 ,5.223 ,175.570 ], [0.000 ,0.166 ,1.009 ,0.237 ,1.000 ,1.000 ,0.000 ,1.282 ,127.010 ], [2.000 ,0.000 ,1.007 ,0.166 ,0.000 ,0.000 ,0.000 ,1.816 ,155.980 ], [1.000 ,0.000 ,1.010 ,0.191 ,0.000 ,0.000 ,0.000 ,1.816 ,132.330 ], [0.000 ,0.000 ,1.003 ,0.446 ,0.000 ,0.000 ,0.000 ,8.130 ,133.390 ], [0.000 ,0.000 ,1.000 ,0.134 ,1.000 ,0.000 ,0.000 ,2.532 ,113.450 ], [2.000 ,0.000 ,1.023 ,0.135 ,0.000 ,0.000 ,0.000 ,0.020 ,105.270 ], [0.000 ,0.000 ,0.979 ,0.088 ,0.000 ,0.000 ,0.000 ,3.456 ,113.500 ], [4.000 ,1.020 ,1.044 ,0.795 ,0.000 ,0.000 ,0.000 ,14.873 ,248.990 ], [1.000 ,4.716 ,0.953 ,0.275 ,0.000 ,1.000 ,0.000 ,5.654 ,196.930 ], [2.000 ,2.434 ,0.978 ,0.154 ,0.000 ,1.000 ,0.000 ,2.295 ,171.480 ], [0.000 ,2.915 ,1.002 ,0.249 ,0.000 ,0.000 ,0.000 ,16.545 ,220.570 ], [2.000 ,7.165 ,0.953 ,0.107 ,0.000 ,0.000 ,0.000 ,14.427 ,311.130 ], [0.000 ,2.491 ,0.987 ,0.649 ,0.000 ,0.000 ,0.000 ,11.526 ,193.230 ], [0.000 ,2.827 ,0.922 ,0.380 ,0.000 ,1.000 ,0.000 ,5.694 ,160.880 ], [1.000 ,2.856 ,0.955 ,0.575 ,0.000 ,1.000 ,0.000 ,2.295 ,162.890 ], [2.000 ,0.939 ,1.033 ,0.025 ,0.000 ,0.000 ,0.000 ,5.751 ,234.630 ], [0.000 ,2.197 ,0.953 ,0.186 ,0.000 ,1.000 ,0.000 ,5.694 ,162.020 ], [0.000 ,0.299 ,0.988 ,0.555 ,0.000 ,0.000 ,0.000 ,10.620 ,139.960 ], [0.000 ,0.143 ,1.002 ,0.351 ,1.000 ,1.000 ,0.000 ,3.117 ,128.560 ], [1.000 ,0.389 ,0.973 ,0.186 ,0.000 ,1.000 ,0.000 ,2.649 ,145.320 ], [1.000 ,0.759 ,0.995 ,0.085 ,0.000 ,1.000 ,0.000 ,5.598 ,176.970 ], [2.000 ,0.168 ,1.006 ,0.235 ,0.000 ,1.000 ,0.000 ,2.441 ,184.830 ], [0.000 ,6.921 ,1.026 ,0.609 ,0.000 ,0.000 ,0.000 ,25.901 ,236.980 ], [0.000 ,1.281 ,0.994 ,0.632 ,0.000 ,0.000 ,0.000 ,12.690 
,153.700 ], [0.000 ,2.543 ,0.974 ,0.423 ,0.000 ,1.000 ,0.000 ,5.651 ,145.220 ], [0.000 ,3.393 ,0.983 ,0.482 ,0.000 ,1.000 ,0.000 ,12.907 ,160.570 ], [1.000 ,0.000 ,1.032 ,0.237 ,0.000 ,0.000 ,0.000 ,5.272 ,136.430 ], [0.000 ,0.000 ,1.001 ,0.154 ,0.000 ,0.000 ,0.000 ,8.653 ,124.340 ], [0.000 ,0.000 ,1.000 ,0.299 ,0.000 ,0.000 ,0.000 ,3.456 ,113.440 ], [0.000 ,0.000 ,1.071 ,0.136 ,0.000 ,0.000 ,0.000 ,12.094 ,123.850 ], [1.000 ,0.000 ,0.999 ,0.193 ,0.000 ,0.000 ,0.000 ,3.456 ,124.070 ], [0.000 ,0.000 ,1.010 ,0.155 ,0.000 ,0.000 ,0.000 ,0.166 ,62.050 ], [0.000 ,0.000 ,1.055 ,0.023 ,0.000 ,0.000 ,0.000 ,4.957 ,86.480 ], [0.000 ,0.000 ,1.001 ,0.250 ,0.000 ,0.000 ,0.000 ,8.653 ,124.330 ], [0.000 ,0.000 ,1.031 ,0.277 ,0.000 ,0.000 ,0.000 ,4.525 ,113.610 ], [1.000 ,0.000 ,0.998 ,0.223 ,0.000 ,0.000 ,0.000 ,3.456 ,124.540 ], [0.000 ,0.000 ,0.954 ,0.022 ,0.000 ,0.000 ,0.000 ,5.170 ,130.290 ], [0.000 ,1.361 ,0.913 ,-0.071 ,0.000 ,0.000 ,0.000 ,0.963 ,141.540 ], [0.000 ,0.680 ,0.949 ,-0.034 ,0.000 ,1.000 ,0.000 ,0.289 ,104.920 ], [0.000 ,0.000 ,1.001 ,0.159 ,0.000 ,0.000 ,0.000 ,4.808 ,131.060 ], [0.000 ,0.000 ,1.105 ,0.230 ,0.000 ,0.000 ,0.000 ,16.507 ,142.920 ], [0.000 ,0.000 ,1.045 ,0.291 ,0.000 ,0.000 ,0.000 ,9.273 ,118.440 ], [0.000 ,0.000 ,1.036 ,0.275 ,0.000 ,0.000 ,0.000 ,8.272 ,106.290 ], [0.000 ,0.000 ,0.999 ,0.323 ,0.000 ,0.000 ,0.000 ,2.268 ,96.600 ], [0.000 ,0.000 ,0.969 ,0.172 ,0.000 ,0.000 ,0.000 ,4.957 ,76.880 ], [1.000 ,0.000 ,0.993 ,0.216 ,0.000 ,0.000 ,0.000 ,0.219 ,81.650 ], [0.000 ,0.161 ,0.974 ,0.522 ,1.000 ,0.000 ,0.000 ,2.519 ,97.780 ], [1.000 ,0.058 ,0.959 ,-0.034 ,2.000 ,0.000 ,0.000 ,0.840 ,83.590 ], [2.000 ,0.847 ,1.007 ,0.519 ,0.000 ,0.000 ,0.000 ,0.111 ,212.890 ], [0.000 ,0.707 ,0.985 ,0.608 ,1.000 ,0.000 ,0.000 ,3.765 ,113.210 ], [0.000 ,0.740 ,0.973 ,0.243 ,1.000 ,0.000 ,0.000 ,0.499 ,102.660 ], [0.000 ,1.336 ,0.990 ,0.142 ,0.000 ,0.000 ,0.000 ,2.148 ,115.850 ], [0.000 ,3.488 ,1.012 ,0.789 ,1.000 ,0.000 ,0.000 ,14.489 ,159.350 ], [0.000 ,5.402 
,1.027 ,0.793 ,1.000 ,0.000 ,0.000 ,19.175 ,190.100 ], [2.000 ,0.340 ,0.989 ,0.209 ,0.000 ,1.000 ,0.000 ,2.148 ,154.030 ], [0.000 ,0.000 ,1.086 ,-0.264 ,2.000 ,2.000 ,0.000 ,6.848 ,254.290 ], [0.000 ,0.000 ,1.101 ,-0.363 ,0.000 ,0.000 ,0.000 ,10.387 ,277.320 ], [0.000 ,8.172 ,0.986 ,0.178 ,0.000 ,0.000 ,0.000 ,4.270 ,277.590 ], [2.000 ,3.337 ,0.987 ,0.072 ,0.000 ,1.000 ,0.000 ,0.801 ,174.430 ], [1.000 ,0.000 ,1.128 ,-0.043 ,0.000 ,0.000 ,0.000 ,15.990 ,195.450 ], [0.000 ,14.970 ,0.887 ,1.408 ,0.000 ,2.000 ,0.000 ,40.175 ,389.120 ], [0.000 ,0.000 ,1.200 ,-0.035 ,0.000 ,0.000 ,0.000 ,27.110 ,199.720 ], [0.000 ,0.000 ,1.167 ,-0.259 ,0.000 ,2.000 ,0.000 ,5.081 ,179.140 ], [2.000 ,4.847 ,1.040 ,0.136 ,0.000 ,0.000 ,0.000 ,13.905 ,354.700 ], [0.000 ,1.635 ,1.052 ,0.327 ,0.000 ,0.000 ,0.000 ,18.762 ,261.540 ], [0.000 ,1.037 ,0.996 ,0.176 ,1.000 ,1.000 ,0.000 ,7.000 ,187.630 ], [1.000 ,0.356 ,0.982 ,0.389 ,0.000 ,1.000 ,0.000 ,3.782 ,160.330 ], [0.000 ,0.000 ,1.104 ,0.039 ,0.000 ,0.000 ,0.000 ,16.507 ,141.880 ], [0.000 ,0.000 ,1.061 ,0.151 ,0.000 ,0.000 ,0.000 ,7.446 ,130.500 ], [0.000 ,6.050 ,0.961 ,0.280 ,0.000 ,2.000 ,0.000 ,0.346 ,271.760 ], [3.000 ,0.000 ,1.016 ,0.126 ,0.000 ,1.000 ,0.000 ,1.730 ,175.460 ], [1.000 ,3.495 ,1.004 ,0.334 ,0.000 ,0.000 ,0.000 ,4.040 ,244.290 ], [5.000 ,2.650 ,0.998 ,0.208 ,0.000 ,0.000 ,0.000 ,5.165 ,181.720 ], [1.000 ,2.413 ,1.000 ,0.407 ,0.000 ,0.000 ,0.000 ,11.526 ,216.550 ], [1.000 ,4.334 ,0.942 ,0.366 ,0.000 ,1.000 ,0.000 ,3.306 ,179.120 ], [2.000 ,0.000 ,1.009 ,0.002 ,0.000 ,2.000 ,0.000 ,0.093 ,90.250 ], [0.000 ,2.151 ,0.973 ,0.464 ,0.000 ,1.000 ,0.000 ,4.244 ,130.350 ], [0.000 ,0.000 ,0.944 ,-0.450 ,0.000 ,0.000 ,0.000 ,0.224 ,68.730 ], [0.000 ,5.270 ,0.956 ,0.249 ,0.000 ,1.000 ,1.000 ,2.658 ,224.680 ], [0.000 ,0.000 ,1.203 ,0.172 ,0.000 ,0.000 ,0.000 ,6.047 ,94.320 ], [0.000 ,0.000 ,1.086 ,-0.118 ,0.000 ,1.000 ,0.000 ,0.991 ,97.280 ], [0.000 ,1.079 ,1.063 ,0.270 ,0.000 ,0.000 ,0.000 ,22.653 ,298.670 ], [0.000 ,1.689 ,0.985 ,0.496 
,0.000 ,2.000 ,0.000 ,4.018 ,173.030 ], [0.000 ,0.587 ,1.062 ,0.615 ,0.000 ,0.000 ,0.000 ,8.174 ,201.840 ], [1.000 ,5.663 ,0.924 ,0.614 ,1.000 ,2.000 ,0.000 ,12.755 ,319.500 ], [1.000 ,1.004 ,1.063 ,-0.016 ,0.000 ,2.000 ,0.000 ,3.317 ,228.920 ], [1.000 ,0.878 ,1.106 ,0.315 ,0.000 ,2.000 ,0.000 ,6.620 ,243.270 ], [1.000 ,0.000 ,1.024 ,0.374 ,0.000 ,0.000 ,0.000 ,6.954 ,185.230 ], [0.000 ,3.885 ,0.970 ,0.366 ,0.000 ,1.000 ,0.000 ,8.676 ,179.230 ], [2.000 ,3.253 ,1.019 ,0.004 ,0.000 ,0.000 ,0.000 ,0.206 ,252.410 ], [5.000 ,4.161 ,0.975 ,0.569 ,0.000 ,0.000 ,0.000 ,8.371 ,216.850 ], [2.000 ,2.650 ,1.004 ,0.173 ,0.000 ,0.000 ,0.000 ,9.304 ,259.410 ], [1.000 ,2.134 ,0.926 ,-0.001 ,0.000 ,2.000 ,1.000 ,0.466 ,185.270 ], [0.000 ,2.533 ,1.004 ,0.749 ,1.000 ,0.000 ,0.000 ,12.271 ,143.980 ], [0.000 ,0.000 ,1.225 ,-1.057 ,0.000 ,1.000 ,0.000 ,24.123 ,317.400 ], [0.000 ,0.000 ,0.989 ,-0.351 ,2.000 ,2.000 ,0.000 ,0.003 ,133.300 ], [0.000 ,0.000 ,1.028 ,0.116 ,1.000 ,1.000 ,0.000 ,0.184 ,133.180 ], [1.000 ,0.000 ,1.105 ,0.485 ,0.000 ,0.000 ,0.000 ,3.129 ,181.390 ], [1.000 ,0.297 ,1.007 ,0.257 ,1.000 ,1.000 ,0.000 ,0.001 ,143.110 ], [2.000 ,2.797 ,0.987 ,-0.140 ,0.000 ,1.000 ,0.000 ,4.353 ,189.090 ], [1.000 ,0.000 ,0.957 ,0.177 ,0.000 ,0.000 ,0.000 ,0.319 ,38.210 ], [0.000 ,3.837 ,1.001 ,0.531 ,1.000 ,0.000 ,0.000 ,0.061 ,268.360 ], [0.000 ,5.617 ,1.073 ,0.301 ,0.000 ,0.000 ,0.000 ,35.045 ,424.340 ], [0.000 ,5.109 ,1.084 ,0.530 ,0.000 ,0.000 ,0.000 ,35.045 ,441.750 ], [1.000 ,6.470 ,1.040 ,0.435 ,0.000 ,0.000 ,0.000 ,20.554 ,386.420 ], [0.000 ,4.527 ,1.078 ,0.568 ,0.000 ,0.000 ,0.000 ,30.809 ,392.900 ], [0.000 ,0.000 ,1.066 ,0.525 ,0.000 ,0.000 ,0.000 ,15.023 ,205.500 ], [1.000 ,4.009 ,1.051 ,0.340 ,0.000 ,0.000 ,0.000 ,15.289 ,335.520 ], [1.000 ,1.673 ,1.051 ,0.241 ,0.000 ,0.000 ,0.000 ,9.309 ,251.780 ], [1.000 ,2.880 ,0.987 ,0.119 ,0.000 ,0.000 ,0.000 ,2.865 ,227.820 ], [0.000 ,4.811 ,0.978 ,0.364 ,0.000 ,0.000 ,0.000 ,2.148 ,242.780 ], [0.000 ,5.876 ,1.007 ,0.072 ,0.000 ,0.000 
,0.000 ,3.237 ,259.650 ], [0.000 ,0.000 ,1.022 ,-0.141 ,0.000 ,1.000 ,1.000 ,5.559 ,194.930 ], [0.000 ,2.306 ,0.975 ,0.142 ,0.000 ,1.000 ,1.000 ,0.018 ,195.900 ], [0.000 ,0.000 ,1.083 ,-0.151 ,0.000 ,0.000 ,0.000 ,28.743 ,278.880 ], [2.000 ,2.688 ,0.986 ,0.064 ,0.000 ,2.000 ,0.000 ,3.233 ,196.560 ], [2.000 ,2.426 ,0.989 ,-0.003 ,0.000 ,2.000 ,0.000 ,2.252 ,180.650 ], [0.000 ,0.000 ,1.091 ,-0.183 ,0.000 ,0.000 ,0.000 ,16.744 ,189.280 ], [2.000 ,1.893 ,0.978 ,0.087 ,0.000 ,1.000 ,0.000 ,3.749 ,169.080 ], [2.000 ,3.097 ,0.999 ,0.028 ,0.000 ,1.000 ,0.000 ,7.000 ,207.700 ], [2.000 ,2.325 ,0.979 ,-0.077 ,0.000 ,1.000 ,0.000 ,3.749 ,168.640 ], [2.000 ,8.708 ,0.975 ,0.197 ,0.000 ,0.000 ,0.000 ,6.369 ,278.930 ], [0.000 ,0.381 ,1.039 ,0.238 ,1.000 ,1.000 ,0.000 ,17.390 ,138.400 ], [1.000 ,0.433 ,0.979 ,0.189 ,0.000 ,1.000 ,0.000 ,4.227 ,144.180 ], [1.000 ,0.519 ,0.986 ,0.308 ,0.000 ,1.000 ,0.000 ,7.093 ,158.170 ], [1.000 ,0.004 ,0.979 ,0.166 ,0.000 ,1.000 ,0.000 ,4.227 ,143.290 ], [0.000 ,0.595 ,1.039 ,0.265 ,1.000 ,1.000 ,0.000 ,21.686 ,154.570 ], [3.000 ,0.000 ,1.017 ,0.389 ,0.000 ,0.000 ,0.000 ,6.300 ,176.710 ], [0.000 ,9.374 ,0.976 ,0.486 ,0.000 ,1.000 ,1.000 ,19.790 ,284.490 ], [3.000 ,5.648 ,0.959 ,-0.485 ,0.000 ,0.000 ,0.000 ,12.240 ,350.970 ], [2.000 ,3.917 ,0.927 ,0.739 ,0.000 ,0.000 ,0.000 ,10.906 ,229.020 ], [0.000 ,9.215 ,0.986 ,0.237 ,1.000 ,1.000 ,0.000 ,15.463 ,298.120 ], [0.000 ,0.000 ,0.982 ,0.351 ,0.000 ,0.000 ,0.000 ,10.620 ,140.770 ], [1.000 ,0.000 ,1.132 ,0.081 ,0.000 ,0.000 ,0.000 ,15.278 ,196.710 ], [2.000 ,0.014 ,1.040 ,-0.108 ,1.000 ,1.000 ,0.000 ,1.101 ,203.790 ], [1.000 ,0.488 ,0.990 ,0.070 ,0.000 ,1.000 ,0.000 ,4.896 ,162.190 ], [2.000 ,0.000 ,0.981 ,-0.069 ,0.000 ,0.000 ,0.000 ,0.039 ,163.380 ], [2.000 ,0.000 ,1.011 ,-0.121 ,0.000 ,0.000 ,0.000 ,3.055 ,178.120 ], [0.000 ,1.581 ,0.919 ,0.149 ,0.000 ,1.000 ,0.000 ,2.981 ,115.080 ], [0.000 ,0.000 ,1.088 ,0.030 ,0.000 ,0.000 ,0.000 ,4.332 ,85.540 ], [0.000 ,0.000 ,1.071 ,0.223 ,0.000 ,0.000 ,0.000 
,12.094 ,124.030 ], [2.000 ,0.461 ,0.995 ,0.268 ,0.000 ,1.000 ,0.000 ,2.441 ,179.210 ], [1.000 ,0.000 ,1.053 ,0.196 ,0.000 ,0.000 ,0.000 ,7.446 ,140.790 ], [2.000 ,6.215 ,0.990 ,0.229 ,0.000 ,1.000 ,0.000 ,11.987 ,254.280 ], [0.000 ,8.567 ,1.022 ,0.565 ,0.000 ,0.000 ,0.000 ,0.772 ,349.490 ], [0.000 ,0.000 ,1.018 ,0.073 ,0.000 ,0.000 ,0.000 ,4.332 ,77.400 ], [0.000 ,0.000 ,1.060 ,0.129 ,0.000 ,0.000 ,0.000 ,7.446 ,130.230 ], [1.000 ,0.000 ,1.048 ,0.179 ,0.000 ,0.000 ,0.000 ,7.169 ,146.980 ], [1.000 ,0.000 ,1.048 ,0.164 ,0.000 ,0.000 ,0.000 ,7.169 ,153.160 ], [2.000 ,2.926 ,0.987 ,0.300 ,0.000 ,1.000 ,0.000 ,4.353 ,189.290 ], [0.000 ,0.000 ,1.062 ,0.320 ,0.000 ,0.000 ,0.000 ,7.446 ,131.660 ], [0.000 ,0.000 ,1.061 ,0.380 ,0.000 ,0.000 ,0.000 ,10.302 ,124.660 ], [0.000 ,0.000 ,1.090 ,-0.013 ,0.000 ,0.000 ,0.000 ,4.332 ,82.360 ], [1.000 ,0.000 ,1.051 ,0.280 ,0.000 ,0.000 ,0.000 ,7.446 ,140.240 ], [0.000 ,0.000 ,1.169 ,-0.042 ,0.000 ,0.000 ,0.000 ,24.263 ,179.650 ], [1.000 ,0.000 ,1.026 ,0.243 ,0.000 ,1.000 ,0.000 ,3.593 ,140.690 ], [2.000 ,0.000 ,1.000 ,0.320 ,0.000 ,1.000 ,0.000 ,1.510 ,148.880 ], [2.000 ,0.000 ,1.018 ,0.185 ,1.000 ,1.000 ,0.000 ,2.963 ,167.920 ], [1.000 ,2.061 ,1.049 ,0.205 ,0.000 ,0.000 ,0.000 ,12.835 ,291.410 ], [1.000 ,0.389 ,0.975 ,0.273 ,0.000 ,1.000 ,0.000 ,2.649 ,139.580 ], [1.000 ,0.514 ,1.018 ,0.212 ,0.000 ,1.000 ,0.000 ,5.598 ,172.410 ], [3.000 ,0.000 ,1.016 ,0.020 ,0.000 ,0.000 ,0.000 ,1.945 ,163.320 ], [1.000 ,0.777 ,1.000 ,0.120 ,0.000 ,1.000 ,0.000 ,2.649 ,141.800 ], [2.000 ,0.058 ,1.009 ,0.117 ,0.000 ,1.000 ,0.000 ,1.510 ,152.140 ], [1.000 ,0.000 ,1.054 ,0.280 ,0.000 ,0.000 ,0.000 ,7.446 ,142.550 ], [0.000 ,0.000 ,1.080 ,0.405 ,2.000 ,2.000 ,0.000 ,9.691 ,217.340 ], [0.000 ,0.000 ,1.137 ,-0.009 ,0.000 ,0.000 ,0.000 ,21.476 ,159.640 ], [1.000 ,0.000 ,1.107 ,0.079 ,0.000 ,0.000 ,0.000 ,13.085 ,177.730 ], [0.000 ,0.000 ,1.137 ,0.042 ,0.000 ,0.000 ,0.000 ,21.476 ,160.930 ], [0.000 ,3.669 ,1.021 ,0.458 ,0.000 ,1.000 ,0.000 ,5.748 ,272.380 ], 
[0.000 ,0.000 ,1.074 ,-0.052 ,0.000 ,2.000 ,0.000 ,10.382 ,143.250 ], [2.000 ,0.301 ,0.987 ,0.292 ,0.000 ,1.000 ,0.000 ,1.693 ,138.330 ], [2.000 ,0.003 ,0.977 ,0.253 ,0.000 ,1.000 ,0.000 ,1.693 ,134.500 ], [1.000 ,0.000 ,0.929 ,-0.172 ,0.000 ,1.000 ,0.000 ,0.936 ,105.480 ], [0.000 ,0.000 ,0.992 ,0.248 ,0.000 ,0.000 ,0.000 ,6.299 ,147.590 ], [1.000 ,1.912 ,1.025 ,0.136 ,0.000 ,1.000 ,0.000 ,9.459 ,195.020 ], [1.000 ,3.349 ,1.040 ,0.422 ,0.000 ,2.000 ,0.000 ,1.215 ,297.390 ], [1.000 ,13.599 ,0.913 ,0.695 ,0.000 ,2.000 ,0.000 ,2.725 ,370.480 ], [1.000 ,4.002 ,0.964 ,0.537 ,0.000 ,1.000 ,0.000 ,3.886 ,185.120 ], [0.000 ,1.541 ,1.036 ,0.376 ,0.000 ,0.000 ,0.000 ,21.047 ,279.930 ], [0.000 ,5.712 ,1.017 ,0.339 ,0.000 ,0.000 ,0.000 ,10.106 ,327.390 ], [5.000 ,6.566 ,0.965 ,0.656 ,0.000 ,0.000 ,0.000 ,6.707 ,230.610 ], [1.000 ,0.000 ,1.124 ,0.013 ,0.000 ,0.000 ,0.000 ,15.990 ,195.610 ], [5.000 ,7.796 ,0.954 ,0.455 ,0.000 ,0.000 ,0.000 ,8.371 ,247.020 ], [2.000 ,6.470 ,0.997 ,0.608 ,0.000 ,1.000 ,0.000 ,5.156 ,231.290 ], [0.000 ,0.000 ,1.090 ,0.133 ,0.000 ,0.000 ,0.000 ,10.982 ,149.300 ], [0.000 ,1.628 ,1.016 ,0.467 ,0.000 ,0.000 ,0.000 ,15.670 ,202.060 ], [0.000 ,4.572 ,1.017 ,0.113 ,0.000 ,0.000 ,0.000 ,7.914 ,261.280 ], [0.000 ,3.303 ,1.026 ,-0.050 ,0.000 ,0.000 ,0.000 ,6.794 ,295.800 ], [1.000 ,7.628 ,0.959 ,1.005 ,0.000 ,1.000 ,0.000 ,11.191 ,265.230 ], [0.000 ,0.000 ,1.101 ,-0.369 ,0.000 ,0.000 ,0.000 ,10.387 ,277.270 ], [2.000 ,1.395 ,0.960 ,0.238 ,0.000 ,1.000 ,0.000 ,1.435 ,152.540 ], [5.000 ,5.364 ,0.984 ,0.188 ,0.000 ,0.000 ,0.000 ,5.165 ,213.210 ], [0.000 ,0.000 ,1.132 ,-0.448 ,0.000 ,0.000 ,0.000 ,17.759 ,268.610 ], [1.000 ,5.541 ,0.978 ,0.478 ,0.000 ,1.000 ,0.000 ,5.174 ,200.930 ], [0.000 ,0.407 ,1.007 ,0.366 ,0.000 ,0.000 ,0.000 ,15.670 ,199.890 ], [1.000 ,2.968 ,0.939 ,0.386 ,0.000 ,1.000 ,0.000 ,5.054 ,172.410 ], [0.000 ,5.913 ,1.032 ,-0.493 ,0.000 ,2.000 ,0.000 ,3.524 ,316.570 ], [0.000 ,5.952 ,1.022 ,-0.376 ,0.000 ,2.000 ,0.000 ,3.726 ,319.540 ], [1.000 
,0.000 ,1.087 ,0.086 ,0.000 ,0.000 ,0.000 ,8.735 ,148.620 ], [0.000 ,1.885 ,1.033 ,0.123 ,1.000 ,0.000 ,0.000 ,12.750 ,285.200 ], [0.000 ,0.000 ,1.028 ,0.194 ,0.000 ,0.000 ,0.000 ,6.299 ,142.400 ], [1.000 ,4.429 ,0.954 ,0.824 ,0.000 ,1.000 ,0.000 ,4.451 ,193.590 ], [3.000 ,5.303 ,0.947 ,0.208 ,0.000 ,0.000 ,0.000 ,17.698 ,283.030 ], [2.000 ,0.815 ,0.928 ,0.190 ,1.000 ,2.000 ,0.000 ,0.003 ,127.410 ], [5.000 ,6.351 ,0.954 ,0.570 ,0.000 ,0.000 ,0.000 ,4.946 ,202.720 ], [2.000 ,0.000 ,0.953 ,0.227 ,0.000 ,0.000 ,0.000 ,0.535 ,89.340 ], [2.000 ,3.463 ,0.938 ,-0.036 ,0.000 ,1.000 ,0.000 ,0.000 ,181.700 ], [2.000 ,3.580 ,0.975 ,0.025 ,0.000 ,3.000 ,0.000 ,0.003 ,180.510 ], [3.000 ,1.856 ,1.024 ,0.257 ,0.000 ,1.000 ,0.000 ,3.809 ,217.210 ], [1.000 ,0.000 ,1.065 ,0.218 ,1.000 ,1.000 ,0.000 ,1.800 ,145.760 ], [2.000 ,2.647 ,0.988 ,0.542 ,0.000 ,1.000 ,1.000 ,4.056 ,177.190 ], [0.000 ,0.721 ,1.042 ,0.478 ,1.000 ,2.000 ,0.000 ,15.048 ,171.920 ], [1.000 ,2.048 ,1.009 ,0.086 ,0.000 ,0.000 ,0.000 ,16.321 ,263.360 ], [0.000 ,0.735 ,1.049 ,-0.047 ,0.000 ,2.000 ,0.000 ,13.311 ,247.690 ], [3.000 ,4.849 ,0.941 ,0.046 ,0.000 ,0.000 ,0.000 ,17.698 ,282.330 ], [2.000 ,0.000 ,1.153 ,-0.096 ,0.000 ,0.000 ,0.000 ,9.814 ,218.190 ], [5.000 ,5.414 ,0.949 ,0.744 ,0.000 ,0.000 ,0.000 ,10.142 ,233.520 ], [5.000 ,4.450 ,0.957 ,0.554 ,0.000 ,0.000 ,0.000 ,8.371 ,215.930 ], [0.000 ,0.000 ,1.010 ,0.003 ,1.000 ,1.000 ,0.000 ,6.877 ,173.010 ], [1.000 ,0.000 ,1.055 ,0.207 ,0.000 ,0.000 ,0.000 ,8.735 ,190.340 ], [1.000 ,1.163 ,0.961 ,0.200 ,0.000 ,1.000 ,0.000 ,7.956 ,199.710 ], [1.000 ,1.609 ,1.008 ,0.185 ,0.000 ,1.000 ,0.000 ,5.543 ,183.790 ], [1.000 ,0.000 ,1.101 ,0.381 ,0.000 ,0.000 ,0.000 ,7.232 ,169.820 ], [1.000 ,3.923 ,0.995 ,0.178 ,0.000 ,1.000 ,0.000 ,6.620 ,206.100 ], [2.000 ,0.510 ,1.006 ,0.203 ,0.000 ,1.000 ,0.000 ,2.645 ,170.860 ], [2.000 ,5.333 ,0.975 ,0.051 ,0.000 ,1.000 ,0.000 ,10.110 ,290.480 ], [2.000 ,7.390 ,0.970 ,0.615 ,0.000 ,1.000 ,0.000 ,6.422 ,245.350 ], [1.000 ,7.816 ,0.924 
,0.588 ,0.000 ,1.000 ,0.000 ,6.585 ,219.340 ], [1.000 ,3.680 ,0.948 ,0.057 ,0.000 ,1.000 ,0.000 ,5.654 ,221.430 ], [0.000 ,1.033 ,1.033 ,0.531 ,0.000 ,0.000 ,0.000 ,19.499 ,208.790 ], [0.000 ,1.011 ,1.034 ,0.298 ,0.000 ,0.000 ,0.000 ,19.499 ,213.400 ], [3.000 ,2.030 ,0.986 ,0.291 ,0.000 ,0.000 ,0.000 ,11.583 ,240.220 ], [1.000 ,8.849 ,0.998 ,0.387 ,0.000 ,0.000 ,0.000 ,11.494 ,325.020 ], [0.000 ,4.275 ,0.991 ,0.388 ,0.000 ,1.000 ,0.000 ,11.059 ,236.470 ], [1.000 ,1.545 ,0.992 ,0.354 ,0.000 ,1.000 ,0.000 ,3.179 ,165.260 ], [1.000 ,0.510 ,1.010 ,0.032 ,0.000 ,1.000 ,0.000 ,7.751 ,181.890 ], [2.000 ,6.074 ,0.999 ,0.331 ,0.000 ,2.000 ,0.000 ,4.960 ,223.430 ], [1.000 ,3.018 ,0.951 ,0.180 ,0.000 ,1.000 ,0.000 ,2.494 ,181.320 ], [0.000 ,0.000 ,1.179 ,-0.125 ,0.000 ,0.000 ,0.000 ,31.741 ,339.040 ], [0.000 ,0.000 ,1.191 ,0.245 ,0.000 ,0.000 ,0.000 ,42.931 ,331.170 ], [1.000 ,5.697 ,0.953 ,0.026 ,0.000 ,1.000 ,0.000 ,9.069 ,253.610 ], [1.000 ,5.913 ,0.921 ,0.304 ,0.000 ,1.000 ,0.000 ,13.133 ,236.790 ], [1.000 ,6.405 ,0.997 ,0.731 ,0.000 ,1.000 ,0.000 ,3.032 ,329.800 ], [2.000 ,0.338 ,1.011 ,0.219 ,0.000 ,1.000 ,0.000 ,4.206 ,173.720 ], [0.000 ,0.000 ,1.228 ,-0.925 ,0.000 ,0.000 ,0.000 ,42.449 ,350.500 ], [1.000 ,3.771 ,1.073 ,0.005 ,0.000 ,2.000 ,0.000 ,5.553 ,264.560 ], [2.000 ,1.024 ,1.095 ,0.505 ,0.000 ,1.000 ,0.000 ,0.199 ,267.410 ], [0.000 ,6.505 ,1.049 ,0.479 ,0.000 ,2.000 ,0.000 ,0.511 ,263.890 ], [0.000 ,0.000 ,1.064 ,-0.140 ,0.000 ,0.000 ,0.000 ,9.168 ,192.940 ], [0.000 ,9.792 ,1.000 ,0.528 ,0.000 ,1.000 ,0.000 ,2.962 ,301.670 ], [1.000 ,1.340 ,0.974 ,0.423 ,0.000 ,1.000 ,0.000 ,1.435 ,147.590 ], [1.000 ,3.580 ,0.941 ,0.126 ,0.000 ,1.000 ,0.000 ,5.716 ,207.340 ], [1.000 ,2.950 ,0.976 ,0.099 ,0.000 ,1.000 ,0.000 ,3.306 ,178.650 ], [0.000 ,0.000 ,1.036 ,0.023 ,0.000 ,0.000 ,0.000 ,7.488 ,172.380 ], [0.000 ,0.796 ,0.999 ,-0.057 ,0.000 ,0.000 ,0.000 ,7.488 ,172.260 ], [1.000 ,7.214 ,0.972 ,0.351 ,0.000 ,0.000 ,0.000 ,10.912 ,279.400 ], [2.000 ,0.359 ,1.007 ,0.261 ,0.000 
,1.000 ,0.000 ,2.645 ,169.800 ], [2.000 ,3.639 ,0.983 ,0.035 ,0.000 ,1.000 ,0.000 ,5.007 ,210.290 ], [0.000 ,26.034 ,0.893 ,-0.396 ,1.000 ,3.000 ,0.000 ,3.363 ,438.640 ], [2.000 ,2.675 ,0.968 ,0.709 ,0.000 ,3.000 ,0.000 ,0.067 ,198.230 ], [0.000 ,8.783 ,0.955 ,0.230 ,0.000 ,0.000 ,0.000 ,11.048 ,429.500 ], [0.000 ,2.741 ,1.027 ,0.699 ,0.000 ,0.000 ,0.000 ,34.268 ,300.020 ], [1.000 ,6.401 ,0.954 ,0.133 ,0.000 ,1.000 ,0.000 ,16.456 ,301.790 ], [1.000 ,0.000 ,1.105 ,0.037 ,0.000 ,0.000 ,0.000 ,13.085 ,177.630 ], [0.000 ,2.331 ,1.017 ,0.090 ,1.000 ,0.000 ,0.000 ,9.802 ,219.230 ], [1.000 ,4.442 ,0.919 ,0.630 ,0.000 ,1.000 ,0.000 ,5.716 ,206.950 ], [1.000 ,0.631 ,1.037 ,0.090 ,1.000 ,2.000 ,0.000 ,2.555 ,208.160 ], [1.000 ,1.709 ,0.960 ,0.041 ,0.000 ,1.000 ,0.000 ,3.179 ,157.870 ], [5.000 ,9.392 ,0.963 ,0.879 ,0.000 ,0.000 ,0.000 ,10.142 ,264.950 ], [1.000 ,0.000 ,1.037 ,0.179 ,1.000 ,1.000 ,0.000 ,0.548 ,126.220 ], [0.000 ,0.151 ,1.016 ,0.389 ,1.000 ,0.000 ,0.000 ,7.078 ,189.430 ], [0.000 ,0.000 ,1.120 ,0.071 ,0.000 ,0.000 ,0.000 ,13.085 ,167.200 ], [3.000 ,4.381 ,0.954 ,0.827 ,0.000 ,0.000 ,0.000 ,6.182 ,215.350 ], [1.000 ,5.255 ,1.005 ,0.250 ,0.000 ,3.000 ,0.000 ,0.315 ,253.670 ], [1.000 ,0.889 ,1.009 ,-0.003 ,0.000 ,0.000 ,0.000 ,3.415 ,147.160 ], [0.000 ,2.921 ,1.043 ,-0.076 ,0.000 ,0.000 ,0.000 ,29.732 ,314.670 ], [1.000 ,3.331 ,1.035 ,0.087 ,0.000 ,0.000 ,0.000 ,7.458 ,244.440 ], [2.000 ,3.237 ,0.978 ,0.355 ,0.000 ,2.000 ,0.000 ,2.725 ,186.280 ], [5.000 ,4.151 ,0.959 ,0.466 ,0.000 ,0.000 ,0.000 ,8.371 ,215.390 ], [0.000 ,16.667 ,0.870 ,1.510 ,0.000 ,2.000 ,0.000 ,29.446 ,391.560 ], [1.000 ,0.000 ,1.106 ,-0.072 ,0.000 ,1.000 ,0.000 ,4.536 ,146.760 ], [0.000 ,5.766 ,1.026 ,-0.332 ,1.000 ,2.000 ,0.000 ,1.250 ,322.650 ], [1.000 ,0.000 ,1.024 ,0.159 ,0.000 ,0.000 ,0.000 ,6.870 ,153.630 ], [1.000 ,3.428 ,0.931 ,-0.026 ,0.000 ,1.000 ,0.000 ,0.634 ,194.570 ], [0.000 ,0.783 ,1.061 ,0.352 ,0.000 ,0.000 ,0.000 ,29.937 ,247.320 ], [1.000 ,0.000 ,1.060 ,0.391 ,0.000 ,0.000 
,0.000 ,10.112 ,194.780 ], [2.000 ,2.704 ,0.980 ,0.203 ,0.000 ,1.000 ,0.000 ,3.306 ,189.270 ], [2.000 ,3.062 ,0.972 ,-0.122 ,0.000 ,1.000 ,0.000 ,1.635 ,194.790 ], [0.000 ,6.958 ,1.024 ,-0.345 ,0.000 ,2.000 ,0.000 ,4.040 ,322.180 ], [1.000 ,1.800 ,1.034 ,0.023 ,0.000 ,1.000 ,0.000 ,6.877 ,187.540 ], [5.000 ,7.811 ,0.961 ,0.720 ,0.000 ,0.000 ,0.000 ,8.371 ,248.510 ], [0.000 ,1.199 ,0.950 ,-0.379 ,0.000 ,2.000 ,1.000 ,0.570 ,165.200 ], [0.000 ,0.000 ,1.066 ,-0.035 ,0.000 ,0.000 ,0.000 ,31.513 ,286.600 ], [5.000 ,5.969 ,0.998 ,0.871 ,0.000 ,0.000 ,0.000 ,2.184 ,535.750 ], [0.000 ,0.000 ,1.030 ,-0.168 ,0.000 ,0.000 ,0.000 ,4.332 ,75.650 ], [1.000 ,0.000 ,1.025 ,0.127 ,0.000 ,1.000 ,1.000 ,1.493 ,106.100 ], [0.000 ,6.250 ,1.016 ,0.024 ,0.000 ,0.000 ,0.000 ,1.640 ,290.360 ], [2.000 ,0.167 ,1.021 ,0.266 ,0.000 ,0.000 ,0.000 ,0.111 ,266.900 ], [2.000 ,3.413 ,0.961 ,0.302 ,1.000 ,0.000 ,0.000 ,3.291 ,258.550 ], [0.000 ,1.158 ,1.044 ,0.706 ,0.000 ,0.000 ,0.000 ,24.476 ,222.640 ], [2.000 ,0.376 ,0.993 ,0.320 ,0.000 ,1.000 ,0.000 ,3.749 ,153.830 ], [2.000 ,3.556 ,1.012 ,0.503 ,0.000 ,1.000 ,0.000 ,2.034 ,185.750 ], [2.000 ,5.685 ,1.009 ,0.117 ,0.000 ,1.000 ,0.000 ,3.995 ,214.660 ], [0.000 ,6.645 ,0.942 ,0.173 ,0.000 ,1.000 ,1.000 ,4.244 ,227.110 ], [0.000 ,6.818 ,0.940 ,0.692 ,0.000 ,1.000 ,1.000 ,5.053 ,243.410 ], [2.000 ,3.853 ,0.994 ,-0.052 ,0.000 ,1.000 ,0.000 ,7.794 ,227.950 ], [2.000 ,6.114 ,0.984 ,0.793 ,0.000 ,2.000 ,0.000 ,4.354 ,304.510 ], [0.000 ,8.274 ,0.982 ,0.581 ,0.000 ,2.000 ,0.000 ,3.665 ,281.290 ], [1.000 ,8.602 ,0.920 ,0.215 ,0.000 ,1.000 ,0.000 ,9.613 ,297.620 ], [2.000 ,3.122 ,0.990 ,0.253 ,0.000 ,1.000 ,0.000 ,5.654 ,206.770 ], [0.000 ,1.423 ,1.065 ,0.593 ,0.000 ,0.000 ,0.000 ,35.867 ,259.610 ], [1.000 ,7.759 ,0.916 ,-0.082 ,0.000 ,1.000 ,0.000 ,10.127 ,256.520 ], [2.000 ,2.070 ,0.982 ,0.206 ,0.000 ,1.000 ,0.000 ,0.003 ,140.560 ], [4.000 ,7.713 ,0.934 ,0.805 ,0.000 ,2.000 ,0.000 ,2.760 ,283.580 ], [0.000 ,13.524 ,0.948 ,0.688 ,0.000 ,2.000 ,0.000 ,7.860 
,310.570 ], [3.000 ,2.949 ,1.030 ,0.493 ,0.000 ,1.000 ,0.000 ,3.016 ,233.560 ], [4.000 ,6.784 ,0.956 ,0.721 ,0.000 ,2.000 ,0.000 ,3.842 ,327.090 ], [2.000 ,7.261 ,0.989 ,0.444 ,0.000 ,1.000 ,0.000 ,6.422 ,245.680 ], [2.000 ,3.551 ,0.979 ,0.005 ,0.000 ,1.000 ,0.000 ,3.378 ,213.520 ], [2.000 ,1.665 ,1.050 ,0.339 ,0.000 ,2.000 ,0.000 ,6.073 ,211.880 ], [2.000 ,2.290 ,0.959 ,-0.075 ,0.000 ,1.000 ,0.000 ,2.392 ,192.910 ], [4.000 ,2.416 ,0.973 ,0.078 ,0.000 ,1.000 ,0.000 ,2.556 ,208.570 ], [0.000 ,24.586 ,0.904 ,0.255 ,1.000 ,3.000 ,0.000 ,2.529 ,398.040 ], [0.000 ,6.131 ,1.028 ,0.151 ,0.000 ,0.000 ,0.000 ,21.339 ,342.600 ], [2.000 ,1.824 ,0.976 ,0.145 ,0.000 ,1.000 ,0.000 ,4.353 ,188.050 ], [1.000 ,11.283 ,0.949 ,0.344 ,0.000 ,1.000 ,1.000 ,14.039 ,283.280 ], [0.000 ,4.447 ,0.964 ,0.064 ,0.000 ,1.000 ,1.000 ,19.896 ,275.590 ], [4.000 ,7.133 ,0.939 ,0.368 ,0.000 ,1.000 ,0.000 ,2.717 ,229.210 ], [3.000 ,2.579 ,0.948 ,-0.178 ,0.000 ,2.000 ,0.000 ,0.214 ,187.930 ], [1.000 ,14.898 ,0.909 ,-0.093 ,0.000 ,0.000 ,0.000 ,16.023 ,308.350 ], [4.000 ,4.261 ,0.943 ,0.295 ,0.000 ,2.000 ,0.000 ,0.026 ,329.950 ], [1.000 ,4.221 ,0.978 ,0.292 ,0.000 ,1.000 ,0.000 ,14.486 ,236.720 ], [1.000 ,3.875 ,0.998 ,0.744 ,0.000 ,1.000 ,0.000 ,2.864 ,213.060 ], [2.000 ,2.959 ,1.017 ,0.606 ,0.000 ,3.000 ,0.000 ,2.416 ,216.530 ], [2.000 ,0.968 ,0.974 ,-0.072 ,0.000 ,1.000 ,0.000 ,1.545 ,177.880 ], [1.000 ,14.977 ,0.945 ,0.537 ,0.000 ,1.000 ,0.000 ,9.442 ,326.480 ], [0.000 ,5.090 ,0.971 ,-0.054 ,0.000 ,2.000 ,0.000 ,2.786 ,252.150 ], [5.000 ,6.953 ,0.938 ,0.531 ,0.000 ,0.000 ,0.000 ,6.387 ,217.720 ], [3.000 ,3.520 ,0.962 ,-0.498 ,0.000 ,0.000 ,0.000 ,12.577 ,275.400 ], [4.000 ,3.439 ,0.977 ,-0.988 ,0.000 ,2.000 ,0.000 ,1.970 ,312.940 ], [0.000 ,3.106 ,0.949 ,-0.030 ,0.000 ,2.000 ,1.000 ,0.570 ,163.190 ], [0.000 ,20.384 ,0.929 ,-0.198 ,2.000 ,2.000 ,0.000 ,1.396 ,363.110 ], [0.000 ,0.000 ,1.132 ,-0.023 ,0.000 ,1.000 ,0.000 ,13.240 ,204.760 ], [0.000 ,14.375 ,0.829 ,0.923 ,0.000 ,2.000 ,0.000 ,29.446 
,391.020 ], [2.000 ,2.581 ,0.978 ,0.120 ,0.000 ,1.000 ,0.000 ,1.635 ,195.060 ], [4.000 ,22.361 ,0.918 ,0.475 ,0.000 ,4.000 ,0.000 ,0.586 ,417.850 ], [4.000 ,2.551 ,0.949 ,0.125 ,0.000 ,0.000 ,0.000 ,8.592 ,280.000 ], [4.000 ,6.389 ,0.935 ,0.619 ,0.000 ,0.000 ,0.000 ,11.962 ,318.300 ], [1.000 ,0.000 ,1.075 ,0.062 ,0.000 ,0.000 ,0.000 ,10.697 ,169.870 ], [0.000 ,5.649 ,0.996 ,-0.135 ,2.000 ,1.000 ,0.000 ,1.915 ,278.080 ], [1.000 ,0.000 ,0.995 ,0.092 ,0.000 ,2.000 ,1.000 ,0.576 ,140.360 ], [1.000 ,0.540 ,1.029 ,0.352 ,0.000 ,1.000 ,0.000 ,5.054 ,177.730 ], [1.000 ,11.404 ,0.961 ,0.100 ,0.000 ,1.000 ,1.000 ,8.757 ,288.310 ], [1.000 ,11.517 ,0.982 ,0.415 ,0.000 ,3.000 ,1.000 ,8.013 ,291.880 ], [0.000 ,0.837 ,1.073 ,0.188 ,0.000 ,0.000 ,0.000 ,35.867 ,264.010 ], [1.000 ,4.174 ,1.031 ,0.260 ,0.000 ,2.000 ,0.000 ,10.500 ,249.040 ], [3.000 ,9.899 ,0.969 ,0.579 ,0.000 ,0.000 ,0.000 ,24.001 ,380.890 ], [1.000 ,3.742 ,0.936 ,0.516 ,0.000 ,1.000 ,0.000 ,7.479 ,197.130 ], [3.000 ,2.474 ,0.948 ,0.233 ,0.000 ,0.000 ,0.000 ,17.698 ,281.840 ], [3.000 ,5.935 ,0.944 ,0.640 ,0.000 ,0.000 ,0.000 ,14.516 ,311.100 ], [0.000 ,0.546 ,1.093 ,0.526 ,0.000 ,0.000 ,0.000 ,41.857 ,298.900 ], [4.000 ,4.992 ,0.990 ,0.208 ,0.000 ,1.000 ,0.000 ,1.542 ,215.330 ], [1.000 ,8.150 ,0.922 ,0.082 ,0.000 ,1.000 ,0.000 ,10.127 ,254.890 ], [0.000 ,1.004 ,1.047 ,0.432 ,0.000 ,0.000 ,0.000 ,24.476 ,227.370 ], [0.000 ,0.375 ,1.098 ,0.235 ,0.000 ,0.000 ,0.000 ,41.857 ,298.790 ], [0.000 ,0.375 ,1.097 ,0.327 ,0.000 ,0.000 ,0.000 ,41.857 ,297.730 ], [0.000 ,10.265 ,0.996 ,-0.038 ,0.000 ,0.000 ,0.000 ,10.106 ,331.540 ], [2.000 ,6.731 ,0.962 ,0.574 ,0.000 ,0.000 ,0.000 ,9.855 ,273.880 ], [0.000 ,1.118 ,1.071 ,0.411 ,0.000 ,0.000 ,0.000 ,35.867 ,260.250 ], [0.000 ,0.071 ,1.122 ,0.302 ,0.000 ,0.000 ,0.000 ,48.018 ,337.010 ], [0.000 ,14.230 ,0.945 ,1.104 ,1.000 ,1.000 ,0.000 ,8.330 ,318.770 ], [2.000 ,4.260 ,0.982 ,0.055 ,0.000 ,1.000 ,0.000 ,4.451 ,206.040 ], [0.000 ,1.184 ,1.057 ,0.585 ,0.000 ,0.000 ,0.000 ,29.937 
,241.110 ], [0.000 ,1.035 ,1.058 ,0.656 ,0.000 ,0.000 ,0.000 ,29.937 ,241.290 ], [0.000 ,0.806 ,1.084 ,0.314 ,0.000 ,0.000 ,0.000 ,38.839 ,279.320 ], [1.000 ,4.610 ,0.994 ,1.081 ,0.000 ,4.000 ,0.000 ,6.682 ,307.100 ], [0.000 ,1.149 ,1.081 ,0.382 ,0.000 ,0.000 ,0.000 ,38.839 ,278.240 ], [0.000 ,0.806 ,1.084 ,0.447 ,0.000 ,0.000 ,0.000 ,38.839 ,278.110 ], [0.000 ,1.175 ,1.091 ,0.226 ,0.000 ,0.000 ,0.000 ,41.857 ,297.280 ], [1.000 ,9.882 ,0.907 ,-0.036 ,0.000 ,2.000 ,0.000 ,10.158 ,287.930 ], [1.000 ,3.901 ,0.925 ,0.106 ,0.000 ,1.000 ,0.000 ,5.947 ,223.830 ], [2.000 ,9.558 ,0.963 ,0.256 ,0.000 ,1.000 ,0.000 ,21.937 ,348.980 ], [3.000 ,7.335 ,0.951 ,0.349 ,0.000 ,0.000 ,0.000 ,12.577 ,290.300 ], [0.000 ,10.030 ,0.992 ,0.189 ,0.000 ,1.000 ,1.000 ,13.968 ,289.330 ], [4.000 ,3.180 ,1.005 ,0.166 ,0.000 ,1.000 ,0.000 ,3.843 ,219.670 ], [0.000 ,0.806 ,1.084 ,0.346 ,0.000 ,0.000 ,0.000 ,38.839 ,278.450 ], [3.000 ,0.000 ,1.049 ,0.743 ,0.000 ,0.000 ,0.000 ,5.727 ,194.590 ], [1.000 ,3.030 ,0.952 ,0.023 ,0.000 ,1.000 ,0.000 ,13.824 ,303.750 ], [1.000 ,3.611 ,0.992 ,0.461 ,0.000 ,0.000 ,0.000 ,16.979 ,304.960 ], [3.000 ,9.376 ,0.944 ,0.337 ,0.000 ,1.000 ,0.000 ,11.052 ,266.940 ], [1.000 ,7.751 ,0.922 ,0.145 ,0.000 ,1.000 ,0.000 ,9.155 ,271.600 ], [4.000 ,6.321 ,0.955 ,0.349 ,0.000 ,2.000 ,0.000 ,5.234 ,236.210 ], [0.000 ,4.778 ,0.969 ,0.162 ,0.000 ,1.000 ,0.000 ,13.399 ,304.120 ], [4.000 ,2.065 ,1.031 ,0.501 ,0.000 ,1.000 ,0.000 ,0.193 ,225.130 ], [1.000 ,10.091 ,0.983 ,0.999 ,0.000 ,1.000 ,0.000 ,15.264 ,402.670 ], [0.000 ,10.159 ,0.995 ,0.964 ,0.000 ,1.000 ,0.000 ,20.970 ,383.810 ], [0.000 ,6.624 ,0.979 ,0.518 ,0.000 ,1.000 ,0.000 ,11.581 ,284.520 ], [2.000 ,2.745 ,0.973 ,0.618 ,0.000 ,3.000 ,0.000 ,2.068 ,242.030 ], [0.000 ,4.671 ,1.000 ,0.295 ,0.000 ,0.000 ,0.000 ,5.740 ,233.600 ], [3.000 ,3.355 ,0.935 ,-0.229 ,0.000 ,0.000 ,0.000 ,16.946 ,291.190 ], [0.000 ,0.000 ,1.054 ,-0.120 ,0.000 ,4.000 ,0.000 ,0.053 ,163.680 ], [1.000 ,0.592 ,1.041 ,0.144 ,1.000 ,1.000 ,0.000 ,4.993 
,184.950 ], [0.000 ,7.265 ,0.996 ,0.359 ,0.000 ,2.000 ,0.000 ,1.065 ,282.350 ], [0.000 ,3.862 ,1.000 ,0.166 ,0.000 ,0.000 ,0.000 ,9.873 ,249.540 ], [0.000 ,0.000 ,1.032 ,-0.106 ,0.000 ,0.000 ,0.000 ,7.488 ,171.820 ], [4.000 ,1.351 ,0.962 ,-0.014 ,0.000 ,2.000 ,0.000 ,1.722 ,164.190 ], [0.000 ,0.480 ,1.058 ,0.184 ,0.000 ,0.000 ,0.000 ,16.429 ,242.590 ], [0.000 ,0.743 ,1.114 ,0.087 ,0.000 ,0.000 ,0.000 ,51.159 ,377.170 ], [4.000 ,7.945 ,0.957 ,0.918 ,0.000 ,2.000 ,0.000 ,0.268 ,314.340 ], [1.000 ,8.206 ,1.023 ,0.563 ,0.000 ,1.000 ,0.000 ,15.863 ,332.590 ], [2.000 ,5.409 ,0.992 ,0.547 ,1.000 ,0.000 ,0.000 ,13.101 ,323.700 ], [0.000 ,2.539 ,1.064 ,0.217 ,0.000 ,0.000 ,0.000 ,33.633 ,349.670 ], [3.000 ,5.384 ,0.955 ,0.657 ,0.000 ,0.000 ,0.000 ,25.380 ,346.220 ], [1.000 ,4.758 ,0.913 ,0.846 ,0.000 ,1.000 ,0.000 ,5.716 ,209.150 ], [0.000 ,20.587 ,0.886 ,1.684 ,0.000 ,1.000 ,1.000 ,58.403 ,456.280 ], [5.000 ,3.225 ,1.001 ,0.603 ,0.000 ,3.000 ,0.000 ,4.492 ,300.020 ], [6.000 ,1.804 ,0.983 ,0.953 ,0.000 ,0.000 ,0.000 ,1.488 ,158.800 ], [1.000 ,6.792 ,0.961 ,0.738 ,0.000 ,1.000 ,0.000 ,12.066 ,328.010 ], [1.000 ,10.919 ,0.889 ,0.718 ,0.000 ,1.000 ,0.000 ,22.983 ,488.270 ], [4.000 ,12.505 ,0.915 ,0.852 ,0.000 ,0.000 ,0.000 ,35.241 ,567.040 ], [1.000 ,10.087 ,0.963 ,1.299 ,0.000 ,1.000 ,0.000 ,17.784 ,400.580 ], [1.000 ,9.674 ,0.926 ,0.873 ,0.000 ,1.000 ,0.000 ,18.311 ,397.550 ], [0.000 ,2.007 ,1.079 ,0.120 ,0.000 ,0.000 ,0.000 ,37.645 ,381.870 ], [1.000 ,4.631 ,0.974 ,0.473 ,0.000 ,1.000 ,0.000 ,11.626 ,295.420 ], [1.000 ,7.958 ,0.891 ,0.426 ,0.000 ,1.000 ,0.000 ,14.936 ,322.080 ], [1.000 ,12.806 ,0.898 ,0.975 ,0.000 ,1.000 ,0.000 ,18.029 ,429.230 ], [2.000 ,2.181 ,0.955 ,0.516 ,0.000 ,1.000 ,0.000 ,4.370 ,203.670 ], [1.000 ,14.030 ,0.918 ,0.919 ,0.000 ,2.000 ,0.000 ,13.285 ,334.160 ], [2.000 ,3.663 ,1.004 ,0.274 ,0.000 ,3.000 ,0.000 ,7.987 ,344.260 ], [1.000 ,5.290 ,0.968 ,0.647 ,0.000 ,1.000 ,0.000 ,5.604 ,293.540 ], [1.000 ,13.464 ,0.916 ,0.481 ,1.000 ,1.000 ,0.000 ,5.279 
,324.290 ], [4.000 ,2.071 ,1.033 ,1.005 ,0.000 ,0.000 ,0.000 ,25.710 ,313.520 ], [4.000 ,4.739 ,0.987 ,0.871 ,1.000 ,4.000 ,0.000 ,0.677 ,310.350 ], [5.000 ,4.908 ,0.966 ,0.529 ,0.000 ,4.000 ,0.000 ,2.778 ,323.160 ], [2.000 ,4.017 ,0.962 ,0.368 ,1.000 ,1.000 ,0.000 ,7.616 ,334.380 ], [3.000 ,9.429 ,0.934 ,0.534 ,1.000 ,0.000 ,0.000 ,14.213 ,279.350 ], [1.000 ,5.491 ,0.976 ,0.286 ,0.000 ,2.000 ,0.000 ,17.797 ,370.090 ], [2.000 ,4.579 ,0.992 ,0.252 ,0.000 ,1.000 ,0.000 ,7.817 ,237.730 ], [2.000 ,0.822 ,0.983 ,0.044 ,0.000 ,1.000 ,0.000 ,2.645 ,151.640 ], [2.000 ,1.450 ,0.998 ,0.177 ,0.000 ,1.000 ,0.000 ,3.749 ,185.740 ], [2.000 ,10.664 ,0.972 ,0.668 ,0.000 ,2.000 ,0.000 ,9.198 ,336.870 ], [1.000 ,7.958 ,0.955 ,0.738 ,0.000 ,1.000 ,0.000 ,14.936 ,322.450 ], [5.000 ,3.720 ,0.939 ,0.580 ,0.000 ,4.000 ,0.000 ,1.452 ,319.820 ], [0.000 ,3.486 ,1.029 ,-0.028 ,0.000 ,0.000 ,0.000 ,20.902 ,321.960 ], [1.000 ,3.478 ,0.940 ,0.520 ,0.000 ,1.000 ,0.000 ,9.858 ,196.210 ], [1.000 ,4.948 ,0.922 ,0.570 ,0.000 ,1.000 ,0.000 ,12.434 ,247.980 ], [3.000 ,5.855 ,0.960 ,0.358 ,1.000 ,2.000 ,0.000 ,1.490 ,246.620 ], [3.000 ,7.774 ,0.972 ,0.556 ,1.000 ,2.000 ,0.000 ,3.029 ,281.310 ], [2.000 ,10.401 ,0.959 ,0.625 ,0.000 ,2.000 ,0.000 ,7.628 ,284.810 ], [1.000 ,5.389 ,0.983 ,-0.010 ,0.000 ,1.000 ,0.000 ,7.164 ,220.740 ], [2.000 ,8.403 ,0.868 ,-0.049 ,0.000 ,1.000 ,0.000 ,6.763 ,324.160 ], [0.000 ,10.734 ,1.003 ,1.345 ,0.000 ,1.000 ,0.000 ,29.164 ,396.070 ], [4.000 ,6.447 ,0.964 ,0.307 ,0.000 ,4.000 ,0.000 ,3.119 ,356.980 ], [4.000 ,4.444 ,0.924 ,0.884 ,0.000 ,4.000 ,0.000 ,4.038 ,341.470 ], [0.000 ,11.662 ,0.956 ,-0.260 ,0.000 ,0.000 ,0.000 ,11.335 ,282.270 ], [4.000 ,4.927 ,0.965 ,0.537 ,0.000 ,0.000 ,0.000 ,11.821 ,260.240 ], [1.000 ,10.087 ,0.873 ,1.281 ,0.000 ,1.000 ,0.000 ,17.784 ,397.500 ], [0.000 ,2.751 ,0.985 ,0.421 ,0.000 ,3.000 ,0.000 ,2.413 ,209.530 ], [6.000 ,7.703 ,0.930 ,0.720 ,0.000 ,4.000 ,0.000 ,2.598 ,356.340 ], [3.000 ,8.487 ,0.956 ,0.636 ,0.000 ,0.000 ,0.000 ,14.244 
,293.050 ], [5.000 ,7.964 ,0.945 ,0.709 ,0.000 ,4.000 ,0.000 ,3.697 ,342.440 ], [2.000 ,14.919 ,0.926 ,1.230 ,0.000 ,1.000 ,0.000 ,23.140 ,461.160 ], [10.000 ,6.171 ,0.931 ,1.211 ,0.000 ,3.000 ,0.000 ,1.991 ,352.540 ], [3.000 ,6.376 ,1.003 ,0.490 ,0.000 ,0.000 ,0.000 ,9.240 ,268.580 ], [5.000 ,6.264 ,0.933 ,0.590 ,0.000 ,5.000 ,0.000 ,0.222 ,361.100 ], [6.000 ,9.689 ,0.940 ,0.884 ,0.000 ,4.000 ,0.000 ,6.045 ,392.810 ], [2.000 ,0.653 ,1.040 ,0.813 ,0.000 ,0.000 ,0.000 ,2.029 ,215.460 ], [5.000 ,7.834 ,0.944 ,0.744 ,0.000 ,3.000 ,0.000 ,3.934 ,361.790 ], [0.000 ,4.673 ,1.000 ,0.444 ,0.000 ,3.000 ,0.000 ,4.437 ,244.680 ], [0.000 ,5.530 ,0.937 ,0.903 ,0.000 ,3.000 ,0.000 ,5.617 ,259.650 ], [0.000 ,2.923 ,0.996 ,0.297 ,0.000 ,3.000 ,0.000 ,4.421 ,229.150 ], [0.000 ,5.190 ,0.938 ,0.594 ,0.000 ,3.000 ,0.000 ,8.376 ,276.580 ], [0.000 ,3.288 ,0.998 ,0.293 ,0.000 ,3.000 ,0.000 ,5.016 ,242.270 ], [1.000 ,2.414 ,0.988 ,0.224 ,0.000 ,3.000 ,0.000 ,2.563 ,237.680 ], [1.000 ,6.551 ,0.952 ,0.381 ,0.000 ,3.000 ,0.000 ,5.843 ,288.410 ], [0.000 ,4.368 ,0.986 ,0.269 ,0.000 ,3.000 ,0.000 ,6.286 ,259.980 ], [0.000 ,5.396 ,0.987 ,0.389 ,0.000 ,3.000 ,0.000 ,7.656 ,276.220 ], [0.000 ,3.931 ,0.984 ,0.435 ,0.000 ,3.000 ,0.000 ,5.638 ,246.810 ], [0.000 ,7.870 ,0.979 ,0.154 ,0.000 ,3.000 ,0.000 ,6.896 ,277.530 ], [0.000 ,6.332 ,0.954 ,0.663 ,0.000 ,3.000 ,0.000 ,5.617 ,262.640 ], [0.000 ,3.632 ,0.975 ,0.525 ,0.000 ,3.000 ,0.000 ,3.366 ,226.350 ], [0.000 ,4.461 ,0.979 ,0.240 ,0.000 ,3.000 ,0.000 ,4.437 ,242.950 ], [0.000 ,6.551 ,0.970 ,0.857 ,0.000 ,3.000 ,0.000 ,5.617 ,259.740 ], [0.000 ,2.241 ,1.076 ,0.005 ,0.000 ,0.000 ,0.000 ,37.645 ,382.730 ], [0.000 ,1.628 ,1.080 ,-0.342 ,0.000 ,0.000 ,0.000 ,41.760 ,410.320 ], [3.000 ,1.800 ,0.997 ,0.433 ,0.000 ,0.000 ,0.000 ,5.581 ,186.350 ], [3.000 ,0.000 ,1.047 ,0.345 ,0.000 ,0.000 ,0.000 ,2.257 ,121.240 ], [3.000 ,0.000 ,1.065 ,0.259 ,0.000 ,0.000 ,0.000 ,2.810 ,140.540 ], [3.000 ,0.000 ,1.092 ,0.148 ,0.000 ,0.000 ,0.000 ,5.188 ,159.030 ], [1.000 
,1.386 ,0.951 ,0.297 ,0.000 ,1.000 ,0.000 ,3.306 ,174.590 ], [1.000 ,1.831 ,0.946 ,0.407 ,0.000 ,1.000 ,0.000 ,4.451 ,191.170 ], [1.000 ,0.774 ,0.956 ,0.082 ,0.000 ,1.000 ,0.000 ,0.433 ,148.560 ], [1.000 ,0.397 ,0.987 ,0.290 ,0.000 ,1.000 ,0.000 ,2.645 ,154.780 ], [3.000 ,1.903 ,0.995 ,-0.154 ,0.000 ,2.000 ,0.000 ,3.295 ,259.040 ], [3.000 ,3.227 ,0.954 ,0.175 ,0.000 ,2.000 ,0.000 ,5.808 ,289.970 ], [3.000 ,3.831 ,0.941 ,0.460 ,0.000 ,2.000 ,0.000 ,7.241 ,306.040 ], [3.000 ,8.197 ,0.949 ,0.398 ,0.000 ,2.000 ,0.000 ,10.410 ,339.270 ], ])
agpl-3.0
zerolab/wagtail
wagtail/core/migrations/0015_add_more_verbose_names.py
24
4248
# -*- coding: utf-8 -*-
# Schema-neutral migration: attaches human-readable verbose names to the
# core wagtail models and their fields (no database changes are implied
# beyond what AlterField records for Django's migration state).
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('wagtailcore', '0014_add_verbose_name'),
    ]

    operations = [
        # Model-level verbose names.
        migrations.AlterModelOptions(
            name='grouppagepermission',
            options={'verbose_name': 'Group Page Permission'},
        ),
        migrations.AlterModelOptions(
            name='pagerevision',
            options={'verbose_name': 'Page Revision'},
        ),
        migrations.AlterModelOptions(
            name='pageviewrestriction',
            options={'verbose_name': 'Page View Restriction'},
        ),
        migrations.AlterModelOptions(
            name='site',
            options={'verbose_name': 'Site'},
        ),
        # Field-level verbose names on Page.
        migrations.AlterField(
            model_name='page',
            name='content_type',
            field=models.ForeignKey(
                on_delete=models.CASCADE,
                related_name='pages',
                verbose_name='Content type',
                to='contenttypes.ContentType',
            ),
        ),
        migrations.AlterField(
            model_name='page',
            name='expired',
            field=models.BooleanField(
                default=False, verbose_name='Expired', editable=False,
            ),
        ),
        migrations.AlterField(
            model_name='page',
            name='first_published_at',
            field=models.DateTimeField(
                verbose_name='First published at', null=True, editable=False,
            ),
        ),
        migrations.AlterField(
            model_name='page',
            name='has_unpublished_changes',
            field=models.BooleanField(
                default=False,
                verbose_name='Has unpublished changes',
                editable=False,
            ),
        ),
        migrations.AlterField(
            model_name='page',
            name='latest_revision_created_at',
            field=models.DateTimeField(
                verbose_name='Latest revision created at',
                null=True,
                editable=False,
            ),
        ),
        migrations.AlterField(
            model_name='page',
            name='live',
            field=models.BooleanField(
                default=True, verbose_name='Live', editable=False,
            ),
        ),
        migrations.AlterField(
            model_name='page',
            name='locked',
            field=models.BooleanField(
                default=False, verbose_name='Locked', editable=False,
            ),
        ),
        migrations.AlterField(
            model_name='page',
            name='owner',
            field=models.ForeignKey(
                related_name='owned_pages',
                on_delete=django.db.models.deletion.SET_NULL,
                blank=True,
                editable=False,
                to=settings.AUTH_USER_MODEL,
                null=True,
                verbose_name='Owner',
            ),
        ),
        migrations.AlterField(
            model_name='page',
            name='url_path',
            field=models.CharField(
                verbose_name='URL path',
                max_length=255,
                editable=False,
                blank=True,
            ),
        ),
        # Field-level verbose names on PageRevision.
        migrations.AlterField(
            model_name='pagerevision',
            name='approved_go_live_at',
            field=models.DateTimeField(
                null=True, verbose_name='Approved go live at', blank=True,
            ),
        ),
        migrations.AlterField(
            model_name='pagerevision',
            name='content_json',
            field=models.TextField(verbose_name='Content JSON'),
        ),
        migrations.AlterField(
            model_name='pagerevision',
            name='created_at',
            field=models.DateTimeField(verbose_name='Created at'),
        ),
        migrations.AlterField(
            model_name='pagerevision',
            name='page',
            field=models.ForeignKey(
                on_delete=models.CASCADE,
                related_name='revisions',
                verbose_name='Page',
                to='wagtailcore.Page',
            ),
        ),
        migrations.AlterField(
            model_name='pagerevision',
            name='submitted_for_moderation',
            field=models.BooleanField(
                default=False, verbose_name='Submitted for moderation',
            ),
        ),
        migrations.AlterField(
            model_name='pagerevision',
            name='user',
            field=models.ForeignKey(
                on_delete=models.CASCADE,
                verbose_name='User',
                blank=True,
                to=settings.AUTH_USER_MODEL,
                null=True,
            ),
        ),
    ]
bsd-3-clause
bradsk88/WinkHouse
lib/flask/logging.py
838
1398
# -*- coding: utf-8 -*- """ flask.logging ~~~~~~~~~~~~~ Implements the logging support for Flask. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG def create_logger(app): """Creates a logger for the given application. This logger works similar to a regular Python logger but changes the effective logging level based on the application's debug flag. Furthermore this function also removes all attached handlers in case there was a logger with the log name before. """ Logger = getLoggerClass() class DebugLogger(Logger): def getEffectiveLevel(x): if x.level == 0 and app.debug: return DEBUG return Logger.getEffectiveLevel(x) class DebugHandler(StreamHandler): def emit(x, record): StreamHandler.emit(x, record) if app.debug else None handler = DebugHandler() handler.setLevel(DEBUG) handler.setFormatter(Formatter(app.debug_log_format)) logger = getLogger(app.logger_name) # just in case that was not a new logger, get rid of all the handlers # already attached to it. del logger.handlers[:] logger.__class__ = DebugLogger logger.addHandler(handler) return logger
apache-2.0
JJones131/steam-engage-monitor
lib/werkzeug/http.py
317
33404
# -*- coding: utf-8 -*- """ werkzeug.http ~~~~~~~~~~~~~ Werkzeug comes with a bunch of utilities that help Werkzeug to deal with HTTP data. Most of the classes and functions provided by this module are used by the wrappers, but they are useful on their own, too, especially if the response and request objects are not used. This covers some of the more HTTP centric features of WSGI, some other utilities such as cookie handling are documented in the `werkzeug.utils` module. :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re from time import time, gmtime try: from email.utils import parsedate_tz except ImportError: # pragma: no cover from email.Utils import parsedate_tz try: from urllib2 import parse_http_list as _parse_list_header except ImportError: # pragma: no cover from urllib.request import parse_http_list as _parse_list_header from datetime import datetime, timedelta from hashlib import md5 import base64 from werkzeug._internal import _cookie_quote, _make_cookie_domain, \ _cookie_parse_impl from werkzeug._compat import to_unicode, iteritems, text_type, \ string_types, try_coerce_native, to_bytes, PY2, \ integer_types # incorrect _cookie_charset = 'latin1' _accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?') _token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" '^_`abcdefghijklmnopqrstuvwxyz|~') _etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)') _unsafe_header_chars = set('()<>@,;:\"/[]?={} \t') _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile(r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' % (_quoted_string_re, _quoted_string_re)) _entity_headers = frozenset([ 'allow', 'content-encoding', 'content-language', 'content-length', 'content-location', 'content-md5', 'content-range', 'content-type', 'expires', 'last-modified' ]) _hop_by_hop_headers = frozenset([ 'connection', 'keep-alive', 
'proxy-authenticate', 'proxy-authorization', 'te', 'trailer', 'transfer-encoding', 'upgrade' ]) HTTP_STATUS_CODES = { 100: 'Continue', 101: 'Switching Protocols', 102: 'Processing', 200: 'OK', 201: 'Created', 202: 'Accepted', 203: 'Non Authoritative Information', 204: 'No Content', 205: 'Reset Content', 206: 'Partial Content', 207: 'Multi Status', 226: 'IM Used', # see RFC 3229 300: 'Multiple Choices', 301: 'Moved Permanently', 302: 'Found', 303: 'See Other', 304: 'Not Modified', 305: 'Use Proxy', 307: 'Temporary Redirect', 400: 'Bad Request', 401: 'Unauthorized', 402: 'Payment Required', # unused 403: 'Forbidden', 404: 'Not Found', 405: 'Method Not Allowed', 406: 'Not Acceptable', 407: 'Proxy Authentication Required', 408: 'Request Timeout', 409: 'Conflict', 410: 'Gone', 411: 'Length Required', 412: 'Precondition Failed', 413: 'Request Entity Too Large', 414: 'Request URI Too Long', 415: 'Unsupported Media Type', 416: 'Requested Range Not Satisfiable', 417: 'Expectation Failed', 418: 'I\'m a teapot', # see RFC 2324 422: 'Unprocessable Entity', 423: 'Locked', 424: 'Failed Dependency', 426: 'Upgrade Required', 428: 'Precondition Required', # see RFC 6585 429: 'Too Many Requests', 431: 'Request Header Fields Too Large', 449: 'Retry With', # proprietary MS extension 500: 'Internal Server Error', 501: 'Not Implemented', 502: 'Bad Gateway', 503: 'Service Unavailable', 504: 'Gateway Timeout', 505: 'HTTP Version Not Supported', 507: 'Insufficient Storage', 510: 'Not Extended' } def wsgi_to_bytes(data): """coerce wsgi unicode represented bytes to real ones """ if isinstance(data, bytes): return data return data.encode('latin1') #XXX: utf8 fallback? def bytes_to_wsgi(data): assert isinstance(data, bytes), 'data must be bytes' if isinstance(data, str): return data else: return data.decode('latin1') def quote_header_value(value, extra_chars='', allow_token=True): """Quote a header value if necessary. .. versionadded:: 0.5 :param value: the value to quote. 
:param extra_chars: a list of extra characters to skip quoting. :param allow_token: if this is enabled token values are returned unchanged. """ if isinstance(value, bytes): value = bytes_to_wsgi(value) value = str(value) if allow_token: token_chars = _token_chars | set(extra_chars) if set(value).issubset(token_chars): return value return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"') def unquote_header_value(value, is_filename=False): r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. .. versionadded:: 0.5 :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] # if this is a filename and the starting characters look like # a UNC path, then just return the value without quotes. Using the # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. if not is_filename or value[:2] != '\\\\': return value.replace('\\\\', '\\').replace('\\"', '"') return value def dump_options_header(header, options): """The reverse function to :func:`parse_options_header`. :param header: the header to dump :param options: a dict of options to append. """ segments = [] if header is not None: segments.append(header) for key, value in iteritems(options): if value is None: segments.append(key) else: segments.append('%s=%s' % (key, quote_header_value(value))) return '; '.join(segments) def dump_header(iterable, allow_token=True): """Dump an HTTP header again. This is the reversal of :func:`parse_list_header`, :func:`parse_set_header` and :func:`parse_dict_header`. 
This also quotes strings that include an equals sign unless you pass it as dict of key, value pairs. >>> dump_header({'foo': 'bar baz'}) 'foo="bar baz"' >>> dump_header(('foo', 'bar baz')) 'foo, "bar baz"' :param iterable: the iterable or dict of values to quote. :param allow_token: if set to `False` tokens as values are disallowed. See :func:`quote_header_value` for more details. """ if isinstance(iterable, dict): items = [] for key, value in iteritems(iterable): if value is None: items.append(key) else: items.append('%s=%s' % ( key, quote_header_value(value, allow_token=allow_token) )) else: items = [quote_header_value(x, allow_token=allow_token) for x in iterable] return ', '.join(items) def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. 
:return: :class:`list` """ result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_dict_header(value, cls=dict): """Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict (or any other mapping object created from the type with a dict like interface provided by the `cls` arugment): >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. .. versionchanged:: 0.9 Added support for `cls` argument. :param value: a string with a dict header. :param cls: callable to use for storage of parsed results. :return: an instance of `cls` """ result = cls() if not isinstance(value, text_type): #XXX: validate value = bytes_to_wsgi(value) for item in _parse_list_header(value): if '=' not in item: result[item] = None continue name, value = item.split('=', 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value return result def parse_options_header(value): """Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('text/html; charset=utf8') ('text/html', {'charset': 'utf8'}) This should not be used to parse ``Cache-Control`` like headers that use a slightly different format. For these headers use the :func:`parse_dict_header` function. .. versionadded:: 0.5 :param value: the header to parse. 
:return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value, key == 'filename') yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = next(parts)[0] extra = dict(parts) return name, extra def parse_accept_header(value, cls=None): """Parses an HTTP Accept-* header. This does not implement a complete valid algorithm but one that supports at least value and quality extraction. Returns a new :class:`Accept` object (basically a list of ``(value, quality)`` tuples sorted by the quality with some additional accessor methods). The second parameter can be a subclass of :class:`Accept` that is created with the parsed values and returned. :param value: the accept header string to be parsed. :param cls: the wrapper class for the return value (can be :class:`Accept` or a subclass thereof) :return: an instance of `cls`. """ if cls is None: cls = Accept if not value: return cls(None) result = [] for match in _accept_re.finditer(value): quality = match.group(2) if not quality: quality = 1 else: quality = max(min(float(quality), 1), 0) result.append((match.group(1), quality)) return cls(result) def parse_cache_control_header(value, on_update=None, cls=None): """Parse a cache control header. The RFC differs between response and request cache control, this method does not. It's your responsibility to not use the wrong control statements. .. versionadded:: 0.5 The `cls` was added. If not specified an immutable :class:`~werkzeug.datastructures.RequestCacheControl` is returned. :param value: a cache control header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.CacheControl` object is changed. :param cls: the class for the returned object. By default :class:`~werkzeug.datastructures.RequestCacheControl` is used. 
:return: a `cls` object. """ if cls is None: cls = RequestCacheControl if not value: return cls(None, on_update) return cls(parse_dict_header(value), on_update) def parse_set_header(value, on_update=None): """Parse a set-like header and return a :class:`~werkzeug.datastructures.HeaderSet` object: >>> hs = parse_set_header('token, "quoted value"') The return value is an object that treats the items case-insensitively and keeps the order of the items: >>> 'TOKEN' in hs True >>> hs.index('quoted value') 1 >>> hs HeaderSet(['token', 'quoted value']) To create a header from the :class:`HeaderSet` again, use the :func:`dump_header` function. :param value: a set header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.HeaderSet` object is changed. :return: a :class:`~werkzeug.datastructures.HeaderSet` """ if not value: return HeaderSet(None, on_update) return HeaderSet(parse_list_header(value), on_update) def parse_authorization_header(value): """Parse an HTTP basic/digest authorization header transmitted by the web browser. The return value is either `None` if the header was invalid or not given, otherwise an :class:`~werkzeug.datastructures.Authorization` object. :param value: the authorization header to parse. :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`. 
""" if not value: return value = wsgi_to_bytes(value) try: auth_type, auth_info = value.split(None, 1) auth_type = auth_type.lower() except ValueError: return if auth_type == b'basic': try: username, password = base64.b64decode(auth_info).split(b':', 1) except Exception as e: return return Authorization('basic', {'username': bytes_to_wsgi(username), 'password': bytes_to_wsgi(password)}) elif auth_type == b'digest': auth_map = parse_dict_header(auth_info) for key in 'username', 'realm', 'nonce', 'uri', 'response': if not key in auth_map: return if 'qop' in auth_map: if not auth_map.get('nc') or not auth_map.get('cnonce'): return return Authorization('digest', auth_map) def parse_www_authenticate_header(value, on_update=None): """Parse an HTTP WWW-Authenticate header into a :class:`~werkzeug.datastructures.WWWAuthenticate` object. :param value: a WWW-Authenticate header to parse. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.WWWAuthenticate` object is changed. :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object. """ if not value: return WWWAuthenticate(on_update=on_update) try: auth_type, auth_info = value.split(None, 1) auth_type = auth_type.lower() except (ValueError, AttributeError): return WWWAuthenticate(value.strip().lower(), on_update=on_update) return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update) def parse_if_range_header(value): """Parses an if-range header which can be an etag or a date. Returns a :class:`~werkzeug.datastructures.IfRange` object. .. versionadded:: 0.7 """ if not value: return IfRange() date = parse_date(value) if date is not None: return IfRange(date=date) # drop weakness information return IfRange(unquote_etag(value)[0]) def parse_range_header(value, make_inclusive=True): """Parses a range header into a :class:`~werkzeug.datastructures.Range` object. If the header is missing or malformed `None` is returned. 
`ranges` is a list of ``(start, stop)`` tuples where the ranges are non-inclusive. .. versionadded:: 0.7 """ if not value or '=' not in value: return None ranges = [] last_end = 0 units, rng = value.split('=', 1) units = units.strip().lower() for item in rng.split(','): item = item.strip() if '-' not in item: return None if item.startswith('-'): if last_end < 0: return None begin = int(item) end = None last_end = -1 elif '-' in item: begin, end = item.split('-', 1) begin = int(begin) if begin < last_end or last_end < 0: return None if end: end = int(end) + 1 if begin >= end: return None else: end = None last_end = end ranges.append((begin, end)) return Range(units, ranges) def parse_content_range_header(value, on_update=None): """Parses a range header into a :class:`~werkzeug.datastructures.ContentRange` object or `None` if parsing is not possible. .. versionadded:: 0.7 :param value: a content range header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.ContentRange` object is changed. """ if value is None: return None try: units, rangedef = (value or '').strip().split(None, 1) except ValueError: return None if '/' not in rangedef: return None rng, length = rangedef.split('/', 1) if length == '*': length = None elif length.isdigit(): length = int(length) else: return None if rng == '*': return ContentRange(units, None, None, length, on_update=on_update) elif '-' not in rng: return None start, stop = rng.split('-', 1) try: start = int(start) stop = int(stop) + 1 except ValueError: return None if is_byte_range_valid(start, stop, length): return ContentRange(units, start, stop, length, on_update=on_update) def quote_etag(etag, weak=False): """Quote an etag. :param etag: the etag to quote. :param weak: set to `True` to tag it "weak". 
""" if '"' in etag: raise ValueError('invalid etag') etag = '"%s"' % etag if weak: etag = 'w/' + etag return etag def unquote_etag(etag): """Unquote a single etag: >>> unquote_etag('w/"bar"') ('bar', True) >>> unquote_etag('"bar"') ('bar', False) :param etag: the etag identifier to unquote. :return: a ``(etag, weak)`` tuple. """ if not etag: return None, None etag = etag.strip() weak = False if etag[:2] in ('w/', 'W/'): weak = True etag = etag[2:] if etag[:1] == etag[-1:] == '"': etag = etag[1:-1] return etag, weak def parse_etags(value): """Parse an etag header. :param value: the tag header to parse :return: an :class:`~werkzeug.datastructures.ETags` object. """ if not value: return ETags() strong = [] weak = [] end = len(value) pos = 0 while pos < end: match = _etag_re.match(value, pos) if match is None: break is_weak, quoted, raw = match.groups() if raw == '*': return ETags(star_tag=True) elif quoted: raw = quoted if is_weak: weak.append(raw) else: strong.append(raw) pos = match.end() return ETags(strong, weak) def generate_etag(data): """Generate an etag for some data.""" return md5(data).hexdigest() def parse_date(value): """Parse one of the following date formats into a datetime object: .. sourcecode:: text Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format If parsing fails the return value is `None`. :param value: a string with a supported date format. :return: a :class:`datetime.datetime` object. """ if value: t = parsedate_tz(value.strip()) if t is not None: try: year = t[0] # unfortunately that function does not tell us if two digit # years were part of the string, or if they were prefixed # with two zeroes. 
So what we do is to assume that 69-99 # refer to 1900, and everything below to 2000 if year >= 0 and year <= 68: year += 2000 elif year >= 69 and year <= 99: year += 1900 return datetime(*((year,) + t[1:7])) - \ timedelta(seconds=t[-1] or 0) except (ValueError, OverflowError): return None def _dump_date(d, delim): """Used for `http_date` and `cookie_date`.""" if d is None: d = gmtime() elif isinstance(d, datetime): d = d.utctimetuple() elif isinstance(d, (integer_types, float)): d = gmtime(d) return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % ( ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday], d.tm_mday, delim, ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')[d.tm_mon - 1], delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec ) def cookie_date(expires=None): """Formats the time to ensure compatibility with Netscape's cookie standard. Accepts a floating point number expressed in seconds since the epoch in, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``. :param expires: If provided that date is used, otherwise the current. """ return _dump_date(expires, '-') def http_date(timestamp=None): """Formats the time to match the RFC1123 date format. Accepts a floating point number expressed in seconds since the epoch in, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``. :param timestamp: If provided that date is used, otherwise the current. """ return _dump_date(timestamp, ' ') def is_resource_modified(environ, etag=None, data=None, last_modified=None): """Convenience method for conditional requests. :param environ: the WSGI environment of the request to be checked. :param etag: the etag for the response for comparison. 
:param data: or alternatively the data of the response to automatically generate an etag using :func:`generate_etag`. :param last_modified: an optional date of the last modification. :return: `True` if the resource was modified, otherwise `False`. """ if etag is None and data is not None: etag = generate_etag(data) elif data is not None: raise TypeError('both data and etag given') if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'): return False unmodified = False if isinstance(last_modified, string_types): last_modified = parse_date(last_modified) # ensure that microsecond is zero because the HTTP spec does not transmit # that either and we might have some false positives. See issue #39 if last_modified is not None: last_modified = last_modified.replace(microsecond=0) modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE')) if modified_since and last_modified and last_modified <= modified_since: unmodified = True if etag: if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH')) if if_none_match: unmodified = if_none_match.contains_raw(etag) return not unmodified def remove_entity_headers(headers, allowed=('expires', 'content-location')): """Remove all entity headers from a list or :class:`Headers` object. This operation works in-place. `Expires` and `Content-Location` headers are by default not removed. The reason for this is :rfc:`2616` section 10.3.5 which specifies some entity headers that should be sent. .. versionchanged:: 0.5 added `allowed` parameter. :param headers: a list or :class:`Headers` object. :param allowed: a list of headers that should still be allowed even though they are entity headers. """ allowed = set(x.lower() for x in allowed) headers[:] = [(key, value) for key, value in headers if not is_entity_header(key) or key.lower() in allowed] def remove_hop_by_hop_headers(headers): """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or :class:`Headers` object. This operation works in-place. .. 
versionadded:: 0.5 :param headers: a list or :class:`Headers` object. """ headers[:] = [(key, value) for key, value in headers if not is_hop_by_hop_header(key)] def is_entity_header(header): """Check if a header is an entity header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise. """ return header.lower() in _entity_headers def is_hop_by_hop_header(header): """Check if a header is an HTTP/1.1 "Hop-by-Hop" header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise. """ return header.lower() in _hop_by_hop_headers def parse_cookie(header, charset='utf-8', errors='replace', cls=None): """Parse a cookie. Either from a string or WSGI environ. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a :exc:`HTTPUnicodeError` is raised. .. versionchanged:: 0.5 This function now returns a :class:`TypeConversionDict` instead of a regular dict. The `cls` parameter was added. :param header: the header to be used to parse the cookie. Alternatively this can be a WSGI environment. :param charset: the charset for the cookie values. :param errors: the error behavior for the charset decoding. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`TypeConversionDict` is used. """ if isinstance(header, dict): header = header.get('HTTP_COOKIE', '') elif header is None: header = '' # If the value is an unicode string it's mangled through latin1. This # is done because on PEP 3333 on Python 3 all headers are assumed latin1 # which however is incorrect for cookies, which are sent in page encoding. 
# As a result we if isinstance(header, text_type): header = header.encode('latin1', 'replace') if cls is None: cls = TypeConversionDict def _parse_pairs(): for key, val in _cookie_parse_impl(header): key = to_unicode(key, charset, errors, allow_none_charset=True) val = to_unicode(val, charset, errors, allow_none_charset=True) yield try_coerce_native(key), val return cls(_parse_pairs()) def dump_cookie(key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False, charset='utf-8', sync_expires=True): """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The parameters are the same as in the cookie Morsel object in the Python standard library but it accepts unicode data, too. On Python 3 the return value of this function will be a unicode string, on Python 2 it will be a native string. In both cases the return value is usually restricted to ascii as the vast majority of values are properly escaped, but that is no guarantee. If a unicode string is returned it's tunneled through latin1 as required by PEP 3333. The return value is not ASCII safe if the key contains unicode characters. This is technically against the specification but happens in the wild. It's strongly recommended to not use non-ASCII values for the keys. :param max_age: should be a number of seconds, or `None` (default) if the cookie should last only as long as the client's browser session. Additionally `timedelta` objects are accepted, too. :param expires: should be a `datetime` object or unix timestamp. :param path: limits the cookie to a given path, per default it will span the whole domain. :param domain: Use this if you want to set a cross-domain cookie. For example, ``domain=".example.com"`` will set a cookie that is readable by the domain ``www.example.com``, ``foo.example.com`` etc. Otherwise, a cookie will only be readable by the domain that set it. 
:param secure: The cookie will only be available via HTTPS :param httponly: disallow JavaScript to access the cookie. This is an extension to the cookie standard and probably not supported by all browsers. :param charset: the encoding for unicode values. :param sync_expires: automatically set expires if max_age is defined but expires not. """ key = to_bytes(key, charset) value = to_bytes(value, charset) if path is not None: path = iri_to_uri(path, charset) domain = _make_cookie_domain(domain) if isinstance(max_age, timedelta): max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds if expires is not None: if not isinstance(expires, string_types): expires = cookie_date(expires) elif max_age is not None and sync_expires: expires = to_bytes(cookie_date(time() + max_age)) buf = [key + b'=' + _cookie_quote(value)] # XXX: In theory all of these parameters that are not marked with `None` # should be quoted. Because stdlib did not quote it before I did not # want to introduce quoting there now. for k, v, q in ((b'Domain', domain, True), (b'Expires', expires, False,), (b'Max-Age', max_age, False), (b'Secure', secure, None), (b'HttpOnly', httponly, None), (b'Path', path, False)): if q is None: if v: buf.append(k) continue if v is None: continue tmp = bytearray(k) if not isinstance(v, (bytes, bytearray)): v = to_bytes(text_type(v), charset) if q: v = _cookie_quote(v) tmp += b'=' + v buf.append(bytes(tmp)) # The return value will be an incorrectly encoded latin1 header on # Python 3 for consistency with the headers object and a bytestring # on Python 2 because that's how the API makes more sense. rv = b'; '.join(buf) if not PY2: rv = rv.decode('latin1') return rv def is_byte_range_valid(start, stop, length): """Checks if a given byte content range is valid for the given length. .. 
versionadded:: 0.7 """ if (start is None) != (stop is None): return False elif start is None: return length is None or length >= 0 elif length is None: return 0 <= start < stop elif start >= stop: return False return 0 <= start < length # circular dependency fun from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \ WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \ RequestCacheControl # DEPRECATED # backwards compatible imports from werkzeug.datastructures import MIMEAccept, CharsetAccept, \ LanguageAccept, Headers from werkzeug.urls import iri_to_uri
apache-2.0
NeuralEnsemble/neuroConstruct
lib/jython/Lib/re.py
153
12959
# # Secret Labs' Regular Expression Engine # # re-compatible interface for the sre matching engine # # Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. # # This version of the SRE library can be redistributed under CNRI's # Python 1.6 license. For any other use, please contact Secret Labs # AB (info@pythonware.com). # # Portions of this engine have been developed in cooperation with # CNRI. Hewlett-Packard provided funding for 1.6 integration and # other compatibility work. # r"""Support for regular expressions (RE). This module provides regular expression matching operations similar to those found in Perl. It supports both 8-bit and Unicode strings; both the pattern and the strings being processed can contain null bytes and characters outside the US ASCII range. Regular expressions can contain both special and ordinary characters. Most ordinary characters, like "A", "a", or "0", are the simplest regular expressions; they simply match themselves. You can concatenate ordinary characters, so last matches the string 'last'. The special characters are: "." Matches any character except a newline. "^" Matches the start of the string. "$" Matches the end of the string or just before the newline at the end of the string. "*" Matches 0 or more (greedy) repetitions of the preceding RE. Greedy means that it will match as many repetitions as possible. "+" Matches 1 or more (greedy) repetitions of the preceding RE. "?" Matches 0 or 1 (greedy) of the preceding RE. *?,+?,?? Non-greedy versions of the previous three special characters. {m,n} Matches from m to n repetitions of the preceding RE. {m,n}? Non-greedy version of the above. "\\" Either escapes special characters or signals a special sequence. [] Indicates a set of characters. A "^" as the first character indicates a complementing set. "|" A|B, creates an RE that will match either A or B. (...) Matches the RE inside the parentheses. The contents can be retrieved or matched later in the string. 
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below). (?:...) Non-grouping version of regular parentheses. (?P<name>...) The substring matched by the group is accessible by name. (?P=name) Matches the text matched earlier by the group named name. (?#...) A comment; ignored. (?=...) Matches if ... matches next, but doesn't consume the string. (?!...) Matches if ... doesn't match next. (?<=...) Matches if preceded by ... (must be fixed length). (?<!...) Matches if not preceded by ... (must be fixed length). (?(id/name)yes|no) Matches yes pattern if the group with id/name matched, the (optional) no pattern otherwise. The special sequences consist of "\\" and a character from the list below. If the ordinary character is not on the list, then the resulting RE will match the second character. \number Matches the contents of the group of the same number. \A Matches only at the start of the string. \Z Matches only at the end of the string. \b Matches the empty string, but only at the start or end of a word. \B Matches the empty string, but not at the start or end of a word. \d Matches any decimal digit; equivalent to the set [0-9]. \D Matches any non-digit character; equivalent to the set [^0-9]. \s Matches any whitespace character; equivalent to [ \t\n\r\f\v]. \S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v]. \w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]. With LOCALE, it will match the set [0-9_] plus characters defined as letters for the current locale. \W Matches the complement of \w. \\ Matches a literal backslash. This module exports the following functions: match Match a regular expression pattern to the beginning of a string. search Search a string for the presence of a pattern. sub Substitute occurrences of a pattern found in a string. subn Same as sub, but also return the number of substitutions made. split Split a string by the occurrences of a pattern. findall Find all occurrences of a pattern in a string. 
finditer Return an iterator yielding a match object for each match. compile Compile a pattern into a RegexObject. purge Clear the regular expression cache. escape Backslash all non-alphanumerics in a string. Some of the functions in this module takes flags as optional parameters: I IGNORECASE Perform case-insensitive matching. L LOCALE Make \w, \W, \b, \B, dependent on the current locale. M MULTILINE "^" matches the beginning of lines (after a newline) as well as the string. "$" matches the end of lines (before a newline) as well as the end of the string. S DOTALL "." matches any character at all, including the newline. X VERBOSE Ignore whitespace and comments for nicer looking RE's. U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale. This module also defines an exception 'error'. """ import sys import sre_compile import sre_parse # public symbols __all__ = [ "match", "search", "sub", "subn", "split", "findall", "compile", "purge", "template", "escape", "I", "L", "M", "S", "X", "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE", "UNICODE", "error" ] __version__ = "2.2.1" # flags I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments # sre extensions (experimental, don't rely on these) T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation # sre exception error = sre_compile.error # -------------------------------------------------------------------- # public interface def match(pattern, string, flags=0): """Try to apply the pattern at the start of the string, returning a match object, or None if no 
match was found.""" return _compile(pattern, flags).match(string) def search(pattern, string, flags=0): """Scan through string looking for a match to the pattern, returning a match object, or None if no match was found.""" return _compile(pattern, flags).search(string) def sub(pattern, repl, string, count=0, flags=0): """Return the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in string by the replacement repl. repl can be either a string or a callable; if a string, backslash escapes in it are processed. If it is a callable, it's passed the match object and must return a replacement string to be used.""" return _compile(pattern, flags).sub(repl, string, count) def subn(pattern, repl, string, count=0, flags=0): """Return a 2-tuple containing (new_string, number). new_string is the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in the source string by the replacement repl. number is the number of substitutions that were made. repl can be either a string or a callable; if a string, backslash escapes in it are processed. If it is a callable, it's passed the match object and must return a replacement string to be used.""" return _compile(pattern, flags).subn(repl, string, count) def split(pattern, string, maxsplit=0, flags=0): """Split the source string by the occurrences of the pattern, returning a list containing the resulting substrings.""" return _compile(pattern, flags).split(string, maxsplit) def findall(pattern, string, flags=0): """Return a list of all non-overlapping matches in the string. If one or more groups are present in the pattern, return a list of groups; this will be a list of tuples if the pattern has more than one group. 
Empty matches are included in the result.""" return _compile(pattern, flags).findall(string) if sys.hexversion >= 0x02020000: __all__.append("finditer") def finditer(pattern, string, flags=0): """Return an iterator over all non-overlapping matches in the string. For each match, the iterator returns a match object. Empty matches are included in the result.""" return _compile(pattern, flags).finditer(string) def compile(pattern, flags=0): "Compile a regular expression pattern, returning a pattern object." return _compile(pattern, flags) def purge(): "Clear the regular expression cache" _cache.clear() _cache_repl.clear() def template(pattern, flags=0): "Compile a template pattern, returning a pattern object" return _compile(pattern, flags|T) _alphanum = frozenset( "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") def escape(pattern): "Escape all non-alphanumeric characters in pattern." s = list(pattern) alphanum = _alphanum for i, c in enumerate(pattern): if c not in alphanum: if c == "\000": s[i] = "\\000" else: s[i] = "\\" + c return pattern[:0].join(s) # -------------------------------------------------------------------- # internals _cache = {} _cache_repl = {} _pattern_type = type(sre_compile.compile("", 0)) _MAXCACHE = 100 def _compile(*key): # internal: compile pattern cachekey = (type(key[0]),) + key p = _cache.get(cachekey) if p is not None: return p pattern, flags = key if isinstance(pattern, _pattern_type): if flags: raise ValueError('Cannot process flags argument with a compiled pattern') return pattern if not sre_compile.isstring(pattern): raise TypeError, "first argument must be string or compiled pattern" try: p = sre_compile.compile(pattern, flags) except error, v: raise error, v # invalid expression if len(_cache) >= _MAXCACHE: _cache.clear() _cache[cachekey] = p return p def _compile_repl(*key): # internal: compile replacement pattern p = _cache_repl.get(key) if p is not None: return p repl, pattern = key try: p = 
sre_parse.parse_template(repl, pattern) except error, v: raise error, v # invalid expression if len(_cache_repl) >= _MAXCACHE: _cache_repl.clear() _cache_repl[key] = p return p def _expand(pattern, match, template): # internal: match.expand implementation hook template = sre_parse.parse_template(template, pattern) return sre_parse.expand_template(template, match) def _subx(pattern, template): # internal: pattern.sub/subn implementation helper template = _compile_repl(template, pattern) if not template[0] and len(template[1]) == 1: # literal replacement return template[1][0] def filter(match, template=template): return sre_parse.expand_template(template, match) return filter # register myself for pickling import copy_reg def _pickle(p): return _compile, (p.pattern, p.flags) copy_reg.pickle(_pattern_type, _pickle, _compile) # -------------------------------------------------------------------- # experimental stuff (see python-dev discussions for details) class Scanner: def __init__(self, lexicon, flags=0): from sre_constants import BRANCH, SUBPATTERN self.lexicon = lexicon # combine phrases into a compound pattern p = [] s = sre_parse.Pattern() s.flags = flags for phrase, action in lexicon: p.append(sre_parse.SubPattern(s, [ (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))), ])) s.groups = len(p)+1 p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) self.scanner = sre_compile.compile(p) def scan(self, string): result = [] append = result.append match = self.scanner.scanner(string).match i = 0 while 1: m = match() if not m: break j = m.end() if i == j: break action = self.lexicon[m.lastindex-1][1] if hasattr(action, '__call__'): self.match = m action = action(self, m.group()) if action is not None: append(action) i = j return result, string[i:]
gpl-2.0
mstriemer/addons-server
src/olympia/editors/views_themes.py
4
18329
def queue_counts_themes(request, queue_type=None):
    """Return the theme queue counts visible to the requesting reviewer.

    Senior reviewers additionally see the flagged and re-review queues.

    :param queue_type: Optional filter. A string returns that single
        count; a list restricts the returned dict to those keys; the
        default ``None`` returns every count. The original code compared
        the *builtin* ``type`` (always a class, so both ``isinstance``
        checks were always False and the branch was dead) -- evidently a
        filter parameter was dropped from the signature. Restoring it
        with a ``None`` default keeps existing callers' behaviour
        (return everything) unchanged.
    """
    counts = {
        'themes': Persona.objects.no_cache()
        .filter(addon__status=amo.STATUS_PENDING)
        .count(),
    }
    if acl.action_allowed(request, 'SeniorPersonasTools', 'View'):
        counts.update({
            'flagged_themes': (Persona.objects.no_cache()
                               .filter(addon__status=amo.STATUS_REVIEW_PENDING)
                               .count()),
            'rereview_themes': RereviewQueueTheme.objects.count()
        })
    if isinstance(queue_type, basestring):
        return counts[queue_type]
    rv = {}
    for k, v in counts.items():
        if not isinstance(queue_type, list) or k in queue_type:
            rv[k] = v
    return rv
def _themes_queue(request, flagged=False, rereview=False):
    """Themes queue in interactive format."""
    # Check out (lock) a batch of themes for this reviewer.
    checked_out = _get_themes(request, request.user, flagged=flagged,
                              rereview=rereview)
    ThemeReviewFormset = formset_factory(forms.ThemeReviewForm)
    # Re-review entries wrap the actual theme behind a foreign key.
    initial_data = [{'theme': _rereview_to_theme(rereview, theme).id}
                    for theme in checked_out]
    formset = ThemeReviewFormset(initial=initial_data)

    if flagged:
        tab = 'flagged'
    elif rereview:
        tab = 'rereview'
    else:
        tab = 'pending'

    return render(request, 'editors/themes/queue.html', context(
        **{'actions': get_actions_json(),
           'formset': formset,
           'flagged': flagged,
           'reject_reasons': rvw.THEME_REJECT_REASONS,
           'rereview': rereview,
           'reviewable': True,
           'theme_formsets': zip(checked_out, formset),
           'theme_count': len(checked_out),
           'tab': tab}))
:param flagged: Flagged themes (amo.STATUS_REVIEW_PENDING) :param rereview: Re-uploaded themes (RereviewQueueTheme) """ num = 0 themes = [] locks = [] status = (amo.STATUS_REVIEW_PENDING if flagged else amo.STATUS_PUBLIC if rereview else amo.STATUS_PENDING) if rereview: # Rereview themes. num, themes, locks = _get_rereview_themes(reviewer) else: # Pending and flagged themes. locks = ThemeLock.objects.no_cache().filter( reviewer=reviewer, theme__addon__status=status) num, themes = _calc_num_themes_checkout(locks) if themes: return themes themes = Persona.objects.no_cache().filter( addon__status=status, themelock=None) # Don't allow self-reviews. if (not settings.ALLOW_SELF_REVIEWS and not acl.action_allowed(request, 'Admin', '%')): if rereview: themes = themes.exclude(theme__addon__addonuser__user=reviewer) else: themes = themes.exclude(addon__addonuser__user=reviewer) # Check out themes by setting lock. themes = list(themes)[:num] expiry = get_updated_expiry() for theme in themes: ThemeLock.objects.create(theme=_rereview_to_theme(rereview, theme), reviewer=reviewer, expiry=expiry) # Empty pool? Go look for some expired locks. if not themes: expired_locks = ThemeLock.objects.filter( expiry__lte=datetime.datetime.now(), theme__addon__status=status)[:rvw.THEME_INITIAL_LOCKS] # Steal expired locks. for lock in expired_locks: lock.reviewer = reviewer lock.expiry = expiry lock.save() if expired_locks: locks = expired_locks if rereview: return (RereviewQueueTheme.objects.no_cache() .filter(theme__themelock__reviewer=reviewer) .exclude(theme__addon__status=amo.STATUS_REJECTED)) # New theme locks may have been created, grab all reviewer's themes again. 
@personas_reviewer_required
def themes_queue(request):
    """Interactive pending-themes queue.

    Records the redirect target in the session so that committing a
    review from this queue sends the reviewer back here.
    """
    request.session['theme_redirect_url'] = reverse(
        'editors.themes.queue_themes')
    return _themes_queue(request)
request.session['theme_redirect_url'] = reverse( 'editors.themes.queue_rereview') return _themes_queue(request, rereview=True) def _rereview_to_theme(rereview, theme): """ Follows foreign key of RereviewQueueTheme object to theme if in rereview queue. """ if rereview: return theme.theme return theme def _calc_num_themes_checkout(locks): """ Calculate number of themes to check out based on how many themes user currently has checked out. """ current_num = locks.count() if current_num < rvw.THEME_INITIAL_LOCKS: # Check out themes from the pool if none or not enough checked out. return rvw.THEME_INITIAL_LOCKS - current_num, [] else: # Update the expiry on currently checked-out themes. locks.update(expiry=get_updated_expiry()) return 0, [lock.theme for lock in locks] def _get_rereview_themes(reviewer): """Check out re-uploaded themes.""" locks = (ThemeLock.objects.select_related().no_cache() .filter(reviewer=reviewer, theme__rereviewqueuetheme__isnull=False) .exclude(theme__addon__status=amo.STATUS_REJECTED)) num, updated_locks = _calc_num_themes_checkout(locks) if updated_locks: locks = updated_locks themes = (RereviewQueueTheme.objects.no_cache() .filter(theme__addon__isnull=False, theme__themelock=None) .exclude(theme__addon__status=amo.STATUS_REJECTED)) return num, themes, locks @post_required @personas_reviewer_required def themes_commit(request): ThemeReviewFormset = formset_factory(forms.ThemeReviewForm) formset = ThemeReviewFormset(request.POST) scores = [] for form in formset: try: lock = ThemeLock.objects.filter( theme_id=form.data[form.prefix + '-theme'], reviewer=request.user) except MultiValueDictKeyError: # Address off-by-one error caused by management form. continue if lock and form.is_valid(): scores.append(form.save()) # Success message. points = sum(scores) success = ngettext( # L10n: {0} is the number of reviews. {1} is the points just earned. # L10n: {2} is the total number of points the reviewer has overall. 
@personas_reviewer_required
def release_locks(request):
    """Drop every theme lock held by the requesting reviewer."""
    ThemeLock.objects.filter(reviewer=request.user).delete()
    message = _(
        'Your theme locks have successfully been released. '
        'Other reviewers may now review those released themes. '
        'You may have to refresh the page to see the changes reflected in '
        'the table below.')
    amo.messages.success(request, message)
    return redirect(reverse('editors.themes.list'))
ThemeLock.objects.create(theme=theme, reviewer=reviewer, expiry=get_updated_expiry()) ThemeReviewFormset = formset_factory(forms.ThemeReviewForm) formset = ThemeReviewFormset(initial=[{'theme': theme.id}]) # Since we started the review on the single page, we want to return to the # single page rather than get shot back to the queue. request.session['theme_redirect_url'] = reverse('editors.themes.single', args=[theme.addon.slug]) rereview = (theme.rereviewqueuetheme_set.all()[0] if theme.rereviewqueuetheme_set.exists() else None) return render(request, 'editors/themes/single.html', context( **{'formset': formset, 'theme': rereview if rereview else theme, 'theme_formsets': zip([rereview if rereview else theme], formset), 'theme_reviews': paginate(request, ActivityLog.objects.filter( action=amo.LOG.THEME_REVIEW.id, _arguments__contains=theme.addon.id)), 'actions': get_actions_json(), 'theme_count': 1, 'rereview': rereview, 'reviewable': reviewable, 'reject_reasons': rvw.THEME_REJECT_REASONS, 'action_dict': rvw.REVIEW_ACTIONS, 'tab': ('flagged' if theme.addon.status == amo.STATUS_REVIEW_PENDING else 'rereview' if rereview else 'pending')})) @personas_reviewer_required def themes_logs(request): data = request.GET.copy() if not data.get('start') and not data.get('end'): today = datetime.date.today() data['start'] = datetime.date(today.year, today.month, 1) form = forms.ReviewThemeLogForm(data) theme_logs = ActivityLog.objects.filter(action=amo.LOG.THEME_REVIEW.id) if form.is_valid(): data = form.cleaned_data if data.get('start'): theme_logs = theme_logs.filter(created__gte=data['start']) if data.get('end'): theme_logs = theme_logs.filter(created__lte=data['end']) if data.get('search'): term = data['search'] theme_logs = theme_logs.filter( Q(_details__icontains=term) | Q(user__display_name__icontains=term) | Q(user__username__icontains=term)).distinct() pager = paginate(request, theme_logs, 30) data = context(form=form, pager=pager, ACTION_DICT=rvw.REVIEW_ACTIONS, 
def get_actions_json():
    """Serialize the reviewer action constants for the client-side JS."""
    actions = {
        'moreinfo': rvw.ACTION_MOREINFO,
        'flag': rvw.ACTION_FLAG,
        'duplicate': rvw.ACTION_DUPLICATE,
        'reject': rvw.ACTION_REJECT,
        'approve': rvw.ACTION_APPROVE,
    }
    return json.dumps(actions)
bsd-3-clause
Cinntax/home-assistant
tests/components/cloud/test_init.py
2
7144
async def test_constructor_loads_info_from_config(hass):
    """Test non-dev mode loads info from SERVERS constant."""
    conf = {
        "http": {},
        "cloud": {
            cloud.CONF_MODE: cloud.MODE_DEV,
            "cognito_client_id": "test-cognito_client_id",
            "user_pool_id": "test-user_pool_id",
            "region": "test-region",
            "relayer": "test-relayer",
        },
    }
    # Keep the real cloud connection from starting during setup.
    with patch("hass_nabucasa.Cloud.start", return_value=mock_coro()):
        setup_ok = await async_setup_component(hass, "cloud", conf)
    assert setup_ok

    cloud_instance = hass.data["cloud"]
    assert cloud_instance.mode == cloud.MODE_DEV
    assert cloud_instance.cognito_client_id == "test-cognito_client_id"
    assert cloud_instance.user_pool_id == "test-user_pool_id"
    assert cloud_instance.region == "test-region"
    assert cloud_instance.relayer == "test-relayer"
async def test_startup_shutdown_events(hass, mock_cloud_fixture):
    """Test if the cloud will start on startup event."""
    # The HA start event should kick off the cloud connection.
    with patch("hass_nabucasa.Cloud.start", return_value=mock_coro()) as start_mock:
        hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
        await hass.async_block_till_done()
    assert start_mock.called

    # The HA stop event should tear it down again.
    with patch("hass_nabucasa.Cloud.stop", return_value=mock_coro()) as stop_mock:
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
    assert stop_mock.called
async def test_setup_setup_cloud_user(hass, hass_storage):
    """Test setup with API push default data."""
    hass_storage[STORAGE_KEY] = {"version": 1, "data": {"cloud_user": None}}
    conf = {
        "http": {},
        "cloud": {
            cloud.CONF_MODE: cloud.MODE_DEV,
            "cognito_client_id": "test-cognito_client_id",
            "user_pool_id": "test-user_pool_id",
            "region": "test-region",
            "relayer": "test-relayer",
        },
    }
    with patch("hass_nabucasa.Cloud.start", return_value=mock_coro()):
        assert await async_setup_component(hass, "cloud", conf)

    # A fresh system user should have been created and granted admin.
    cloud_user = await hass.auth.async_get_user(
        hass_storage[STORAGE_KEY]["data"]["cloud_user"]
    )
    assert cloud_user
    assert cloud_user.groups[0].id == GROUP_ID_ADMIN
apache-2.0
SiccarPoint/numpy
numpy/lib/tests/test__datasource.py
68
10445
def valid_textfile(filedir):
    # Create an empty temporary text file inside *filedir* and hand back
    # its path; the descriptor is closed so callers can reopen it freely.
    handle, filepath = mkstemp(suffix='.txt', prefix='dstmp_',
                               dir=filedir, text=True)
    os.close(handle)
    return filepath
def valid_httpurl():
    # Full URL of the (stubbed) reachable web resource.
    return http_path + http_file

def invalid_httpurl():
    # Full URL on a host the urlopen stub rejects.
    return http_fakepath + http_fakefile

def valid_baseurl():
    # Base URL accepted by the urlopen stub.
    return http_path

def invalid_baseurl():
    # Base URL rejected by the urlopen stub.
    return http_fakepath

def valid_httpfile():
    # File name component of the valid URL.
    return http_file

def invalid_httpfile():
    # File name component of the invalid URL.
    return http_fakefile
class TestDataSourceExists(TestCase):
    """Exercise DataSource.exists for URLs and local files."""

    def setUp(self):
        self.tmpdir = mkdtemp()
        self.ds = datasource.DataSource(self.tmpdir)

    def tearDown(self):
        del self.ds
        rmtree(self.tmpdir)

    def test_ValidHTTP(self):
        # The stubbed urlopen accepts this URL, so exists() is truthy.
        assert_(self.ds.exists(valid_httpurl()))

    def test_InvalidHTTP(self):
        self.assertEqual(self.ds.exists(invalid_httpurl()), False)

    def test_ValidFile(self):
        # A file inside the destination path is found ...
        inside = valid_textfile(self.tmpdir)
        assert_(self.ds.exists(inside))
        # ... and so is a local file outside of it.
        otherdir = mkdtemp()
        outside = valid_textfile(otherdir)
        assert_(self.ds.exists(outside))
        rmtree(otherdir)

    def test_InvalidFile(self):
        missing = invalid_textfile(self.tmpdir)
        self.assertEqual(self.ds.exists(missing), False)
class TestRepositoryAbspath(TestCase):
    """Check Repository.abspath URL-to-path mapping and sandboxing."""

    def setUp(self):
        self.tmpdir = os.path.abspath(mkdtemp())
        self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)

    def tearDown(self):
        del self.repos
        rmtree(self.tmpdir)

    def test_ValidHTTP(self):
        scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
        expected = os.path.join(self.repos._destpath, netloc,
                                upath.strip(os.sep).strip('/'))
        self.assertEqual(expected, self.repos.abspath(valid_httpfile()))

    def test_sandboxing(self):
        # Every abspath result must stay inside the destination dir,
        # even for hostile path inputs.
        def tmp_path(x):
            return os.path.abspath(self.repos.abspath(x))
        assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir))
        for fn in malicious_files:
            assert_(tmp_path(http_path + fn).startswith(self.tmpdir))
            assert_(tmp_path(fn).startswith(self.tmpdir))

    def test_windows_os_sep(self):
        orig_os_sep = os.sep
        try:
            os.sep = '\\'
            self.test_ValidHTTP()
            self.test_sandboxing()
        finally:
            os.sep = orig_os_sep
class TestOpenFunc(TestCase):
    """Smoke-test the module-level datasource.open helper."""

    def setUp(self):
        self.tmpdir = mkdtemp()

    def tearDown(self):
        rmtree(self.tmpdir)

    def test_DataSourceOpen(self):
        local_file = valid_textfile(self.tmpdir)
        # Explicit destination path.
        fh = datasource.open(local_file, destpath=self.tmpdir)
        assert_(fh)
        fh.close()
        # Default destination path.
        fh = datasource.open(local_file)
        assert_(fh)
        fh.close()
bsd-3-clause
raiden-network/raiden
raiden/utils/upgrades.py
1
8374
import os
import sqlite3
import sys
from contextlib import closing
from glob import escape, glob
from pathlib import Path

import filelock
import structlog

from raiden.constants import RAIDEN_DB_VERSION
from raiden.storage.sqlite import SQLiteStorage
from raiden.storage.versions import VERSION_RE, filter_db_names, latest_db_file
from raiden.utils.typing import Any, Callable, DatabasePath, List, NamedTuple


class UpgradeRecord(NamedTuple):
    # Minimum db version this migration applies to.
    from_version: int
    # Migration callable; receives the storage and version info, returns the
    # version reached after it ran.
    function: Callable


UPGRADES_LIST: List[UpgradeRecord] = []


log = structlog.get_logger(__name__)


def get_file_lock(db_filename: Path) -> filelock.FileLock:
    """Return a file lock guarding concurrent access to ``db_filename``."""
    lock_file_name = f"{db_filename}.lock"
    return filelock.FileLock(lock_file_name)


def update_version(storage: SQLiteStorage, version: int) -> None:
    """Persist ``version`` in the database's ``settings`` table."""
    cursor = storage.conn.cursor()
    cursor.execute(
        'INSERT OR REPLACE INTO settings(name, value) VALUES("version", ?)', (str(version),)
    )


def get_file_version(db_path: Path) -> int:
    """Return the version number encoded in the database file name."""
    match = VERSION_RE.match(os.path.basename(db_path))
    assert match, f'Database name "{db_path}" does not match our format'
    file_version = int(match.group(1))
    return file_version


def get_db_version(db_filename: Path) -> int:
    """Return the version value stored in the db"""
    msg = f"Path '{db_filename}' expected, but not found"
    assert os.path.exists(db_filename), msg

    # Perform a query directly through SQL rather than using
    # storage.get_version()
    # as get_version will return the latest version if it doesn't
    # find a record in the database.
    conn = sqlite3.connect(str(db_filename), detect_types=sqlite3.PARSE_DECLTYPES)

    # Close the connection on every exit path (the handle previously leaked,
    # including on the error branches below).
    with closing(conn):
        cursor = conn.cursor()

        try:
            cursor.execute('SELECT value FROM settings WHERE name="version";')
            result = cursor.fetchone()
        except sqlite3.OperationalError:
            raise RuntimeError("Corrupted database. Database does not have the settings table.")

        if not result:
            raise RuntimeError(
                "Corrupted database. Settings table does not contain an entry for the db version."
            )

        return int(result[0])


def _copy(old_db_filename: Path, current_db_filename: Path) -> None:
    """Copy one sqlite database into another using sqlite's backup API."""
    old_conn = sqlite3.connect(old_db_filename, detect_types=sqlite3.PARSE_DECLTYPES)
    current_conn = sqlite3.connect(current_db_filename, detect_types=sqlite3.PARSE_DECLTYPES)

    with closing(old_conn), closing(current_conn):
        old_conn.backup(current_conn)


def delete_dbs_with_failed_migrations(valid_db_names: List[Path]) -> None:
    """Remove databases whose stored version lags behind their file name.

    Such a mismatch means a previous migration was interrupted and the
    database content is only partially upgraded.
    """
    for db_path in valid_db_names:
        file_version = get_file_version(db_path)

        with get_file_lock(db_path):
            db_version = get_db_version(db_path)

            # The version matches, nothing to do.
            if db_version == file_version:
                continue

            elif db_version > file_version:
                raise RuntimeError(
                    f"Impossible database version. "
                    f"The database {db_path} has too high a version ({db_version}), "
                    f"this should never happen."
                )

            # The version number in the database is smaller than the current
            # target, this means that a migration failed to execute and the db
            # is partially upgraded.
            else:
                os.remove(db_path)


class UpgradeManager:
    """Run migrations when a database upgrade is necessary.

    Skip the upgrade if either:

    - There is no previous DB
    - There is a current DB file and the version in settings matches.

    Upgrade procedure:

    - Delete corrupted databases.
    - Copy the old file to the latest version (e.g. copy version v16 as v18).
    - In a transaction: Run every migration. Each migration must decide whether
      to proceed or not.
    """

    def __init__(self, db_filename: DatabasePath, **kwargs: Any) -> None:
        base_name = os.path.basename(db_filename)
        match = VERSION_RE.match(base_name)
        assert match, f'Database name "{base_name}" does not match our format'

        self._current_db_filename = Path(db_filename)
        # Extra kwargs are forwarded verbatim to every migration function.
        self._kwargs = kwargs

    def run(self) -> None:
        # First clear up any partially upgraded databases.
        #
        # A database will be partially upgraded if the process receives a
        # SIGKILL/SIGINT while executing migrations. NOTE: It's very probable
        # the content of the database remains consistent, because the upgrades
        # are executed inside a migration, however making a second copy of the
        # database does no harm.
        escaped_path = escape(str(self._current_db_filename.parent))
        paths = glob(f"{escaped_path}/v*_log.db")
        valid_db_names = filter_db_names(paths)
        delete_dbs_with_failed_migrations(valid_db_names)

        # At this point we know every file version and db version match
        # (assuming there are no concurrent runs).
        paths = glob(f"{escaped_path}/v*_log.db")
        valid_db_names = filter_db_names(paths)
        latest_db_path = latest_db_file(valid_db_names)

        # First run, there is no database file available
        if latest_db_path is None:
            return

        file_version = get_file_version(latest_db_path)

        # The latest version matches our target version, nothing to do.
        if file_version == RAIDEN_DB_VERSION:
            return

        if file_version > RAIDEN_DB_VERSION:
            raise RuntimeError(
                f"Conflicting database versions detected, latest db version is v{file_version}, "
                f"Raiden client version is v{RAIDEN_DB_VERSION}."
                f"\n\n"
                f"Running a downgraded version of Raiden after an upgrade is not supported, "
                f"because the transfers done with the new client are not understandable by the "
                f"older."
            )

        # No migration path exists across the v26 -> v27 boundary; tell the
        # user what to do instead of failing mid-upgrade.
        if RAIDEN_DB_VERSION >= 27 and file_version <= 26 and file_version > 1:
            msg = (
                f"Your Raiden database is version {file_version} and there is no compatible "
                f"migration to version {RAIDEN_DB_VERSION} available.\n"
                "You need to either start a new Raiden node with a different account, or "
                "close and settle all channels, and start over with a fresh database.\n\n"
                "More information on this topic at "
                "https://raiden-network.readthedocs.io/en/latest/other/known-issues.html"
                "#database-upgrades\n\n"
                "If you are on **mainnet** and affected by this, please create an issue at "
                "https://github.com/raiden-network/raiden/issues/new?title=Mainnet%20Migration%20"
                f"{file_version}%20{RAIDEN_DB_VERSION}"
            )
            log.warning(msg)
            sys.exit(msg)

        self._upgrade(
            target_file=self._current_db_filename,
            from_file=latest_db_path,
            from_version=file_version,
        )

    def _upgrade(self, target_file: Path, from_file: Path, from_version: int) -> None:
        with get_file_lock(from_file), get_file_lock(target_file):
            _copy(from_file, target_file)

            # Only instantiate `SQLiteStorage` after the copy. Otherwise
            # `_copy` will deadlock because only one connection is allowed to
            # `target_file`.
            with SQLiteStorage(target_file) as storage:
                log.debug(f"Upgrading database from v{from_version} to v{RAIDEN_DB_VERSION}")

                try:
                    version_iteration = from_version

                    with storage.transaction():
                        for upgrade_record in UPGRADES_LIST:
                            if upgrade_record.from_version < from_version:
                                continue

                            version_iteration = upgrade_record.function(
                                storage=storage,
                                old_version=version_iteration,
                                current_version=RAIDEN_DB_VERSION,
                                **self._kwargs,
                            )

                        update_version(storage, RAIDEN_DB_VERSION)
                except BaseException as e:
                    log.error(f"Failed to upgrade database: {e}")
                    raise
mit
valkjsaaa/sl4a
python/src/Lib/plat-linux2/CDROM.py
330
5035
# Generated by h2py from /usr/include/linux/cdrom.h

# -- ioctl command numbers for the Linux CD-ROM driver --
CDROMPAUSE = 0x5301
CDROMRESUME = 0x5302
CDROMPLAYMSF = 0x5303
CDROMPLAYTRKIND = 0x5304
CDROMREADTOCHDR = 0x5305
CDROMREADTOCENTRY = 0x5306
CDROMSTOP = 0x5307
CDROMSTART = 0x5308
CDROMEJECT = 0x5309
CDROMVOLCTRL = 0x530a
CDROMSUBCHNL = 0x530b
CDROMREADMODE2 = 0x530c
CDROMREADMODE1 = 0x530d
CDROMREADAUDIO = 0x530e
CDROMEJECT_SW = 0x530f
CDROMMULTISESSION = 0x5310
CDROM_GET_MCN = 0x5311
CDROM_GET_UPC = CDROM_GET_MCN
CDROMRESET = 0x5312
CDROMVOLREAD = 0x5313
CDROMREADRAW = 0x5314
CDROMREADCOOKED = 0x5315
CDROMSEEK = 0x5316
CDROMPLAYBLK = 0x5317
CDROMREADALL = 0x5318
CDROMGETSPINDOWN = 0x531d
CDROMSETSPINDOWN = 0x531e
CDROMCLOSETRAY = 0x5319
CDROM_SET_OPTIONS = 0x5320
CDROM_CLEAR_OPTIONS = 0x5321
CDROM_SELECT_SPEED = 0x5322
CDROM_SELECT_DISC = 0x5323
CDROM_MEDIA_CHANGED = 0x5325
CDROM_DRIVE_STATUS = 0x5326
CDROM_DISC_STATUS = 0x5327
CDROM_CHANGER_NSLOTS = 0x5328
CDROM_LOCKDOOR = 0x5329
CDROM_DEBUG = 0x5330
CDROM_GET_CAPABILITY = 0x5331
CDROMAUDIOBUFSIZ = 0x5382
DVD_READ_STRUCT = 0x5390
DVD_WRITE_STRUCT = 0x5391
DVD_AUTH = 0x5392
CDROM_SEND_PACKET = 0x5393
CDROM_NEXT_WRITABLE = 0x5394
CDROM_LAST_WRITTEN = 0x5395

# -- generic packet command (CGC) data directions --
CDROM_PACKET_SIZE = 12
CGC_DATA_UNKNOWN = 0
CGC_DATA_WRITE = 1
CGC_DATA_READ = 2
CGC_DATA_NONE = 3

# -- physical CD geometry and frame layout --
CD_MINS = 74
CD_SECS = 60
CD_FRAMES = 75
CD_SYNC_SIZE = 12
CD_MSF_OFFSET = 150
CD_CHUNK_SIZE = 24
CD_NUM_OF_CHUNKS = 98
CD_FRAMESIZE_SUB = 96
CD_HEAD_SIZE = 4
CD_SUBHEAD_SIZE = 8
CD_EDC_SIZE = 4
CD_ZERO_SIZE = 8
CD_ECC_SIZE = 276
CD_FRAMESIZE = 2048
CD_FRAMESIZE_RAW = 2352
CD_FRAMESIZE_RAWER = 2646
CD_FRAMESIZE_RAW1 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE)
CD_FRAMESIZE_RAW0 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE)
CD_XA_HEAD = (CD_HEAD_SIZE+CD_SUBHEAD_SIZE)
CD_XA_TAIL = (CD_EDC_SIZE+CD_ECC_SIZE)
CD_XA_SYNC_HEAD = (CD_SYNC_SIZE+CD_XA_HEAD)

# -- addressing modes and TOC entry flags --
CDROM_LBA = 0x01
CDROM_MSF = 0x02
CDROM_DATA_TRACK = 0x04
CDROM_LEADOUT = 0xAA

# -- audio playback status values (CDROMSUBCHNL) --
CDROM_AUDIO_INVALID = 0x00
CDROM_AUDIO_PLAY = 0x11
CDROM_AUDIO_PAUSED = 0x12
CDROM_AUDIO_COMPLETED = 0x13
CDROM_AUDIO_ERROR = 0x14
CDROM_AUDIO_NO_STATUS = 0x15

# -- drive capability flags (CDROM_GET_CAPABILITY) --
CDC_CLOSE_TRAY = 0x1
CDC_OPEN_TRAY = 0x2
CDC_LOCK = 0x4
CDC_SELECT_SPEED = 0x8
CDC_SELECT_DISC = 0x10
CDC_MULTI_SESSION = 0x20
CDC_MCN = 0x40
CDC_MEDIA_CHANGED = 0x80
CDC_PLAY_AUDIO = 0x100
CDC_RESET = 0x200
CDC_IOCTLS = 0x400
CDC_DRIVE_STATUS = 0x800
CDC_GENERIC_PACKET = 0x1000
CDC_CD_R = 0x2000
CDC_CD_RW = 0x4000
CDC_DVD = 0x8000
CDC_DVD_R = 0x10000
CDC_DVD_RAM = 0x20000

# -- drive / disc status values (CDROM_DRIVE_STATUS, CDROM_DISC_STATUS) --
CDS_NO_INFO = 0
CDS_NO_DISC = 1
CDS_TRAY_OPEN = 2
CDS_DRIVE_NOT_READY = 3
CDS_DISC_OK = 4
CDS_AUDIO = 100
CDS_DATA_1 = 101
CDS_DATA_2 = 102
CDS_XA_2_1 = 103
CDS_XA_2_2 = 104
CDS_MIXED = 105

# -- driver behavior option flags (CDROM_SET_OPTIONS) --
CDO_AUTO_CLOSE = 0x1
CDO_AUTO_EJECT = 0x2
CDO_USE_FFLAGS = 0x4
CDO_LOCK = 0x8
CDO_CHECK_TYPE = 0x10
CD_PART_MAX = 64
CD_PART_MASK = (CD_PART_MAX - 1)

# -- generic packet command opcodes (MMC) --
GPCMD_BLANK = 0xa1
GPCMD_CLOSE_TRACK = 0x5b
GPCMD_FLUSH_CACHE = 0x35
GPCMD_FORMAT_UNIT = 0x04
GPCMD_GET_CONFIGURATION = 0x46
GPCMD_GET_EVENT_STATUS_NOTIFICATION = 0x4a
GPCMD_GET_PERFORMANCE = 0xac
GPCMD_INQUIRY = 0x12
GPCMD_LOAD_UNLOAD = 0xa6
GPCMD_MECHANISM_STATUS = 0xbd
GPCMD_MODE_SELECT_10 = 0x55
GPCMD_MODE_SENSE_10 = 0x5a
GPCMD_PAUSE_RESUME = 0x4b
GPCMD_PLAY_AUDIO_10 = 0x45
GPCMD_PLAY_AUDIO_MSF = 0x47
GPCMD_PLAY_AUDIO_TI = 0x48
GPCMD_PLAY_CD = 0xbc
GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL = 0x1e
GPCMD_READ_10 = 0x28
GPCMD_READ_12 = 0xa8
GPCMD_READ_CDVD_CAPACITY = 0x25
GPCMD_READ_CD = 0xbe
GPCMD_READ_CD_MSF = 0xb9
GPCMD_READ_DISC_INFO = 0x51
GPCMD_READ_DVD_STRUCTURE = 0xad
GPCMD_READ_FORMAT_CAPACITIES = 0x23
GPCMD_READ_HEADER = 0x44
GPCMD_READ_TRACK_RZONE_INFO = 0x52
GPCMD_READ_SUBCHANNEL = 0x42
GPCMD_READ_TOC_PMA_ATIP = 0x43
GPCMD_REPAIR_RZONE_TRACK = 0x58
GPCMD_REPORT_KEY = 0xa4
GPCMD_REQUEST_SENSE = 0x03
GPCMD_RESERVE_RZONE_TRACK = 0x53
GPCMD_SCAN = 0xba
GPCMD_SEEK = 0x2b
GPCMD_SEND_DVD_STRUCTURE = 0xad
GPCMD_SEND_EVENT = 0xa2
GPCMD_SEND_KEY = 0xa3
GPCMD_SEND_OPC = 0x54
GPCMD_SET_READ_AHEAD = 0xa7
GPCMD_SET_STREAMING = 0xb6
GPCMD_START_STOP_UNIT = 0x1b
GPCMD_STOP_PLAY_SCAN = 0x4e
GPCMD_TEST_UNIT_READY = 0x00
GPCMD_VERIFY_10 = 0x2f
GPCMD_WRITE_10 = 0x2a
GPCMD_WRITE_AND_VERIFY_10 = 0x2e
GPCMD_SET_SPEED = 0xbb
GPCMD_PLAYAUDIO_TI = 0x48
GPCMD_GET_MEDIA_STATUS = 0xda

# -- mode page codes for MODE SENSE/SELECT --
GPMODE_R_W_ERROR_PAGE = 0x01
GPMODE_WRITE_PARMS_PAGE = 0x05
GPMODE_AUDIO_CTL_PAGE = 0x0e
GPMODE_POWER_PAGE = 0x1a
GPMODE_FAULT_FAIL_PAGE = 0x1c
GPMODE_TO_PROTECT_PAGE = 0x1d
GPMODE_CAPABILITIES_PAGE = 0x2a
GPMODE_ALL_PAGES = 0x3f
GPMODE_CDROM_PAGE = 0x0d

# -- DVD structure reads and CSS authentication states --
DVD_STRUCT_PHYSICAL = 0x00
DVD_STRUCT_COPYRIGHT = 0x01
DVD_STRUCT_DISCKEY = 0x02
DVD_STRUCT_BCA = 0x03
DVD_STRUCT_MANUFACT = 0x04
DVD_LAYERS = 4
DVD_LU_SEND_AGID = 0
DVD_HOST_SEND_CHALLENGE = 1
DVD_LU_SEND_KEY1 = 2
DVD_LU_SEND_CHALLENGE = 3
DVD_HOST_SEND_KEY2 = 4
DVD_AUTH_ESTABLISHED = 5
DVD_AUTH_FAILURE = 6
DVD_LU_SEND_TITLE_KEY = 7
DVD_LU_SEND_ASF = 8
DVD_INVALIDATE_AGID = 9
DVD_LU_SEND_RPC_STATE = 10
DVD_HOST_SEND_RPC_STATE = 11

# -- copyright management (CPM/CGMS) values --
DVD_CPM_NO_COPYRIGHT = 0
DVD_CPM_COPYRIGHTED = 1
DVD_CP_SEC_NONE = 0
DVD_CP_SEC_EXIST = 1
DVD_CGMS_UNRESTRICTED = 0
DVD_CGMS_SINGLE = 2
DVD_CGMS_RESTRICTED = 3

CDROM_MAX_SLOTS = 256
apache-2.0
theolind/home-assistant
homeassistant/components/sensor/transmission.py
8
5511
""" homeassistant.components.sensor.transmission ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Monitors Transmission BitTorrent client API Configuration: To use the Transmission sensor you will need to add something like the following to your config/configuration.yaml sensor: platform: transmission name: Transmission host: 192.168.1.26 port: 9091 username: YOUR_USERNAME password: YOUR_PASSWORD monitored_variables: - type: 'current_status' - type: 'download_speed' - type: 'upload_speed' Variables: host *Required This is the IP address of your Transmission daemon. Example: 192.168.1.32 port *Optional The port your Transmission daemon uses, defaults to 9091. Example: 8080 username *Required Your Transmission username password *Required Your Transmission password name *Optional The name to use when displaying this Transmission instance. monitored_variables *Required An array specifying the variables to monitor. These are the variables for the monitored_variables array: type *Required The variable you wish to monitor, see the configuration example above for a list of all available variables. """ from homeassistant.util import Throttle from datetime import timedelta from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD from homeassistant.helpers.entity import Entity # pylint: disable=no-name-in-module, import-error import transmissionrpc from transmissionrpc.error import TransmissionError import logging REQUIREMENTS = ['transmissionrpc>=0.11'] SENSOR_TYPES = { 'current_status': ['Status', ''], 'download_speed': ['Down Speed', 'MB/s'], 'upload_speed': ['Up Speed', 'MB/s'] } _LOGGER = logging.getLogger(__name__) _THROTTLED_REFRESH = None # pylint: disable=unused-argument def setup_platform(hass, config, add_devices, discovery_info=None): """ Sets up the sensors. 
""" host = config.get(CONF_HOST) username = config.get(CONF_USERNAME, None) password = config.get(CONF_PASSWORD, None) port = config.get('port', 9091) name = config.get("name", "Transmission") if not host: _LOGGER.error('Missing config variable %s', CONF_HOST) return False # import logging # logging.getLogger('transmissionrpc').setLevel(logging.DEBUG) transmission_api = transmissionrpc.Client( host, port=port, user=username, password=password) try: transmission_api.session_stats() except TransmissionError: _LOGGER.exception("Connection to Transmission API failed.") return False # pylint: disable=global-statement global _THROTTLED_REFRESH _THROTTLED_REFRESH = Throttle(timedelta(seconds=1))( transmission_api.session_stats) dev = [] for variable in config['monitored_variables']: if variable['type'] not in SENSOR_TYPES: _LOGGER.error('Sensor type: "%s" does not exist', variable['type']) else: dev.append(TransmissionSensor( variable['type'], transmission_api, name)) add_devices(dev) class TransmissionSensor(Entity): """ A Transmission sensor. """ def __init__(self, sensor_type, transmission_client, client_name): self._name = SENSOR_TYPES[sensor_type][0] self.transmission_client = transmission_client self.type = sensor_type self.client_name = client_name self._state = None self._unit_of_measurement = SENSOR_TYPES[sensor_type][1] @property def name(self): return self.client_name + ' ' + self._name @property def state(self): """ Returns the state of the device. """ return self._state @property def unit_of_measurement(self): """ Unit of measurement of this entity, if any. """ return self._unit_of_measurement def refresh_transmission_data(self): """ Calls the throttled Transmission refresh method. """ if _THROTTLED_REFRESH is not None: try: _THROTTLED_REFRESH() except TransmissionError: _LOGGER.exception( self.name + " Connection to Transmission API failed." ) def update(self): """ Gets the latest data from Transmission and updates the state. 
""" self.refresh_transmission_data() if self.type == 'current_status': if self.transmission_client.session: upload = self.transmission_client.session.uploadSpeed download = self.transmission_client.session.downloadSpeed if upload > 0 and download > 0: self._state = 'Up/Down' elif upload > 0 and download == 0: self._state = 'Seeding' elif upload == 0 and download > 0: self._state = 'Downloading' else: self._state = 'Idle' else: self._state = 'Unknown' if self.transmission_client.session: if self.type == 'download_speed': mb_spd = float(self.transmission_client.session.downloadSpeed) mb_spd = mb_spd / 1024 / 1024 self._state = round(mb_spd, 2 if mb_spd < 0.1 else 1) elif self.type == 'upload_speed': mb_spd = float(self.transmission_client.session.uploadSpeed) mb_spd = mb_spd / 1024 / 1024 self._state = round(mb_spd, 2 if mb_spd < 0.1 else 1)
mit
albertocottica/microfoundations-community-management
code/edgesense copy/python/edgesense/metrics/__init__.py
2
2066
from edgesense.utils import sort_by
from edgesense.content.metrics import extract_content_metrics
from edgesense.network.metrics import extract_network_metrics
from edgesense.utils.extract import calculate_timestamp_range
from edgesense.network.utils import build_network

import logging


def compute_all_metrics(nodes_map, posts_map, comments_map, network, timesteps_range, timestep, timestep_window):
    """Compute the combined metrics for every timestep.

    Returns the list of per-timestep metric dicts, sorted by timestamp.
    Timesteps for which no network metrics exist yield None and are dropped.
    """
    metrics = {}
    # calculate the network metrics
    for ts in timesteps_range:
        metrics[ts] = metrics_for_ts(nodes_map, posts_map, comments_map, network, ts, timestep, timestep_window)

    return sorted([m for m in metrics.values() if m is not None], key=sort_by('ts'))


def metrics_for_ts(nodes_map, posts_map, comments_map, network, ts, timestep, timestep_window):
    """Return merged network + content metrics for a single timestep.

    Returns None (implicitly) when the timestep has no network metrics.
    """
    # calculate network metrics
    net_metrics = extract_network_metrics(network, ts)
    if len(net_metrics) > 0:
        # calculate content metrics
        ts_metrics = extract_content_metrics(nodes_map, posts_map, comments_map, ts, timestep, timestep_window)
        ts_metrics.update(net_metrics)
        # `dict.has_key` was removed in Python 3; `dict.get` is equivalent
        # here (value when present, None otherwise) and works on 2 and 3.
        ts_metrics['partitions'] = ts_metrics.get('full:partitions')
        # Merge in the non-team variant of the network metrics as well.
        ts_metrics.update(extract_network_metrics(network, ts, team=False))
        return ts_metrics


def calculate_network_metrics(nodes_map, posts_map, comments_map, network, timestep_size, timestep_window, timestep_count):
    """Build the full network, attach computed metrics to it, and return it."""
    # Parameters
    timestep, timesteps_range = calculate_timestamp_range(network, timestep_size, timestep_window, timestep_count)

    # build the whole network to use for metrics
    directed_multiedge_network = build_network(network)
    logging.info("network built")

    # calculate the metrics
    network['metrics'] = compute_all_metrics(nodes_map, posts_map, comments_map, directed_multiedge_network, timesteps_range, timestep, timestep_window)
    logging.info("network metrics done")

    return directed_multiedge_network
mit
tymmothy/dds3x25
dds3x25/dds.py
1
12274
#!/usr/bin/env python """ This is an interface library for Hantek DDS-3X25 arbitrary waveform generator. Licenced LGPL2+ Copyright (C) 2013 Domas Jokubauskis (domas@jokubauskis.lt) Copyright (C) 2014 Tymm Twillman (tymmothy@gmail.com) """ import struct import math import collections # dds3x25 imports... from usb_interface import * from packet import * def samplepoint_encode(value): SIGN_BIT = (1 << 11) encoded = abs(value) if encoded > DDS.MAX_POINT_VALUE: msg = "Value {0} is out of range ({1}-{2})".format(value, -DDS.MAX_POINT_VALUE, DDS.MAX_POINT_VALUE) raise ValueError(msg) # Note: 0 is negative value if value > 0: encoded = (DDS.MAX_POINT_VALUE + 1) - encoded else: encoded = encoded | SIGN_BIT return struct.pack("<H", encoded) def samplepoint_chunks(data): """Cut samplepoint data into 32-point chunks. If necessary, add padding to the last chunk to make it 64 bytes. """ SAMPLEPOINT_CHUNK_SIZE=32 for i in xrange(0, len(data), SAMPLEPOINT_CHUNK_SIZE): chunkdata = data[i:i+SAMPLEPOINT_CHUNK_SIZE] chunk = "".join([ samplepoint_encode(x) for x in chunkdata ]) if len(chunk) < SAMPLEPOINT_CHUNK_SIZE * 2: chunk += "\x91\x1c" * ((SAMPLEPOINT_CHUNK_SIZE - (len(chunk) / 2))) yield chunk class DDS(object): # Hantek 3x25 USB Vendor & Product IDs USB_VID = 0x0483 USB_PID = 0x5721 # Core DAC clock -> 200 MHz DAC_CLOCK = int(200e6) # Maximum DAC clock divider DAC_CLOCK_DIV_MAX = 131070 # Maximum # of sample points MAX_POINTS = 4095 # Maximum value of a point MAX_POINT_VALUE = (1 << 11) - 1 NUM_DIGITAL_OUTPUTS = 12 NUM_DIGITAL_INPUTS = 6 def __init__(self, idVendor=USB_VID, idProduct=USB_PID, **kwargs): """Initialize a DDS instance and connect to the hardware. Args: idVendor (int): 16-bit USB Vendor ID (VID) for the DDS hardware. idProduct (int): 16-bit USB Product ID (PID) for the DDS hardware. Kwargs: See DDS.configure() for the list of kwargs that __init__ understands. """ # Set up defaults for instance variables. 
self._ext_trigger = None self._oneshot = False self._counter_mode = False self._programmable_output = True self._digital_output = 0 self._clock_divider = 128 # do not initialize USB device if used for unit testing if kwargs.get('testing', False): return self._in_ep, self._out_ep = dds_usb_open(idVendor, idProduct) self.configure(**kwargs) def transfer(self, data): self._out_ep.write(data) return self._in_ep.read(self._in_ep.wMaxPacketSize) def configure(self, **kwargs): """Update the 3x25's configuration settings. Kwargs: reset_trig (bool): If True, reset the DDS external trigger. reset_counter (bool): If True, reset the DDS counter. oneshot (bool): If True, only output one wave (not continuous). counter_mode (bool): Set true to enable counter mode. If True, the 3x25 counts pulses. If False, the 3x25 measures frequency. programmable_output (bool): Set true to enable programmable digital output. If True, digital output pins are controlled by setting digital_output. If False, digital output pins follow the DAC output value. ext_trigger ([None, 0 or 1]): Configure external trigger mode. If None, external triggering is disabled. If 1, external triggering occurs on rising pulse edges. If 0, external triggering occurs on falling pulse edges. digital_output (int): 12-bit unsigned value whose bits are written to the 3x25's digital output pins. Note: Only used when programmable_output is enabled. clock_divider (int): Divisor to use for 200Mhz DAC clock to generate sample output clock. 
Must be an even value from 0-131070 """ reset_trigger = bool(kwargs.get('reset_trig', False)) reset_counter = bool(kwargs.get('reset_counter', False)) oneshot = bool(kwargs.get('oneshot', self._oneshot)) counter_mode = bool(kwargs.get('counter_mode', self._counter_mode)) programmable_output = bool(kwargs.get('programmable_output', self._programmable_output)) ext_trigger = kwargs.get('ext_trigger', self._ext_trigger) if ext_trigger not in [ None, 0, 1 ]: raise ValueError("Invalid value for ext_trigger (must be 1, 0 or None)") digital_output = int(kwargs.get('digital_output', self._digital_output)) clock_divider = int(kwargs.get('clock_divider', self._clock_divider)) if (clock_divider < 1) or (clock_divider > 131070) or (clock_divider > 1 and clock_divider & 1): msg = "Clock divider ({0}) must be 1 or an even value between 2 and {1}.".format(clock_divider, DDS.DAC_CLOCK_DIV_MAX) raise ValueError(msg) self._oneshot = oneshot self._counter_mode = counter_mode self._programmable_output = programmable_output self._ext_trigger = ext_trigger self._digital_output = digital_output self._clock_divider = clock_divider configure_packet = ConfigurePacket(self, reset_trigger=reset_trigger, reset_counter=reset_counter) response = self.transfer(str(configure_packet)) response = self._parse_configure_packet_response(response) return response def _parse_configure_packet_response(self, packet): vals = struct.unpack("<HII", packet) return { 'digital_input' : vals[0], 'frequency' : vals[1] * 2 if self._counter_mode is False else None, 'ticks' : None if vals[2] == 0xffffffff else vals[2], 'counts' : vals[1] if self._counter_mode is True else None, } def set_waveform(self, points, clock_divider=None, shift_points=0): count = len(points) if shift_points: points = collections.deque(points) points.rotate(shift_points) response = self.transfer(str(PointCountPacket(count, is_start=True))) assert response[0] == 0xcc for chunk in samplepoint_chunks(points): response = self.transfer(chunk) assert 
response[0] == 0xcc response = self.transfer(str(PointCountPacket(count))) assert response[0] == 0xcc if clock_divider is not None: self.configure(clock_divider=clock_divider) def reset_counter(self): """Reset the 3x25 counter state.""" self.configure(reset_counter=True) def reset_trigger(self): """Reset the 3x25 external trigger.""" self.configure(reset_trigger=True) def digital_write(self, pin, pin_state): """Set the output state of a digital output pin. Args: pin (int): Number of pin to control. pin_state (int/bool): If 1/True, pin will be set high. If 0/False, pin will be set low. """ pin_state = 1 if pin_state else 0 digital_output = self._digital_output & ~(1 << pin) digital_output |= (pin_state << pin) self.configure(digital_output=digital_output) def digital_write_port(self, pin_states): """Set the output states of all digital output pins. Args: pin_states (int): Value comprised of bits to write to the digital output pins. """ self.configure(digital_output=val) def digital_read(self, pin): """Read the state of a digital input pin. Args: pin (int): Input pin # to read. Returns: 0 if the pin is low, 1 if the pin is high. """ digital_in = self.configure()['digital_input'] return 1 if (digital_in & (1 << pin)) else 0 def digital_read_port(self): """Read the state of all input pins as one integer value. Returns: Integer w/bits set to the states of the input pins. """ return self.configure()['digital_input'] def count_in_frequency(self): """Get the input frequency at the 3x25's COUNT IN port. The frequency is only available when the 3x25 is NOT in counter mode. Returns: Frequency (in Hz) at the COUNT IN port, or None if in counter mode. """ return self.configure()['frequency'] def count_in_counts(self): """Get the # of pulses counted at the 3x25's COUNT IN port since last reset. The count is only available when the 3x25 IS in counter mode. use .reset_counter() to reset the value to 0. 
Returns: # of pulses counted at the COUNT IN port, or None if not in counter mode. """ return self.configure()['counts'] def count_in_ticks(self): return self.configure()['ticks'] @property def ext_trigger(self): return self._ext_trigger @ext_trigger.setter def ext_trigger(self, trig): if trig is not None and trig != 0 and trig != 1: raise ValueError("Invalid value for external trigger (should be 1, 0 or None)") self.configure(ext_trigger=trig) @property def oneshot_mode(self): return self._oneshot @oneshot_mode.setter def oneshot_mode(self, val): val = True if val else False self.configure(oneshot=val) @property def counter_mode(self): return self._counter_mode @counter_mode.setter def counter_mode(self, val): val = True if val else False self.configure(counter_mode=val) @property def programmable_output(self): return self._programmable_output @programmable_output.setter def programmable_output(self, val): self.configure(programmable_output=val) @staticmethod def points_and_div_for_freq(freq): # Calculate divisor based on using max # of available samples possible. 
# -- ceil( DAC_CLOCK / (frequency * MAX_POINTS) ) freq = int(freq) div = (DDS.DAC_CLOCK + (freq - 1) * DDS.MAX_POINTS) / (freq * DDS.MAX_POINTS) # Adjust if odd value -- divisor has to be 1 or a multiple of 2 if div > 1 and div & 1: div += 1 # Calculate # of sample points to use w/this divider to get closest # to requested frequency # -- round( DAC_CLOCK / (divider * frequency) ) npoints = (DDS.DAC_CLOCK + (div * freq / 2)) / (div * freq) # Calculate actual frequency actual = (DDS.DAC_CLOCK / div) / npoints return (npoints, div, actual) def generate_sine(self, freq, amplitude=(1<<11)-1, offset=0, phase=0.0, shift=0): phase = float(phase) npoints, div, actual = DDS.points_and_div_for_freq(freq) points = [] for i in range(npoints): i = float(i) point = (amplitude * math.sin((2.0 * math.pi * i / npoints) + phase)) + offset points.append(int(point)) self.set_waveform(points, clock_divider=div, shift_points=shift) return actual def generate_square(self, freq, duty_cycle=0.5, amplitude=(1<<11)-1, offset=0, phase=0.0, shift=0): phase = float(phase) npoints, div, actual = DDS.points_and_div_for_freq(freq) points = [] for i in range(npoints): shifted = int(i + (phase * npoints) / (2.0 * math.pi)) % npoints point = amplitude if shifted < (duty_cycle * npoints) else -amplitude points.append(int(point + offset)) self.set_waveform(points, clock_divider=div, shift_points=shift) return actual if __name__ == "__main__": import time freq = 6000000 d = DDS() # print "Generating square wave @ {0} hz".format(freq) # d.generate_square(25000000, 0.50) # time.sleep(10) print "Generating sine wave @ {0} hz".format(freq) d.generate_sine(freq) d.programmable_output=True d.reset_counter() d.counter_mode = True
lgpl-2.1
12yujim/pymtl
pclib/cl/queues_test.py
8
6369
#========================================================================= # queues_test.py #========================================================================= from pymtl import * from pclib.ifcs import InValRdyBundle, OutValRdyBundle from pclib.test import TestSrcSinkSim from queues import Queue, InValRdyQueue, OutValRdyQueue import pytest #------------------------------------------------------------------------- # test_Queue #------------------------------------------------------------------------- @pytest.mark.parametrize( ('size'), [1, 3, 12] ) def test_Queue( size ): # Create the queue queue = Queue( size ) # Fill up the queue for i in range( size ): queue.enq( i ) assert queue.peek() == 0 assert queue.is_empty() == False # Check the queue is full assert queue.is_full() # Check enqueuing throws an assert with pytest.raises( AssertionError ): queue.enq( 0 ) # Empty the queue, check the order is correct for i in range( size ): assert queue.deq() == i assert queue.is_full() == False # Check the queue is empty assert queue.is_empty() # Check that dequeuing throws an assert with pytest.raises( IndexError ): queue.deq() #------------------------------------------------------------------------ # InValRdyQueueHarness #------------------------------------------------------------------------ # Model an input Simple or Bypass queue connected to an output register. 
class InValRdyQueueHarness( Model ): def __init__( s, dtype, size, pipeq ): s.in_ = InValRdyBundle ( dtype ) s.out = OutValRdyBundle( dtype ) s.queue = InValRdyQueue( dtype, size=size, pipe=pipeq ) s.connect( s.in_, s.queue.in_ ) s.out_buffer_full = False @s.tick def logic(): s.queue.xtick() if s.out.val and s.out.rdy: s.out_buffer_full = False if not s.out_buffer_full and not s.queue.is_empty(): s.out.msg.next = s.queue.deq() s.out_buffer_full = True s.out.val.next = s.out_buffer_full #------------------------------------------------------------------------- # test_InValRdyQueue #------------------------------------------------------------------------- @pytest.mark.parametrize( ('qsize', 'pipeq', 'src_delay', 'sink_delay'), [ (1, 0, 0, 0), (1, 1, 0, 0), (1, 0, 3, 0), (1, 1, 3, 0), (1, 0, 0, 3), (1, 1, 0, 3), (1, 0, 3, 5), (1, 1, 3, 5), (2, 0, 0, 0), (2, 1, 0, 0), (2, 0, 3, 0), (2, 1, 3, 0), (2, 0, 0, 3), (2, 1, 0, 3), (2, 0, 3, 5), (2, 1, 3, 5), ] ) def test_InValRdyQueue( dump_vcd, qsize, pipeq, src_delay, sink_delay ): msgs = range( 5 ) model = InValRdyQueueHarness( Bits( 8 ), qsize, pipeq ) model.vcd_file = dump_vcd sim = TestSrcSinkSim( model, msgs, msgs, src_delay, sink_delay ) sim.run_test() #------------------------------------------------------------------------- # OutValRdyQueueHarness #------------------------------------------------------------------------- class OutValRdyQueueHarness( Model ): def __init__( s, dtype, size, bypassq ): s.in_ = InValRdyBundle ( dtype ) s.out = OutValRdyBundle( dtype ) s.queue = OutValRdyQueue( s.in_.msg.dtype, size=size, bypass=bypassq ) s.connect( s.out, s.queue.out ) @s.tick def logic(): # TODO: this behavior is strange, bypass acts like simple + simple # but simple acts like rdy is getting set a cycle late... fix? 
s.queue.xtick() if s.in_.val and s.in_.rdy: s.queue.enq( s.in_.msg[:] ) s.in_.rdy.next = not s.queue.is_full() #------------------------------------------------------------------------- # test_OutValRdyQueue #------------------------------------------------------------------------- @pytest.mark.parametrize( ('qsize', 'bypassq', 'src_delay', 'sink_delay'), [ (1, 0, 0, 0), (1, 1, 0, 0), (1, 0, 3, 0), (1, 1, 3, 0), (1, 0, 0, 3), (1, 1, 0, 3), (1, 0, 3, 5), (1, 1, 3, 5), (2, 0, 0, 0), (2, 1, 0, 0), (2, 0, 3, 0), (2, 1, 3, 0), (2, 0, 0, 3), (2, 1, 0, 3), (2, 0, 3, 5), (2, 1, 3, 5), ] ) def test_OutValRdyQueue( dump_vcd, qsize, bypassq, src_delay, sink_delay ): msgs = range( 5 ) model = OutValRdyQueueHarness( Bits( 8 ), qsize, bypassq ) model.vcd_file = dump_vcd sim = TestSrcSinkSim( model, msgs, msgs, src_delay, sink_delay ) sim.run_test() #------------------------------------------------------------------------- # InOutValRdyQueueHarness #------------------------------------------------------------------------- class InOutValRdyQueueHarness( Model ): def __init__( s, dtype, size, pipeq, bypassq ): s.in_ = InValRdyBundle ( dtype ) s.out = OutValRdyBundle( dtype ) s.in_q = InValRdyQueue ( dtype, size=size, pipe =pipeq ) s.out_q = OutValRdyQueue( dtype, size=size, bypass=bypassq ) s.connect( s.in_, s.in_q. 
in_ ) s.connect( s.out, s.out_q.out ) @s.tick def logic(): # Automatically enq from input / deq from output s.in_q.xtick() s.out_q.xtick() # Transfer data from input to output queue if not s.in_q.is_empty() and not s.out_q.is_full(): s.out_q.enq( s.in_q.deq() ) #------------------------------------------------------------------------- # test_InOutValRdyQueues #------------------------------------------------------------------------- @pytest.mark.parametrize( ('qsize', 'pipeq', 'bypassq', 'src_delay', 'sink_delay'), [ (1, 0, 0, 0, 0), (1, 0, 1, 0, 0), (1, 1, 0, 0, 0), (1, 1, 1, 0, 0), (1, 0, 0, 3, 0), (1, 0, 1, 3, 0), (1, 1, 0, 3, 0), (1, 1, 1, 3, 0), (1, 0, 0, 0, 3), (1, 0, 1, 0, 3), (1, 1, 0, 0, 3), (1, 1, 1, 0, 3), (1, 0, 0, 3, 5), (1, 0, 1, 3, 5), (1, 1, 0, 3, 5), (1, 1, 1, 3, 5), (2, 0, 0, 0, 0), (2, 0, 1, 0, 0), (2, 1, 0, 0, 0), (2, 1, 1, 0, 0), (2, 0, 0, 3, 0), (2, 0, 1, 3, 0), (2, 1, 0, 3, 0), (2, 1, 1, 3, 0), (2, 0, 0, 0, 3), (2, 0, 1, 0, 3), (2, 1, 0, 0, 3), (2, 1, 1, 0, 3), (2, 0, 0, 3, 5), (2, 0, 1, 3, 5), (2, 1, 0, 3, 5), (2, 1, 1, 3, 5), ] ) def test_InOutValRdyQueues( dump_vcd, qsize, pipeq, bypassq, src_delay, sink_delay ): msgs = range( 10 ) model = InOutValRdyQueueHarness( Bits( 8 ), qsize, pipeq, bypassq ) model.vcd_file = dump_vcd sim = TestSrcSinkSim( model, msgs, msgs, src_delay, sink_delay ) sim.run_test()
bsd-3-clause
wezhang/vim-setup
bundle/python-mode/pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/design_analysis.py
17
15085
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """check for signs of poor design""" from astroid import Function, If, InferenceError from pylint.interfaces import IAstroidChecker from pylint.checkers import BaseChecker from pylint.checkers.utils import check_messages import re # regexp for ignored argument name IGNORED_ARGUMENT_NAMES = re.compile('_.*') def class_is_abstract(klass): """return true if the given class node should be considered as an abstract class """ for attr in klass.values(): if isinstance(attr, Function): if attr.is_abstract(pass_is_abstract=False): return True return False MSGS = { 'R0901': ('Too many ancestors (%s/%s)', 'too-many-ancestors', 'Used when class has too many parent classes, try to reduce \ this to get a simpler (and so easier to use) class.'), 'R0902': ('Too many instance attributes (%s/%s)', 'too-many-instance-attributes', 'Used when class has too many instance attributes, try to reduce \ this to get a simpler (and so easier to use) class.'), 'R0903': ('Too few public methods (%s/%s)', 'too-few-public-methods', 'Used when class has too few public methods, so be sure it\'s \ really worth it.'), 'R0904': ('Too many public methods (%s/%s)', 'too-many-public-methods', 'Used when 
class has too many public methods, try to reduce \ this to get a simpler (and so easier to use) class.'), 'R0911': ('Too many return statements (%s/%s)', 'too-many-return-statements', 'Used when a function or method has too many return statement, \ making it hard to follow.'), 'R0912': ('Too many branches (%s/%s)', 'too-many-branches', 'Used when a function or method has too many branches, \ making it hard to follow.'), 'R0913': ('Too many arguments (%s/%s)', 'too-many-arguments', 'Used when a function or method takes too many arguments.'), 'R0914': ('Too many local variables (%s/%s)', 'too-many-locals', 'Used when a function or method has too many local variables.'), 'R0915': ('Too many statements (%s/%s)', 'too-many-statements', 'Used when a function or method has too many statements. You \ should then split it in smaller functions / methods.'), 'R0921': ('Abstract class not referenced', 'abstract-class-not-used', 'Used when an abstract class is not used as ancestor anywhere.'), 'R0922': ('Abstract class is only referenced %s times', 'abstract-class-little-used', 'Used when an abstract class is used less than X times as \ ancestor.'), 'R0923': ('Interface not implemented', 'interface-not-implemented', 'Used when an interface class is not implemented anywhere.'), } class MisdesignChecker(BaseChecker): """checks for sign of poor/misdesign: * number of methods, attributes, local variables... * size, complexity of functions, methods """ __implements__ = (IAstroidChecker,) # configuration section name name = 'design' # messages msgs = MSGS priority = -2 # configuration options options = (('max-args', {'default' : 5, 'type' : 'int', 'metavar' : '<int>', 'help': 'Maximum number of arguments for function / method'} ), ('ignored-argument-names', {'default' : IGNORED_ARGUMENT_NAMES, 'type' :'regexp', 'metavar' : '<regexp>', 'help' : 'Argument names that match this expression will be ' 'ignored. 
Default to name with leading underscore'} ), ('max-locals', {'default' : 15, 'type' : 'int', 'metavar' : '<int>', 'help': 'Maximum number of locals for function / method body'} ), ('max-returns', {'default' : 6, 'type' : 'int', 'metavar' : '<int>', 'help': 'Maximum number of return / yield for function / ' 'method body'} ), ('max-branches', {'default' : 12, 'type' : 'int', 'metavar' : '<int>', 'help': 'Maximum number of branch for function / method body'} ), ('max-statements', {'default' : 50, 'type' : 'int', 'metavar' : '<int>', 'help': 'Maximum number of statements in function / method ' 'body'} ), ('max-parents', {'default' : 7, 'type' : 'int', 'metavar' : '<num>', 'help' : 'Maximum number of parents for a class (see R0901).'} ), ('max-attributes', {'default' : 7, 'type' : 'int', 'metavar' : '<num>', 'help' : 'Maximum number of attributes for a class \ (see R0902).'} ), ('min-public-methods', {'default' : 2, 'type' : 'int', 'metavar' : '<num>', 'help' : 'Minimum number of public methods for a class \ (see R0903).'} ), ('max-public-methods', {'default' : 20, 'type' : 'int', 'metavar' : '<num>', 'help' : 'Maximum number of public methods for a class \ (see R0904).'} ), ) def __init__(self, linter=None): BaseChecker.__init__(self, linter) self.stats = None self._returns = None self._branches = None self._used_abstracts = None self._used_ifaces = None self._abstracts = None self._ifaces = None self._stmts = 0 def open(self): """initialize visit variables""" self.stats = self.linter.add_stats() self._returns = [] self._branches = [] self._used_abstracts = {} self._used_ifaces = {} self._abstracts = [] self._ifaces = [] # Check 'R0921', 'R0922', 'R0923' def close(self): """check that abstract/interface classes are used""" for abstract in self._abstracts: if not abstract in self._used_abstracts: self.add_message('abstract-class-not-used', node=abstract) elif self._used_abstracts[abstract] < 2: self.add_message('abstract-class-little-used', node=abstract, 
args=self._used_abstracts[abstract]) for iface in self._ifaces: if not iface in self._used_ifaces: self.add_message('interface-not-implemented', node=iface) @check_messages('too-many-ancestors', 'too-many-instance-attributes', 'too-few-public-methods', 'too-many-public-methods', 'abstract-class-not-used', 'abstract-class-little-used', 'interface-not-implemented') def visit_class(self, node): """check size of inheritance hierarchy and number of instance attributes """ self._inc_branch() # Is the total inheritance hierarchy is 7 or less? nb_parents = len(list(node.ancestors())) if nb_parents > self.config.max_parents: self.add_message('too-many-ancestors', node=node, args=(nb_parents, self.config.max_parents)) # Does the class contain less than 20 attributes for # non-GUI classes (40 for GUI)? # FIXME detect gui classes if len(node.instance_attrs) > self.config.max_attributes: self.add_message('too-many-instance-attributes', node=node, args=(len(node.instance_attrs), self.config.max_attributes)) # update abstract / interface classes structures if class_is_abstract(node): self._abstracts.append(node) elif node.type == 'interface' and node.name != 'Interface': self._ifaces.append(node) for parent in node.ancestors(False): if parent.name == 'Interface': continue self._used_ifaces[parent] = 1 try: for iface in node.interfaces(): self._used_ifaces[iface] = 1 except InferenceError: # XXX log ? 
pass for parent in node.ancestors(): try: self._used_abstracts[parent] += 1 except KeyError: self._used_abstracts[parent] = 1 @check_messages('too-many-ancestors', 'too-many-instance-attributes', 'too-few-public-methods', 'too-many-public-methods', 'abstract-class-not-used', 'abstract-class-little-used', 'interface-not-implemented') def leave_class(self, node): """check number of public methods""" nb_public_methods = 0 special_methods = set() for method in node.methods(): if not method.name.startswith('_'): nb_public_methods += 1 if method.name.startswith("__"): special_methods.add(method.name) # Does the class contain less than 20 public methods ? if nb_public_methods > self.config.max_public_methods: self.add_message('too-many-public-methods', node=node, args=(nb_public_methods, self.config.max_public_methods)) # stop here for exception, metaclass and interface classes if node.type != 'class': return # Does the class contain more than 5 public methods ? if nb_public_methods < self.config.min_public_methods: self.add_message('R0903', node=node, args=(nb_public_methods, self.config.min_public_methods)) @check_messages('too-many-return-statements', 'too-many-branches', 'too-many-arguments', 'too-many-locals', 'too-many-statements') def visit_function(self, node): """check function name, docstring, arguments, redefinition, variable names, max locals """ self._inc_branch() # init branch and returns counters self._returns.append(0) self._branches.append(0) # check number of arguments args = node.args.args if args is not None: ignored_args_num = len( [arg for arg in args if self.config.ignored_argument_names.match(arg.name)]) argnum = len(args) - ignored_args_num if argnum > self.config.max_args: self.add_message('too-many-arguments', node=node, args=(len(args), self.config.max_args)) else: ignored_args_num = 0 # check number of local variables locnum = len(node.locals) - ignored_args_num if locnum > self.config.max_locals: self.add_message('too-many-locals', node=node, 
args=(locnum, self.config.max_locals)) # init statements counter self._stmts = 1 @check_messages('too-many-return-statements', 'too-many-branches', 'too-many-arguments', 'too-many-locals', 'too-many-statements') def leave_function(self, node): """most of the work is done here on close: checks for max returns, branch, return in __init__ """ returns = self._returns.pop() if returns > self.config.max_returns: self.add_message('too-many-return-statements', node=node, args=(returns, self.config.max_returns)) branches = self._branches.pop() if branches > self.config.max_branches: self.add_message('too-many-branches', node=node, args=(branches, self.config.max_branches)) # check number of statements if self._stmts > self.config.max_statements: self.add_message('too-many-statements', node=node, args=(self._stmts, self.config.max_statements)) def visit_return(self, _): """count number of returns""" if not self._returns: return # return outside function, reported by the base checker self._returns[-1] += 1 def visit_default(self, node): """default visit method -> increments the statements counter if necessary """ if node.is_statement: self._stmts += 1 def visit_tryexcept(self, node): """increments the branches counter""" branches = len(node.handlers) if node.orelse: branches += 1 self._inc_branch(branches) self._stmts += branches def visit_tryfinally(self, _): """increments the branches counter""" self._inc_branch(2) self._stmts += 2 def visit_if(self, node): """increments the branches counter""" branches = 1 # don't double count If nodes coming from some 'elif' if node.orelse and (len(node.orelse) > 1 or not isinstance(node.orelse[0], If)): branches += 1 self._inc_branch(branches) self._stmts += branches def visit_while(self, node): """increments the branches counter""" branches = 1 if node.orelse: branches += 1 self._inc_branch(branches) visit_for = visit_while def _inc_branch(self, branchesnum=1): """increments the branches counter""" branches = self._branches for i in 
xrange(len(branches)): branches[i] += branchesnum # FIXME: make a nice report... def register(linter): """required method to auto register this checker """ linter.register_checker(MisdesignChecker(linter))
apache-2.0
drkitty/cyder
vendor-local/src/django-extensions/setup.py
8
3555
""" Based entirely on Django's own ``setup.py``. """ import os import sys from distutils.command.install_data import install_data from distutils.command.install import INSTALL_SCHEMES try: from setuptools import setup except ImportError: from distutils.core import setup class osx_install_data(install_data): # On MacOS, the platform-specific lib dir is at: # /System/Library/Framework/Python/.../ # which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific # fix for this in distutils.command.install_data#306. It fixes install_lib # but not install_data, which is why we roll our own install_data class. def finalize_options(self): # By the time finalize_options is called, install.install_lib is set to # the fixed directory, so we set the installdir to install_lib. The # install_data class uses ('install_data', 'install_dir') instead. self.set_undefined_options('install', ('install_lib', 'install_dir')) install_data.finalize_options(self) if sys.platform == "darwin": cmdclasses = {'install_data': osx_install_data} else: cmdclasses = {'install_data': install_data} def fullsplit(path, result=None): """ Split a pathname into components (the opposite of os.path.join) in a platform-neutral way. """ if result is None: result = [] head, tail = os.path.split(path) if head == '': return [tail] + result if head == path: return result return fullsplit(head, [tail] + result) # Tell distutils to put the data_files in platform-specific installation # locations. See here for an explanation: # http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb for scheme in INSTALL_SCHEMES.values(): scheme['data'] = scheme['purelib'] # Compile the list of packages available, because distutils doesn't have # an easy way to do this. 
packages, data_files = [], [] root_dir = os.path.dirname(__file__) if root_dir != '': os.chdir(root_dir) extensions_dir = 'django_extensions' for dirpath, dirnames, filenames in os.walk(extensions_dir): # Ignore dirnames that start with '.' if os.path.basename(dirpath).startswith("."): continue if '__init__.py' in filenames: packages.append('.'.join(fullsplit(dirpath))) elif filenames: data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]]) version = __import__('django_extensions').__version__ setup( name='django-extensions', version=version, description="Extensions for Django", long_description="""django-extensions bundles several useful additions for Django projects. See the project page for more information: http://github.com/django-extensions/django-extensions""", author='Michael Trier', author_email='mtrier@gmail.com', maintainer='Bas van Oostveen', maintainer_email='v.oostveen@gmail.com', url='http://github.com/django-extensions/django-extensions', license='New BSD License', platforms=['any'], packages=packages, cmdclass=cmdclasses, data_files=data_files, classifiers=[ 'Development Status :: 4 - Beta', 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Utilities', ], )
bsd-3-clause
atzengin/OCC
oc-utils/python/modtool/code_generator.py
1
2298
# # Copyright 2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ A code generator (needed by ModToolAdd) """ from templates import Templates import Cheetah.Template from util_functions import str_to_fancyc_comment from util_functions import str_to_python_comment from util_functions import strip_default_values from util_functions import strip_arg_types from util_functions import strip_arg_types_occ class GRMTemplate(Cheetah.Template.Template): """ An extended template class """ def __init__(self, src, searchList): self.grtypelist = { 'sync': 'sync_block', 'sink': 'sync_block', 'source': 'sync_block', 'decimator': 'sync_decimator', 'interpolator': 'sync_interpolator', 'general': 'block', 'tagged_stream': 'tagged_stream_block', 'hier': 'hier_block2', 'noblock': ''} searchList['str_to_fancyc_comment'] = str_to_fancyc_comment searchList['str_to_python_comment'] = str_to_python_comment searchList['strip_default_values'] = strip_default_values searchList['strip_arg_types'] = strip_arg_types searchList['strip_arg_types_occ'] = strip_arg_types_occ Cheetah.Template.Template.__init__(self, src, searchList=searchList) self.grblocktype = self.grtypelist[searchList['blocktype']] def get_template(tpl_id, **kwargs): """ Return the template given by tpl_id, parsed through 
Cheetah """ return str(GRMTemplate(Templates[tpl_id], searchList=kwargs))
gpl-3.0
2ndQuadrant/ansible
test/units/module_utils/facts/test_collector.py
78
26241
# This file is part of Ansible # -*- coding: utf-8 -*- # # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # Make coding more python3-ish from __future__ import (absolute_import, division) __metaclass__ = type from collections import defaultdict import pprint # for testing from units.compat import unittest from ansible.module_utils.facts import collector from ansible.module_utils.facts import default_collectors class TestFindCollectorsForPlatform(unittest.TestCase): def test(self): compat_platforms = [{'system': 'Generic'}] res = collector.find_collectors_for_platform(default_collectors.collectors, compat_platforms) for coll_class in res: self.assertIn(coll_class._platform, ('Generic')) def test_linux(self): compat_platforms = [{'system': 'Linux'}] res = collector.find_collectors_for_platform(default_collectors.collectors, compat_platforms) for coll_class in res: self.assertIn(coll_class._platform, ('Linux')) def test_linux_or_generic(self): compat_platforms = [{'system': 'Generic'}, {'system': 'Linux'}] res = collector.find_collectors_for_platform(default_collectors.collectors, compat_platforms) for coll_class in res: self.assertIn(coll_class._platform, ('Generic', 'Linux')) class TestSelectCollectorNames(unittest.TestCase): def _assert_equal_detail(self, obj1, obj2, msg=None): msg = 'objects are not equal\n%s\n\n!=\n\n%s' % (pprint.pformat(obj1), pprint.pformat(obj2)) return self.assertEqual(obj1, obj2, 
msg) def test(self): collector_names = ['distribution', 'all_ipv4_addresses', 'local', 'pkg_mgr'] all_fact_subsets = self._all_fact_subsets() res = collector.select_collector_classes(collector_names, all_fact_subsets) expected = [default_collectors.DistributionFactCollector, default_collectors.PkgMgrFactCollector] self._assert_equal_detail(res, expected) def test_default_collectors(self): platform_info = {'system': 'Generic'} compat_platforms = [platform_info] collectors_for_platform = collector.find_collectors_for_platform(default_collectors.collectors, compat_platforms) all_fact_subsets, aliases_map = collector.build_fact_id_to_collector_map(collectors_for_platform) all_valid_subsets = frozenset(all_fact_subsets.keys()) collector_names = collector.get_collector_names(valid_subsets=all_valid_subsets, aliases_map=aliases_map, platform_info=platform_info) complete_collector_names = collector._solve_deps(collector_names, all_fact_subsets) dep_map = collector.build_dep_data(complete_collector_names, all_fact_subsets) ordered_deps = collector.tsort(dep_map) ordered_collector_names = [x[0] for x in ordered_deps] res = collector.select_collector_classes(ordered_collector_names, all_fact_subsets) self.assertTrue(res.index(default_collectors.ServiceMgrFactCollector) > res.index(default_collectors.DistributionFactCollector), res) self.assertTrue(res.index(default_collectors.ServiceMgrFactCollector) > res.index(default_collectors.PlatformFactCollector), res) def _all_fact_subsets(self, data=None): all_fact_subsets = defaultdict(list) _data = {'pkg_mgr': [default_collectors.PkgMgrFactCollector], 'distribution': [default_collectors.DistributionFactCollector], 'network': [default_collectors.LinuxNetworkCollector]} data = data or _data for key, value in data.items(): all_fact_subsets[key] = value return all_fact_subsets class TestGetCollectorNames(unittest.TestCase): def test_none(self): res = collector.get_collector_names() self.assertIsInstance(res, set) self.assertEqual(res, 
set([])) def test_empty_sets(self): res = collector.get_collector_names(valid_subsets=frozenset([]), minimal_gather_subset=frozenset([]), gather_subset=[]) self.assertIsInstance(res, set) self.assertEqual(res, set([])) def test_empty_valid_and_min_with_all_gather_subset(self): res = collector.get_collector_names(valid_subsets=frozenset([]), minimal_gather_subset=frozenset([]), gather_subset=['all']) self.assertIsInstance(res, set) self.assertEqual(res, set([])) def test_one_valid_with_all_gather_subset(self): valid_subsets = frozenset(['my_fact']) res = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=frozenset([]), gather_subset=['all']) self.assertIsInstance(res, set) self.assertEqual(res, set(['my_fact'])) def _compare_res(self, gather_subset1, gather_subset2, valid_subsets=None, min_subset=None): valid_subsets = valid_subsets or frozenset() minimal_gather_subset = min_subset or frozenset() res1 = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=gather_subset1) res2 = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=gather_subset2) return res1, res2 def test_not_all_other_order(self): valid_subsets = frozenset(['min_fact', 'something_else', 'whatever']) minimal_gather_subset = frozenset(['min_fact']) res1, res2 = self._compare_res(['!all', 'whatever'], ['whatever', '!all'], valid_subsets=valid_subsets, min_subset=minimal_gather_subset) self.assertEqual(res1, res2) self.assertEqual(res1, set(['min_fact', 'whatever'])) def test_not_all_other_order_min(self): valid_subsets = frozenset(['min_fact', 'something_else', 'whatever']) minimal_gather_subset = frozenset(['min_fact']) res1, res2 = self._compare_res(['!min_fact', 'whatever'], ['whatever', '!min_fact'], valid_subsets=valid_subsets, min_subset=minimal_gather_subset) self.assertEqual(res1, res2) self.assertEqual(res1, set(['whatever'])) def 
test_one_minimal_with_all_gather_subset(self): my_fact = 'my_fact' valid_subsets = frozenset([my_fact]) minimal_gather_subset = valid_subsets res = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=['all']) self.assertIsInstance(res, set) self.assertEqual(res, set(['my_fact'])) def test_with_all_gather_subset(self): valid_subsets = frozenset(['my_fact', 'something_else', 'whatever']) minimal_gather_subset = frozenset(['my_fact']) # even with '!all', the minimal_gather_subset should be returned res = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=['all']) self.assertIsInstance(res, set) self.assertEqual(res, set(['my_fact', 'something_else', 'whatever'])) def test_one_minimal_with_not_all_gather_subset(self): valid_subsets = frozenset(['my_fact', 'something_else', 'whatever']) minimal_gather_subset = frozenset(['my_fact']) # even with '!all', the minimal_gather_subset should be returned res = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=['!all']) self.assertIsInstance(res, set) self.assertEqual(res, set(['my_fact'])) def test_gather_subset_excludes(self): valid_subsets = frozenset(['my_fact', 'something_else', 'whatever']) minimal_gather_subset = frozenset(['min_fact', 'min_another']) # even with '!all', the minimal_gather_subset should be returned res = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, # gather_subset=set(['all', '!my_fact', '!whatever'])) # gather_subset=['all', '!my_fact', '!whatever']) gather_subset=['!min_fact', '!whatever']) self.assertIsInstance(res, set) # min_another is in minimal_gather_subset, so always returned self.assertEqual(res, set(['min_another'])) def test_gather_subset_excludes_ordering(self): valid_subsets = frozenset(['my_fact', 'something_else', 'whatever']) 
minimal_gather_subset = frozenset(['my_fact']) res = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=['!all', 'whatever']) self.assertIsInstance(res, set) # excludes are higher precedence than includes, so !all excludes everything # and then minimal_gather_subset is added. so '!all', 'other' == '!all' self.assertEqual(res, set(['my_fact', 'whatever'])) def test_gather_subset_excludes_min(self): valid_subsets = frozenset(['min_fact', 'something_else', 'whatever']) minimal_gather_subset = frozenset(['min_fact']) res = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=['whatever', '!min']) self.assertIsInstance(res, set) # excludes are higher precedence than includes, so !all excludes everything # and then minimal_gather_subset is added. so '!all', 'other' == '!all' self.assertEqual(res, set(['whatever'])) def test_gather_subset_excludes_min_and_all(self): valid_subsets = frozenset(['min_fact', 'something_else', 'whatever']) minimal_gather_subset = frozenset(['min_fact']) res = collector.get_collector_names(valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=['whatever', '!all', '!min']) self.assertIsInstance(res, set) # excludes are higher precedence than includes, so !all excludes everything # and then minimal_gather_subset is added. 
so '!all', 'other' == '!all' self.assertEqual(res, set(['whatever'])) def test_invaid_gather_subset(self): valid_subsets = frozenset(['my_fact', 'something_else']) minimal_gather_subset = frozenset(['my_fact']) self.assertRaisesRegexp(TypeError, r'Bad subset .* given to Ansible.*allowed\:.*all,.*my_fact.*', collector.get_collector_names, valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=['my_fact', 'not_a_valid_gather_subset']) class TestFindUnresolvedRequires(unittest.TestCase): def test(self): names = ['network', 'virtual', 'env'] all_fact_subsets = {'env': [default_collectors.EnvFactCollector], 'network': [default_collectors.LinuxNetworkCollector], 'virtual': [default_collectors.LinuxVirtualCollector]} res = collector.find_unresolved_requires(names, all_fact_subsets) # pprint.pprint(res) self.assertIsInstance(res, set) self.assertEqual(res, set(['platform', 'distribution'])) def test_resolved(self): names = ['network', 'virtual', 'env', 'platform', 'distribution'] all_fact_subsets = {'env': [default_collectors.EnvFactCollector], 'network': [default_collectors.LinuxNetworkCollector], 'distribution': [default_collectors.DistributionFactCollector], 'platform': [default_collectors.PlatformFactCollector], 'virtual': [default_collectors.LinuxVirtualCollector]} res = collector.find_unresolved_requires(names, all_fact_subsets) # pprint.pprint(res) self.assertIsInstance(res, set) self.assertEqual(res, set()) class TestBuildDepData(unittest.TestCase): def test(self): names = ['network', 'virtual', 'env'] all_fact_subsets = {'env': [default_collectors.EnvFactCollector], 'network': [default_collectors.LinuxNetworkCollector], 'virtual': [default_collectors.LinuxVirtualCollector]} res = collector.build_dep_data(names, all_fact_subsets) # pprint.pprint(dict(res)) self.assertIsInstance(res, defaultdict) self.assertEqual(dict(res), {'network': set(['platform', 'distribution']), 'virtual': set(), 'env': set()}) class 
TestSolveDeps(unittest.TestCase): def test_no_solution(self): unresolved = set(['required_thing1', 'required_thing2']) all_fact_subsets = {'env': [default_collectors.EnvFactCollector], 'network': [default_collectors.LinuxNetworkCollector], 'virtual': [default_collectors.LinuxVirtualCollector]} self.assertRaises(collector.CollectorNotFoundError, collector._solve_deps, unresolved, all_fact_subsets) def test(self): unresolved = set(['env', 'network']) all_fact_subsets = {'env': [default_collectors.EnvFactCollector], 'network': [default_collectors.LinuxNetworkCollector], 'virtual': [default_collectors.LinuxVirtualCollector], 'platform': [default_collectors.PlatformFactCollector], 'distribution': [default_collectors.DistributionFactCollector]} res = collector.resolve_requires(unresolved, all_fact_subsets) res = collector._solve_deps(unresolved, all_fact_subsets) self.assertIsInstance(res, set) for goal in unresolved: self.assertIn(goal, res) class TestResolveRequires(unittest.TestCase): def test_no_resolution(self): unresolved = ['required_thing1', 'required_thing2'] all_fact_subsets = {'env': [default_collectors.EnvFactCollector], 'network': [default_collectors.LinuxNetworkCollector], 'virtual': [default_collectors.LinuxVirtualCollector]} self.assertRaisesRegexp(collector.UnresolvedFactDep, 'unresolved fact dep.*required_thing2', collector.resolve_requires, unresolved, all_fact_subsets) def test(self): unresolved = ['env', 'network'] all_fact_subsets = {'env': [default_collectors.EnvFactCollector], 'network': [default_collectors.LinuxNetworkCollector], 'virtual': [default_collectors.LinuxVirtualCollector]} res = collector.resolve_requires(unresolved, all_fact_subsets) for goal in unresolved: self.assertIn(goal, res) def test_exception(self): unresolved = ['required_thing1'] all_fact_subsets = {} try: collector.resolve_requires(unresolved, all_fact_subsets) except collector.UnresolvedFactDep as exc: self.assertIn(unresolved[0], '%s' % exc) class 
TestTsort(unittest.TestCase): def test(self): dep_map = {'network': set(['distribution', 'platform']), 'virtual': set(), 'platform': set(['what_platform_wants']), 'what_platform_wants': set(), 'network_stuff': set(['network'])} res = collector.tsort(dep_map) # pprint.pprint(res) self.assertIsInstance(res, list) names = [x[0] for x in res] self.assertTrue(names.index('network_stuff') > names.index('network')) self.assertTrue(names.index('platform') > names.index('what_platform_wants')) self.assertTrue(names.index('network') > names.index('platform')) def test_cycles(self): dep_map = {'leaf1': set(), 'leaf2': set(), 'node1': set(['node2']), 'node2': set(['node3']), 'node3': set(['node1'])} self.assertRaises(collector.CycleFoundInFactDeps, collector.tsort, dep_map) def test_just_nodes(self): dep_map = {'leaf1': set(), 'leaf4': set(), 'leaf3': set(), 'leaf2': set()} res = collector.tsort(dep_map) self.assertIsInstance(res, list) names = [x[0] for x in res] # not a lot to assert here, any order of the # results is valid self.assertEqual(set(names), set(dep_map.keys())) def test_self_deps(self): dep_map = {'node1': set(['node1']), 'node2': set(['node2'])} self.assertRaises(collector.CycleFoundInFactDeps, collector.tsort, dep_map) def test_unsolvable(self): dep_map = {'leaf1': set(), 'node2': set(['leaf2'])} res = collector.tsort(dep_map) self.assertIsInstance(res, list) names = [x[0] for x in res] self.assertEqual(set(names), set(dep_map.keys())) def test_chain(self): dep_map = {'leaf1': set(['leaf2']), 'leaf2': set(['leaf3']), 'leaf3': set(['leaf4']), 'leaf4': set(), 'leaf5': set(['leaf1'])} res = collector.tsort(dep_map) self.assertIsInstance(res, list) names = [x[0] for x in res] self.assertEqual(set(names), set(dep_map.keys())) def test_multi_pass(self): dep_map = {'leaf1': set(), 'leaf2': set(['leaf3', 'leaf1', 'leaf4', 'leaf5']), 'leaf3': set(['leaf4', 'leaf1']), 'leaf4': set(['leaf1']), 'leaf5': set(['leaf1'])} res = collector.tsort(dep_map) 
self.assertIsInstance(res, list) names = [x[0] for x in res] self.assertEqual(set(names), set(dep_map.keys())) self.assertTrue(names.index('leaf1') < names.index('leaf2')) for leaf in ('leaf2', 'leaf3', 'leaf4', 'leaf5'): self.assertTrue(names.index('leaf1') < names.index(leaf)) class TestCollectorClassesFromGatherSubset(unittest.TestCase): maxDiff = None def _classes(self, all_collector_classes=None, valid_subsets=None, minimal_gather_subset=None, gather_subset=None, gather_timeout=None, platform_info=None): platform_info = platform_info or {'system': 'Linux'} return collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes, valid_subsets=valid_subsets, minimal_gather_subset=minimal_gather_subset, gather_subset=gather_subset, gather_timeout=gather_timeout, platform_info=platform_info) def test_no_args(self): res = self._classes() self.assertIsInstance(res, list) self.assertEqual(res, []) def test_not_all(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['!all']) self.assertIsInstance(res, list) self.assertEqual(res, []) def test_all(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['all']) self.assertIsInstance(res, list) def test_hardware(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['hardware']) self.assertIsInstance(res, list) self.assertIn(default_collectors.PlatformFactCollector, res) self.assertIn(default_collectors.LinuxHardwareCollector, res) self.assertTrue(res.index(default_collectors.LinuxHardwareCollector) > res.index(default_collectors.PlatformFactCollector)) def test_network(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['network']) self.assertIsInstance(res, list) self.assertIn(default_collectors.DistributionFactCollector, res) self.assertIn(default_collectors.PlatformFactCollector, res) 
self.assertIn(default_collectors.LinuxNetworkCollector, res) self.assertTrue(res.index(default_collectors.LinuxNetworkCollector) > res.index(default_collectors.PlatformFactCollector)) self.assertTrue(res.index(default_collectors.LinuxNetworkCollector) > res.index(default_collectors.DistributionFactCollector)) # self.assertEqual(set(res, [default_collectors.DistributionFactCollector, # default_collectors.PlatformFactCollector, # default_collectors.LinuxNetworkCollector]) def test_env(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['env']) self.assertIsInstance(res, list) self.assertEqual(res, [default_collectors.EnvFactCollector]) def test_facter(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=set(['env', 'facter'])) self.assertIsInstance(res, list) self.assertEqual(set(res), set([default_collectors.EnvFactCollector, default_collectors.FacterFactCollector])) def test_facter_ohai(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=set(['env', 'facter', 'ohai'])) self.assertIsInstance(res, list) self.assertEqual(set(res), set([default_collectors.EnvFactCollector, default_collectors.FacterFactCollector, default_collectors.OhaiFactCollector])) def test_just_facter(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=set(['facter'])) self.assertIsInstance(res, list) self.assertEqual(set(res), set([default_collectors.FacterFactCollector])) def test_collector_specified_multiple_times(self): res = self._classes(all_collector_classes=default_collectors.collectors, gather_subset=['platform', 'all', 'machine']) self.assertIsInstance(res, list) self.assertIn(default_collectors.PlatformFactCollector, res) def test_unknown_collector(self): # something claims 'unknown_collector' is a valid gather_subset, but there is # no FactCollector mapped to 'unknown_collector' self.assertRaisesRegexp(TypeError, r'Bad 
subset.*unknown_collector.*given to Ansible.*allowed\:.*all,.*env.*', self._classes, all_collector_classes=default_collectors.collectors, gather_subset=['env', 'unknown_collector'])
gpl-3.0
ujenmr/ansible
lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork_facts.py
16
6547
#!/usr/bin/python # # Copyright (c) 2019 Zim Kalinowski, (@zikalino) # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_devtestlabvirtualnetwork_facts version_added: "2.8" short_description: Get Azure DevTest Lab Virtual Network facts. description: - Get facts of Azure DevTest Lab Virtual Network. options: resource_group: description: - The name of the resource group. required: True lab_name: description: - The name of DevTest Lab. required: True name: description: - The name of DevTest Lab Virtual Network. extends_documentation_fragment: - azure author: - "Zim Kalinowski (@zikalino)" ''' EXAMPLES = ''' - name: Get instance of DevTest Lab Virtual Network azure_rm_devtestlabvirtualnetwork_facts: resource_group: myResourceGroup lab_name: myLab name: myVirtualNetwork - name: List all Virtual Networks in DevTest Lab azure_rm_devtestlabvirtualnetwork_facts: resource_group: myResourceGroup lab_name: myLab name: myVirtualNetwork ''' RETURN = ''' virtualnetworks: description: A list of dictionaries containing facts for DevTest Lab Virtual Network. returned: always type: complex contains: id: description: - The identifier of the virtual network. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/virt ualnetworks/myVirtualNetwork" resource_group: description: - Name of the resource group. returned: always type: str sample: myResourceGroup lab_name: description: - Name of the lab. returned: always type: str sample: myLab name: description: - Name of the virtual network. returned: always type: str sample: myVirtualNetwork description: description: - Description of the virtual network. 
returned: always type: str sample: My Virtual Network external_provider_resource_id: description: - Resource id of an external virtual network. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/my VirtualNetwork" provisioning_state: description: - Provisioning state of the virtual network. returned: always type: str sample: Succeeded ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError from azure.mgmt.devtestlabs import DevTestLabsClient from msrest.serialization import Model except ImportError: # This is handled in azure_rm_common pass class AzureRMDevTestLabVirtualNetworkFacts(AzureRMModuleBase): def __init__(self): # define user inputs into argument self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), lab_name=dict( type='str', required=True ), name=dict( type='str' ) ) # store the results of the module operation self.results = dict( changed=False ) self.mgmt_client = None self.resource_group = None self.lab_name = None self.name = None super(AzureRMDevTestLabVirtualNetworkFacts, self).__init__(self.module_arg_spec, supports_tags=False) def exec_module(self, **kwargs): for key in self.module_arg_spec: setattr(self, key, kwargs[key]) self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, base_url=self._cloud_environment.endpoints.resource_manager) if self.name: self.results['virtualnetworks'] = self.get() else: self.results['virtualnetworks'] = self.list() return self.results def list(self): response = None results = [] try: response = self.mgmt_client.virtual_networks.list(resource_group_name=self.resource_group, lab_name=self.lab_name) self.log("Response : {0}".format(response)) except CloudError as e: self.fail('Could not list Virtual Networks for DevTest Lab.') if response is not None: for item in response: 
results.append(self.format_response(item)) return results def get(self): response = None results = [] try: response = self.mgmt_client.virtual_networks.get(resource_group_name=self.resource_group, lab_name=self.lab_name, name=self.name) self.log("Response : {0}".format(response)) except CloudError as e: self.fail('Could not get facts for Virtual Network.') if response: results.append(self.format_response(response)) return results def format_response(self, item): d = item.as_dict() d = { 'resource_group': self.resource_group, 'lab_name': self.lab_name, 'name': d.get('name', None), 'id': d.get('id', None), 'external_provider_resource_id': d.get('external_provider_resource_id', None), 'provisioning_state': d.get('provisioning_state', None), 'description': d.get('description', None) } return d def main(): AzureRMDevTestLabVirtualNetworkFacts() if __name__ == '__main__': main()
gpl-3.0
ken-muturi/pombola
pombola/south_africa/management/commands/south_africa_import_parties.py
5
1550
''' Checks whether parties in a CSV file exist in the database and adds them if necessary. ''' import os import sys import unicodecsv from pombola.core.models import Organisation, OrganisationKind from django.core.management.base import LabelCommand class Command(LabelCommand): help = 'Import csv file of South African political parties' def handle_label(self, label, **options): verbosity = int(options['verbosity']) if not os.path.exists(label): print >> sys.stderr, "The parties file doesn't exist", sys.exit(1) #get the party kind object partykind = OrganisationKind.objects.get(slug='party') #check each party by checking against slug with open(label, 'rb') as csvfile: parties = unicodecsv.reader(csvfile) for slug, name in parties: try: party = Organisation.objects.get(slug=slug) if party.name != name: if verbosity >= 1: print 'Updating party %s from %s to %s' % (slug, party.name, name) party.name = name party.save() except Organisation.DoesNotExist: #we need to add the party if verbosity >= 1: print 'Adding party %s' % name Organisation.objects.create( name = name, slug = slug, kind = partykind)
agpl-3.0
SNAPPETITE/backend
flask/lib/python2.7/site-packages/pbr/testr_command.py
59
5446
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright (c) 2013 Testrepository Contributors # # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. # # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # license you chose for the specific language governing permissions and # limitations under that license. """setuptools/distutils commands to run testr via setup.py Currently provides 'testr' which runs tests using testr. You can pass --coverage which will also export PYTHON='coverage run --source <your package>' and automatically combine the coverage from each testr backend test runner after the run completes. To use, just use setuptools/distribute and depend on testr, and it should be picked up automatically (as the commands are exported in the testrepository package metadata. 
""" from distutils import cmd import distutils.errors import logging import os import sys logger = logging.getLogger(__name__) class TestrReal(cmd.Command): description = "Run unit tests using testr" user_options = [ ('coverage', None, "Replace PYTHON with coverage and merge coverage " "from each testr worker."), ('testr-args=', 't', "Run 'testr' with these args"), ('omit=', 'o', "Files to omit from coverage calculations"), ('coverage-package-name=', None, "Use this name for coverage package"), ('slowest', None, "Show slowest test times after tests complete."), ('no-parallel', None, "Run testr serially"), ('log-level=', 'l', "Log level (default: info)"), ] boolean_options = ['coverage', 'slowest', 'no_parallel'] def _run_testr(self, *args): logger.debug("_run_testr called with args = %r", args) return commands.run_argv([sys.argv[0]] + list(args), sys.stdin, sys.stdout, sys.stderr) def initialize_options(self): self.testr_args = None self.coverage = None self.omit = "" self.slowest = None self.coverage_package_name = None self.no_parallel = None self.log_level = 'info' def finalize_options(self): self.log_level = getattr( logging, self.log_level.upper(), logging.INFO) logging.basicConfig(level=self.log_level) logger.debug("finalize_options called") if self.testr_args is None: self.testr_args = [] else: self.testr_args = self.testr_args.split() if self.omit: self.omit = "--omit=%s" % self.omit logger.debug("finalize_options: self.__dict__ = %r", self.__dict__) def run(self): """Set up testr repo, then run testr.""" logger.debug("run called") if not os.path.isdir(".testrepository"): self._run_testr("init") if self.coverage: self._coverage_before() if not self.no_parallel: testr_ret = self._run_testr("run", "--parallel", *self.testr_args) else: testr_ret = self._run_testr("run", *self.testr_args) if testr_ret: raise distutils.errors.DistutilsError( "testr failed (%d)" % testr_ret) if self.slowest: print("Slowest Tests") self._run_testr("slowest") if self.coverage: 
self._coverage_after() def _coverage_before(self): logger.debug("_coverage_before called") package = self.distribution.get_name() if package.startswith('python-'): package = package[7:] # Use this as coverage package name if self.coverage_package_name: package = self.coverage_package_name options = "--source %s --parallel-mode" % package os.environ['PYTHON'] = ("coverage run %s" % options) logger.debug("os.environ['PYTHON'] = %r", os.environ['PYTHON']) def _coverage_after(self): logger.debug("_coverage_after called") os.system("coverage combine") os.system("coverage html -d ./cover %s" % self.omit) class TestrFake(cmd.Command): description = "Run unit tests using testr" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): print("Install testrepository to run 'testr' command properly.") try: from testrepository import commands have_testr = True Testr = TestrReal except ImportError: have_testr = False Testr = TestrFake
mit
fghaas/edx-configuration
playbooks/roles/supervisor/files/pre_supervisor_checks.py
8
12225
import argparse import boto from boto.utils import get_instance_metadata from boto.exception import AWSConnectionError import hipchat import os import subprocess import traceback import socket import time # Services that should be checked for migrations. MIGRATION_COMMANDS = { 'lms': ". {env_file}; {python} {code_dir}/manage.py lms migrate --noinput --list --settings=aws", 'cms': ". {env_file}; {python} {code_dir}/manage.py cms migrate --noinput --list --settings=aws", 'xqueue': "{python} {code_dir}/manage.py xqueue migrate --noinput --settings=aws --db-dry-run --merge", 'ecommerce': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list", 'programs': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list", 'insights': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list", 'analytics_api': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list" } HIPCHAT_USER = "PreSupervisor" # Max amount of time to wait for tags to be applied. MAX_BACKOFF = 120 INITIAL_BACKOFF = 1 def services_for_instance(instance_id): """ Get the list of all services named by the services tag in this instance's tags. 
""" ec2 = boto.connect_ec2() reservations = ec2.get_all_instances(instance_ids=[instance_id]) for reservation in reservations: for instance in reservation.instances: if instance.id == instance_id: try: services = instance.tags['services'].split(',') except KeyError as ke: msg = "Tag named 'services' not found on this instance({})".format(instance_id) raise Exception(msg) for service in services: yield service def edp_for_instance(instance_id): ec2 = boto.connect_ec2() reservations = ec2.get_all_instances(instance_ids=[instance_id]) for reservation in reservations: for instance in reservation.instances: if instance.id == instance_id: try: environment = instance.tags['environment'] deployment = instance.tags['deployment'] play = instance.tags['play'] except KeyError as ke: msg = "{} tag not found on this instance({})".format(ke.message, instance_id) raise Exception(msg) return (environment, deployment, play) if __name__ == '__main__': parser = argparse.ArgumentParser( description="Enable all services that are in the services tag of this ec2 instance.") parser.add_argument("-a","--available", help="The location of the available services.") parser.add_argument("-e","--enabled", help="The location of the enabled services.") migration_args = parser.add_argument_group("edxapp_migrations", "Args for running edxapp migration checks.") migration_args.add_argument("--edxapp-code-dir", help="Location of the edx-platform code.") migration_args.add_argument("--edxapp-python", help="Path to python to use for executing migration check.") migration_args.add_argument("--edxapp-env", help="Location of the ecommerce environment file.") xq_migration_args = parser.add_argument_group("xqueue_migrations", "Args for running xqueue migration checks.") xq_migration_args.add_argument("--xqueue-code-dir", help="Location of the xqueue code.") xq_migration_args.add_argument("--xqueue-python", help="Path to python to use for executing migration check.") ecom_migration_args = 
parser.add_argument_group("ecommerce_migrations", "Args for running ecommerce migration checks.") ecom_migration_args.add_argument("--ecommerce-python", help="Path to python to use for executing migration check.") ecom_migration_args.add_argument("--ecommerce-env", help="Location of the ecommerce environment file.") ecom_migration_args.add_argument("--ecommerce-code-dir", help="Location to of the ecommerce code.") programs_migration_args = parser.add_argument_group("programs_migrations", "Args for running programs migration checks.") programs_migration_args.add_argument("--programs-python", help="Path to python to use for executing migration check.") programs_migration_args.add_argument("--programs-env", help="Location of the programs environment file.") programs_migration_args.add_argument("--programs-code-dir", help="Location to of the programs code.") insights_migration_args = parser.add_argument_group("insights_migrations", "Args for running insights migration checks.") insights_migration_args.add_argument("--insights-python", help="Path to python to use for executing migration check.") insights_migration_args.add_argument("--insights-env", help="Location of the insights environment file.") insights_migration_args.add_argument("--insights-code-dir", help="Location to of the insights code.") analyticsapi_migration_args = parser.add_argument_group("analytics_api_migrations", "Args for running analytics_api migration checks.") analyticsapi_migration_args.add_argument("--analytics-api-python", help="Path to python to use for executing migration check.") analyticsapi_migration_args.add_argument("--analytics-api-env", help="Location of the analytics_api environment file.") analyticsapi_migration_args.add_argument("--analytics-api-code-dir", help="Location to of the analytics_api code.") hipchat_args = parser.add_argument_group("hipchat", "Args for hipchat notification.") hipchat_args.add_argument("-c","--hipchat-api-key", help="Hipchat token if you want to receive 
notifications via hipchat.") hipchat_args.add_argument("-r","--hipchat-room", help="Room to send messages to.") args = parser.parse_args() report = [] prefix = None notify = None try: if args.hipchat_api_key: hc = hipchat.HipChat(token=args.hipchat_api_key) notify = lambda message: hc.message_room(room_id=args.hipchat_room, message_from=HIPCHAT_USER, message=message) except Exception as e: print("Failed to initialize hipchat, {}".format(e)) traceback.print_exc() instance_id = get_instance_metadata()['instance-id'] prefix = instance_id ec2 = boto.connect_ec2() reservations = ec2.get_all_instances(instance_ids=[instance_id]) instance = reservations[0].instances[0] if instance.instance_profile['arn'].endswith('/abbey'): print("Running an abbey build. Not starting any services.") # Needs to exit with 1 instead of 0 to prevent # services from starting. exit(1) time_left = MAX_BACKOFF backoff = INITIAL_BACKOFF environment = None deployment = None play = None while time_left > 0: try: environment, deployment, play = edp_for_instance(instance_id) prefix = "{environment}-{deployment}-{play}-{instance_id}".format( environment=environment, deployment=deployment, play=play, instance_id=instance_id) break except Exception as e: print("Failed to get EDP for {}: {}".format(instance_id, str(e))) # With the time limit being 2 minutes we will # try 5 times before giving up. time.sleep(backoff) time_left -= backoff backoff = backoff * 2 if environment is None or deployment is None or play is None: msg = "Unable to retrieve environment, deployment, or play tag." 
print(msg) if notify: notify("{} : {}".format(prefix, msg)) exit(0) #get the hostname of the sandbox hostname = socket.gethostname() try: #get the list of the volumes, that are attached to the instance volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id}) for volume in volumes: volume.add_tags({"hostname": hostname, "environment": environment, "deployment": deployment, "cluster": play, "instance-id": instance_id, "created": volume.create_time }) except Exception as e: msg = "Failed to tag volumes associated with {}: {}".format(instance_id, str(e)) print(msg) if notify: notify(msg) try: for service in services_for_instance(instance_id): if service in MIGRATION_COMMANDS: # Do extra migration related stuff. if service == 'xqueue' and args.xqueue_code_dir: cmd = MIGRATION_COMMANDS[service].format(python=args.xqueue_python, code_dir=xqueue_code_dir) if os.path.exists(args.xqueue_code_dir): os.chdir(args.xqueue_code_dir) # Run migration check command. output = subprocess.check_output(cmd, shell=True) if 'Migrating' in output: raise Exception("Migrations have not been run for {}".format(service)) else: new_services = { "lms": {'python': args.edxapp_python, 'env_file': args.edxapp_env, 'code_dir': args.edxapp_code_dir}, "cms": {'python': args.edxapp_python, 'env_file': args.edxapp_env, 'code_dir': args.edxapp_code_dir}, "ecommerce": {'python': args.ecommerce_python, 'env_file': args.ecommerce_env, 'code_dir': args.ecommerce_code_dir}, "programs": {'python': args.programs_python, 'env_file': args.programs_env, 'code_dir': args.programs_code_dir}, "insights": {'python': args.insights_python, 'env_file': args.insights_env, 'code_dir': args.insights_code_dir}, "analytics_api": {'python': args.analytics_api_python, 'env_file': args.analytics_api_env, 'code_dir': args.analytics_api_code_dir} } if service in new_services and all(arg!=None for arg in new_services[service].values()) and service in MIGRATION_COMMANDS: serv_vars = new_services[service] cmd = 
MIGRATION_COMMANDS[service].format(**serv_vars) if os.path.exists(serv_vars['code_dir']): os.chdir(serv_vars['code_dir']) # Run migration check command. output = subprocess.check_output(cmd, shell=True, ) if '[ ]' in output: raise Exception("Migrations have not been run for {}".format(service)) # Link to available service. available_file = os.path.join(args.available, "{}.conf".format(service)) link_location = os.path.join(args.enabled, "{}.conf".format(service)) if os.path.exists(available_file): subprocess.call("ln -sf {} {}".format(available_file, link_location), shell=True) report.append("Enabling service: {}".format(service)) else: raise Exception("No conf available for service: {}".format(link_location)) except AWSConnectionError as ae: msg = "{}: ERROR : {}".format(prefix, ae) if notify: notify(msg) notify(traceback.format_exc()) raise ae except Exception as e: msg = "{}: ERROR : {}".format(prefix, e) print(msg) if notify: notify(msg) traceback.print_exc() raise e else: msg = "{}: {}".format(prefix, " | ".join(report)) print(msg) if notify: notify(msg)
agpl-3.0
amisrs/angular-flask
angular_flask/lib/python2.7/site-packages/yaml/scanner.py
64
52446
# Scanner produces tokens of the following types: # STREAM-START # STREAM-END # DIRECTIVE(name, value) # DOCUMENT-START # DOCUMENT-END # BLOCK-SEQUENCE-START # BLOCK-MAPPING-START # BLOCK-END # FLOW-SEQUENCE-START # FLOW-MAPPING-START # FLOW-SEQUENCE-END # FLOW-MAPPING-END # BLOCK-ENTRY # FLOW-ENTRY # KEY # VALUE # ALIAS(value) # ANCHOR(value) # TAG(value) # SCALAR(value, plain, style) # # Read comments in the Scanner code for more details. # __all__ = ['Scanner', 'ScannerError'] from error import MarkedYAMLError from tokens import * class ScannerError(MarkedYAMLError): pass class SimpleKey(object): # See below simple keys treatment. def __init__(self, token_number, required, index, line, column, mark): self.token_number = token_number self.required = required self.index = index self.line = line self.column = column self.mark = mark class Scanner(object): def __init__(self): """Initialize the scanner.""" # It is assumed that Scanner and Reader will have a common descendant. # Reader do the dirty work of checking for BOM and converting the # input data to Unicode. It also adds NUL to the end. # # Reader supports the following methods # self.peek(i=0) # peek the next i-th character # self.prefix(l=1) # peek the next l characters # self.forward(l=1) # read the next l characters and move the pointer. # Had we reached the end of the stream? self.done = False # The number of unclosed '{' and '['. `flow_level == 0` means block # context. self.flow_level = 0 # List of processed tokens that are not yet emitted. self.tokens = [] # Add the STREAM-START token. self.fetch_stream_start() # Number of tokens that were emitted through the `get_token` method. self.tokens_taken = 0 # The current indentation level. self.indent = -1 # Past indentation levels. self.indents = [] # Variables related to simple keys treatment. # A simple key is a key that is not denoted by the '?' indicator. # Example of simple keys: # --- # block simple key: value # ? 
not a simple key: # : { flow simple key: value } # We emit the KEY token before all keys, so when we find a potential # simple key, we try to locate the corresponding ':' indicator. # Simple keys should be limited to a single line and 1024 characters. # Can a simple key start at the current position? A simple key may # start: # - at the beginning of the line, not counting indentation spaces # (in block context), # - after '{', '[', ',' (in the flow context), # - after '?', ':', '-' (in the block context). # In the block context, this flag also signifies if a block collection # may start at the current position. self.allow_simple_key = True # Keep track of possible simple keys. This is a dictionary. The key # is `flow_level`; there can be no more that one possible simple key # for each level. The value is a SimpleKey record: # (token_number, required, index, line, column, mark) # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), # '[', or '{' tokens. self.possible_simple_keys = {} # Public methods. def check_token(self, *choices): # Check if the next token is one of the given types. while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: if not choices: return True for choice in choices: if isinstance(self.tokens[0], choice): return True return False def peek_token(self): # Return the next token, but do not delete if from the queue. while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: return self.tokens[0] def get_token(self): # Return the next token. while self.need_more_tokens(): self.fetch_more_tokens() if self.tokens: self.tokens_taken += 1 return self.tokens.pop(0) # Private methods. def need_more_tokens(self): if self.done: return False if not self.tokens: return True # The current token may be a potential simple key, so we # need to look further. 
        self.stale_possible_simple_keys()
        # If the head token could still turn into a KEY, keep scanning until
        # that is resolved.
        if self.next_possible_simple_key() == self.tokens_taken:
            return True

    def fetch_more_tokens(self):
        # Scan exactly one more token (plus any implied BLOCK-* tokens) by
        # dispatching on the next significant character.

        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()

        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()

        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)

        # Peek the next character.
        ch = self.peek()

        # Is it the end of stream?
        if ch == u'\0':
            return self.fetch_stream_end()

        # Is it a directive?
        if ch == u'%' and self.check_directive():
            return self.fetch_directive()

        # Is it the document start?
        if ch == u'-' and self.check_document_start():
            return self.fetch_document_start()

        # Is it the document end?
        if ch == u'.' and self.check_document_end():
            return self.fetch_document_end()

        # TODO: support for BOM within a stream.
        #if ch == u'\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken

        # Note: the order of the following checks is NOT significant.

        # Is it the flow sequence start indicator?
        if ch == u'[':
            return self.fetch_flow_sequence_start()

        # Is it the flow mapping start indicator?
        if ch == u'{':
            return self.fetch_flow_mapping_start()

        # Is it the flow sequence end indicator?
        if ch == u']':
            return self.fetch_flow_sequence_end()

        # Is it the flow mapping end indicator?
        if ch == u'}':
            return self.fetch_flow_mapping_end()

        # Is it the flow entry indicator?
        if ch == u',':
            return self.fetch_flow_entry()

        # Is it the block entry indicator?
        if ch == u'-' and self.check_block_entry():
            return self.fetch_block_entry()

        # Is it the key indicator?
        if ch == u'?' and self.check_key():
            return self.fetch_key()

        # Is it the value indicator?
        if ch == u':' and self.check_value():
            return self.fetch_value()

        # Is it an alias?
        if ch == u'*':
            return self.fetch_alias()

        # Is it an anchor?
        if ch == u'&':
            return self.fetch_anchor()

        # Is it a tag?
        if ch == u'!':
            return self.fetch_tag()

        # Is it a literal scalar?
        if ch == u'|' and not self.flow_level:
            return self.fetch_literal()

        # Is it a folded scalar?
        if ch == u'>' and not self.flow_level:
            return self.fetch_folded()

        # Is it a single quoted scalar?
        if ch == u'\'':
            return self.fetch_single()

        # Is it a double quoted scalar?
        if ch == u'\"':
            return self.fetch_double()

        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()

        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token"
                % ch.encode('utf-8'), self.get_mark())

    # Simple keys treatment.

    def next_possible_simple_key(self):
        # Return the number of the nearest possible simple key. Actually we
        # don't need to loop through the whole dictionary. We may replace it
        # with the following code:
        #   if not self.possible_simple_keys:
        #       return None
        #   return self.possible_simple_keys[
        #           min(self.possible_simple_keys.keys())].token_number
        min_token_number = None
        for level in self.possible_simple_keys:
            key = self.possible_simple_keys[level]
            if min_token_number is None or key.token_number < min_token_number:
                min_token_number = key.token_number
        return min_token_number

    def stale_possible_simple_keys(self):
        # Remove entries that are no longer possible simple keys. According to
        # the YAML specification, simple keys
        # - should be limited to a single line,
        # - should be no longer than 1024 characters.
        # Disabling this procedure will allow simple keys of any length and
        # height (may cause problems if indentation is broken though).
        # NOTE: .keys() returns a list under Python 2, so deleting while
        # iterating here is safe.
        for level in self.possible_simple_keys.keys():
            key = self.possible_simple_keys[level]
            if key.line != self.line  \
                    or self.index-key.index > 1024:
                if key.required:
                    raise ScannerError("while scanning a simple key", key.mark,
                            "could not find expected ':'", self.get_mark())
                del self.possible_simple_keys[level]

    def save_possible_simple_key(self):
        # The next token may start a simple key. We check if it's possible
        # and save its position. This function is called for
        # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.

        # Check if a simple key is required at the current position.
        required = not self.flow_level and self.indent == self.column

        # The next token might be a simple key. Let's save its number and
        # position.
        if self.allow_simple_key:
            self.remove_possible_simple_key()
            token_number = self.tokens_taken+len(self.tokens)
            key = SimpleKey(token_number, required,
                    self.index, self.line, self.column, self.get_mark())
            self.possible_simple_keys[self.flow_level] = key

    def remove_possible_simple_key(self):
        # Remove the saved possible key position at the current flow level.
        if self.flow_level in self.possible_simple_keys:
            key = self.possible_simple_keys[self.flow_level]
            if key.required:
                raise ScannerError("while scanning a simple key", key.mark,
                        "could not find expected ':'", self.get_mark())
            del self.possible_simple_keys[self.flow_level]

    # Indentation functions.

    def unwind_indent(self, column):

        ## In flow context, tokens should respect indentation.
        ## Actually the condition should be `self.indent >= column` according to
        ## the spec. But this condition will prohibit intuitively correct
        ## constructions such as
        ## key : {
        ## }
        #if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #            "invalid intendation or unclosed '[' or '{'",
        #            self.get_mark())

        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive than specification requires.
        if self.flow_level:
            return

        # In block context, we may need to issue the BLOCK-END tokens.
        while self.indent > column:
            mark = self.get_mark()
            self.indent = self.indents.pop()
            self.tokens.append(BlockEndToken(mark, mark))

    def add_indent(self, column):
        # Check if we need to increase indentation.
        # Returns True iff a new level was pushed onto the indent stack.
        if self.indent < column:
            self.indents.append(self.indent)
            self.indent = column
            return True
        return False

    # Fetchers.

    def fetch_stream_start(self):
        # We always add STREAM-START as the first token and STREAM-END as the
        # last token.
# Read the token. mark = self.get_mark() # Add STREAM-START. self.tokens.append(StreamStartToken(mark, mark, encoding=self.encoding)) def fetch_stream_end(self): # Set the current intendation to -1. self.unwind_indent(-1) # Reset simple keys. self.remove_possible_simple_key() self.allow_simple_key = False self.possible_simple_keys = {} # Read the token. mark = self.get_mark() # Add STREAM-END. self.tokens.append(StreamEndToken(mark, mark)) # The steam is finished. self.done = True def fetch_directive(self): # Set the current intendation to -1. self.unwind_indent(-1) # Reset simple keys. self.remove_possible_simple_key() self.allow_simple_key = False # Scan and add DIRECTIVE. self.tokens.append(self.scan_directive()) def fetch_document_start(self): self.fetch_document_indicator(DocumentStartToken) def fetch_document_end(self): self.fetch_document_indicator(DocumentEndToken) def fetch_document_indicator(self, TokenClass): # Set the current intendation to -1. self.unwind_indent(-1) # Reset simple keys. Note that there could not be a block collection # after '---'. self.remove_possible_simple_key() self.allow_simple_key = False # Add DOCUMENT-START or DOCUMENT-END. start_mark = self.get_mark() self.forward(3) end_mark = self.get_mark() self.tokens.append(TokenClass(start_mark, end_mark)) def fetch_flow_sequence_start(self): self.fetch_flow_collection_start(FlowSequenceStartToken) def fetch_flow_mapping_start(self): self.fetch_flow_collection_start(FlowMappingStartToken) def fetch_flow_collection_start(self, TokenClass): # '[' and '{' may start a simple key. self.save_possible_simple_key() # Increase the flow level. self.flow_level += 1 # Simple keys are allowed after '[' and '{'. self.allow_simple_key = True # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. 
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_sequence_end(self):
        self.fetch_flow_collection_end(FlowSequenceEndToken)

    def fetch_flow_mapping_end(self):
        self.fetch_flow_collection_end(FlowMappingEndToken)

    def fetch_flow_collection_end(self, TokenClass):
        # Common handler for ']' and '}'.

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Decrease the flow level.
        self.flow_level -= 1

        # No simple keys after ']' or '}'.
        self.allow_simple_key = False

        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_entry(self):
        # Handler for ',' inside a flow collection.

        # Simple keys are allowed after ','.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add FLOW-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(FlowEntryToken(start_mark, end_mark))

    def fetch_block_entry(self):
        # Handler for a '-' sequence-entry indicator.

        # Block context needs additional checks.
        if not self.flow_level:

            # Are we allowed to start a new entry?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "sequence entries are not allowed here",
                        self.get_mark())

            # We may need to add BLOCK-SEQUENCE-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockSequenceStartToken(mark, mark))

        # It's an error for the block entry to occur in the flow context,
        # but we let the parser detect this.
        else:
            pass

        # Simple keys are allowed after '-'.
        self.allow_simple_key = True

        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()

        # Add BLOCK-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(BlockEntryToken(start_mark, end_mark))

    def fetch_key(self):
        # Handler for an explicit '?' key indicator.

        # Block context needs additional checks.
        if not self.flow_level:

            # Are we allowed to start a key (not necessarily a simple one)?
if not self.allow_simple_key: raise ScannerError(None, None, "mapping keys are not allowed here", self.get_mark()) # We may need to add BLOCK-MAPPING-START. if self.add_indent(self.column): mark = self.get_mark() self.tokens.append(BlockMappingStartToken(mark, mark)) # Simple keys are allowed after '?' in the block context. self.allow_simple_key = not self.flow_level # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add KEY. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(KeyToken(start_mark, end_mark)) def fetch_value(self): # Do we determine a simple key? if self.flow_level in self.possible_simple_keys: # Add KEY. key = self.possible_simple_keys[self.flow_level] del self.possible_simple_keys[self.flow_level] self.tokens.insert(key.token_number-self.tokens_taken, KeyToken(key.mark, key.mark)) # If this key starts a new block mapping, we need to add # BLOCK-MAPPING-START. if not self.flow_level: if self.add_indent(key.column): self.tokens.insert(key.token_number-self.tokens_taken, BlockMappingStartToken(key.mark, key.mark)) # There cannot be two simple keys one after another. self.allow_simple_key = False # It must be a part of a complex key. else: # Block context needs additional checks. # (Do we really need them? They will be catched by the parser # anyway.) if not self.flow_level: # We are allowed to start a complex value if and only if # we can start a simple key. if not self.allow_simple_key: raise ScannerError(None, None, "mapping values are not allowed here", self.get_mark()) # If this value starts a new block mapping, we need to add # BLOCK-MAPPING-START. It will be detected as an error later by # the parser. if not self.flow_level: if self.add_indent(self.column): mark = self.get_mark() self.tokens.append(BlockMappingStartToken(mark, mark)) # Simple keys are allowed after ':' in the block context. 
self.allow_simple_key = not self.flow_level # Reset possible simple key on the current level. self.remove_possible_simple_key() # Add VALUE. start_mark = self.get_mark() self.forward() end_mark = self.get_mark() self.tokens.append(ValueToken(start_mark, end_mark)) def fetch_alias(self): # ALIAS could be a simple key. self.save_possible_simple_key() # No simple keys after ALIAS. self.allow_simple_key = False # Scan and add ALIAS. self.tokens.append(self.scan_anchor(AliasToken)) def fetch_anchor(self): # ANCHOR could start a simple key. self.save_possible_simple_key() # No simple keys after ANCHOR. self.allow_simple_key = False # Scan and add ANCHOR. self.tokens.append(self.scan_anchor(AnchorToken)) def fetch_tag(self): # TAG could start a simple key. self.save_possible_simple_key() # No simple keys after TAG. self.allow_simple_key = False # Scan and add TAG. self.tokens.append(self.scan_tag()) def fetch_literal(self): self.fetch_block_scalar(style='|') def fetch_folded(self): self.fetch_block_scalar(style='>') def fetch_block_scalar(self, style): # A simple key may follow a block scalar. self.allow_simple_key = True # Reset possible simple key on the current level. self.remove_possible_simple_key() # Scan and add SCALAR. self.tokens.append(self.scan_block_scalar(style)) def fetch_single(self): self.fetch_flow_scalar(style='\'') def fetch_double(self): self.fetch_flow_scalar(style='"') def fetch_flow_scalar(self, style): # A flow scalar could be a simple key. self.save_possible_simple_key() # No simple keys after flow scalars. self.allow_simple_key = False # Scan and add SCALAR. self.tokens.append(self.scan_flow_scalar(style)) def fetch_plain(self): # A plain scalar could be a simple key. self.save_possible_simple_key() # No simple keys after plain scalars. But note that `scan_plain` will # change this flag if the scan is finished at the beginning of the # line. self.allow_simple_key = False # Scan and add SCALAR. May change `allow_simple_key`. 
        self.tokens.append(self.scan_plain())

    # Checkers.
    # Each checker looks ahead (without consuming input) to decide whether
    # the indicator character really starts the corresponding token.

    def check_directive(self):

        # DIRECTIVE:        ^ '%' ...
        # The '%' indicator is already checked.
        if self.column == 0:
            return True

    def check_document_start(self):

        # DOCUMENT-START:   ^ '---' (' '|'\n')
        if self.column == 0:
            if self.prefix(3) == u'---'  \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return True

    def check_document_end(self):

        # DOCUMENT-END:     ^ '...' (' '|'\n')
        if self.column == 0:
            if self.prefix(3) == u'...'  \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return True

    def check_block_entry(self):

        # BLOCK-ENTRY:      '-' (' '|'\n')
        return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_key(self):

        # KEY(flow context):    '?'
        if self.flow_level:
            return True

        # KEY(block context):   '?' (' '|'\n')
        else:
            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_value(self):

        # VALUE(flow context):  ':'
        if self.flow_level:
            return True

        # VALUE(block context): ':' (' '|'\n')
        else:
            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_plain(self):

        # A plain scalar may start with any non-space character except:
        #   '-', '?', ':', ',', '[', ']', '{', '}',
        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
        #   '%', '@', '`'.
        #
        # It may also start with
        #   '-', '?', ':'
        # if it is followed by a non-space character.
        #
        # Note that we limit the last rule to the block context (except the
        # '-' character) because we want the flow context to be space
        # independent.
        ch = self.peek()
        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
                or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
                        and (ch == u'-' or (not self.flow_level and ch in u'?:')))

    # Scanners.

    def scan_to_next_token(self):
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.

        if self.index == 0 and self.peek() == u'\uFEFF':
            self.forward()
        found = False
        while not found:
            while self.peek() == u' ':
                self.forward()
            if self.peek() == u'#':
                while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                    self.forward()
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True

    def scan_directive(self):
        # See the specification for details.
        # Scans '%YAML x.y', '%TAG handle prefix', or skips an unknown
        # directive, returning a DirectiveToken.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == u'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == u'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            # Unknown directive: skip the rest of the line, value stays None.
            end_mark = self.get_mark()
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)

    def scan_directive_name(self, start_mark):
        # See the specification for details.
        length = 0
        ch = self.peek(length)
        # Directive names are restricted to ASCII alphanumerics, '-' and '_'.
        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'    \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        return value

    def scan_yaml_directive_value(self, start_mark):
        # See the specification for details.
        # Returns a (major, minor) pair of ints for '%YAML major.minor'.
        while self.peek() == u' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r"
                    % self.peek().encode('utf-8'),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r"
                    % self.peek().encode('utf-8'),
                    self.get_mark())
        return (major, minor)

    def scan_yaml_directive_number(self, start_mark):
        # See the specification for details.
        # Scans one non-negative decimal integer component of the version.
        ch = self.peek()
        if not (u'0' <= ch <= u'9'):
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit, but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        length = 0
        while u'0' <= self.peek(length) <= u'9':
            length += 1
        value = int(self.prefix(length))
        self.forward(length)
        return value

    def scan_tag_directive_value(self, start_mark):
        # See the specification for details.
        while self.peek() == u' ':
            self.forward()
        handle = self.scan_tag_directive_handle(start_mark)
        while self.peek() == u' ':
            self.forward()
        prefix = self.scan_tag_directive_prefix(start_mark)
        return (handle, prefix)

    def scan_tag_directive_handle(self, start_mark):
        # See the specification for details.
        # The handle of a '%TAG' directive must be followed by a space.
        value = self.scan_tag_handle('directive', start_mark)
        ch = self.peek()
        if ch != u' ':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return value

    def scan_tag_directive_prefix(self, start_mark):
        # See the specification for details.
        # The prefix must be followed by whitespace or the end of the line.
        value = self.scan_tag_uri('directive', start_mark)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return value

    def scan_directive_ignored_line(self, start_mark):
        # See the specification for details.
        # Consumes trailing spaces and an optional comment, then the line
        # break that terminates the directive.
        while self.peek() == u' ':
            self.forward()
        if self.peek() == u'#':
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in u'\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a comment or a line break, but found %r"
                        % ch.encode('utf-8'), self.get_mark())
        self.scan_line_break()

    def scan_anchor(self, TokenClass):
        # The specification does not restrict characters for anchors and
        # aliases. This may lead to problems, for instance, the document:
        #   [ *alias, value ]
        # can be interpreted in two ways, as
        #   [ "value" ]
        # and
        #   [ *alias , "value" ]
        # Therefore we restrict aliases to numbers and ASCII letters.
        start_mark = self.get_mark()
        indicator = self.peek()
        # '*' introduces an alias, '&' an anchor; `name` only affects the
        # wording of error messages.
        if indicator == u'*':
            name = 'alias'
        else:
            name = 'anchor'
        self.forward()
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'    \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        end_mark = self.get_mark()
        return TokenClass(value, start_mark, end_mark)

    def scan_tag(self):
        # See the specification for details.
        # Returns a TagToken whose value is a (handle, suffix) pair; handle
        # is None for verbatim '!<...>' tags.
        start_mark = self.get_mark()
        ch = self.peek(1)
        if ch == u'<':
            # Verbatim tag: '!<' URI '>'.
            handle = None
            self.forward(2)
            suffix = self.scan_tag_uri('tag', start_mark)
            if self.peek() != u'>':
                raise ScannerError("while parsing a tag", start_mark,
                        "expected '>', but found %r" % self.peek().encode('utf-8'),
                        self.get_mark())
            self.forward()
        elif ch in u'\0 \t\r\n\x85\u2028\u2029':
            # A lone '!' — the non-specific tag.
            handle = None
            suffix = u'!'
            self.forward()
        else:
            # Look ahead for a second '!' to distinguish '!suffix' from
            # '!handle!suffix'.
            length = 1
            use_handle = False
            while ch not in u'\0 \r\n\x85\u2028\u2029':
                if ch == u'!':
                    use_handle = True
                    break
                length += 1
                ch = self.peek(length)
            handle = u'!'
            if use_handle:
                handle = self.scan_tag_handle('tag', start_mark)
            else:
                handle = u'!'
                self.forward()
            suffix = self.scan_tag_uri('tag', start_mark)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a tag", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        value = (handle, suffix)
        end_mark = self.get_mark()
        return TagToken(value, start_mark, end_mark)

    def scan_block_scalar(self, style):
        # See the specification for details.
        if style == '>':
            folded = True
        else:
            folded = False

        chunks = []
        start_mark = self.get_mark()

        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)

        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            # No explicit indentation indicator: auto-detect from content.
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = u''

        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != u'\0':
            chunks.extend(breaks)
            leading_non_space = self.peek() not in u' \t'
            length = 0
            while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != u'\0':

                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:

                if folded and line_break == u'\n'   \
                        and leading_non_space and self.peek() not in u' \t':
                    if not breaks:
                        chunks.append(u' ')
                else:
                    chunks.append(line_break)

                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == u'\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(u' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break

        # Chomp the tail.
        # chomping is None for "clip" (keep one final break), True for
        # "keep" ('+': keep trailing breaks), False for "strip" ('-').
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)

        # We are done.
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)

    def scan_block_scalar_indicators(self, start_mark):
        # See the specification for details.
        # The chomping ('+'/'-') and indentation (1-9) indicators may appear
        # in either order, hence the two symmetric branches below.
        chomping = None
        increment = None
        ch = self.peek()
        if ch in u'+-':
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
            ch = self.peek()
            if ch in u'0123456789':
                increment = int(ch)
                if increment == 0:
                    raise ScannerError("while scanning a block scalar", start_mark,
                            "expected indentation indicator in the range 1-9, but found 0",
                            self.get_mark())
                self.forward()
        elif ch in u'0123456789':
            increment = int(ch)
            if increment == 0:
                raise ScannerError("while scanning a block scalar", start_mark,
                        "expected indentation indicator in the range 1-9, but found 0",
                        self.get_mark())
            self.forward()
            ch = self.peek()
            if ch in u'+-':
                if ch == '+':
                    chomping = True
                else:
                    chomping = False
                self.forward()
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected chomping or indentation indicators, but found %r"
                        % ch.encode('utf-8'), self.get_mark())
        return chomping, increment

    def scan_block_scalar_ignored_line(self, start_mark):
        # See the specification for details.
        # Consumes trailing spaces, an optional comment, and the line break
        # after a block scalar header.
        while self.peek() == u' ':
            self.forward()
        if self.peek() == u'#':
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in u'\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected a comment or a line break, but found %r"
                        % ch.encode('utf-8'), self.get_mark())
        self.scan_line_break()

    def scan_block_scalar_indentation(self):
        # See the specification for details.
        # Skips leading empty lines, recording the deepest column reached;
        # used to auto-detect the scalar's indentation level.
        chunks = []
        max_indent = 0
        end_mark = self.get_mark()
        while self.peek() in u' \r\n\x85\u2028\u2029':
            if self.peek() != u' ':
                chunks.append(self.scan_line_break())
                end_mark = self.get_mark()
            else:
                self.forward()
                if self.column > max_indent:
                    max_indent = self.column
        return chunks, max_indent, end_mark

    def scan_block_scalar_breaks(self, indent):
        # See the specification for details.
        # Collects line breaks between content lines, skipping indentation
        # up to `indent` columns on each line.
        chunks = []
        end_mark = self.get_mark()
        while self.column < indent and self.peek() == u' ':
            self.forward()
        while self.peek() in u'\r\n\x85\u2028\u2029':
            chunks.append(self.scan_line_break())
            end_mark = self.get_mark()
            while self.column < indent and self.peek() == u' ':
                self.forward()
        return chunks, end_mark

    def scan_flow_scalar(self, style):
        # See the specification for details.
        # Note that we lose indentation rules for quoted scalars. Quoted
        # scalars don't need to adhere indentation because " and ' clearly
        # mark the beginning and the end of them. Therefore we are less
        # restrictive than the specification requires. We only need to check
        # that document separators are not included in scalars.
        if style == '"':
            double = True
        else:
            double = False
        chunks = []
        start_mark = self.get_mark()
        quote = self.peek()
        self.forward()
        # Alternate between non-space runs and folded whitespace until the
        # closing quote is reached.
        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        while self.peek() != quote:
            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        self.forward()
        end_mark = self.get_mark()
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)

    # Single-character escape sequences recognized in double-quoted scalars.
    ESCAPE_REPLACEMENTS = {
        u'0':   u'\0',
        u'a':   u'\x07',
        u'b':   u'\x08',
        u't':   u'\x09',
        u'\t':  u'\x09',
        u'n':   u'\x0A',
        u'v':   u'\x0B',
        u'f':   u'\x0C',
        u'r':   u'\x0D',
        u'e':   u'\x1B',
        u' ':   u'\x20',
        u'\"':  u'\"',
        u'\\':  u'\\',
        u'N':   u'\x85',
        u'_':   u'\xA0',
        u'L':   u'\u2028',
        u'P':   u'\u2029',
    }

    # Numeric escapes: introducer character -> number of hex digits.
    ESCAPE_CODES = {
        u'x':   2,
        u'u':   4,
        u'U':   8,
    }

    def scan_flow_scalar_non_spaces(self, double, start_mark):
        # See the specification for details.
        # Scans a run of non-whitespace characters inside a quoted scalar,
        # resolving '' (single-quoted) and backslash (double-quoted) escapes.
        chunks = []
        while True:
            length = 0
            while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            if not double and ch == u'\'' and self.peek(1) == u'\'':
                # '' inside a single-quoted scalar is an escaped quote.
                chunks.append(u'\'')
                self.forward(2)
            elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
                # Literal character in this quoting style.
                chunks.append(ch)
                self.forward()
            elif double and ch == u'\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    # \xXX, \uXXXX or \UXXXXXXXX numeric escape.
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in u'0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
                                        (length, self.peek(k).encode('utf-8')), self.get_mark())
                    code = int(self.prefix(length), 16)
                    chunks.append(unichr(code))
                    self.forward(length)
                elif ch in u'\r\n\x85\u2028\u2029':
                    # Escaped line break: the break itself is removed.
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
            else:
                return chunks

    def scan_flow_scalar_spaces(self, double, start_mark):
        # See the specification for details.
        # Scans whitespace inside a quoted scalar, applying the line-folding
        # rules (a single '\n' folds into a space).
        chunks = []
        length = 0
        while self.peek(length) in u' \t':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch == u'\0':
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected end of stream", self.get_mark())
        elif ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            breaks = self.scan_flow_scalar_breaks(double, start_mark)
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        else:
            chunks.append(whitespaces)
        return chunks

    def scan_flow_scalar_breaks(self, double, start_mark):
        # See the specification for details.
        chunks = []
        while True:
            # Instead of checking indentation, we check for document
            # separators.
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...')   \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                raise ScannerError("while scanning a quoted scalar", start_mark,
                        "found unexpected document separator", self.get_mark())
            while self.peek() in u' \t':
                self.forward()
            if self.peek() in u'\r\n\x85\u2028\u2029':
                chunks.append(self.scan_line_break())
            else:
                return chunks

    def scan_plain(self):
        # See the specification for details.
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are relaxed for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
#if indent == 0: # indent = 1 spaces = [] while True: length = 0 if self.peek() == u'#': break while True: ch = self.peek(length) if ch in u'\0 \t\r\n\x85\u2028\u2029' \ or (not self.flow_level and ch == u':' and self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \ or (self.flow_level and ch in u',:?[]{}'): break length += 1 # It's not clear what we should do with ':' in the flow context. if (self.flow_level and ch == u':' and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'): self.forward(length) raise ScannerError("while scanning a plain scalar", start_mark, "found unexpected ':'", self.get_mark(), "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.") if length == 0: break self.allow_simple_key = False chunks.extend(spaces) chunks.append(self.prefix(length)) self.forward(length) end_mark = self.get_mark() spaces = self.scan_plain_spaces(indent, start_mark) if not spaces or self.peek() == u'#' \ or (not self.flow_level and self.column < indent): break return ScalarToken(u''.join(chunks), True, start_mark, end_mark) def scan_plain_spaces(self, indent, start_mark): # See the specification for details. # The specification is really confusing about tabs in plain scalars. # We just forbid them completely. Do not use tabs in YAML! 
chunks = [] length = 0 while self.peek(length) in u' ': length += 1 whitespaces = self.prefix(length) self.forward(length) ch = self.peek() if ch in u'\r\n\x85\u2028\u2029': line_break = self.scan_line_break() self.allow_simple_key = True prefix = self.prefix(3) if (prefix == u'---' or prefix == u'...') \ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': return breaks = [] while self.peek() in u' \r\n\x85\u2028\u2029': if self.peek() == ' ': self.forward() else: breaks.append(self.scan_line_break()) prefix = self.prefix(3) if (prefix == u'---' or prefix == u'...') \ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': return if line_break != u'\n': chunks.append(line_break) elif not breaks: chunks.append(u' ') chunks.extend(breaks) elif whitespaces: chunks.append(whitespaces) return chunks def scan_tag_handle(self, name, start_mark): # See the specification for details. # For some strange reasons, the specification does not allow '_' in # tag handles. I have allowed it anyway. ch = self.peek() if ch != u'!': raise ScannerError("while scanning a %s" % name, start_mark, "expected '!', but found %r" % ch.encode('utf-8'), self.get_mark()) length = 1 ch = self.peek(length) if ch != u' ': while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-_': length += 1 ch = self.peek(length) if ch != u'!': self.forward(length) raise ScannerError("while scanning a %s" % name, start_mark, "expected '!', but found %r" % ch.encode('utf-8'), self.get_mark()) length += 1 value = self.prefix(length) self.forward(length) return value def scan_tag_uri(self, name, start_mark): # See the specification for details. # Note: we do not check if URI is well-formed. 
chunks = [] length = 0 ch = self.peek(length) while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ or ch in u'-;/?:@&=+$,_.!~*\'()[]%': if ch == u'%': chunks.append(self.prefix(length)) self.forward(length) length = 0 chunks.append(self.scan_uri_escapes(name, start_mark)) else: length += 1 ch = self.peek(length) if length: chunks.append(self.prefix(length)) self.forward(length) length = 0 if not chunks: raise ScannerError("while parsing a %s" % name, start_mark, "expected URI, but found %r" % ch.encode('utf-8'), self.get_mark()) return u''.join(chunks) def scan_uri_escapes(self, name, start_mark): # See the specification for details. bytes = [] mark = self.get_mark() while self.peek() == u'%': self.forward() for k in range(2): if self.peek(k) not in u'0123456789ABCDEFabcdef': raise ScannerError("while scanning a %s" % name, start_mark, "expected URI escape sequence of 2 hexdecimal numbers, but found %r" % (self.peek(k).encode('utf-8')), self.get_mark()) bytes.append(chr(int(self.prefix(2), 16))) self.forward(2) try: value = unicode(''.join(bytes), 'utf-8') except UnicodeDecodeError, exc: raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark) return value def scan_line_break(self): # Transforms: # '\r\n' : '\n' # '\r' : '\n' # '\n' : '\n' # '\x85' : '\n' # '\u2028' : '\u2028' # '\u2029 : '\u2029' # default : '' ch = self.peek() if ch in u'\r\n\x85': if self.prefix(2) == u'\r\n': self.forward(2) else: self.forward() return u'\n' elif ch in u'\u2028\u2029': self.forward() return ch return u'' #try: # import psyco # psyco.bind(Scanner) #except ImportError: # pass
mit
renner/spacewalk
backend/server/handlers/xmlrpc/queue.py
9
30018
# Queue functions on the server side. # # Copyright (c) 2008--2016 Red Hat, Inc. # # This software is licensed to you under the GNU General Public License, # version 2 (GPLv2). There is NO WARRANTY for this software, express or # implied, including the implied warranties of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2 # along with this software; if not, see # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. # # Red Hat trademarks are not licensed under GPLv2. No permission is # granted to use or replicate Red Hat trademarks that are incorporated # in this software or its documentation. # import sys import time try: # python 2 import xmlrpclib except ImportError: # python3 import xmlrpc.client as xmlrpclib from spacewalk.common.usix import IntType, TupleType, UnicodeType # Global modules from spacewalk.common.usix import raise_with_tb from spacewalk.common import rhnFlags from spacewalk.common.rhnLog import log_debug, log_error from spacewalk.common.rhnConfig import CFG from spacewalk.common.rhnException import rhnFault from spacewalk.common.rhnTranslate import _ from spacewalk.common.rhnTB import Traceback from spacewalk.server.rhnHandler import rhnHandler from spacewalk.server import rhnSQL, rhnCapability, rhnAction from spacewalk.server.rhnLib import InvalidAction, EmptyAction, ShadowAction from spacewalk.server.rhnServer import server_kickstart import getMethod class Queue(rhnHandler): """ XMLRPC queue functions that we will provide for the outside world. """ def __init__(self): """ Add a list of functions we are willing to server out. """ rhnHandler.__init__(self) self.functions.append('get') self.functions.append('get_future_actions') self.functions.append('length') self.functions.append('submit') # XXX I am not proud of this. 
There should be a generic way to map # the client's error codes into success status codes self.action_type_completed_codes = { 'errata.update': { 39: None, }, } def __getV1(self, action): """ Fetches old queued actions for the client version 1. """ log_debug(3, self.server_id) actionId = action['id'] method = action["method"] if method == 'packages.update': xml = self.__packageUpdate(actionId) elif method == 'errata.update': xml = self.__errataUpdate(actionId) elif method == 'hardware.refresh_list': xml = xmlrpclib.dumps(("hardware",), methodname="client.refresh") elif method == 'packages.refresh_list': xml = xmlrpclib.dumps(("rpmlist",), methodname="client.refresh") else: # Unrecognized, skip raise InvalidAction("Action method %s unsupported by " "Update Agent Client" % method) # all good return {'id': actionId, 'version': 1, 'action': xml} def __getV2(self, action, dry_run=0): """ Fetches queued actions for the clients version 2+. """ log_debug(3, self.server_id) # Get the root dir of this install try: method = getMethod.getMethod(action['method'], 'server.action') except getMethod.GetMethodException: Traceback("queue.get V2") raise_with_tb(EmptyAction("Could not get a valid method for %s" % ( action['method'],)), sys.exc_info()[2]) # Call the method result = method(self.server_id, action['id'], dry_run) if result is None: # None are mapped to the empty list result = () elif not isinstance(result, TupleType): # Everything other than a tuple is wrapped in a tuple result = (result, ) xmlblob = xmlrpclib.dumps(result, methodname=action['method']) log_debug(5, "returning xmlblob for action", xmlblob) return { 'id': action['id'], 'action': xmlblob, 'version': action['version'], } def __update_status(self, status): """ Update the runnng kernel and the last boot values for this server from the status dictionary passed on queue checkin. Record last running kernel and uptime. Only update last_boot if it has changed by more than five seconds. 
We don't know the timezone the server is in. or even if its clock is right, but we do know it can properly track seconds since it rebooted, and use our own clocks to keep proper track of the actual time. """ rhnSQL.set_log_auth_login('CLIENT') if 'uname' in status: kernelver = status['uname'][2] if kernelver != self.server.server["running_kernel"]: self.server.server["running_kernel"] = kernelver # XXX:We should be using Oracle's sysdate() for this management # In the case of multiple app servers in mutiple time zones all the # results are skewed. if 'uptime' in status: uptime = status['uptime'] if isinstance(uptime, type([])) and len(uptime): # Toss the other values. For now uptime = uptime[0] try: uptime = float(uptime) except ValueError: # Wrong value passed by the client pass else: last_boot = time.time() - uptime if abs(last_boot - self.server.server["last_boot"]) > 5: self.server.server["last_boot"] = last_boot self.__set_reboot_action_to_succcess() # this is smart enough to do a NOOP if nothing changed. 
self.server.server.save() def __set_reboot_action_to_succcess(self): h = rhnSQL.prepare(""" update rhnServerAction set status = 2 where server_id = :server_id and action_id in ( select sa.action_id from rhnServerAction sa join rhnAction a on sa.action_id = a.id join rhnActionType at on a.action_type = at.id where sa.server_id = :server_id and sa.status = 1 and at.label = 'reboot.reboot' ) """) h.execute(server_id=self.server_id) def __should_snapshot(self): log_debug(4, self.server_id, "determining whether to snapshot...") entitlements = self.server.check_entitlement() if "enterprise_entitled" not in entitlements: return 0 # ok, take the snapshot before attempting this action return 1 def _invalidate_child_actions(self, action_id): f_action_ids = rhnAction.invalidate_action(self.server_id, action_id) for f_action_id in f_action_ids: # Invalidate any kickstart session that depends on this action server_kickstart.update_kickstart_session(self.server_id, f_action_id, action_status=3, kickstart_state='failed', next_action_type=None) return f_action_ids def _invalidate_failed_prereq_actions(self): h = rhnSQL.prepare(""" select sa.action_id, a.prerequisite from rhnServerAction sa, rhnAction a where sa.server_id = :server_id and sa.action_id = a.id and sa.status in (0, 1) -- Queued or picked up and a.prerequisite is not null and exists ( select 1 from rhnServerAction where server_id = sa.server_id and action_id = a.prerequisite and status = 3 -- failed ) """) h.execute(server_id=self.server_id) while 1: row = h.fetchone_dict() if not row: break action_id, prereq_action_id = row['action_id'], row['prerequisite'] self._invalidate_child_actions(action_id) _query_future_enabled = rhnSQL.Statement(""" select staging_content_enabled from rhnOrgConfiguration oc, rhnServer s where s.org_id = oc.org_id and s.id = :server_id """) def _future_actions_enabled(self): """ Returns true if staging content is enabled for this system """ h = rhnSQL.prepare(self._query_future_enabled) 
h.execute(server_id=self.server_id) row = h.fetchone_dict() log_debug(4, row["staging_content_enabled"]) return row["staging_content_enabled"] == "Y" _query_queue_future = rhnSQL.Statement(""" select sa.action_id id, a.version, sa.remaining_tries, at.label as method, at.unlocked_only, a.prerequisite from rhnServerAction sa, rhnAction a, rhnActionType at where sa.server_id = :server_id and sa.action_id = a.id and a.action_type = at.id and sa.status in (0, 1) -- Queued or picked up and a.earliest_action <= current_timestamp + numtodsinterval(:time_window * 3600, 'second') -- Check earliest_action and at.label in ('packages.update', 'errata.update', 'packages.runTransaction', 'packages.fullUpdate') order by a.earliest_action, a.prerequisite nulls first, a.id """) def get_future_actions(self, system_id, time_window): """ return actions which are scheduled within next /time_window/ hours """ self.auth_system(system_id) log_debug(3, "Checking for future actions within %d hours" % time_window) result = [] if self._future_actions_enabled() and not self.__reboot_in_progress(): h = rhnSQL.prepare(self._query_queue_future) h.execute(server_id=self.server_id, time_window=time_window) action = h.fetchone_dict() while action: log_debug(5, action) result.append(self.__getV2(action, dry_run=1)) action = h.fetchone_dict() return result _query_queue_get = rhnSQL.Statement(""" select sa.action_id id, a.version, sa.remaining_tries, at.label as method, at.unlocked_only, a.prerequisite from rhnServerAction sa, rhnAction a, rhnActionType at where sa.server_id = :server_id and sa.action_id = a.id and a.action_type = at.id and sa.status in (0, 1) -- Queued or picked up and a.earliest_action <= current_timestamp -- Check earliest_action and not exists ( select 1 from rhnServerAction sap where sap.server_id = :server_id and sap.action_id = a.prerequisite and sap.status != 2 -- completed ) order by a.earliest_action, a.prerequisite nulls first, a.id """) # Probably we need to figure out if we 
really need to split these two. def get(self, system_id, version=1, status={}): # Authenticate the system certificate if CFG.DISABLE_CHECKINS: self.update_checkin = 0 else: self.update_checkin = 1 self.auth_system(system_id) log_debug(1, self.server_id, version, "checkins %s" % ["disabled", "enabled"][self.update_checkin]) if status: self.__update_status(status) # Update the capabilities list rhnCapability.update_client_capabilities(self.server_id) # Invalidate failed actions self._invalidate_failed_prereq_actions() server_locked = self.server.server_locked() log_debug(3, "Server locked", server_locked) if self.__reboot_in_progress(): log_debug(3, "Server reboot in progress", self.server_id) rhnSQL.commit() return "" ret = {} # get the action. Status codes are currently: # 0 Queued # 1 Picked Up # 2 Completed # 3 Failed # XXX: we should really be using labels from rhnActionType instead of # hard coded type id numbers. # We fetch actions whose prerequisites have completed, and actions # that don't have prerequisites at all h = rhnSQL.prepare(self._query_queue_get) should_execute = 1 # Loop to get a valid action # (only one valid action will be dealt with per execution of this function...) while 1: if should_execute: h.execute(server_id=self.server_id) should_execute = 0 # Okay, got an action action = h.fetchone_dict() if not action: # No actions available; bail out # Don't forget the commit at the end... ret = "" break action_id = action['id'] log_debug(4, "Checking action %s" % action_id) # okay, now we have the action - process it. if action['remaining_tries'] < 1: log_debug(4, "Action %s picked up too many times" % action_id) # We've run out of pickup attempts for this action... 
self.__update_action(action_id, status=3, message="This action has been picked up multiple times " "without a successful transaction; " "this action is now failed for this system.") # Invalidate actions that depend on this one self._invalidate_child_actions(action_id) # keep looking for a good action to process... continue if server_locked and action['unlocked_only'] == 'Y': # This action is locked log_debug(4, "server id %s locked for action id %s" % ( self.server_id, action_id)) continue try: if version == 1: ret = self.__getV1(action) else: ret = self.__getV2(action) except ShadowAction: # Action the client should not see e = sys.exc_info()[1] # Make sure we re-execute the query, so we pick up whatever # extra actions were added should_execute = 1 text = e.args[0] log_debug(4, "Shadow Action", text) self.__update_action(action['id'], 2, 0, text) continue except InvalidAction: # This is an invalid action e = sys.exc_info()[1] # Update its status so it won't bother us again text = e.args[0] log_debug(4, "Invalid Action", text) self.__update_action(action['id'], 3, -99, text) continue except EmptyAction: e = sys.exc_info()[1] # this means that we have some sort of internal error # which gets reported in the logs. We don't touch the # action because this should get fixed on our side. log_error("Can not process action data", action, e.args) ret = "" break else: # all fine # Update the status of the action h = rhnSQL.prepare(""" update rhnServerAction set status = 1, pickup_time = current_timestamp, remaining_tries = :tries - 1 where action_id = :action_id and server_id = :server_id """) h.execute(action_id=action["id"], server_id=self.server_id, tries=action["remaining_tries"]) break # commit all changes rhnSQL.commit() return ret def submit(self, system_id, action_id, result, message="", data={}): """ Submit the results of a queue run. 
Maps old and new rhn_check behavior to new database status codes The new API uses 4 slightly different status codes than the old client does. This function will "hopefully" sensibly map them. Old methodology: -rhn_check retrieves an action from the top of the action queue. -It attempts to execute the desired action and returns either (a) 0 -- presumed successful. (b) rhnFault object -- presumed failed (c) some other non-fault object -- *assumed* successful. -Regardless of result code, action is marked as "executed" We try to make a smarter status selection (i.e. failed||completed). For reference: New DB status codes: Old DB status codes: 0: Queued 0: queued 1: Picked Up 1: picked up 2: Completed 2: executed 3: Failed 3: completed """ if type(action_id) is not IntType: # Convert it to int try: action_id = int(action_id) except ValueError: log_error("Invalid action_id", action_id) raise_with_tb(rhnFault(30, _("Invalid action value type %s (%s)") % (action_id, type(action_id))), sys.exc_info()[2]) # Authenticate the system certificate self.auth_system(system_id) log_debug(1, self.server_id, action_id, result) # check that the action is valid # We have a uniqueness constraint on (action_id, server_id) h = rhnSQL.prepare(""" select at.label action_type, at.trigger_snapshot, at.name from rhnServerAction sa, rhnAction a, rhnActionType at where sa.server_id = :server_id and sa.action_id = :action_id and sa.status = 1 and a.id = :action_id and a.action_type = at.id """) h.execute(server_id=self.server_id, action_id=action_id) row = h.fetchone_dict() if not row: log_error("Server %s does not own action %s" % ( self.server_id, action_id)) raise rhnFault(22, _("Action %s does not belong to server %s") % ( action_id, self.server_id)) action_type = row['action_type'] trigger_snapshot = (row['trigger_snapshot'] == 'Y') if 'missing_packages' in data: missing_packages = "Missing-Packages: %s" % str( data['missing_packages']) rmsg = "%s %s" % (message, missing_packages) elif 'koan' 
in data: rmsg = "%s: %s" % (message, data['koan']) else: rmsg = message rcode = result # Careful with this one, result can be a very complex thing # and this processing is required for compatibility with old # rhn_check clients if type(rcode) == type({}): if "faultCode" in result: rcode = result["faultCode"] if "faultString" in result: rmsg = result["faultString"] + str(data) if type(rcode) in [type({}), type(()), type([])] \ or type(rcode) is not IntType: rmsg = u"%s [%s]" % (UnicodeType(message), UnicodeType(rcode)) rcode = -1 # map to db codes. status = self.status_for_action_type_code(action_type, rcode) if status == 3: # Failed action - invalidate children self._invalidate_child_actions(action_id) elif action_type == 'reboot.reboot': # reboot action should stay as pickup rhnSQL.commit() return 0 elif status == 2 and trigger_snapshot and self.__should_snapshot(): # if action status is 'Completed', snapshot if allowed and if needed self.server.take_snapshot("Scheduled action completion: %s" % row['name']) self.__update_action(action_id, status, rcode, rmsg) # Store the status in a flag - easier than to complicate the action # plugin API by adding a status rhnFlags.set('action_id', action_id) rhnFlags.set('action_status', status) self.process_extra_data(self.server_id, action_id, data=data, action_type=action_type) # commit, because nobody else will rhnSQL.commit() return 0 def status_for_action_type_code(self, action_type, rcode): """ Convert whatever the client sends as a result code into a status in the database format This is more complicated, since some of the client's result codes have to be marked as successes. 
""" log_debug(4, action_type, rcode) if rcode == 0: # Completed return 2 if action_type not in self.action_type_completed_codes: # Failed return 3 hash = self.action_type_completed_codes[action_type] if rcode not in hash: # Failed return 3 # Completed return 2 def process_extra_data(self, server_id, action_id, data={}, action_type=None): log_debug(4, server_id, action_id, action_type) if not action_type: # Shouldn't happen return try: method = getMethod.getMethod(action_type, 'server.action_extra_data') except getMethod.GetMethodException: Traceback("queue.get V2") raise_with_tb(EmptyAction("Could not get a valid method for %s" % action_type), sys.exc_info()[2]) # Call the method result = method(self.server_id, action_id, data=data) return result def length(self, system_id): """ Return the queue length for a certain server. """ # Authenticate the system certificate self.auth_system(system_id) log_debug(1, self.server_id) h = rhnSQL.prepare(""" select count(action_id) id from rhnServerAction r where r.server_id = :server_id and r.status in (0, 1) """) h.execute(server_id=self.server_id) data = h.fetchone_dict() if data is None: return 0 return data["id"] # PRIVATE methods def __reboot_in_progress(self): """check for a reboot action for this server in status Picked Up""" log_debug(4, self.server_id) h = rhnSQL.prepare(""" select 1 from rhnServerAction sa join rhnAction a on sa.action_id = a.id join rhnActionType at on a.action_type = at.id where sa.server_id = :server_id and at.label = 'reboot.reboot' and sa.status = 1 -- Picked Up """) h.execute(server_id=self.server_id) ret = h.fetchone_dict() or None if ret: return True return False def __update_action(self, action_id, status, resultCode=None, message=""): """ Update the status of an action. 
""" log_debug(4, action_id, status, resultCode, message) rhnAction.update_server_action(server_id=self.server_id, action_id=action_id, status=status, result_code=resultCode, result_message=message) return 0 def __errataUpdate(self, actionId): """ Old client errata retrieval. """ log_debug(3, self.server_id, actionId) # get the names of the packages associated with each errata and # look them up in channels subscribed to by the server and select # the latest version sql = """ select pn.name name, pl.evr.version version, pl.evr.release release from ( select p.name_id, max(pe.evr) evr from rhnPackageEVR pe, rhnChannelPackage cp, rhnPackage p, rhnServerChannel sc, ( select p_name.name_id id from rhnActionErrataUpdate aeu, rhnErrataPackage ep, rhnPackage p_name where aeu.action_id = :action_id and aeu.errata_id = ep.errata_id and ep.package_id = p_name.id ) nids where nids.id = p.name_id and p.evr_id = pe.id and p.id = cp.package_id and cp.channel_id = sc.channel_id and sc.server_id = :server_id group by p.name_id ) pl, rhnPackageName pn where pn.id = pl.name_id """ h = rhnSQL.prepare(sql) h.execute(action_id=actionId, server_id=self.server_id) packages = [] while 1: ret = h.fetchone_dict() if not ret: break # older clients have issues with real epochs, se they are # kind of irrelevant packages.append([ret["name"], ret["version"], ret["release"], '']) xml = xmlrpclib.dumps((packages,), methodname='client.update_packages') return xml def __packageUpdate(self, actionId): """ Old client package retrieval. 
""" log_debug(3, self.server_id, actionId) # The SQL query is a union of: # - packages with a specific EVR # - the latest packages (no EVR specified) # XXX Should we want to schedule the install for a specific version, # we'll have to modify this statement = """ select distinct pkglist.name name, -- decode the evr object selected earlier pkglist.evr.version version, pkglist.evr.release release from ( -- get the max of the two possible cases select pl.name name, max(pl.evr) evr from ( -- if the EVR is specifically requested... select pn.name name, pe.evr evr from rhnActionPackage ap, rhnPackage p, rhnPackageName pn, rhnPackageEVR pe, rhnServerChannel sc, rhnChannelPackage cp where ap.action_id = :action_id and ap.evr_id is NOT NULL and ap.evr_id = p.evr_id and ap.evr_id = pe.id and ap.name_id = p.name_id and ap.name_id = pn.id and p.id = cp.package_id and cp.channel_id = sc.channel_id and sc.server_id = :server_id UNION -- when no EVR requested, we need to compute the max available -- from the channels the server is subscribed to select pn.name name, max(pevr.evr) evr from rhnActionPackage ap, rhnServerChannel sc, rhnChannelPackage cp, rhnPackage p, rhnPackageEVR pevr, rhnPackageName pn where ap.action_id = :action_id and ap.evr_id is null and ap.name_id = pn.id and ap.name_id = p.name_id and p.evr_id = pevr.id and sc.server_id = :server_id and sc.channel_id = cp.channel_id and cp.package_id = p.id group by pn.name ) pl group by pl.name ) pkglist """ h = rhnSQL.prepare(statement) h.execute(action_id=actionId, server_id=self.server_id) ret = h.fetchall_dict() or [] packages = [] for p in ret: # old clients have issues dealing with real epochs, so we # kind of fake it for now in here entry = [p['name'], p['version'], p['release'], ''] packages.append(entry) xml = xmlrpclib.dumps((packages,), methodname='client.update_packages') return xml #----------------------------------------------------------------------------- if __name__ == "__main__": print("You can not run 
this module by itself") q = Queue() sys.exit(-1) #-----------------------------------------------------------------------------
gpl-2.0
Chilledheart/chromium
tools/telemetry/PRESUBMIT.py
3
3473
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. def _CommonChecks(input_api, output_api): results = [] # TODO(nduca): This should call update_docs.IsUpdateDocsNeeded(). # Disabled due to crbug.com/255326. if False: update_docs_path = input_api.os_path.join( input_api.PresubmitLocalPath(), 'update_docs') assert input_api.os_path.exists(update_docs_path) results.append(output_api.PresubmitError( 'Docs are stale. Please run:\n' + '$ %s' % input_api.os_path.abspath(update_docs_path))) pylint_checks = input_api.canned_checks.GetPylint( input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api), pylintrc='pylintrc') results.extend(_CheckNoMoreUsageOfDeprecatedCode( input_api, output_api, deprecated_code='GetChromiumSrcDir()', crbug_number=511332)) results.extend(input_api.RunTests(pylint_checks)) return results def _CheckNoMoreUsageOfDeprecatedCode( input_api, output_api, deprecated_code, crbug_number): results = [] # These checks are not perfcet but should be good enough for most of our # usecases. def _IsAddedLine(line): return line.startswith('+') and not line.startswith('+++ ') def _IsRemovedLine(line): return line.startswith('-') and not line.startswith('--- ') presubmit_dir = input_api.os_path.join( input_api.PresubmitLocalPath(), 'PRESUBMIT.py') added_calls = 0 removed_calls = 0 for affected_file in input_api.AffectedFiles(): # Do not do the check on PRESUBMIT.py itself. if affected_file.AbsoluteLocalPath() == presubmit_dir: continue for line in affected_file.GenerateScmDiff().splitlines(): if _IsAddedLine(line) and deprecated_code in line: added_calls += 1 elif _IsRemovedLine(line) and deprecated_code in line: removed_calls += 1 if added_calls > removed_calls: results.append(output_api.PresubmitError( 'Your patch adds more instances of %s. Please see crbug.com/%i for' 'how to proceed.' 
% (deprecated_code, crbug_number))) return results def _GetPathsToPrepend(input_api): telemetry_dir = input_api.PresubmitLocalPath() chromium_src_dir = input_api.os_path.join(telemetry_dir, '..', '..') return [ telemetry_dir, input_api.os_path.join(telemetry_dir, 'third_party', 'altgraph'), input_api.os_path.join(telemetry_dir, 'third_party', 'mock'), input_api.os_path.join(telemetry_dir, 'third_party', 'modulegraph'), input_api.os_path.join(telemetry_dir, 'third_party', 'pexpect'), input_api.os_path.join(telemetry_dir, 'third_party', 'png'), input_api.os_path.join(telemetry_dir, 'third_party', 'pyserial'), input_api.os_path.join(telemetry_dir, 'third_party', 'typ'), input_api.os_path.join(telemetry_dir, 'third_party', 'webpagereplay'), input_api.os_path.join(telemetry_dir, 'third_party', 'websocket-client'), input_api.os_path.join(chromium_src_dir, 'build', 'android'), input_api.os_path.join(chromium_src_dir, 'third_party', 'catapult', 'tracing'), ] def CheckChangeOnUpload(input_api, output_api): results = [] results.extend(_CommonChecks(input_api, output_api)) return results def CheckChangeOnCommit(input_api, output_api): results = [] results.extend(_CommonChecks(input_api, output_api)) return results
bsd-3-clause
mostaphaRoudsari/Honeybee
src/Honeybee_AskMe.py
1
1992
# # Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari # # This file is part of Honeybee. # # Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools> # Honeybee is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 3 of the License, # or (at your option) any later version. # # Honeybee is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Honeybee; If not, see <http://www.gnu.org/licenses/>. # # @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+> """ Use this component to get basic information on Honeybee Objects, whether they are HBSrfs or HBZones. - Provided by Honeybee 0.0.66 Args: _HBObjects: Any valid Honeybee object. Returns: readMe!: Information about the Honeybee object. Connect to a panel to visualize. """ ghenv.Component.Name = "Honeybee_AskMe" ghenv.Component.NickName = 'askMe' ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020' ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application ghenv.Component.Category = "HB-Legacy" ghenv.Component.SubCategory = "00 | Honeybee" #compatibleHBVersion = VER 0.0.56\nJUL_24_2017 #compatibleLBVersion = VER 0.0.59\nFEB_01_2015 try: ghenv.Component.AdditionalHelpFromDocStrings = "1" except: pass import scriptcontext as sc try: # call the objects from the lib hb_hive = sc.sticky["honeybee_Hive"]() HBObjectsFromHive = hb_hive.visualizeFromHoneybeeHive(_HBObjects) for HBO in HBObjectsFromHive: print HBO except Exception, e: print "Honeybee has no idea what this object is! Vviiiiiiz!" pass
gpl-3.0
xzturn/tensorflow
tensorflow/python/kernel_tests/softplus_op_test.py
22
5095
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Softplus and SoftplusGrad.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import nn_ops import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test class SoftplusTest(test.TestCase): def _npSoftplus(self, np_features): np_features = np.asarray(np_features) zero = np.asarray(0).astype(np_features.dtype) return np.logaddexp(zero, np_features) def _testSoftplus(self, np_features, use_gpu=False): np_softplus = self._npSoftplus(np_features) with self.cached_session(use_gpu=use_gpu): softplus = nn_ops.softplus(np_features) tf_softplus = self.evaluate(softplus) self.assertAllCloseAccordingToType(np_softplus, tf_softplus) self.assertTrue(np.all(tf_softplus > 0)) self.assertShapeEqual(np_softplus, softplus) def testNumbers(self): for t in [np.float16, np.float32, np.float64]: self._testSoftplus( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t), use_gpu=False) self._testSoftplus( np.array([[-9, 7, -5, 3, -1], [1, -3, 5, 
-7, 9]]).astype(t), use_gpu=True) log_eps = np.log(np.finfo(t).eps) one = t(1) ten = t(10) self._testSoftplus( [ log_eps, log_eps - one, log_eps + one, log_eps - ten, log_eps + ten, -log_eps, -log_eps - one, -log_eps + one, -log_eps - ten, -log_eps + ten ], use_gpu=False) self._testSoftplus( [ log_eps, log_eps - one, log_eps + one, log_eps - ten, log_eps + ten - log_eps, -log_eps - one, -log_eps + one, -log_eps - ten, -log_eps + ten ], use_gpu=True) @test_util.run_deprecated_v1 def testGradient(self): with self.cached_session(): x = constant_op.constant( [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9], shape=[2, 5], name="x") y = nn_ops.softplus(x, name="softplus") x_init = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker.compute_gradient_error( x, [2, 5], y, [2, 5], x_init_value=x_init) print("softplus (float) gradient err = ", err) self.assertLess(err, 1e-4) @test_util.run_deprecated_v1 def testGradGrad(self): with self.cached_session(): x = constant_op.constant( [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9], shape=[2, 5], name="x") y = nn_ops.softplus(x, name="softplus") (grad,) = gradients_impl.gradients(y, x) x_init = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker.compute_gradient_error( x, [2, 5], grad, [2, 5], x_init_value=x_init) print("softplus (float) gradient of gradient err = ", err) self.assertLess(err, 5e-5) @test_util.run_deprecated_v1 def testGradGradGrad(self): with self.cached_session(): x = constant_op.constant( [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9], shape=[2, 5], name="x") y = nn_ops.softplus(x, name="softplus") (grad,) = gradients_impl.gradients(y, x) (grad_grad,) = gradients_impl.gradients(grad, x) x_init = np.asarray( [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]], dtype=np.float32, order="F") err = gradient_checker.compute_gradient_error( x, 
[2, 5], grad_grad, [2, 5], x_init_value=x_init) print("softplus (float) third-order gradient err = ", err) self.assertLess(err, 5e-5) @test_util.run_deprecated_v1 def testNoInts(self): with self.cached_session(): with self.assertRaisesRegexp( TypeError, "'features' has DataType int32 not in list of allowed values"): nn_ops.softplus(constant_op.constant(42)).eval() if __name__ == "__main__": test.main()
apache-2.0
googleapis/python-aiplatform
schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py
3
1795
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from .classification import ( ClassificationPredictionResult, ) from .image_object_detection import ( ImageObjectDetectionPredictionResult, ) from .image_segmentation import ( ImageSegmentationPredictionResult, ) from .tabular_classification import ( TabularClassificationPredictionResult, ) from .tabular_regression import ( TabularRegressionPredictionResult, ) from .text_extraction import ( TextExtractionPredictionResult, ) from .text_sentiment import ( TextSentimentPredictionResult, ) from .video_action_recognition import ( VideoActionRecognitionPredictionResult, ) from .video_classification import ( VideoClassificationPredictionResult, ) from .video_object_tracking import ( VideoObjectTrackingPredictionResult, ) __all__ = ( 'ClassificationPredictionResult', 'ImageObjectDetectionPredictionResult', 'ImageSegmentationPredictionResult', 'TabularClassificationPredictionResult', 'TabularRegressionPredictionResult', 'TextExtractionPredictionResult', 'TextSentimentPredictionResult', 'VideoActionRecognitionPredictionResult', 'VideoClassificationPredictionResult', 'VideoObjectTrackingPredictionResult', )
apache-2.0
murali-munna/scikit-learn
sklearn/externals/joblib/pool.py
237
23894
"""Custom implementation of multiprocessing.Pool with custom pickler This module provides efficient ways of working with data stored in shared memory with numpy.memmap arrays without inducing any memory copy between the parent and child processes. This module should not be imported if multiprocessing is not available as it implements subclasses of multiprocessing Pool that uses a custom alternative to SimpleQueue. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Copyright: 2012, Olivier Grisel # License: BSD 3 clause from mmap import mmap import errno import os import stat import sys import threading import atexit import tempfile import shutil try: # Python 2 compat from cPickle import loads from cPickle import dumps except ImportError: from pickle import loads from pickle import dumps import copyreg # Customizable pure Python pickler in Python 2 # customizable C-optimized pickler under Python 3.3+ from pickle import Pickler from pickle import HIGHEST_PROTOCOL from io import BytesIO from ._multiprocessing_helpers import mp, assert_spawning # We need the class definition to derive from it not the multiprocessing.Pool # factory function from multiprocessing.pool import Pool try: import numpy as np from numpy.lib.stride_tricks import as_strided except ImportError: np = None from .numpy_pickle import load from .numpy_pickle import dump from .hashing import hash # Some system have a ramdisk mounted by default, we can use it instead of /tmp # as the default folder to dump big arrays to share with subprocesses SYSTEM_SHARED_MEM_FS = '/dev/shm' # Folder and file permissions to chmod temporary files generated by the # memmaping pool. Only the owner of the Python process can access the # temporary files and folder. 
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR ############################################################################### # Support for efficient transient pickling of numpy data structures def _get_backing_memmap(a): """Recursively look up the original np.memmap instance base if any""" b = getattr(a, 'base', None) if b is None: # TODO: check scipy sparse datastructure if scipy is installed # a nor its descendants do not have a memmap base return None elif isinstance(b, mmap): # a is already a real memmap instance. return a else: # Recursive exploration of the base ancestry return _get_backing_memmap(b) def has_shareable_memory(a): """Return True if a is backed by some mmap buffer directly or not""" return _get_backing_memmap(a) is not None def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides, total_buffer_len): """Reconstruct an array view on a memmory mapped file""" if mode == 'w+': # Do not zero the original data when unpickling mode = 'r+' if strides is None: # Simple, contiguous memmap return np.memmap(filename, dtype=dtype, shape=shape, mode=mode, offset=offset, order=order) else: # For non-contiguous data, memmap the total enclosing buffer and then # extract the non-contiguous view with the stride-tricks API base = np.memmap(filename, dtype=dtype, shape=total_buffer_len, mode=mode, offset=offset, order=order) return as_strided(base, shape=shape, strides=strides) def _reduce_memmap_backed(a, m): """Pickling reduction for memmap backed arrays a is expected to be an instance of np.ndarray (or np.memmap) m is expected to be an instance of np.memmap on the top of the ``base`` attribute ancestry of a. ``m.base`` should be the real python mmap object. 
""" # offset that comes from the striding differences between a and m a_start, a_end = np.byte_bounds(a) m_start = np.byte_bounds(m)[0] offset = a_start - m_start # offset from the backing memmap offset += m.offset if m.flags['F_CONTIGUOUS']: order = 'F' else: # The backing memmap buffer is necessarily contiguous hence C if not # Fortran order = 'C' if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']: # If the array is a contiguous view, no need to pass the strides strides = None total_buffer_len = None else: # Compute the total number of items to map from which the strided # view will be extracted. strides = a.strides total_buffer_len = (a_end - a_start) // a.itemsize return (_strided_from_memmap, (m.filename, a.dtype, m.mode, offset, order, a.shape, strides, total_buffer_len)) def reduce_memmap(a): """Pickle the descriptors of a memmap instance to reopen on same file""" m = _get_backing_memmap(a) if m is not None: # m is a real mmap backed memmap instance, reduce a preserving striding # information return _reduce_memmap_backed(a, m) else: # This memmap instance is actually backed by a regular in-memory # buffer: this can happen when using binary operators on numpy.memmap # instances return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),)) class ArrayMemmapReducer(object): """Reducer callable to dump large arrays to memmap files. Parameters ---------- max_nbytes: int Threshold to trigger memmaping of large arrays to files created a folder. temp_folder: str Path of a folder where files for backing memmaped arrays are created. mmap_mode: 'r', 'r+' or 'c' Mode for the created memmap datastructure. See the documentation of numpy.memmap for more details. Note: 'w+' is coerced to 'r+' automatically to avoid zeroing the data on unpickling. verbose: int, optional, 0 by default If verbose > 0, memmap creations are logged. If verbose > 1, both memmap creations, reuse and array pickling are logged. 
context_id: int, optional, None by default Set to a value identifying a call context to spare costly hashing of the content of the input arrays when it is safe to assume that each array will not be mutated by the parent process for the duration of the dispatch process. This is the case when using the high level Parallel API. It might not be the case when using the MemmapingPool API directly. prewarm: bool, optional, False by default. Force a read on newly memmaped array to make sure that OS pre-cache it memory. This can be useful to avoid concurrent disk access when the same data array is passed to different worker processes. """ def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0, context_id=None, prewarm=True): self._max_nbytes = max_nbytes self._temp_folder = temp_folder self._mmap_mode = mmap_mode self.verbose = int(verbose) self._context_id = context_id self._prewarm = prewarm def __call__(self, a): m = _get_backing_memmap(a) if m is not None: # a is already backed by a memmap file, let's reuse it directly return _reduce_memmap_backed(a, m) if (not a.dtype.hasobject and self._max_nbytes is not None and a.nbytes > self._max_nbytes): # check that the folder exists (lazily create the pool temp folder # if required) try: os.makedirs(self._temp_folder) os.chmod(self._temp_folder, FOLDER_PERMISSIONS) except OSError as e: if e.errno != errno.EEXIST: raise e # Find a unique, concurrent safe filename for writing the # content of this array only once. if self._context_id is not None: marker = self._context_id else: marker = hash(a) basename = "%d-%d-%d-%s.pkl" % ( os.getpid(), id(threading.current_thread()), id(a), marker) filename = os.path.join(self._temp_folder, basename) # In case the same array with the same content is passed several # times to the pool subprocess children, serialize it only once # XXX: implement an explicit reference counting scheme to make it # possible to delete temporary files as soon as the workers are # done processing this data. 
if not os.path.exists(filename): if self.verbose > 0: print("Memmaping (shape=%r, dtype=%s) to new file %s" % ( a.shape, a.dtype, filename)) for dumped_filename in dump(a, filename): os.chmod(dumped_filename, FILE_PERMISSIONS) if self._prewarm: # Warm up the data to avoid concurrent disk access in # multiple children processes load(filename, mmap_mode=self._mmap_mode).max() elif self.verbose > 1: print("Memmaping (shape=%s, dtype=%s) to old file %s" % ( a.shape, a.dtype, filename)) # Let's use the memmap reducer return reduce_memmap(load(filename, mmap_mode=self._mmap_mode)) else: # do not convert a into memmap, let pickler do its usual copy with # the default system pickler if self.verbose > 1: print("Pickling array (shape=%r, dtype=%s)." % ( a.shape, a.dtype)) return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),)) ############################################################################### # Enable custom pickling in Pool queues class CustomizablePickler(Pickler): """Pickler that accepts custom reducers. HIGHEST_PROTOCOL is selected by default as this pickler is used to pickle ephemeral datastructures for interprocess communication hence no backward compatibility is required. `reducers` is expected expected to be a dictionary with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. """ # We override the pure Python pickler as its the only way to be able to # customize the dispatch table without side effects in Python 2.6 # to 3.2. For Python 3.3+ leverage the new dispatch_table # feature from http://bugs.python.org/issue14166 that makes it possible # to use the C implementation of the Pickler which is faster. 
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): Pickler.__init__(self, writer, protocol=protocol) if reducers is None: reducers = {} if hasattr(Pickler, 'dispatch'): # Make the dispatch registry an instance level attribute instead of # a reference to the class dictionary under Python 2 self.dispatch = Pickler.dispatch.copy() else: # Under Python 3 initialize the dispatch table with a copy of the # default registry self.dispatch_table = copyreg.dispatch_table.copy() for type, reduce_func in reducers.items(): self.register(type, reduce_func) def register(self, type, reduce_func): if hasattr(Pickler, 'dispatch'): # Python 2 pickler dispatching is not explicitly customizable. # Let us use a closure to workaround this limitation. def dispatcher(self, obj): reduced = reduce_func(obj) self.save_reduce(obj=obj, *reduced) self.dispatch[type] = dispatcher else: self.dispatch_table[type] = reduce_func class CustomizablePicklingQueue(object): """Locked Pipe implementation that uses a customizable pickler. This class is an alternative to the multiprocessing implementation of SimpleQueue in order to make it possible to pass custom pickling reducers, for instance to avoid memory copy when passing memmory mapped datastructures. `reducers` is expected expected to be a dictionary with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. 
""" def __init__(self, context, reducers=None): self._reducers = reducers self._reader, self._writer = context.Pipe(duplex=False) self._rlock = context.Lock() if sys.platform == 'win32': self._wlock = None else: self._wlock = context.Lock() self._make_methods() def __getstate__(self): assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock, self._reducers) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock, self._reducers) = state self._make_methods() def empty(self): return not self._reader.poll() def _make_methods(self): self._recv = recv = self._reader.recv racquire, rrelease = self._rlock.acquire, self._rlock.release def get(): racquire() try: return recv() finally: rrelease() self.get = get if self._reducers: def send(obj): buffer = BytesIO() CustomizablePickler(buffer, self._reducers).dump(obj) self._writer.send_bytes(buffer.getvalue()) self._send = send else: self._send = send = self._writer.send if self._wlock is None: # writes to a message oriented win32 pipe are atomic self.put = send else: wlock_acquire, wlock_release = ( self._wlock.acquire, self._wlock.release) def put(obj): wlock_acquire() try: return send(obj) finally: wlock_release() self.put = put class PicklingPool(Pool): """Pool implementation with customizable pickling reducers. This is useful to control how data is shipped between processes and makes it possible to use shared memory without useless copies induces by the default pickling methods of the original objects passed as arguments to dispatch. `forward_reducers` and `backward_reducers` are expected to be dictionaries with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. 
""" def __init__(self, processes=None, forward_reducers=None, backward_reducers=None, **kwargs): if forward_reducers is None: forward_reducers = dict() if backward_reducers is None: backward_reducers = dict() self._forward_reducers = forward_reducers self._backward_reducers = backward_reducers poolargs = dict(processes=processes) poolargs.update(kwargs) super(PicklingPool, self).__init__(**poolargs) def _setup_queues(self): context = getattr(self, '_ctx', mp) self._inqueue = CustomizablePicklingQueue(context, self._forward_reducers) self._outqueue = CustomizablePicklingQueue(context, self._backward_reducers) self._quick_put = self._inqueue._send self._quick_get = self._outqueue._recv def delete_folder(folder_path): """Utility function to cleanup a temporary folder if still existing""" if os.path.exists(folder_path): shutil.rmtree(folder_path) class MemmapingPool(PicklingPool): """Process pool that shares large arrays to avoid memory copy. This drop-in replacement for `multiprocessing.pool.Pool` makes it possible to work efficiently with shared memory in a numpy context. Existing instances of numpy.memmap are preserved: the child suprocesses will have access to the same shared memory in the original mode except for the 'w+' mode that is automatically transformed as 'r+' to avoid zeroing the original data upon instantiation. Furthermore large arrays from the parent process are automatically dumped to a temporary folder on the filesystem such as child processes to access their content via memmaping (file system backed shared memory). Note: it is important to call the terminate method to collect the temporary folder used by the pool. Parameters ---------- processes: int, optional Number of worker processes running concurrently in the pool. initializer: callable, optional Callable executed on worker process creation. initargs: tuple, optional Arguments passed to the initializer callable. 
temp_folder: str, optional Folder to be used by the pool for memmaping large arrays for sharing memory with worker processes. If None, this will try in order: - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. max_nbytes int or None, optional, 1e6 by default Threshold on the size of arrays passed to the workers that triggers automated memmory mapping in temp_folder. Use None to disable memmaping of large arrays. forward_reducers: dictionary, optional Reducers used to pickle objects passed from master to worker processes: see below. backward_reducers: dictionary, optional Reducers used to pickle return values from workers back to the master process. verbose: int, optional Make it possible to monitor how the communication of numpy arrays with the subprocess is handled (pickling or memmaping) context_id: int, optional, None by default Set to a value identifying a call context to spare costly hashing of the content of the input arrays when it is safe to assume that each array will not be mutated by the parent process for the duration of the dispatch process. This is the case when using the high level Parallel API. prewarm: bool or str, optional, "auto" by default. If True, force a read on newly memmaped array to make sure that OS pre- cache it in memory. This can be useful to avoid concurrent disk access when the same data array is passed to different worker processes. If "auto" (by default), prewarm is set to True, unless the Linux shared memory partition /dev/shm is available and used as temp_folder. 
`forward_reducers` and `backward_reducers` are expected to be dictionaries with key/values being `(type, callable)` pairs where `callable` is a function that give an instance of `type` will return a tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the pickled `tuple_of_objects` as would return a `__reduce__` method. See the standard library documentation on pickling for more details. """ def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6, mmap_mode='r', forward_reducers=None, backward_reducers=None, verbose=0, context_id=None, prewarm=False, **kwargs): if forward_reducers is None: forward_reducers = dict() if backward_reducers is None: backward_reducers = dict() # Prepare a sub-folder name for the serialization of this particular # pool instance (do not create in advance to spare FS write access if # no array is to be dumped): use_shared_mem = False pool_folder_name = "joblib_memmaping_pool_%d_%d" % ( os.getpid(), id(self)) if temp_folder is None: temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None) if temp_folder is None: if os.path.exists(SYSTEM_SHARED_MEM_FS): try: temp_folder = SYSTEM_SHARED_MEM_FS pool_folder = os.path.join(temp_folder, pool_folder_name) if not os.path.exists(pool_folder): os.makedirs(pool_folder) use_shared_mem = True except IOError: # Missing rights in the the /dev/shm partition, # fallback to regular temp folder. 
temp_folder = None if temp_folder is None: # Fallback to the default tmp folder, typically /tmp temp_folder = tempfile.gettempdir() temp_folder = os.path.abspath(os.path.expanduser(temp_folder)) pool_folder = os.path.join(temp_folder, pool_folder_name) self._temp_folder = pool_folder # Register the garbage collector at program exit in case caller forgets # to call terminate explicitly: note we do not pass any reference to # self to ensure that this callback won't prevent garbage collection of # the pool instance and related file handler resources such as POSIX # semaphores and pipes atexit.register(lambda: delete_folder(pool_folder)) if np is not None: # Register smart numpy.ndarray reducers that detects memmap backed # arrays and that is alse able to dump to memmap large in-memory # arrays over the max_nbytes threshold if prewarm == "auto": prewarm = not use_shared_mem forward_reduce_ndarray = ArrayMemmapReducer( max_nbytes, pool_folder, mmap_mode, verbose, context_id=context_id, prewarm=prewarm) forward_reducers[np.ndarray] = forward_reduce_ndarray forward_reducers[np.memmap] = reduce_memmap # Communication from child process to the parent process always # pickles in-memory numpy.ndarray without dumping them as memmap # to avoid confusing the caller and make it tricky to collect the # temporary folder backward_reduce_ndarray = ArrayMemmapReducer( None, pool_folder, mmap_mode, verbose) backward_reducers[np.ndarray] = backward_reduce_ndarray backward_reducers[np.memmap] = reduce_memmap poolargs = dict( processes=processes, forward_reducers=forward_reducers, backward_reducers=backward_reducers) poolargs.update(kwargs) super(MemmapingPool, self).__init__(**poolargs) def terminate(self): super(MemmapingPool, self).terminate() delete_folder(self._temp_folder)
bsd-3-clause
aaltinisik/OCBAltinkaya
addons/website_event/controllers/main.py
209
11805
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import babel.dates import time from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta import werkzeug.urls from werkzeug.exceptions import NotFound from openerp import http from openerp import tools from openerp.http import request from openerp.tools.translate import _ from openerp.addons.website.models.website import slug class website_event(http.Controller): @http.route(['/event', '/event/page/<int:page>'], type='http', auth="public", website=True) def events(self, page=1, **searches): cr, uid, context = request.cr, request.uid, request.context event_obj = request.registry['event.event'] type_obj = request.registry['event.type'] country_obj = request.registry['res.country'] searches.setdefault('date', 'all') searches.setdefault('type', 'all') searches.setdefault('country', 'all') domain_search = {} def sdn(date): return date.strftime('%Y-%m-%d 23:59:59') def sd(date): return date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) today = datetime.today() dates = [ ['all', _('Next Events'), 
[("date_end", ">", sd(today))], 0], ['today', _('Today'), [ ("date_end", ">", sd(today)), ("date_begin", "<", sdn(today))], 0], ['week', _('This Week'), [ ("date_end", ">=", sd(today + relativedelta(days=-today.weekday()))), ("date_begin", "<", sdn(today + relativedelta(days=6-today.weekday())))], 0], ['nextweek', _('Next Week'), [ ("date_end", ">=", sd(today + relativedelta(days=7-today.weekday()))), ("date_begin", "<", sdn(today + relativedelta(days=13-today.weekday())))], 0], ['month', _('This month'), [ ("date_end", ">=", sd(today.replace(day=1))), ("date_begin", "<", (today.replace(day=1) + relativedelta(months=1)).strftime('%Y-%m-%d 00:00:00'))], 0], ['nextmonth', _('Next month'), [ ("date_end", ">=", sd(today.replace(day=1) + relativedelta(months=1))), ("date_begin", "<", (today.replace(day=1) + relativedelta(months=2)).strftime('%Y-%m-%d 00:00:00'))], 0], ['old', _('Old Events'), [ ("date_end", "<", today.strftime('%Y-%m-%d 00:00:00'))], 0], ] # search domains current_date = None current_type = None current_country = None for date in dates: if searches["date"] == date[0]: domain_search["date"] = date[2] if date[0] != 'all': current_date = date[1] if searches["type"] != 'all': current_type = type_obj.browse(cr, uid, int(searches['type']), context=context) domain_search["type"] = [("type", "=", int(searches["type"]))] if searches["country"] != 'all' and searches["country"] != 'online': current_country = country_obj.browse(cr, uid, int(searches['country']), context=context) domain_search["country"] = ['|', ("country_id", "=", int(searches["country"])), ("country_id", "=", False)] elif searches["country"] == 'online': domain_search["country"] = [("country_id", "=", False)] def dom_without(without): domain = [('state', "in", ['draft','confirm','done'])] for key, search in domain_search.items(): if key != without: domain += search return domain # count by domains without self search for date in dates: if date[0] <> 'old': date[3] = event_obj.search( request.cr, 
request.uid, dom_without('date') + date[2], count=True, context=request.context) domain = dom_without('type') types = event_obj.read_group( request.cr, request.uid, domain, ["id", "type"], groupby="type", orderby="type", context=request.context) type_count = event_obj.search(request.cr, request.uid, domain, count=True, context=request.context) types.insert(0, { 'type_count': type_count, 'type': ("all", _("All Categories")) }) domain = dom_without('country') countries = event_obj.read_group( request.cr, request.uid, domain, ["id", "country_id"], groupby="country_id", orderby="country_id", context=request.context) country_id_count = event_obj.search(request.cr, request.uid, domain, count=True, context=request.context) countries.insert(0, { 'country_id_count': country_id_count, 'country_id': ("all", _("All Countries")) }) step = 10 # Number of events per page event_count = event_obj.search( request.cr, request.uid, dom_without("none"), count=True, context=request.context) pager = request.website.pager( url="/event", url_args={'date': searches.get('date'), 'type': searches.get('type'), 'country': searches.get('country')}, total=event_count, page=page, step=step, scope=5) order = 'website_published desc, date_begin' if searches.get('date','all') == 'old': order = 'website_published desc, date_begin desc' obj_ids = event_obj.search( request.cr, request.uid, dom_without("none"), limit=step, offset=pager['offset'], order=order, context=request.context) events_ids = event_obj.browse(request.cr, request.uid, obj_ids, context=request.context) values = { 'current_date': current_date, 'current_country': current_country, 'current_type': current_type, 'event_ids': events_ids, 'dates': dates, 'types': types, 'countries': countries, 'pager': pager, 'searches': searches, 'search_path': "?%s" % werkzeug.url_encode(searches), } return request.website.render("website_event.index", values) @http.route(['/event/<model("event.event"):event>/page/<path:page>'], type='http', auth="public", 
website=True) def event_page(self, event, page, **post): values = { 'event': event, 'main_object': event } if '.' not in page: page = 'website_event.%s' % page try: request.website.get_template(page) except ValueError, e: # page not found raise NotFound return request.website.render(page, values) @http.route(['/event/<model("event.event"):event>'], type='http', auth="public", website=True) def event(self, event, **post): if event.menu_id and event.menu_id.child_id: target_url = event.menu_id.child_id[0].url else: target_url = '/event/%s/register' % str(event.id) if post.get('enable_editor') == '1': target_url += '?enable_editor=1' return request.redirect(target_url); @http.route(['/event/<model("event.event"):event>/register'], type='http', auth="public", website=True) def event_register(self, event, **post): values = { 'event': event, 'main_object': event, 'range': range, } return request.website.render("website_event.event_description_full", values) @http.route('/event/add_event', type='http', auth="user", methods=['POST'], website=True) def add_event(self, event_name="New Event", **kwargs): return self._add_event(event_name, request.context, **kwargs) def _add_event(self, event_name=None, context={}, **kwargs): if not event_name: event_name = _("New Event") Event = request.registry.get('event.event') date_begin = datetime.today() + timedelta(days=(14)) vals = { 'name': event_name, 'date_begin': date_begin.strftime('%Y-%m-%d'), 'date_end': (date_begin + timedelta(days=(1))).strftime('%Y-%m-%d'), } event_id = Event.create(request.cr, request.uid, vals, context=context) event = Event.browse(request.cr, request.uid, event_id, context=context) return request.redirect("/event/%s/register?enable_editor=1" % slug(event)) def get_formated_date(self, event): context = request.context start_date = datetime.strptime(event.date_begin, tools.DEFAULT_SERVER_DATETIME_FORMAT).date() end_date = datetime.strptime(event.date_end, tools.DEFAULT_SERVER_DATETIME_FORMAT).date() month = 
babel.dates.get_month_names('abbreviated', locale=context.get('lang', 'en_US'))[start_date.month] return _('%(month)s %(start_day)s%(end_day)s') % { 'month': month, 'start_day': start_date.strftime("%e"), 'end_day': (end_date != start_date and ("-"+end_date.strftime("%e")) or "") } @http.route('/event/get_country_event_list', type='http', auth='public', website=True) def get_country_events(self ,**post): cr, uid, context, event_ids = request.cr, request.uid, request.context,[] country_obj = request.registry['res.country'] event_obj = request.registry['event.event'] country_code = request.session['geoip'].get('country_code') result = {'events':[],'country':False} if country_code: country_ids = country_obj.search(cr, uid, [('code', '=', country_code)], context=context) event_ids = event_obj.search(cr, uid, ['|', ('address_id', '=', None),('country_id.code', '=', country_code),('date_begin','>=', time.strftime('%Y-%m-%d 00:00:00')),('state', '=', 'confirm')], order="date_begin", context=context) if not event_ids: event_ids = event_obj.search(cr, uid, [('date_begin','>=', time.strftime('%Y-%m-%d 00:00:00')),('state', '=', 'confirm')], order="date_begin", context=context) for event in event_obj.browse(cr, uid, event_ids, context=context)[:6]: if country_code and event.country_id.code == country_code: result['country'] = country_obj.browse(cr, uid, country_ids[0], context=context) result['events'].append({ "date": self.get_formated_date(event), "event": event, "url": event.website_url}) return request.website.render("website_event.country_events_list",result)
agpl-3.0
ryfeus/lambda-packs
Tensorflow_OpenCV_Nightly/source/markdown/extensions/__init__.py
2
1623
""" Extensions ----------------------------------------------------------------------------- """ class Extension: """ Base class for extensions to subclass. """ def __init__(self, configs = {}): """Create an instance of an Extention. Keyword arguments: * configs: A dict of configuration setting used by an Extension. """ self.config = configs def getConfig(self, key, default=''): """ Return a setting for the given key or an empty string. """ if key in self.config: return self.config[key][0] else: return default def getConfigs(self): """ Return all configs settings as a dict. """ return dict([(key, self.getConfig(key)) for key in list(self.config.keys())]) def getConfigInfo(self): """ Return all config descriptions as a list of tuples. """ return [(key, self.config[key][1]) for key in list(self.config.keys())] def setConfig(self, key, value): """ Set a config setting for `key` with the given `value`. """ self.config[key][0] = value def extendMarkdown(self, md, md_globals): """ Add the various proccesors and patterns to the Markdown Instance. This method must be overriden by every extension. Keyword arguments: * md: The Markdown instance. * md_globals: Global variables in the markdown module namespace. """ raise NotImplementedError('Extension "%s.%s" must define an "extendMarkdown"' \ 'method.' % (self.__class__.__module__, self.__class__.__name__))
mit
wdv4758h/ZipPy
edu.uci.python.benchmark/src/benchmarks/euler31-timed.py
1
1614
#runas solve() #unittest.skip recursive generator #pythran export solve() # 01/08/14 modified for benchmarking by Wei Zhang import sys, time COINS = [1, 2, 5, 10, 20, 50, 100, 200] # test def _sum(iterable): sum = None for i in iterable: if sum is None: sum = i else: sum += i return sum def balance(pattern): return _sum(COINS[x]*pattern[x] for x in range(0, len(pattern))) def gen(pattern, coinnum, num): coin = COINS[coinnum] for p in range(0, num//coin + 1): newpat = pattern[:coinnum] + (p,) bal = balance(newpat) if bal > num: return elif bal == num: yield newpat elif coinnum < len(COINS)-1: for pat in gen(newpat, coinnum+1, num): yield pat def solve(total): ''' In England the currency is made up of pound, P, and pence, p, and there are eight coins in general circulation: 1p, 2p, 5p, 10p, 20p, 50p, P1 (100p) and P2 (200p). It is possible to make P2 in the following way: 1 P1 + 1 50p + 2 20p + 1 5p + 1 2p + 3 1p How many different ways can P2 be made using any number of coins? ''' return _sum(1 for pat in gen((), 0, total)) def measure(): input = int(sys.argv[1]) # 200 for i in range(3): solve(input) print("Start timing...") start = time.time() result = solve(input) print('total number of different ways: ', result) duration = "%.3f\n" % (time.time() - start) print("euler31: " + duration) # warm up for i in range(2000): # 300 solve(40) measure()
bsd-3-clause
angelapper/edx-platform
common/lib/xmodule/xmodule/split_test_module.py
8
29984
""" Module for running content split tests """ import logging import json from webob import Response from uuid import uuid4 from operator import itemgetter from xmodule.progress import Progress from xmodule.seq_module import SequenceDescriptor from xmodule.studio_editable import StudioEditableModule, StudioEditableDescriptor from xmodule.x_module import XModule, module_attr, STUDENT_VIEW from xmodule.validation import StudioValidation, StudioValidationMessage from xmodule.modulestore.inheritance import UserPartitionList from lxml import etree from xblock.core import XBlock from xblock.fields import Scope, Integer, String, ReferenceValueDict from xblock.fragment import Fragment log = logging.getLogger('edx.' + __name__) # Make '_' a no-op so we can scrape strings. Using lambda instead of # `django.utils.translation.ugettext_noop` because Django cannot be imported in this file _ = lambda text: text DEFAULT_GROUP_NAME = _(u'Group ID {group_id}') class SplitTestFields(object): """Fields needed for split test module""" has_children = True # All available user partitions (with value and display name). This is updated each time # editable_metadata_fields is called. user_partition_values = [] # Default value used for user_partition_id no_partition_selected = {'display_name': _("Not Selected"), 'value': -1} @staticmethod def build_partition_values(all_user_partitions, selected_user_partition): """ This helper method builds up the user_partition values that will be passed to the Studio editor """ SplitTestFields.user_partition_values = [] # Add "No selection" value if there is not a valid selected user partition. 
if not selected_user_partition: SplitTestFields.user_partition_values.append(SplitTestFields.no_partition_selected) for user_partition in get_split_user_partitions(all_user_partitions): SplitTestFields.user_partition_values.append( {"display_name": user_partition.name, "value": user_partition.id} ) return SplitTestFields.user_partition_values display_name = String( display_name=_("Display Name"), help=_("The display name for this component. (Not shown to learners)"), scope=Scope.settings, default=_("Content Experiment") ) # Specified here so we can see what the value set at the course-level is. user_partitions = UserPartitionList( help=_("The list of group configurations for partitioning students in content experiments."), default=[], scope=Scope.settings ) user_partition_id = Integer( help=_("The configuration defines how users are grouped for this content experiment. Caution: Changing the group configuration of a student-visible experiment will impact the experiment data."), scope=Scope.content, display_name=_("Group Configuration"), default=no_partition_selected["value"], values=lambda: SplitTestFields.user_partition_values # Will be populated before the Studio editor is shown. ) # group_id is an int # child is a serialized UsageId (aka Location). This child # location needs to actually match one of the children of this # Block. (expected invariant that we'll need to test, and handle # authoring tools that mess this up) group_id_to_child = ReferenceValueDict( help=_("Which child module students in a particular group_id should see"), scope=Scope.content ) def get_split_user_partitions(user_partitions): """ Helper method that filters a list of user_partitions and returns just the ones that are suitable for the split_test module. 
""" return [user_partition for user_partition in user_partitions if user_partition.scheme.name == "random"] @XBlock.needs('user_tags') # pylint: disable=abstract-method @XBlock.needs('partitions') @XBlock.needs('user') class SplitTestModule(SplitTestFields, XModule, StudioEditableModule): """ Show the user the appropriate child. Uses the ExperimentState API to figure out which child to show. Course staff still get put in an experimental condition, but have the option to see the other conditions. The only thing that counts toward their grade/progress is the condition they are actually in. Technical notes: - There is more dark magic in this code than I'd like. The whole varying-children + grading interaction is a tangle between super and subclasses of descriptors and modules. """ def __init__(self, *args, **kwargs): super(SplitTestModule, self).__init__(*args, **kwargs) self.child_descriptor = None child_descriptors = self.get_child_descriptors() if len(child_descriptors) >= 1: self.child_descriptor = child_descriptors[0] if self.child_descriptor is not None: self.child = self.system.get_module(self.child_descriptor) else: self.child = None def get_child_descriptor_by_location(self, location): """ Look through the children and look for one with the given location. Returns the descriptor. If none match, return None """ # NOTE: calling self.get_children() creates a circular reference-- # it calls get_child_descriptors() internally, but that doesn't work until # we've picked a choice. Use self.descriptor.get_children() instead. for child in self.descriptor.get_children(): if child.location == location: return child return None def get_content_titles(self): """ Returns list of content titles for split_test's child. This overwrites the get_content_titles method included in x_module by default. WHY THIS OVERWRITE IS NECESSARY: If we fetch *all* of split_test's children, we'll end up getting all of the possible conditions users could ever see. 
Ex: If split_test shows a video to group A and HTML to group B, the regular get_content_titles in x_module will get the title of BOTH the video AND the HTML. We only want the content titles that should actually be displayed to the user. split_test's .child property contains *only* the child that should actually be shown to the user, so we call get_content_titles() on only that child. """ return self.child.get_content_titles() def get_child_descriptors(self): """ For grading--return just the chosen child. """ group_id = self.get_group_id() if group_id is None: return [] # group_id_to_child comes from json, so it has to have string keys str_group_id = str(group_id) if str_group_id in self.group_id_to_child: child_location = self.group_id_to_child[str_group_id] child_descriptor = self.get_child_descriptor_by_location(child_location) else: # Oops. Config error. log.debug("configuration error in split test module: invalid group_id %r (not one of %r). Showing error", str_group_id, self.group_id_to_child.keys()) if child_descriptor is None: # Peak confusion is great. Now that we set child_descriptor, # get_children() should return a list with one element--the # xmodule for the child log.debug("configuration error in split test module: no such child") return [] return [child_descriptor] def get_group_id(self): """ Returns the group ID, or None if none is available. """ partitions_service = self.runtime.service(self, 'partitions') user_service = self.runtime.service(self, 'user') user = user_service._django_user # pylint: disable=protected-access return partitions_service.get_user_group_id_for_partition(user, self.user_partition_id) @property def is_configured(self): """ Returns true if the split_test instance is associated with a UserPartition. """ return self.descriptor.is_configured def _staff_view(self, context): """ Render the staff view for a split test module. 
""" fragment = Fragment() active_contents = [] inactive_contents = [] for child_location in self.children: # pylint: disable=no-member child_descriptor = self.get_child_descriptor_by_location(child_location) child = self.system.get_module(child_descriptor) rendered_child = child.render(STUDENT_VIEW, context) fragment.add_frag_resources(rendered_child) group_name, updated_group_id = self.get_data_for_vertical(child) if updated_group_id is None: # inactive group group_name = child.display_name updated_group_id = [g_id for g_id, loc in self.group_id_to_child.items() if loc == child_location][0] inactive_contents.append({ 'group_name': _(u'{group_name} (inactive)').format(group_name=group_name), 'id': child.location.to_deprecated_string(), 'content': rendered_child.content, 'group_id': updated_group_id, }) continue active_contents.append({ 'group_name': group_name, 'id': child.location.to_deprecated_string(), 'content': rendered_child.content, 'group_id': updated_group_id, }) # Sort active and inactive contents by group name. sorted_active_contents = sorted(active_contents, key=itemgetter('group_name')) sorted_inactive_contents = sorted(inactive_contents, key=itemgetter('group_name')) # Use the new template fragment.add_content(self.system.render_template('split_test_staff_view.html', { 'items': sorted_active_contents + sorted_inactive_contents, })) fragment.add_css('.split-test-child { display: none; }') fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_staff.js')) fragment.initialize_js('ABTestSelector') return fragment def author_view(self, context): """ Renders the Studio preview by rendering each child so that they can all be seen and edited. 
""" fragment = Fragment() root_xblock = context.get('root_xblock') is_root = root_xblock and root_xblock.location == self.location active_groups_preview = None inactive_groups_preview = None if is_root: [active_children, inactive_children] = self.descriptor.active_and_inactive_children() active_groups_preview = self.studio_render_children( fragment, active_children, context ) inactive_groups_preview = self.studio_render_children( fragment, inactive_children, context ) fragment.add_content(self.system.render_template('split_test_author_view.html', { 'split_test': self, 'is_root': is_root, 'is_configured': self.is_configured, 'active_groups_preview': active_groups_preview, 'inactive_groups_preview': inactive_groups_preview, 'group_configuration_url': self.descriptor.group_configuration_url, })) fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_author_view.js')) fragment.initialize_js('SplitTestAuthorView') return fragment def studio_render_children(self, fragment, children, context): """ Renders the specified children and returns it as an HTML string. In addition, any dependencies are added to the specified fragment. """ html = "" for active_child_descriptor in children: active_child = self.system.get_module(active_child_descriptor) rendered_child = active_child.render(StudioEditableModule.get_preview_view_name(active_child), context) if active_child.category == 'vertical': group_name, group_id = self.get_data_for_vertical(active_child) if group_name: rendered_child.content = rendered_child.content.replace( DEFAULT_GROUP_NAME.format(group_id=group_id), group_name ) fragment.add_frag_resources(rendered_child) html = html + rendered_child.content return html def student_view(self, context): """ Renders the contents of the chosen condition for students, and all the conditions for staff. """ if self.child is None: # raise error instead? In fact, could complain on descriptor load... return Fragment(content=u"<div>Nothing here. 
Move along.</div>") if self.system.user_is_staff: return self._staff_view(context) else: child_fragment = self.child.render(STUDENT_VIEW, context) fragment = Fragment(self.system.render_template('split_test_student_view.html', { 'child_content': child_fragment.content, 'child_id': self.child.scope_ids.usage_id, })) fragment.add_frag_resources(child_fragment) fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/split_test_student.js')) fragment.initialize_js('SplitTestStudentView') return fragment @XBlock.handler def log_child_render(self, request, suffix=''): # pylint: disable=unused-argument """ Record in the tracking logs which child was rendered """ # TODO: use publish instead, when publish is wired to the tracking logs self.system.track_function('xblock.split_test.child_render', {'child_id': self.child.scope_ids.usage_id.to_deprecated_string()}) return Response() def get_icon_class(self): return self.child.get_icon_class() if self.child else 'other' def get_progress(self): children = self.get_children() progresses = [child.get_progress() for child in children] progress = reduce(Progress.add_counts, progresses, None) return progress def get_data_for_vertical(self, vertical): """ Return name and id of a group corresponding to `vertical`. """ user_partition = self.descriptor.get_selected_partition() if user_partition: for group in user_partition.groups: group_id = unicode(group.id) child_location = self.group_id_to_child.get(group_id, None) if child_location == vertical.location: return (group.name, group.id) return (None, None) @property def tooltip_title(self): return getattr(self.child, 'tooltip_title', '') def validate(self): """ Message for either error or warning validation message/s. Returns message and type. Priority given to error type message. 
""" return self.descriptor.validate() @XBlock.needs('user_tags') # pylint: disable=abstract-method @XBlock.needs('partitions') @XBlock.needs('user') class SplitTestDescriptor(SplitTestFields, SequenceDescriptor, StudioEditableDescriptor): # the editing interface can be the same as for sequences -- just a container module_class = SplitTestModule resources_dir = 'assets/split_test' filename_extension = "xml" mako_template = "widgets/metadata-only-edit.html" show_in_read_only_mode = True child_descriptor = module_attr('child_descriptor') log_child_render = module_attr('log_child_render') get_content_titles = module_attr('get_content_titles') def definition_to_xml(self, resource_fs): xml_object = etree.Element('split_test') renderable_groups = {} # json.dumps doesn't know how to handle Location objects for group in self.group_id_to_child: renderable_groups[group] = self.group_id_to_child[group].to_deprecated_string() xml_object.set('group_id_to_child', json.dumps(renderable_groups)) xml_object.set('user_partition_id', str(self.user_partition_id)) for child in self.get_children(): self.runtime.add_block_as_child_node(child, xml_object) return xml_object @classmethod def definition_from_xml(cls, xml_object, system): children = [] raw_group_id_to_child = xml_object.attrib.get('group_id_to_child', None) user_partition_id = xml_object.attrib.get('user_partition_id', None) try: group_id_to_child = json.loads(raw_group_id_to_child) except ValueError: msg = "group_id_to_child is not valid json" log.exception(msg) system.error_tracker(msg) for child in xml_object: try: descriptor = system.process_xml(etree.tostring(child)) children.append(descriptor.scope_ids.usage_id) except Exception: msg = "Unable to load child when parsing split_test module." 
log.exception(msg) system.error_tracker(msg) return ({ 'group_id_to_child': group_id_to_child, 'user_partition_id': user_partition_id }, children) def get_context(self): _context = super(SplitTestDescriptor, self).get_context() _context.update({ 'selected_partition': self.get_selected_partition() }) return _context def has_dynamic_children(self): """ Grading needs to know that only one of the children is actually "real". This makes it use module.get_child_descriptors(). """ return True def editor_saved(self, user, old_metadata, old_content): """ Used to create default verticals for the groups. Assumes that a mutable modulestore is being used. """ # Any existing value of user_partition_id will be in "old_content" instead of "old_metadata" # because it is Scope.content. if 'user_partition_id' not in old_content or old_content['user_partition_id'] != self.user_partition_id: selected_partition = self.get_selected_partition() if selected_partition is not None: self.group_id_mapping = {} # pylint: disable=attribute-defined-outside-init for group in selected_partition.groups: self._create_vertical_for_group(group, user.id) # Don't need to call update_item in the modulestore because the caller of this method will do it. else: # If children referenced in group_id_to_child have been deleted, remove them from the map. for str_group_id, usage_key in self.group_id_to_child.items(): if usage_key not in self.children: # pylint: disable=no-member del self.group_id_to_child[str_group_id] @property def editable_metadata_fields(self): # Update the list of partitions based on the currently available user_partitions. SplitTestFields.build_partition_values(self.user_partitions, self.get_selected_partition()) editable_fields = super(SplitTestDescriptor, self).editable_metadata_fields # Explicitly add user_partition_id, which does not automatically get picked up because it is Scope.content. 
# Note that this means it will be saved by the Studio editor as "metadata", but the field will # still update correctly. editable_fields[SplitTestFields.user_partition_id.name] = self._create_metadata_editor_info( SplitTestFields.user_partition_id ) return editable_fields @property def non_editable_metadata_fields(self): non_editable_fields = super(SplitTestDescriptor, self).non_editable_metadata_fields non_editable_fields.extend([ SplitTestDescriptor.due, SplitTestDescriptor.user_partitions, SplitTestDescriptor.group_id_to_child, ]) return non_editable_fields def get_selected_partition(self): """ Returns the partition that this split module is currently using, or None if the currently selected partition ID does not match any of the defined partitions. """ for user_partition in self.user_partitions: if user_partition.id == self.user_partition_id: return user_partition return None def active_and_inactive_children(self): """ Returns two values: 1. The active children of this split test, in the order of the groups. 2. The remaining (inactive) children, in the order they were added to the split test. """ children = self.get_children() user_partition = self.get_selected_partition() if not user_partition: return [], children def get_child_descriptor(location): """ Returns the child descriptor which matches the specified location, or None if one is not found. 
""" for child in children: if child.location == location: return child return None # Compute the active children in the order specified by the user partition active_children = [] for group in user_partition.groups: group_id = unicode(group.id) child_location = self.group_id_to_child.get(group_id, None) child = get_child_descriptor(child_location) if child: active_children.append(child) # Compute the inactive children in the order they were added to the split test inactive_children = [child for child in children if child not in active_children] return active_children, inactive_children @property def is_configured(self): """ Returns true if the split_test instance is associated with a UserPartition. """ return not self.user_partition_id == SplitTestFields.no_partition_selected['value'] def validate(self): """ Validates the state of this split_test instance. This is the override of the general XBlock method, and it will also ask its superclass to validate. """ validation = super(SplitTestDescriptor, self).validate() split_test_validation = self.validate_split_test() if split_test_validation: return validation validation = StudioValidation.copy(validation) if validation and (not self.is_configured and len(split_test_validation.messages) == 1): validation.summary = split_test_validation.messages[0] else: validation.summary = self.general_validation_message(split_test_validation) validation.add_messages(split_test_validation) return validation def validate_split_test(self): """ Returns a StudioValidation object describing the current state of the split_test_module (not including superclass validation messages). 
""" _ = self.runtime.service(self, "i18n").ugettext split_validation = StudioValidation(self.location) if self.user_partition_id < 0: split_validation.add( StudioValidationMessage( StudioValidationMessage.NOT_CONFIGURED, _(u"The experiment is not associated with a group configuration."), action_class='edit-button', action_label=_(u"Select a Group Configuration") ) ) else: user_partition = self.get_selected_partition() if not user_partition: split_validation.add( StudioValidationMessage( StudioValidationMessage.ERROR, _(u"The experiment uses a deleted group configuration. Select a valid group configuration or delete this experiment.") ) ) else: # If the user_partition selected is not valid for the split_test module, error. # This can only happen via XML and import/export. if not get_split_user_partitions([user_partition]): split_validation.add( StudioValidationMessage( StudioValidationMessage.ERROR, _(u"The experiment uses a group configuration that is not supported for experiments. " u"Select a valid group configuration or delete this experiment.") ) ) else: [active_children, inactive_children] = self.active_and_inactive_children() if len(active_children) < len(user_partition.groups): split_validation.add( StudioValidationMessage( StudioValidationMessage.ERROR, _(u"The experiment does not contain all of the groups in the configuration."), action_runtime_event='add-missing-groups', action_label=_(u"Add Missing Groups") ) ) if len(inactive_children) > 0: split_validation.add( StudioValidationMessage( StudioValidationMessage.WARNING, _(u"The experiment has an inactive group. " u"Move content into active groups, then delete the inactive group.") ) ) return split_validation def general_validation_message(self, validation=None): """ Returns just a summary message about whether or not this split_test instance has validation issues (not including superclass validation messages). If the split_test instance validates correctly, this method returns None. 
""" if validation is None: validation = self.validate_split_test() if not validation: has_error = any(message.type == StudioValidationMessage.ERROR for message in validation.messages) return StudioValidationMessage( StudioValidationMessage.ERROR if has_error else StudioValidationMessage.WARNING, _(u"This content experiment has issues that affect content visibility.") ) return None @XBlock.handler def add_missing_groups(self, request, suffix=''): # pylint: disable=unused-argument """ Create verticals for any missing groups in the split test instance. Called from Studio view. """ user_partition = self.get_selected_partition() changed = False for group in user_partition.groups: str_group_id = unicode(group.id) if str_group_id not in self.group_id_to_child: user_id = self.runtime.service(self, 'user').get_current_user().opt_attrs['edx-platform.user_id'] self._create_vertical_for_group(group, user_id) changed = True if changed: # user.id - to be fixed by Publishing team self.system.modulestore.update_item(self, None) return Response() @property def group_configuration_url(self): assert hasattr(self.system, 'modulestore') and hasattr(self.system.modulestore, 'get_course'), \ "modulestore has to be available" course_module = self.system.modulestore.get_course(self.location.course_key) group_configuration_url = None if 'split_test' in course_module.advanced_modules: user_partition = self.get_selected_partition() if user_partition: group_configuration_url = "{url}#{configuration_id}".format( url='/group_configurations/' + unicode(self.location.course_key), configuration_id=str(user_partition.id) ) return group_configuration_url def _create_vertical_for_group(self, group, user_id): """ Creates a vertical to associate with the group. This appends the new vertical to the end of children, and updates group_id_to_child. A mutable modulestore is needed to call this method (will need to update after mixed modulestore work, currently relies on mongo's create_item method). 
""" assert hasattr(self.system, 'modulestore') and hasattr(self.system.modulestore, 'create_item'), \ "editor_saved should only be called when a mutable modulestore is available" modulestore = self.system.modulestore dest_usage_key = self.location.replace(category="vertical", name=uuid4().hex) metadata = {'display_name': DEFAULT_GROUP_NAME.format(group_id=group.id)} modulestore.create_item( user_id, self.location.course_key, dest_usage_key.block_type, block_id=dest_usage_key.block_id, definition_data=None, metadata=metadata, runtime=self.system, ) self.children.append(dest_usage_key) # pylint: disable=no-member self.group_id_to_child[unicode(group.id)] = dest_usage_key tooltip_title = module_attr('tooltip_title')
agpl-3.0
sbellem/bitcoin
qa/rpc-tests/test_framework/script.py
91
23849
# # script.py # # This file is modified from python-bitcoinlib. # # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # """Scripts Functionality to build scripts, as well as SignatureHash(). """ from __future__ import absolute_import, division, print_function, unicode_literals from test_framework.mininode import CTransaction, CTxOut, hash256 import sys bchr = chr bord = ord if sys.version > '3': long = int bchr = lambda x: bytes([x]) bord = lambda x: x import copy import struct import test_framework.bignum MAX_SCRIPT_SIZE = 10000 MAX_SCRIPT_ELEMENT_SIZE = 520 MAX_SCRIPT_OPCODES = 201 OPCODE_NAMES = {} _opcode_instances = [] class CScriptOp(int): """A single script opcode""" __slots__ = [] @staticmethod def encode_op_pushdata(d): """Encode a PUSHDATA op, returning bytes""" if len(d) < 0x4c: return b'' + bchr(len(d)) + d # OP_PUSHDATA elif len(d) <= 0xff: return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1 elif len(d) <= 0xffff: return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2 elif len(d) <= 0xffffffff: return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4 else: raise ValueError("Data too long to encode in a PUSHDATA op") @staticmethod def encode_op_n(n): """Encode a small integer op, returning an opcode""" if not (0 <= n <= 16): raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n) if n == 0: return OP_0 else: return CScriptOp(OP_1 + n-1) def decode_op_n(self): """Decode a small integer opcode, returning an integer""" if self == OP_0: return 0 if not (self == OP_0 or OP_1 <= self <= OP_16): raise ValueError('op %r is not an OP_N' % self) return int(self - OP_1+1) def is_small_int(self): """Return true if the op pushes a small integer to the stack""" if 0x51 <= self <= 0x60 or self == 0: return True else: return False def __str__(self): return repr(self) def __repr__(self): if self in OPCODE_NAMES: return OPCODE_NAMES[self] else: return 
'CScriptOp(0x%x)' % self def __new__(cls, n): try: return _opcode_instances[n] except IndexError: assert len(_opcode_instances) == n _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n)) return _opcode_instances[n] # Populate opcode instance table for n in range(0xff+1): CScriptOp(n) # push value OP_0 = CScriptOp(0x00) OP_FALSE = OP_0 OP_PUSHDATA1 = CScriptOp(0x4c) OP_PUSHDATA2 = CScriptOp(0x4d) OP_PUSHDATA4 = CScriptOp(0x4e) OP_1NEGATE = CScriptOp(0x4f) OP_RESERVED = CScriptOp(0x50) OP_1 = CScriptOp(0x51) OP_TRUE=OP_1 OP_2 = CScriptOp(0x52) OP_3 = CScriptOp(0x53) OP_4 = CScriptOp(0x54) OP_5 = CScriptOp(0x55) OP_6 = CScriptOp(0x56) OP_7 = CScriptOp(0x57) OP_8 = CScriptOp(0x58) OP_9 = CScriptOp(0x59) OP_10 = CScriptOp(0x5a) OP_11 = CScriptOp(0x5b) OP_12 = CScriptOp(0x5c) OP_13 = CScriptOp(0x5d) OP_14 = CScriptOp(0x5e) OP_15 = CScriptOp(0x5f) OP_16 = CScriptOp(0x60) # control OP_NOP = CScriptOp(0x61) OP_VER = CScriptOp(0x62) OP_IF = CScriptOp(0x63) OP_NOTIF = CScriptOp(0x64) OP_VERIF = CScriptOp(0x65) OP_VERNOTIF = CScriptOp(0x66) OP_ELSE = CScriptOp(0x67) OP_ENDIF = CScriptOp(0x68) OP_VERIFY = CScriptOp(0x69) OP_RETURN = CScriptOp(0x6a) # stack ops OP_TOALTSTACK = CScriptOp(0x6b) OP_FROMALTSTACK = CScriptOp(0x6c) OP_2DROP = CScriptOp(0x6d) OP_2DUP = CScriptOp(0x6e) OP_3DUP = CScriptOp(0x6f) OP_2OVER = CScriptOp(0x70) OP_2ROT = CScriptOp(0x71) OP_2SWAP = CScriptOp(0x72) OP_IFDUP = CScriptOp(0x73) OP_DEPTH = CScriptOp(0x74) OP_DROP = CScriptOp(0x75) OP_DUP = CScriptOp(0x76) OP_NIP = CScriptOp(0x77) OP_OVER = CScriptOp(0x78) OP_PICK = CScriptOp(0x79) OP_ROLL = CScriptOp(0x7a) OP_ROT = CScriptOp(0x7b) OP_SWAP = CScriptOp(0x7c) OP_TUCK = CScriptOp(0x7d) # splice ops OP_CAT = CScriptOp(0x7e) OP_SUBSTR = CScriptOp(0x7f) OP_LEFT = CScriptOp(0x80) OP_RIGHT = CScriptOp(0x81) OP_SIZE = CScriptOp(0x82) # bit logic OP_INVERT = CScriptOp(0x83) OP_AND = CScriptOp(0x84) OP_OR = CScriptOp(0x85) OP_XOR = CScriptOp(0x86) OP_EQUAL = CScriptOp(0x87) OP_EQUALVERIFY = 
CScriptOp(0x88) OP_RESERVED1 = CScriptOp(0x89) OP_RESERVED2 = CScriptOp(0x8a) # numeric OP_1ADD = CScriptOp(0x8b) OP_1SUB = CScriptOp(0x8c) OP_2MUL = CScriptOp(0x8d) OP_2DIV = CScriptOp(0x8e) OP_NEGATE = CScriptOp(0x8f) OP_ABS = CScriptOp(0x90) OP_NOT = CScriptOp(0x91) OP_0NOTEQUAL = CScriptOp(0x92) OP_ADD = CScriptOp(0x93) OP_SUB = CScriptOp(0x94) OP_MUL = CScriptOp(0x95) OP_DIV = CScriptOp(0x96) OP_MOD = CScriptOp(0x97) OP_LSHIFT = CScriptOp(0x98) OP_RSHIFT = CScriptOp(0x99) OP_BOOLAND = CScriptOp(0x9a) OP_BOOLOR = CScriptOp(0x9b) OP_NUMEQUAL = CScriptOp(0x9c) OP_NUMEQUALVERIFY = CScriptOp(0x9d) OP_NUMNOTEQUAL = CScriptOp(0x9e) OP_LESSTHAN = CScriptOp(0x9f) OP_GREATERTHAN = CScriptOp(0xa0) OP_LESSTHANOREQUAL = CScriptOp(0xa1) OP_GREATERTHANOREQUAL = CScriptOp(0xa2) OP_MIN = CScriptOp(0xa3) OP_MAX = CScriptOp(0xa4) OP_WITHIN = CScriptOp(0xa5) # crypto OP_RIPEMD160 = CScriptOp(0xa6) OP_SHA1 = CScriptOp(0xa7) OP_SHA256 = CScriptOp(0xa8) OP_HASH160 = CScriptOp(0xa9) OP_HASH256 = CScriptOp(0xaa) OP_CODESEPARATOR = CScriptOp(0xab) OP_CHECKSIG = CScriptOp(0xac) OP_CHECKSIGVERIFY = CScriptOp(0xad) OP_CHECKMULTISIG = CScriptOp(0xae) OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf) # expansion OP_NOP1 = CScriptOp(0xb0) OP_NOP2 = CScriptOp(0xb1) OP_NOP3 = CScriptOp(0xb2) OP_NOP4 = CScriptOp(0xb3) OP_NOP5 = CScriptOp(0xb4) OP_NOP6 = CScriptOp(0xb5) OP_NOP7 = CScriptOp(0xb6) OP_NOP8 = CScriptOp(0xb7) OP_NOP9 = CScriptOp(0xb8) OP_NOP10 = CScriptOp(0xb9) # template matching params OP_SMALLINTEGER = CScriptOp(0xfa) OP_PUBKEYS = CScriptOp(0xfb) OP_PUBKEYHASH = CScriptOp(0xfd) OP_PUBKEY = CScriptOp(0xfe) OP_INVALIDOPCODE = CScriptOp(0xff) VALID_OPCODES = { OP_1NEGATE, OP_RESERVED, OP_1, OP_2, OP_3, OP_4, OP_5, OP_6, OP_7, OP_8, OP_9, OP_10, OP_11, OP_12, OP_13, OP_14, OP_15, OP_16, OP_NOP, OP_VER, OP_IF, OP_NOTIF, OP_VERIF, OP_VERNOTIF, OP_ELSE, OP_ENDIF, OP_VERIFY, OP_RETURN, OP_TOALTSTACK, OP_FROMALTSTACK, OP_2DROP, OP_2DUP, OP_3DUP, OP_2OVER, OP_2ROT, OP_2SWAP, OP_IFDUP, OP_DEPTH, 
OP_DROP, OP_DUP, OP_NIP, OP_OVER, OP_PICK, OP_ROLL, OP_ROT, OP_SWAP, OP_TUCK, OP_CAT, OP_SUBSTR, OP_LEFT, OP_RIGHT, OP_SIZE, OP_INVERT, OP_AND, OP_OR, OP_XOR, OP_EQUAL, OP_EQUALVERIFY, OP_RESERVED1, OP_RESERVED2, OP_1ADD, OP_1SUB, OP_2MUL, OP_2DIV, OP_NEGATE, OP_ABS, OP_NOT, OP_0NOTEQUAL, OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_MOD, OP_LSHIFT, OP_RSHIFT, OP_BOOLAND, OP_BOOLOR, OP_NUMEQUAL, OP_NUMEQUALVERIFY, OP_NUMNOTEQUAL, OP_LESSTHAN, OP_GREATERTHAN, OP_LESSTHANOREQUAL, OP_GREATERTHANOREQUAL, OP_MIN, OP_MAX, OP_WITHIN, OP_RIPEMD160, OP_SHA1, OP_SHA256, OP_HASH160, OP_HASH256, OP_CODESEPARATOR, OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY, OP_NOP1, OP_NOP2, OP_NOP3, OP_NOP4, OP_NOP5, OP_NOP6, OP_NOP7, OP_NOP8, OP_NOP9, OP_NOP10, OP_SMALLINTEGER, OP_PUBKEYS, OP_PUBKEYHASH, OP_PUBKEY, } OPCODE_NAMES.update({ OP_0 : 'OP_0', OP_PUSHDATA1 : 'OP_PUSHDATA1', OP_PUSHDATA2 : 'OP_PUSHDATA2', OP_PUSHDATA4 : 'OP_PUSHDATA4', OP_1NEGATE : 'OP_1NEGATE', OP_RESERVED : 'OP_RESERVED', OP_1 : 'OP_1', OP_2 : 'OP_2', OP_3 : 'OP_3', OP_4 : 'OP_4', OP_5 : 'OP_5', OP_6 : 'OP_6', OP_7 : 'OP_7', OP_8 : 'OP_8', OP_9 : 'OP_9', OP_10 : 'OP_10', OP_11 : 'OP_11', OP_12 : 'OP_12', OP_13 : 'OP_13', OP_14 : 'OP_14', OP_15 : 'OP_15', OP_16 : 'OP_16', OP_NOP : 'OP_NOP', OP_VER : 'OP_VER', OP_IF : 'OP_IF', OP_NOTIF : 'OP_NOTIF', OP_VERIF : 'OP_VERIF', OP_VERNOTIF : 'OP_VERNOTIF', OP_ELSE : 'OP_ELSE', OP_ENDIF : 'OP_ENDIF', OP_VERIFY : 'OP_VERIFY', OP_RETURN : 'OP_RETURN', OP_TOALTSTACK : 'OP_TOALTSTACK', OP_FROMALTSTACK : 'OP_FROMALTSTACK', OP_2DROP : 'OP_2DROP', OP_2DUP : 'OP_2DUP', OP_3DUP : 'OP_3DUP', OP_2OVER : 'OP_2OVER', OP_2ROT : 'OP_2ROT', OP_2SWAP : 'OP_2SWAP', OP_IFDUP : 'OP_IFDUP', OP_DEPTH : 'OP_DEPTH', OP_DROP : 'OP_DROP', OP_DUP : 'OP_DUP', OP_NIP : 'OP_NIP', OP_OVER : 'OP_OVER', OP_PICK : 'OP_PICK', OP_ROLL : 'OP_ROLL', OP_ROT : 'OP_ROT', OP_SWAP : 'OP_SWAP', OP_TUCK : 'OP_TUCK', OP_CAT : 'OP_CAT', OP_SUBSTR : 'OP_SUBSTR', OP_LEFT : 'OP_LEFT', OP_RIGHT : 
'OP_RIGHT', OP_SIZE : 'OP_SIZE', OP_INVERT : 'OP_INVERT', OP_AND : 'OP_AND', OP_OR : 'OP_OR', OP_XOR : 'OP_XOR', OP_EQUAL : 'OP_EQUAL', OP_EQUALVERIFY : 'OP_EQUALVERIFY', OP_RESERVED1 : 'OP_RESERVED1', OP_RESERVED2 : 'OP_RESERVED2', OP_1ADD : 'OP_1ADD', OP_1SUB : 'OP_1SUB', OP_2MUL : 'OP_2MUL', OP_2DIV : 'OP_2DIV', OP_NEGATE : 'OP_NEGATE', OP_ABS : 'OP_ABS', OP_NOT : 'OP_NOT', OP_0NOTEQUAL : 'OP_0NOTEQUAL', OP_ADD : 'OP_ADD', OP_SUB : 'OP_SUB', OP_MUL : 'OP_MUL', OP_DIV : 'OP_DIV', OP_MOD : 'OP_MOD', OP_LSHIFT : 'OP_LSHIFT', OP_RSHIFT : 'OP_RSHIFT', OP_BOOLAND : 'OP_BOOLAND', OP_BOOLOR : 'OP_BOOLOR', OP_NUMEQUAL : 'OP_NUMEQUAL', OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY', OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL', OP_LESSTHAN : 'OP_LESSTHAN', OP_GREATERTHAN : 'OP_GREATERTHAN', OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL', OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL', OP_MIN : 'OP_MIN', OP_MAX : 'OP_MAX', OP_WITHIN : 'OP_WITHIN', OP_RIPEMD160 : 'OP_RIPEMD160', OP_SHA1 : 'OP_SHA1', OP_SHA256 : 'OP_SHA256', OP_HASH160 : 'OP_HASH160', OP_HASH256 : 'OP_HASH256', OP_CODESEPARATOR : 'OP_CODESEPARATOR', OP_CHECKSIG : 'OP_CHECKSIG', OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY', OP_CHECKMULTISIG : 'OP_CHECKMULTISIG', OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY', OP_NOP1 : 'OP_NOP1', OP_NOP2 : 'OP_NOP2', OP_NOP3 : 'OP_NOP3', OP_NOP4 : 'OP_NOP4', OP_NOP5 : 'OP_NOP5', OP_NOP6 : 'OP_NOP6', OP_NOP7 : 'OP_NOP7', OP_NOP8 : 'OP_NOP8', OP_NOP9 : 'OP_NOP9', OP_NOP10 : 'OP_NOP10', OP_SMALLINTEGER : 'OP_SMALLINTEGER', OP_PUBKEYS : 'OP_PUBKEYS', OP_PUBKEYHASH : 'OP_PUBKEYHASH', OP_PUBKEY : 'OP_PUBKEY', OP_INVALIDOPCODE : 'OP_INVALIDOPCODE', }) OPCODES_BY_NAME = { 'OP_0' : OP_0, 'OP_PUSHDATA1' : OP_PUSHDATA1, 'OP_PUSHDATA2' : OP_PUSHDATA2, 'OP_PUSHDATA4' : OP_PUSHDATA4, 'OP_1NEGATE' : OP_1NEGATE, 'OP_RESERVED' : OP_RESERVED, 'OP_1' : OP_1, 'OP_2' : OP_2, 'OP_3' : OP_3, 'OP_4' : OP_4, 'OP_5' : OP_5, 'OP_6' : OP_6, 'OP_7' : OP_7, 'OP_8' : OP_8, 'OP_9' : OP_9, 'OP_10' : OP_10, 'OP_11' : OP_11, 'OP_12' 
: OP_12, 'OP_13' : OP_13, 'OP_14' : OP_14, 'OP_15' : OP_15, 'OP_16' : OP_16, 'OP_NOP' : OP_NOP, 'OP_VER' : OP_VER, 'OP_IF' : OP_IF, 'OP_NOTIF' : OP_NOTIF, 'OP_VERIF' : OP_VERIF, 'OP_VERNOTIF' : OP_VERNOTIF, 'OP_ELSE' : OP_ELSE, 'OP_ENDIF' : OP_ENDIF, 'OP_VERIFY' : OP_VERIFY, 'OP_RETURN' : OP_RETURN, 'OP_TOALTSTACK' : OP_TOALTSTACK, 'OP_FROMALTSTACK' : OP_FROMALTSTACK, 'OP_2DROP' : OP_2DROP, 'OP_2DUP' : OP_2DUP, 'OP_3DUP' : OP_3DUP, 'OP_2OVER' : OP_2OVER, 'OP_2ROT' : OP_2ROT, 'OP_2SWAP' : OP_2SWAP, 'OP_IFDUP' : OP_IFDUP, 'OP_DEPTH' : OP_DEPTH, 'OP_DROP' : OP_DROP, 'OP_DUP' : OP_DUP, 'OP_NIP' : OP_NIP, 'OP_OVER' : OP_OVER, 'OP_PICK' : OP_PICK, 'OP_ROLL' : OP_ROLL, 'OP_ROT' : OP_ROT, 'OP_SWAP' : OP_SWAP, 'OP_TUCK' : OP_TUCK, 'OP_CAT' : OP_CAT, 'OP_SUBSTR' : OP_SUBSTR, 'OP_LEFT' : OP_LEFT, 'OP_RIGHT' : OP_RIGHT, 'OP_SIZE' : OP_SIZE, 'OP_INVERT' : OP_INVERT, 'OP_AND' : OP_AND, 'OP_OR' : OP_OR, 'OP_XOR' : OP_XOR, 'OP_EQUAL' : OP_EQUAL, 'OP_EQUALVERIFY' : OP_EQUALVERIFY, 'OP_RESERVED1' : OP_RESERVED1, 'OP_RESERVED2' : OP_RESERVED2, 'OP_1ADD' : OP_1ADD, 'OP_1SUB' : OP_1SUB, 'OP_2MUL' : OP_2MUL, 'OP_2DIV' : OP_2DIV, 'OP_NEGATE' : OP_NEGATE, 'OP_ABS' : OP_ABS, 'OP_NOT' : OP_NOT, 'OP_0NOTEQUAL' : OP_0NOTEQUAL, 'OP_ADD' : OP_ADD, 'OP_SUB' : OP_SUB, 'OP_MUL' : OP_MUL, 'OP_DIV' : OP_DIV, 'OP_MOD' : OP_MOD, 'OP_LSHIFT' : OP_LSHIFT, 'OP_RSHIFT' : OP_RSHIFT, 'OP_BOOLAND' : OP_BOOLAND, 'OP_BOOLOR' : OP_BOOLOR, 'OP_NUMEQUAL' : OP_NUMEQUAL, 'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY, 'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL, 'OP_LESSTHAN' : OP_LESSTHAN, 'OP_GREATERTHAN' : OP_GREATERTHAN, 'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL, 'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL, 'OP_MIN' : OP_MIN, 'OP_MAX' : OP_MAX, 'OP_WITHIN' : OP_WITHIN, 'OP_RIPEMD160' : OP_RIPEMD160, 'OP_SHA1' : OP_SHA1, 'OP_SHA256' : OP_SHA256, 'OP_HASH160' : OP_HASH160, 'OP_HASH256' : OP_HASH256, 'OP_CODESEPARATOR' : OP_CODESEPARATOR, 'OP_CHECKSIG' : OP_CHECKSIG, 'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY, 'OP_CHECKMULTISIG' 
: OP_CHECKMULTISIG, 'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY, 'OP_NOP1' : OP_NOP1, 'OP_NOP2' : OP_NOP2, 'OP_NOP3' : OP_NOP3, 'OP_NOP4' : OP_NOP4, 'OP_NOP5' : OP_NOP5, 'OP_NOP6' : OP_NOP6, 'OP_NOP7' : OP_NOP7, 'OP_NOP8' : OP_NOP8, 'OP_NOP9' : OP_NOP9, 'OP_NOP10' : OP_NOP10, 'OP_SMALLINTEGER' : OP_SMALLINTEGER, 'OP_PUBKEYS' : OP_PUBKEYS, 'OP_PUBKEYHASH' : OP_PUBKEYHASH, 'OP_PUBKEY' : OP_PUBKEY, } class CScriptInvalidError(Exception): """Base class for CScript exceptions""" pass class CScriptTruncatedPushDataError(CScriptInvalidError): """Invalid pushdata due to truncation""" def __init__(self, msg, data): self.data = data super(CScriptTruncatedPushDataError, self).__init__(msg) # This is used, eg, for blockchain heights in coinbase scripts (bip34) class CScriptNum(object): def __init__(self, d=0): self.value = d @staticmethod def encode(obj): r = bytearray(0) if obj.value == 0: return bytes(r) neg = obj.value < 0 absvalue = -obj.value if neg else obj.value while (absvalue): r.append(chr(absvalue & 0xff)) absvalue >>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return bytes(bchr(len(r)) + r) class CScript(bytes): """Serialized script A bytes subclass, so you can use this directly whenever bytes are accepted. Note that this means that indexing does *not* work - you'll get an index by byte rather than opcode. This format was chosen for efficiency so that the general case would not require creating a lot of little CScriptOP objects. iter(script) however does iterate by opcode. 
""" @classmethod def __coerce_instance(cls, other): # Coerce other into bytes if isinstance(other, CScriptOp): other = bchr(other) elif isinstance(other, CScriptNum): if (other.value == 0): other = bchr(CScriptOp(OP_0)) else: other = CScriptNum.encode(other) elif isinstance(other, (int, long)): if 0 <= other <= 16: other = bytes(bchr(CScriptOp.encode_op_n(other))) elif other == -1: other = bytes(bchr(OP_1NEGATE)) else: other = CScriptOp.encode_op_pushdata(bignum.bn2vch(other)) elif isinstance(other, (bytes, bytearray)): other = CScriptOp.encode_op_pushdata(other) return other def __add__(self, other): # Do the coercion outside of the try block so that errors in it are # noticed. other = self.__coerce_instance(other) try: # bytes.__add__ always returns bytes instances unfortunately return CScript(super(CScript, self).__add__(other)) except TypeError: raise TypeError('Can not add a %r instance to a CScript' % other.__class__) def join(self, iterable): # join makes no sense for a CScript() raise NotImplementedError def __new__(cls, value=b''): if isinstance(value, bytes) or isinstance(value, bytearray): return super(CScript, cls).__new__(cls, value) else: def coerce_iterable(iterable): for instance in iterable: yield cls.__coerce_instance(instance) # Annoyingly on both python2 and python3 bytes.join() always # returns a bytes instance even when subclassed. return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value))) def raw_iter(self): """Raw iteration Yields tuples of (opcode, data, sop_idx) so that the different possible PUSHDATA encodings can be accurately distinguished, as well as determining the exact opcode byte indexes. 
(sop_idx) """ i = 0 while i < len(self): sop_idx = i opcode = bord(self[i]) i += 1 if opcode > OP_PUSHDATA4: yield (opcode, None, sop_idx) else: datasize = None pushdata_type = None if opcode < OP_PUSHDATA1: pushdata_type = 'PUSHDATA(%d)' % opcode datasize = opcode elif opcode == OP_PUSHDATA1: pushdata_type = 'PUSHDATA1' if i >= len(self): raise CScriptInvalidError('PUSHDATA1: missing data length') datasize = bord(self[i]) i += 1 elif opcode == OP_PUSHDATA2: pushdata_type = 'PUSHDATA2' if i + 1 >= len(self): raise CScriptInvalidError('PUSHDATA2: missing data length') datasize = bord(self[i]) + (bord(self[i+1]) << 8) i += 2 elif opcode == OP_PUSHDATA4: pushdata_type = 'PUSHDATA4' if i + 3 >= len(self): raise CScriptInvalidError('PUSHDATA4: missing data length') datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24) i += 4 else: assert False # shouldn't happen data = bytes(self[i:i+datasize]) # Check for truncation if len(data) < datasize: raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data) i += datasize yield (opcode, data, sop_idx) def __iter__(self): """'Cooked' iteration Returns either a CScriptOP instance, an integer, or bytes, as appropriate. See raw_iter() if you need to distinguish the different possible PUSHDATA encodings. 
""" for (opcode, data, sop_idx) in self.raw_iter(): if data is not None: yield data else: opcode = CScriptOp(opcode) if opcode.is_small_int(): yield opcode.decode_op_n() else: yield CScriptOp(opcode) def __repr__(self): # For Python3 compatibility add b before strings so testcases don't # need to change def _repr(o): if isinstance(o, bytes): return "x('%s')" % binascii.hexlify(o).decode('utf8') else: return repr(o) ops = [] i = iter(self) while True: op = None try: op = _repr(next(i)) except CScriptTruncatedPushDataError as err: op = '%s...<ERROR: %s>' % (_repr(err.data), err) break except CScriptInvalidError as err: op = '<ERROR: %s>' % err break except StopIteration: break finally: if op is not None: ops.append(op) return "CScript([%s])" % ', '.join(ops) def GetSigOpCount(self, fAccurate): """Get the SigOp count. fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details. Note that this is consensus-critical. """ n = 0 lastOpcode = OP_INVALIDOPCODE for (opcode, data, sop_idx) in self.raw_iter(): if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): n += opcode.decode_op_n() else: n += 20 lastOpcode = opcode return n SIGHASH_ALL = 1 SIGHASH_NONE = 2 SIGHASH_SINGLE = 3 SIGHASH_ANYONECANPAY = 0x80 def FindAndDelete(script, sig): """Consensus critical, see FindAndDelete() in Satoshi codebase""" r = b'' last_sop_idx = sop_idx = 0 skip = True for (opcode, data, sop_idx) in script.raw_iter(): if not skip: r += script[last_sop_idx:sop_idx] last_sop_idx = sop_idx if script[sop_idx:sop_idx + len(sig)] == sig: skip = True else: skip = False if not skip: r += script[last_sop_idx:] return CScript(r) def SignatureHash(script, txTo, inIdx, hashtype): """Consensus-correct SignatureHash Returns (hash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity) """ HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' if inIdx >= len(txTo.vin): return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin))) txtmp = CTransaction(txTo) for txin in txtmp.vin: txin.scriptSig = b'' txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR])) if (hashtype & 0x1f) == SIGHASH_NONE: txtmp.vout = [] for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 elif (hashtype & 0x1f) == SIGHASH_SINGLE: outIdx = inIdx if outIdx >= len(txtmp.vout): return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout))) tmp = txtmp.vout[outIdx] txtmp.vout = [] for i in range(outIdx): txtmp.vout.append(CTxOut()) txtmp.vout.append(tmp) for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 if hashtype & SIGHASH_ANYONECANPAY: tmp = txtmp.vin[inIdx] txtmp.vin = [] txtmp.vin.append(tmp) s = txtmp.serialize() s += struct.pack(b"<I", hashtype) hash = hash256(s) return (hash, None)
mit
mschenck/aurora
src/main/python/apache/aurora/executor/common/resource_manager.py
13
4112
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import threading from mesos.interface import mesos_pb2 from twitter.common.metrics import LambdaGauge from apache.aurora.executor.common.status_checker import ( StatusChecker, StatusCheckerProvider, StatusResult ) from apache.aurora.executor.common.task_info import mesos_task_instance_from_assigned_task from apache.thermos.monitoring.monitor import TaskMonitor from apache.thermos.monitoring.resource import TaskResourceMonitor class ResourceManager(StatusChecker): """ Manage resources consumed by a Task """ def __init__(self, resources, resource_monitor): """ resources: Resources object specifying cpu, ram, disk limits for the task resource_monitor: The ResourceMonitor to monitor resources """ self._resource_monitor = resource_monitor # TODO(wickman) Remove cpu/ram reporting if MESOS-1458 is resolved. 
self._max_cpu = resources.cpu().get() self._max_ram = resources.ram().get() self._max_disk = resources.disk().get() self._kill_reason = None self._kill_event = threading.Event() @property def _num_procs(self): """ Total number of processes the task consists of (including child processes) """ return self._resource_monitor.sample()[1].num_procs @property def _ps_sample(self): """ ProcessSample representing the aggregate resource consumption of the Task's processes """ return self._resource_monitor.sample()[1].process_sample @property def _disk_sample(self): """ Integer in bytes representing the disk consumption in the Task's sandbox """ return self._resource_monitor.sample()[1].disk_usage @property def status(self): sample = self._disk_sample if sample > self._max_disk: self._kill_event.set() return StatusResult('Disk limit exceeded. Reserved %s bytes vs used %s bytes.' % ( self._max_disk, sample), mesos_pb2.TASK_FAILED) def name(self): return 'resource_manager' def register_metrics(self): self.metrics.register(LambdaGauge('disk_used', lambda: self._disk_sample)) self.metrics.register(LambdaGauge('disk_reserved', lambda: self._max_disk)) self.metrics.register(LambdaGauge('disk_percent', lambda: 1.0 * self._disk_sample / self._max_disk)) self.metrics.register(LambdaGauge('cpu_used', lambda: self._ps_sample.rate)) self.metrics.register(LambdaGauge('cpu_reserved', lambda: self._max_cpu)) self.metrics.register(LambdaGauge('cpu_percent', lambda: 1.0 * self._ps_sample.rate / self._max_cpu)) self.metrics.register(LambdaGauge('ram_used', lambda: self._ps_sample.rss)) self.metrics.register(LambdaGauge('ram_reserved', lambda: self._max_ram)) self.metrics.register(LambdaGauge('ram_percent', lambda: 1.0 * self._ps_sample.rss / self._max_ram)) def start(self): super(ResourceManager, self).start() self.register_metrics() self._resource_monitor.start() class ResourceManagerProvider(StatusCheckerProvider): def __init__(self, checkpoint_root, **resource_monitor_options): 
self._checkpoint_root = checkpoint_root self._resource_monitor_options = resource_monitor_options def from_assigned_task(self, assigned_task, sandbox): task_id = assigned_task.taskId resources = mesos_task_instance_from_assigned_task(assigned_task).task().resources() task_monitor = TaskMonitor(self._checkpoint_root, task_id) resource_monitor = TaskResourceMonitor( task_id, task_monitor, **self._resource_monitor_options) return ResourceManager(resources, resource_monitor)
apache-2.0
rysson/filmkodi
plugin.video.mrknow/mylib/third_party/pep8/lib2to3/lib2to3/fixer_base.py
305
6846
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Base class for fixers (optional, but recommended).""" # Python imports import logging import itertools # Local imports from .patcomp import PatternCompiler from . import pygram from .fixer_util import does_tree_import class BaseFix(object): """Optional base class for fixers. The subclass name must be FixFooBar where FooBar is the result of removing underscores and capitalizing the words of the fix name. For example, the class name for a fixer named 'has_key' should be FixHasKey. """ PATTERN = None # Most subclasses should override with a string literal pattern = None # Compiled pattern, set by compile_pattern() pattern_tree = None # Tree representation of the pattern options = None # Options object passed to initializer filename = None # The filename (set by set_filename) logger = None # A logger (set by set_filename) numbers = itertools.count(1) # For new_name() used_names = set() # A set of all used NAMEs order = "post" # Does the fixer prefer pre- or post-order traversal explicit = False # Is this ignored by refactor.py -f all? run_order = 5 # Fixers will be sorted by run order before execution # Lower numbers will be run first. _accept_type = None # [Advanced and not public] This tells RefactoringTool # which node type to accept when there's not a pattern. keep_line_order = False # For the bottom matcher: match with the # original line order BM_compatible = False # Compatibility with the bottom matching # module; every fixer should set this # manually # Shortcut for access to Python grammar symbols syms = pygram.python_symbols def __init__(self, options, log): """Initializer. Subclass may override. Args: options: an dict containing the options passed to RefactoringTool that could be used to customize the fixer through the command line. log: a list to append warnings and other messages to. 
""" self.options = options self.log = log self.compile_pattern() def compile_pattern(self): """Compiles self.PATTERN into self.pattern. Subclass may override if it doesn't want to use self.{pattern,PATTERN} in .match(). """ if self.PATTERN is not None: PC = PatternCompiler() self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN, with_tree=True) def set_filename(self, filename): """Set the filename, and a logger derived from it. The main refactoring tool should call this. """ self.filename = filename self.logger = logging.getLogger(filename) def match(self, node): """Returns match for a given parse tree node. Should return a true or false object (not necessarily a bool). It may return a non-empty dict of matching sub-nodes as returned by a matching pattern. Subclass may override. """ results = {"node": node} return self.pattern.match(node, results) and results def transform(self, node, results): """Returns the transformation for a given parse tree node. Args: node: the root of the parse tree that matched the fixer. results: a dict mapping symbolic names to part of the match. Returns: None, or a node that is a modified copy of the argument node. The node argument may also be modified in-place to effect the same change. Subclass *must* override. """ raise NotImplementedError() def new_name(self, template=u"xxx_todo_changeme"): """Return a string suitable for use as an identifier The new name is guaranteed not to conflict with other identifiers. """ name = template while name in self.used_names: name = template + unicode(self.numbers.next()) self.used_names.add(name) return name def log_message(self, message): if self.first_log: self.first_log = False self.log.append("### In file %s ###" % self.filename) self.log.append(message) def cannot_convert(self, node, reason=None): """Warn the user that a given chunk of code is not valid Python 3, but that it cannot be converted automatically. First argument is the top-level node for the code in question. 
Optional second argument is why it can't be converted. """ lineno = node.get_lineno() for_output = node.clone() for_output.prefix = u"" msg = "Line %d: could not convert: %s" self.log_message(msg % (lineno, for_output)) if reason: self.log_message(reason) def warning(self, node, reason): """Used for warning the user about possible uncertainty in the translation. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. """ lineno = node.get_lineno() self.log_message("Line %d: %s" % (lineno, reason)) def start_tree(self, tree, filename): """Some fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. """ self.used_names = tree.used_names self.set_filename(filename) self.numbers = itertools.count(1) self.first_log = True def finish_tree(self, tree, filename): """Some fixers need to maintain tree-wide state. This method is called once, at the conclusion of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. """ pass class ConditionalFix(BaseFix): """ Base class for fixers which not execute if an import is found. """ # This is the name of the import which, if found, will cause the test to be skipped skip_on = None def start_tree(self, *args): super(ConditionalFix, self).start_tree(*args) self._should_skip = None def should_skip(self, node): if self._should_skip is not None: return self._should_skip pkg = self.skip_on.split(".") name = pkg[-1] pkg = ".".join(pkg[:-1]) self._should_skip = does_tree_import(pkg, name, node) return self._should_skip
apache-2.0
xiaoyaozi5566/GEM5_DRAMSim2
ext/ply/test/yacc_unused.py
174
1669
# ----------------------------------------------------------------------------- # yacc_unused.py # # A grammar with an unused rule # ----------------------------------------------------------------------------- import sys if ".." not in sys.path: sys.path.insert(0,"..") import ply.yacc as yacc from calclex import tokens # Parsing rules precedence = ( ('left','PLUS','MINUS'), ('left','TIMES','DIVIDE'), ('right','UMINUS'), ) # dictionary of names names = { } def p_statement_assign(t): 'statement : NAME EQUALS expression' names[t[1]] = t[3] def p_statement_expr(t): 'statement : expression' print(t[1]) def p_expression_binop(t): '''expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression''' if t[2] == '+' : t[0] = t[1] + t[3] elif t[2] == '-': t[0] = t[1] - t[3] elif t[2] == '*': t[0] = t[1] * t[3] elif t[2] == '/': t[0] = t[1] / t[3] def p_expression_uminus(t): 'expression : MINUS expression %prec UMINUS' t[0] = -t[2] def p_expression_group(t): 'expression : LPAREN expression RPAREN' t[0] = t[2] def p_expression_number(t): 'expression : NUMBER' t[0] = t[1] def p_expression_name(t): 'expression : NAME' try: t[0] = names[t[1]] except LookupError: print("Undefined name '%s'" % t[1]) t[0] = 0 def p_expr_list(t): 'exprlist : exprlist COMMA expression' pass def p_expr_list_2(t): 'exprlist : expression' pass def p_error(t): print("Syntax error at '%s'" % t.value) yacc.yacc()
bsd-3-clause
longde123/MultiversePlatform
lib/IPCE/Lib/ctypes.py
1
5974
# Copyright (c) 2006 Seo Sanghyeon # 2006-06-08 sanxiyn Created # 2006-06-11 sanxiyn Implemented .value on primitive types # 2006-11-02 sanxiyn Support for multiple signatures __all__ = [ 'c_int', 'c_float', 'c_double', 'c_char_p', 'c_void_p', 'LibraryLoader', 'CDLL', 'cdll', 'byref', 'sizeof' ] # -------------------------------------------------------------------- # Dynamic module definition from System import AppDomain from System.Reflection import AssemblyName from System.Reflection.Emit import AssemblyBuilderAccess def pinvoke_module(): domain = AppDomain.CurrentDomain name = AssemblyName('pinvoke') flag = AssemblyBuilderAccess.Run assembly = domain.DefineDynamicAssembly(name, flag) module = assembly.DefineDynamicModule('pinvoke') return module # -------------------------------------------------------------------- # General interface class pinvoke_value: type = None value = None def get_type(obj): if isinstance(obj, pinvoke_value): return obj.type else: return type(obj) def get_value(obj): if isinstance(obj, pinvoke_value): return obj.value else: return obj # -------------------------------------------------------------------- # Primitive types from System import Single, Double, IntPtr class pinvoke_primitive(pinvoke_value): def __init__(self, value=None): if value is None: value = self.type() if not isinstance(value, self.type): expected = self.type.__name__ given = value.__class__.__name__ msg = "%s expected instead of %s" % (expected, given) raise TypeError(msg) self.value = value def __repr__(self): clsname = self.__class__.__name__ return "%s(%r)" % (clsname, self.value) class c_int(pinvoke_primitive): type = int class c_float(pinvoke_primitive): type = Single class c_double(pinvoke_primitive): type = Double class c_char_p(pinvoke_primitive): type = str class c_void_p(pinvoke_primitive): type = IntPtr # -------------------------------------------------------------------- # Reference from System import Type class pinvoke_reference(pinvoke_value): def 
__init__(self, obj): self.obj = obj self.type = Type.MakeByRefType(obj.type) self.value = obj.value def __repr__(self): return "byref(%r)" % (self.obj,) def byref(obj): if not isinstance(obj, pinvoke_value): raise TypeError("byref() argument must be a ctypes instance") ref = pinvoke_reference(obj) return ref # -------------------------------------------------------------------- # Utility from System.Runtime.InteropServices import Marshal def sizeof(obj): return Marshal.SizeOf(obj.type) # -------------------------------------------------------------------- # Dynamic P/Invoke from System import Array from System.Reflection import CallingConventions, MethodAttributes from System.Runtime.InteropServices import CallingConvention, CharSet from IronPython.Runtime.Calls import BuiltinFunction, FunctionType class pinvoke_method: pinvoke_attributes = ( MethodAttributes.Public | MethodAttributes.Static | MethodAttributes.PinvokeImpl ) calling_convention = None return_type = None def __init__(self, dll, entry): self.dll = dll self.entry = entry self.restype = None self.argtypes = None self.func = None self.signatures = set() def create(self, restype, argtypes): dll = self.dll entry = self.entry attributes = self.pinvoke_attributes cc = self.calling_convention clr_argtypes = Array[Type](argtypes) module = pinvoke_module() module.DefinePInvokeMethod( entry, dll, attributes, CallingConventions.Standard, restype, clr_argtypes, cc, CharSet.Ansi) module.CreateGlobalFunctions() method = module.GetMethod(entry) self.func = BuiltinFunction.MakeOrAdd( self.func, entry, method, FunctionType.Function) self.signatures.add((restype, argtypes)) def __call__(self, *args): if self.restype: restype = self.restype.type else: restype = self.return_type.type if self.argtypes: argtypes = [argtype.type for argtype in self.argtypes] else: argtypes = [get_type(arg) for arg in args] argtypes = tuple(argtypes) if (restype, argtypes) not in self.signatures: self.create(restype, argtypes) args = 
[get_value(arg) for arg in args] result = self.func(*args) return result # -------------------------------------------------------------------- # Function loader def is_special_name(name): return name.startswith('__') and name.endswith('__') class pinvoke_dll: method_class = None def __init__(self, name): self.name = name def __repr__(self): clsname = self.__class__.__name__ return "<%s '%s'>" % (clsname, self.name) def __getattr__(self, name): if is_special_name(name): raise AttributeError(name) method = self.method_class(self.name, name) setattr(self, name, method) return method class CDLL(pinvoke_dll): class method_class(pinvoke_method): calling_convention = CallingConvention.Cdecl return_type = c_int # -------------------------------------------------------------------- # Library loader class LibraryLoader(object): def __init__(self, dlltype): self.dlltype = dlltype def __getattr__(self, name): if is_special_name(name): raise AttributeError(name) dll = self.dlltype(name) setattr(self, name, dll) return dll def LoadLibrary(self, name): return self.dlltype(name) cdll = LibraryLoader(CDLL)
mit
localprojects/Change-By-Us
lib/jinja2/testsuite/core_tags.py
90
10914
# -*- coding: utf-8 -*-
"""
    jinja2.testsuite.core_tags
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    Test the core tags like for and if.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import unittest

from jinja2.testsuite import JinjaTestCase

from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
     DictLoader

# Module-level environment shared by the for/if test cases below.
# MacrosTestCase defines its own environment (with trim_blocks=True).
env = Environment()


class ForLoopTestCase(JinjaTestCase):
    """Tests for the ``{% for %}`` tag and the special ``loop`` variable."""

    def test_simple(self):
        tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
        assert tmpl.render(seq=range(10)) == '0123456789'

    def test_else(self):
        # The else branch runs when no `seq` is supplied (undefined iterable).
        tmpl = env.from_string('{% for item in seq %}XXX{% else %}...{% endfor %}')
        assert tmpl.render() == '...'

    def test_empty_blocks(self):
        tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
        assert tmpl.render() == '<>'

    def test_context_vars(self):
        # Exercises every attribute of the special `loop` variable.
        # The `-%}` marker strips trailing whitespace after the tag, so the
        # rendered items are separated only by '|' and '###'.
        tmpl = env.from_string('''{% for item in seq -%} {{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{ loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{ loop.length }}###{% endfor %}''')
        one, two, _ = tmpl.render(seq=[0, 1]).split('###')
        (one_index, one_index0, one_revindex, one_revindex0, one_first,
         one_last, one_length) = one.split('|')
        (two_index, two_index0, two_revindex, two_revindex0, two_first,
         two_last, two_length) = two.split('|')
        assert int(one_index) == 1 and int(two_index) == 2
        assert int(one_index0) == 0 and int(two_index0) == 1
        assert int(one_revindex) == 2 and int(two_revindex) == 1
        assert int(one_revindex0) == 1 and int(two_revindex0) == 0
        assert one_first == 'True' and two_first == 'False'
        assert one_last == 'False' and two_last == 'True'
        assert one_length == two_length == '2'

    def test_cycling(self):
        # loop.cycle with literal arguments and with *args unpacking.
        tmpl = env.from_string('''{% for item in seq %}{{ loop.cycle('<1>', '<2>') }}{% endfor %}{% for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
        output = tmpl.render(seq=range(4), through=('<1>', '<2>'))
        assert output == '<1><2>' * 4

    def test_scope(self):
        # `item` must not leak out of the for block; the reference after
        # the loop is undefined and renders as the empty string.
        tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
        output = tmpl.render(seq=range(10))
        assert not output

    def test_varlen(self):
        # Iterating a generator: the length is not known in advance.
        def inner():
            for item in range(5):
                yield item
        tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
        output = tmpl.render(iter=inner())
        assert output == '01234'

    def test_noniter(self):
        # Looping over a non-iterable (None) raises TypeError at render time.
        tmpl = env.from_string('{% for item in none %}...{% endfor %}')
        self.assert_raises(TypeError, tmpl.render)

    def test_recursive(self):
        # `recursive` loops expose `loop(...)` as a callable for recursion.
        tmpl = env.from_string('''{% for item in seq recursive -%} [{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}] {%- endfor %}''')
        assert tmpl.render(seq=[
            dict(a=1, b=[dict(a=1), dict(a=2)]),
            dict(a=2, b=[dict(a=1), dict(a=2)]),
            dict(a=3, b=[dict(a='a')])
        ]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'

    def test_looploop(self):
        # The outer `loop` must be aliased (rowloop) to stay reachable
        # inside the inner for block, which shadows `loop`.
        tmpl = env.from_string('''{% for row in table %} {%- set rowloop = loop -%} {% for cell in row -%} [{{ rowloop.index }}|{{ loop.index }}] {%- endfor %} {%- endfor %}''')
        assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'

    def test_reversed_bug(self):
        # Regression test: loop.last must work on a reversed() iterator,
        # whose length is not directly available.
        tmpl = env.from_string('{% for i in items %}{{ i }}'
                               '{% if not loop.last %}'
                               ',{% endif %}{% endfor %}')
        assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'

    def test_loop_errors(self):
        # `loop` is undefined inside the loop's own filter condition ...
        tmpl = env.from_string('''{% for item in [1] if loop.index == 0 %}...{% endfor %}''')
        self.assert_raises(UndefinedError, tmpl.render)
        # ... and undefined (rendering empty) inside the else block.
        tmpl = env.from_string('''{% for item in [] %}...{% else %}{{ loop }}{% endfor %}''')
        assert tmpl.render() == ''

    def test_loop_filter(self):
        tmpl = env.from_string('{% for item in range(10) if item '
                               'is even %}[{{ item }}]{% endfor %}')
        assert tmpl.render() == '[0][2][4][6][8]'
        # With a filter, loop.index counts only the items that passed it.
        tmpl = env.from_string(''' {%- for item in range(10) if item is even %}[{{ loop.index }}:{{ item }}]{% endfor %}''')
        assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'

    def test_loop_unassignable(self):
        # `loop` is reserved and may not be used as the loop target.
        self.assert_raises(TemplateSyntaxError, env.from_string,
                           '{% for loop in seq %}...{% endfor %}')

    def test_scoped_special_var(self):
        # Inner and outer loops each see their own loop.first.
        t = env.from_string('{% for s in seq %}[{{ loop.first }}{% for c in s %}'
                            '|{{ loop.first }}{% endfor %}]{% endfor %}')
        assert t.render(seq=('ab', 'cd')) == '[True|True|False][False|True|False]'

    def test_scoped_loop_var(self):
        t = env.from_string('{% for x in seq %}{{ loop.first }}'
                            '{% for y in seq %}{% endfor %}{% endfor %}')
        assert t.render(seq='ab') == 'TrueFalse'
        t = env.from_string('{% for x in seq %}{% for y in seq %}'
                            '{{ loop.first }}{% endfor %}{% endfor %}')
        assert t.render(seq='ab') == 'TrueFalseTrueFalse'

    def test_recursive_empty_loop_iter(self):
        # A recursive loop over an empty iterable renders nothing.
        t = env.from_string(''' {%- for item in foo recursive -%}{%- endfor -%} ''')
        assert t.render(dict(foo=[])) == ''

    def test_call_in_loop(self):
        # {% call %} blocks must work inside a for loop; `caller()` yields
        # the call block's body.
        t = env.from_string(''' {%- macro do_something() -%} [{{ caller() }}] {%- endmacro %} {%- for i in [1, 2, 3] %} {%- call do_something() -%} {{ i }} {%- endcall %} {%- endfor -%} ''')
        assert t.render() == '[1][2][3]'

    def test_scoping_bug(self):
        # Regression test: a macro named like a loop variable must not
        # clobber the variable inside the (earlier) loop.
        t = env.from_string(''' {%- for item in foo %}...{{ item }}...{% endfor %} {%- macro item(a) %}...{{ a }}...{% endmacro %} {{- item(2) -}} ''')
        assert t.render(foo=(1,)) == '...1......2...'

    def test_unpacking(self):
        # Tuple unpacking of loop targets.
        tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
                               '{{ a }}|{{ b }}|{{ c }}{% endfor %}')
        assert tmpl.render() == '1|2|3'


class IfConditionTestCase(JinjaTestCase):
    """Tests for the ``{% if %}`` / ``{% elif %}`` / ``{% else %}`` tags."""

    def test_simple(self):
        tmpl = env.from_string('''{% if true %}...{% endif %}''')
        assert tmpl.render() == '...'

    def test_elif(self):
        tmpl = env.from_string('''{% if false %}XXX{% elif true %}...{% else %}XXX{% endif %}''')
        assert tmpl.render() == '...'

    def test_else(self):
        tmpl = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
        assert tmpl.render() == '...'

    def test_empty(self):
        tmpl = env.from_string('[{% if true %}{% else %}{% endif %}]')
        assert tmpl.render() == '[]'

    def test_complete(self):
        # a=0 and b=False are falsy; 42 == 42.0, so branch C is taken.
        tmpl = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
                               'C{% else %}D{% endif %}')
        assert tmpl.render(a=0, b=False, c=42, d=42.0) == 'C'

    def test_no_scope(self):
        # {% if %} does not open a new scope: `set` inside it is visible after.
        tmpl = env.from_string('{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
        assert tmpl.render(a=True) == '1'
        tmpl = env.from_string('{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
        assert tmpl.render() == '1'


class MacrosTestCase(JinjaTestCase):
    """Tests for ``{% macro %}`` definition, calling and introspection."""

    # trim_blocks removes the first newline after a block tag, which keeps
    # the expected outputs below free of stray newlines.
    env = Environment(trim_blocks=True)

    def test_simple(self):
        tmpl = self.env.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
        assert tmpl.render() == 'Hello Peter!'

    def test_scoping(self):
        # A nested macro closes over the enclosing macro's arguments (data1).
        tmpl = self.env.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
        assert tmpl.render() == 'foo|bar'

    def test_arguments(self):
        # Missing arguments render empty; defaults fill c and d.
        tmpl = self.env.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
        assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'

    def test_varargs(self):
        # Extra positional arguments are collected in `varargs`.
        tmpl = self.env.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
        assert tmpl.render() == '1|2|3'

    def test_simple_call(self):
        tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
        assert tmpl.render() == '[[data]]'

    def test_complex_call(self):
        # {% call(data) ... %} receives arguments passed to caller().
        tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
        assert tmpl.render() == '[[data]]'

    def test_caller_undefined(self):
        # A context variable named `caller` must not masquerade as a
        # call block inside a macro.
        tmpl = self.env.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
        assert tmpl.render() == 'True'

    def test_include(self):
        # Importing a macro from another template via {% from ... import %}.
        self.env = Environment(loader=DictLoader({'include':
            '{% macro test(foo) %}[{{ foo }}]{% endmacro %}'}))
        tmpl = self.env.from_string('{% from "include" import test %}{{ test("foo") }}')
        assert tmpl.render() == '[foo]'

    def test_macro_api(self):
        # Macros are introspectable from tmpl.module: arguments, defaults,
        # name, and whether they accept caller/kwargs/varargs.
        tmpl = self.env.from_string('{% macro foo(a, b) %}{% endmacro %}'
                                    '{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
                                    '{% macro baz() %}{{ caller() }}{% endmacro %}')
        assert tmpl.module.foo.arguments == ('a', 'b')
        assert tmpl.module.foo.defaults == ()
        assert tmpl.module.foo.name == 'foo'
        assert not tmpl.module.foo.caller
        assert not tmpl.module.foo.catch_kwargs
        assert not tmpl.module.foo.catch_varargs
        assert tmpl.module.bar.arguments == ()
        assert tmpl.module.bar.defaults == ()
        assert not tmpl.module.bar.caller
        assert tmpl.module.bar.catch_kwargs
        assert tmpl.module.bar.catch_varargs
        assert tmpl.module.baz.caller

    def test_callself(self):
        # A macro may call itself recursively by name.
        tmpl = self.env.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
                                    '{{ foo(x - 1) }}{% endif %}{% endmacro %}'
                                    '{{ foo(5) }}')
        assert tmpl.render() == '5|4|3|2|1'


def suite():
    """Return a TestSuite aggregating all test cases in this module."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ForLoopTestCase))
    suite.addTest(unittest.makeSuite(IfConditionTestCase))
    suite.addTest(unittest.makeSuite(MacrosTestCase))
    return suite
agpl-3.0