repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
BorisJeremic/Real-ESSI-Examples
valgrind_test/compare_function/compare_txt.py
202
2092
#!/usr/bin/python import h5py import sys import numpy as np import os import re import random # find the path to my own python function: cur_dir=os.getcwd() sep='test_cases' test_DIR=cur_dir.split(sep,1)[0] scriptDIR=test_DIR+'compare_function' sys.path.append(scriptDIR) # import my own function for color and comparator from mycomparator import * from mycolor_fun import * # analytic_solution = sys.argv[1] # numeric_result = sys.argv[2] analytic_solution = 'analytic_solution.txt' numeric_result = 'numeric_result.txt' analytic_sol = np.loadtxt(analytic_solution) numeric_res = np.loadtxt(numeric_result) abs_error = abs(analytic_sol - numeric_res) rel_error = abs_error/analytic_sol analytic_sol = float(analytic_sol) numeric_res = float(numeric_res) rel_error = float(rel_error) # print the results case_flag=1 print headrun() , "-----------Testing results-----------------" print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error ') print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error ) if(case_flag==1): print headOKCASE(),"-----------Done this case!-----------------" # legacy backup # find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \; # find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \; # find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \; # find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \; # find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \; # sed -i "s/node\.fei/add_node.include/" main.fei # sed -i "s/add_node\.fei/add_node.include/" main.fei # sed -i "s/element\.fei/add_element.include/" main.fei # sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei # sed -i "s/constraint\.fei/add_constraint.include/" main.fei # find . 
-name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
cc0-1.0
cloudmesh/chef
fabfile/chef.py
1
2220
import platform from fabric.api import local, task from fabric.context_managers import lcd def _control_server(command): local('sudo chef-server-ctl {0}'.format(command)) def _get_distro(): return platform.linux_distribution()[0] def _install_centos(): download_url = 'https://opscode-omnibus-packages.s3.amazonaws.com/el/6/x86_64/chef-server-11.0.8-1.el6.x86_64.rpm' package_name = 'chef-server.rpm' with lcd('/tmp'): local('curl -o {0} {1}'.format(package_name, download_url)) local('sudo yum -y localinstall {0}'.format(package_name)) local('sudo chef-server-ctl reconfigure') def _install_ubuntu(): download_url = 'https://opscode-omnibus-packages.s3.amazonaws.com/ubuntu/12.04/x86_64/chef-server_11.0.8-1.ubuntu.12.04_amd64.deb' package_name = 'chef-server.deb' with lcd('/tmp'): local('curl -o {0} {1}'.format(package_name, download_url)) local('sudo dpkg -i {0}'.format(package_name)) local('sudo chef-server-ctl reconfigure') def _uninstall_centos(): pass def _uninstall_ubuntu(): pass @task def clean(): """Remove data from the Chef Server""" pass @task def info(): """Outputs Chef Server information and Chef artifacts""" pass @task def install(): """Installs Chef Server on either CentOS or Ubuntu""" distro = _get_distro() if distro == "Ubuntu": _install_ubuntu() elif distro == "CentOS": _install_centos() else: local('echo "Unsupported operating system distribution."') @task def kill(): """Removes data from the Chef Server and stops the service""" clean() stop() @task def start(): """Start the Chef Server service""" _control_server('start') @task def stop(): """Stop the Chef Server service""" _control_server('stop') @task def uninstall(): """Uninstall the Chef Server""" distro = _get_distro() _control_server('uninstall') if distro == "Ubuntu": _uninstall_ubuntu() elif distro == "CentOS": _uninstall_centos() else: local('echo "Unsupported operating system distribution."') def client(): local("curl -L https://www.opscode.com/chef/install.sh | sudo bash")
apache-2.0
MadManRises/Madgine
shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/examples/xarm.py
3
1420
import pybullet as p import pybullet_data as pd import time p.connect(p.GUI)#, options="--background_color_red=1.0 --background_color_blue=1.0 --background_color_green=1.0") p.setAdditionalSearchPath(pd.getDataPath()) useFixedBase = True flags = p.URDF_INITIALIZE_SAT_FEATURES#0#p.URDF_USE_SELF_COLLISION #plane_pos = [0,0,0] #plane = p.loadURDF("plane.urdf", plane_pos, flags = flags, useFixedBase=useFixedBase) table_pos = [0,0,-0.625] table = p.loadURDF("table/table.urdf", table_pos, flags = flags, useFixedBase=useFixedBase) xarm = p.loadURDF("xarm/xarm6_robot.urdf", flags = flags, useFixedBase=useFixedBase) jointIds = [] paramIds = [] for j in range(p.getNumJoints(xarm)): p.changeDynamics(xarm, j, linearDamping=0, angularDamping=0) info = p.getJointInfo(xarm, j) #print(info) jointName = info[1] jointType = info[2] if (jointType == p.JOINT_PRISMATIC or jointType == p.JOINT_REVOLUTE): jointIds.append(j) paramIds.append(p.addUserDebugParameter(jointName.decode("utf-8"), -4, 4, 0)) skip_cam_frames = 10 while (1): p.stepSimulation() for i in range(len(paramIds)): c = paramIds[i] targetPos = p.readUserDebugParameter(c) p.setJointMotorControl2(xarm, jointIds[i], p.POSITION_CONTROL, targetPos, force=5 * 240.) skip_cam_frames -= 1 if (skip_cam_frames<0): p.getCameraImage(320,200, renderer=p.ER_BULLET_HARDWARE_OPENGL ) skip_cam_frames = 10 time.sleep(1./240.)
mit
xiangel/hue
desktop/core/ext-py/boto-2.38.0/boto/mws/connection.py
135
49808
# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import xml.sax import hashlib import string import collections from boto.connection import AWSQueryConnection from boto.exception import BotoServerError import boto.mws.exception import boto.mws.response from boto.handler import XmlHandler from boto.compat import filter, map, six, encodebytes __all__ = ['MWSConnection'] api_version_path = { 'Feeds': ('2009-01-01', 'Merchant', '/'), 'Reports': ('2009-01-01', 'Merchant', '/'), 'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'), 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), 'Inbound': ('2010-10-01', 'SellerId', '/FulfillmentInboundShipment/2010-10-01'), 'Outbound': ('2010-10-01', 'SellerId', '/FulfillmentOutboundShipment/2010-10-01'), 'Inventory': ('2010-10-01', 'SellerId', '/FulfillmentInventory/2010-10-01'), 'Recommendations': ('2013-04-01', 'SellerId', '/Recommendations/2013-04-01'), 'CustomerInfo': ('2014-03-01', 'SellerId', '/CustomerInformation/2014-03-01'), 'CartInfo': ('2014-03-01', 'SellerId', '/CartInformation/2014-03-01'), 'Subscriptions': ('2013-07-01', 'SellerId', '/Subscriptions/2013-07-01'), 'OffAmazonPayments': ('2013-01-01', 'SellerId', '/OffAmazonPayments/2013-01-01'), } content_md5 = lambda c: encodebytes(hashlib.md5(c).digest()).strip() decorated_attrs = ('action', 'response', 'section', 'quota', 'restore', 'version') api_call_map = {} def add_attrs_from(func, to): for attr in decorated_attrs: setattr(to, attr, getattr(func, attr, None)) to.__wrapped__ = func return to def structured_lists(*fields): def decorator(func): def wrapper(self, *args, **kw): for key, acc in [f.split('.') for f in fields]: if key in kw: newkey = key + '.' + acc + (acc and '.' 
or '') for i in range(len(kw[key])): kw[newkey + str(i + 1)] = kw[key][i] kw.pop(key) return func(self, *args, **kw) wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__, ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator def http_body(field): def decorator(func): def wrapper(*args, **kw): if any([f not in kw for f in (field, 'content_type')]): message = "{0} requires {1} and content_type arguments for " \ "building HTTP body".format(func.action, field) raise KeyError(message) kw['body'] = kw.pop(field) kw['headers'] = { 'Content-Type': kw.pop('content_type'), 'Content-MD5': content_md5(kw['body']), } return func(*args, **kw) wrapper.__doc__ = "{0}\nRequired HTTP Body: " \ "{1}".format(func.__doc__, field) return add_attrs_from(func, to=wrapper) return decorator def destructure_object(value, into, prefix, members=False): if isinstance(value, boto.mws.response.ResponseElement): destructure_object(value.__dict__, into, prefix, members=members) elif isinstance(value, collections.Mapping): for name in value: if name.startswith('_'): continue destructure_object(value[name], into, prefix + '.' + name, members=members) elif isinstance(value, six.string_types): into[prefix] = value elif isinstance(value, collections.Iterable): for index, element in enumerate(value): suffix = (members and '.member.' 
or '.') + str(index + 1) destructure_object(element, into, prefix + suffix, members=members) elif isinstance(value, bool): into[prefix] = str(value).lower() else: into[prefix] = value def structured_objects(*fields, **kwargs): def decorator(func): def wrapper(*args, **kw): members = kwargs.get('members', False) for field in filter(lambda i: i in kw, fields): destructure_object(kw.pop(field), kw, field, members=members) return func(*args, **kw) wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \ "(ResponseElement or anything iterable/dict-like)" \ .format(func.__doc__, ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator def requires(*groups): def decorator(func): def requires(*args, **kw): hasgroup = lambda group: all(key in kw for key in group) if 1 != len(list(filter(hasgroup, groups))): message = ' OR '.join(['+'.join(g) for g in groups]) message = "{0} requires {1} argument(s)" \ "".format(func.action, message) raise KeyError(message) return func(*args, **kw) message = ' OR '.join(['+'.join(g) for g in groups]) requires.__doc__ = "{0}\nRequired: {1}".format(func.__doc__, message) return add_attrs_from(func, to=requires) return decorator def exclusive(*groups): def decorator(func): def wrapper(*args, **kw): hasgroup = lambda group: all(key in kw for key in group) if len(list(filter(hasgroup, groups))) not in (0, 1): message = ' OR '.join(['+'.join(g) for g in groups]) message = "{0} requires either {1}" \ "".format(func.action, message) raise KeyError(message) return func(*args, **kw) message = ' OR '.join(['+'.join(g) for g in groups]) wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__, message) return add_attrs_from(func, to=wrapper) return decorator def dependent(field, *groups): def decorator(func): def wrapper(*args, **kw): hasgroup = lambda group: all(key in kw for key in group) if field in kw and not any(hasgroup(g) for g in groups): message = ' OR '.join(['+'.join(g) for g in groups]) message = "{0} argument {1} requires {2}" 
\ "".format(func.action, field, message) raise KeyError(message) return func(*args, **kw) message = ' OR '.join(['+'.join(g) for g in groups]) wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__, field, message) return add_attrs_from(func, to=wrapper) return decorator def requires_some_of(*fields): def decorator(func): def requires(*args, **kw): if not any(i in kw for i in fields): message = "{0} requires at least one of {1} argument(s)" \ "".format(func.action, ', '.join(fields)) raise KeyError(message) return func(*args, **kw) requires.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__, ', '.join(fields)) return add_attrs_from(func, to=requires) return decorator def boolean_arguments(*fields): def decorator(func): def wrapper(*args, **kw): for field in [f for f in fields if isinstance(kw.get(f), bool)]: kw[field] = str(kw[field]).lower() return func(*args, **kw) wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__, ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator def api_action(section, quota, restore, *api): def decorator(func, quota=int(quota), restore=float(restore)): version, accesskey, path = api_version_path[section] action = ''.join(api or map(str.capitalize, func.__name__.split('_'))) def wrapper(self, *args, **kw): kw.setdefault(accesskey, getattr(self, accesskey, None)) if kw[accesskey] is None: message = "{0} requires {1} argument. Set the " \ "MWSConnection.{2} attribute?" 
\ "".format(action, accesskey, accesskey) raise KeyError(message) kw['Action'] = action kw['Version'] = version response = self._response_factory(action, connection=self) request = dict(path=path, quota=quota, restore=restore) return func(self, request, response, *args, **kw) for attr in decorated_attrs: setattr(wrapper, attr, locals().get(attr)) wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \ "{4}".format(action, version, quota, restore, func.__doc__) api_call_map[action] = func.__name__ return wrapper return decorator class MWSConnection(AWSQueryConnection): ResponseFactory = boto.mws.response.ResponseFactory ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory def __init__(self, *args, **kw): kw.setdefault('host', 'mws.amazonservices.com') self._sandboxed = kw.pop('sandbox', False) self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId') self.SellerId = kw.pop('SellerId', None) or self.Merchant kw = self._setup_factories(kw.pop('factory_scopes', []), **kw) super(MWSConnection, self).__init__(*args, **kw) def _setup_factories(self, extrascopes, **kw): for factory, (scope, Default) in { 'response_factory': (boto.mws.response, self.ResponseFactory), 'response_error_factory': (boto.mws.exception, self.ResponseErrorFactory), }.items(): if factory in kw: setattr(self, '_' + factory, kw.pop(factory)) else: scopes = extrascopes + [scope] setattr(self, '_' + factory, Default(scopes=scopes)) return kw def _sandboxify(self, path): if not self._sandboxed: return path splat = path.split('/') splat[-2] += '_Sandbox' return '/'.join(splat) def _required_auth_capability(self): return ['mws'] def _post_request(self, request, params, parser, body='', headers=None): """Make a POST request, optionally with a content body, and return the response, optionally as raw text. 
""" headers = headers or {} path = self._sandboxify(request['path']) request = self.build_base_http_request('POST', path, None, data=body, params=params, headers=headers, host=self.host) try: response = self._mexe(request, override_num_retries=None) except BotoServerError as bs: raise self._response_error_factory(bs.status, bs.reason, bs.body) body = response.read() boto.log.debug(body) if not body: boto.log.error('Null body %s' % body) raise self._response_error_factory(response.status, response.reason, body) if response.status != 200: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self._response_error_factory(response.status, response.reason, body) digest = response.getheader('Content-MD5') if digest is not None: assert content_md5(body) == digest contenttype = response.getheader('Content-Type') return self._parse_response(parser, contenttype, body) def _parse_response(self, parser, contenttype, body): if not contenttype.startswith('text/xml'): return body handler = XmlHandler(parser, self) xml.sax.parseString(body, handler) return parser def method_for(self, name): """Return the MWS API method referred to in the argument. The named method can be in CamelCase or underlined_lower_case. This is the complement to MWSConnection.any_call.action """ action = '_' in name and string.capwords(name, '_') or name if action in api_call_map: return getattr(self, api_call_map[action]) return None def iter_call(self, call, *args, **kw): """Pass a call name as the first argument and a generator is returned for the initial response and any continuation call responses made using the NextToken. """ method = self.method_for(call) assert method, 'No call named "{0}"'.format(call) return self.iter_response(method(*args, **kw)) def iter_response(self, response): """Pass a call's response as the initial argument and a generator is returned for the initial response and any continuation call responses made using the NextToken. 
""" yield response more = self.method_for(response._action + 'ByNextToken') while more and response._result.HasNext == 'true': response = more(NextToken=response._result.NextToken) yield response @requires(['FeedType']) @boolean_arguments('PurgeAndReplace') @http_body('FeedContent') @structured_lists('MarketplaceIdList.Id') @api_action('Feeds', 15, 120) def submit_feed(self, request, response, headers=None, body='', **kw): """Uploads a feed for processing by Amazon MWS. """ headers = headers or {} return self._post_request(request, kw, response, body=body, headers=headers) @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type', 'FeedProcessingStatusList.Status') @api_action('Feeds', 10, 45) def get_feed_submission_list(self, request, response, **kw): """Returns a list of all feed submissions submitted in the previous 90 days. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Feeds', 0, 0) def get_feed_submission_list_by_next_token(self, request, response, **kw): """Returns a list of feed submissions using the NextToken parameter. """ return self._post_request(request, kw, response) @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status') @api_action('Feeds', 10, 45) def get_feed_submission_count(self, request, response, **kw): """Returns a count of the feeds submitted in the previous 90 days. """ return self._post_request(request, kw, response) @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type') @api_action('Feeds', 10, 45) def cancel_feed_submissions(self, request, response, **kw): """Cancels one or more feed submissions and returns a count of the feed submissions that were canceled. """ return self._post_request(request, kw, response) @requires(['FeedSubmissionId']) @api_action('Feeds', 15, 60) def get_feed_submission_result(self, request, response, **kw): """Returns the feed processing report. 
""" return self._post_request(request, kw, response) def get_service_status(self, **kw): """Instruct the user on how to get service status. """ sections = ', '.join(map(str.lower, api_version_path.keys())) message = "Use {0}.get_(section)_service_status(), " \ "where (section) is one of the following: " \ "{1}".format(self.__class__.__name__, sections) raise AttributeError(message) @requires(['ReportType']) @structured_lists('MarketplaceIdList.Id') @boolean_arguments('ReportOptions=ShowSalesChannel') @api_action('Reports', 15, 60) def request_report(self, request, response, **kw): """Creates a report request and submits the request to Amazon MWS. """ return self._post_request(request, kw, response) @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type', 'ReportProcessingStatusList.Status') @api_action('Reports', 10, 45) def get_report_request_list(self, request, response, **kw): """Returns a list of report requests that you can use to get the ReportRequestId for a report. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) def get_report_request_list_by_next_token(self, request, response, **kw): """Returns a list of report requests using the NextToken, which was supplied by a previous request to either GetReportRequestListByNextToken or GetReportRequestList, where the value of HasNext was true in that previous request. """ return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type', 'ReportProcessingStatusList.Status') @api_action('Reports', 10, 45) def get_report_request_count(self, request, response, **kw): """Returns a count of report requests that have been submitted to Amazon MWS for processing. """ return self._post_request(request, kw, response) @api_action('Reports', 10, 45) def cancel_report_requests(self, request, response, **kw): """Cancel one or more report requests, returning the count of the canceled report requests and the report request information. 
""" return self._post_request(request, kw, response) @boolean_arguments('Acknowledged') @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type') @api_action('Reports', 10, 60) def get_report_list(self, request, response, **kw): """Returns a list of reports that were created in the previous 90 days that match the query parameters. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) def get_report_list_by_next_token(self, request, response, **kw): """Returns a list of reports using the NextToken, which was supplied by a previous request to either GetReportListByNextToken or GetReportList, where the value of HasNext was true in the previous call. """ return self._post_request(request, kw, response) @boolean_arguments('Acknowledged') @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) def get_report_count(self, request, response, **kw): """Returns a count of the reports, created in the previous 90 days, with a status of _DONE_ and that are available for download. """ return self._post_request(request, kw, response) @requires(['ReportId']) @api_action('Reports', 15, 60) def get_report(self, request, response, **kw): """Returns the contents of a report. """ return self._post_request(request, kw, response) @requires(['ReportType', 'Schedule']) @api_action('Reports', 10, 45) def manage_report_schedule(self, request, response, **kw): """Creates, updates, or deletes a report request schedule for a specified report type. """ return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) def get_report_schedule_list(self, request, response, **kw): """Returns a list of order report requests that are scheduled to be submitted to Amazon MWS for processing. 
""" return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) def get_report_schedule_list_by_next_token(self, request, response, **kw): """Returns a list of report requests using the NextToken, which was supplied by a previous request to either GetReportScheduleListByNextToken or GetReportScheduleList, where the value of HasNext was true in that previous request. """ return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) def get_report_schedule_count(self, request, response, **kw): """Returns a count of order report requests that are scheduled to be submitted to Amazon MWS. """ return self._post_request(request, kw, response) @requires(['ReportIdList']) @boolean_arguments('Acknowledged') @structured_lists('ReportIdList.Id') @api_action('Reports', 10, 45) def update_report_acknowledgements(self, request, response, **kw): """Updates the acknowledged status of one or more reports. """ return self._post_request(request, kw, response) @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems']) @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems') @api_action('Inbound', 30, 0.5) def create_inbound_shipment_plan(self, request, response, **kw): """Returns the information required to create an inbound shipment. """ return self._post_request(request, kw, response) @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems']) @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') @api_action('Inbound', 30, 0.5) def create_inbound_shipment(self, request, response, **kw): """Creates an inbound shipment. """ return self._post_request(request, kw, response) @requires(['ShipmentId']) @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') @api_action('Inbound', 30, 0.5) def update_inbound_shipment(self, request, response, **kw): """Updates an existing inbound shipment. 
Amazon documentation is ambiguous as to whether the InboundShipmentHeader and InboundShipmentItems arguments are required. """ return self._post_request(request, kw, response) @requires_some_of('ShipmentIdList', 'ShipmentStatusList') @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status') @api_action('Inbound', 30, 0.5) def list_inbound_shipments(self, request, response, **kw): """Returns a list of inbound shipments based on criteria that you specify. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inbound', 30, 0.5) def list_inbound_shipments_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipments using the NextToken parameter. """ return self._post_request(request, kw, response) @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore']) @api_action('Inbound', 30, 0.5) def list_inbound_shipment_items(self, request, response, **kw): """Returns a list of items in a specified inbound shipment, or a list of items that were updated within a specified time frame. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inbound', 30, 0.5) def list_inbound_shipment_items_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipment items using the NextToken parameter. """ return self._post_request(request, kw, response) @api_action('Inbound', 2, 300, 'GetServiceStatus') def get_inbound_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Inbound Shipment API section. """ return self._post_request(request, kw, response) @requires(['SellerSkus'], ['QueryStartDateTime']) @structured_lists('SellerSkus.member') @api_action('Inventory', 30, 0.5) def list_inventory_supply(self, request, response, **kw): """Returns information about the availability of a seller's inventory. 
""" return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inventory', 30, 0.5) def list_inventory_supply_by_next_token(self, request, response, **kw): """Returns the next page of information about the availability of a seller's inventory using the NextToken parameter. """ return self._post_request(request, kw, response) @api_action('Inventory', 2, 300, 'GetServiceStatus') def get_inventory_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Inventory API section. """ return self._post_request(request, kw, response) @requires(['PackageNumber']) @api_action('Outbound', 30, 0.5) def get_package_tracking_details(self, request, response, **kw): """Returns delivery tracking information for a package in an outbound shipment for a Multi-Channel Fulfillment order. """ return self._post_request(request, kw, response) @requires(['Address', 'Items']) @structured_objects('Address', 'Items') @api_action('Outbound', 30, 0.5) def get_fulfillment_preview(self, request, response, **kw): """Returns a list of fulfillment order previews based on items and shipping speed categories that you specify. """ return self._post_request(request, kw, response) @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId', 'ShippingSpeedCategory', 'DisplayableOrderDateTime', 'DestinationAddress', 'DisplayableOrderComment', 'Items']) @structured_objects('DestinationAddress', 'Items') @api_action('Outbound', 30, 0.5) def create_fulfillment_order(self, request, response, **kw): """Requests that Amazon ship items from the seller's inventory to a destination address. """ return self._post_request(request, kw, response) @requires(['SellerFulfillmentOrderId']) @api_action('Outbound', 30, 0.5) def get_fulfillment_order(self, request, response, **kw): """Returns a fulfillment order based on a specified SellerFulfillmentOrderId. 
""" return self._post_request(request, kw, response) @api_action('Outbound', 30, 0.5) def list_all_fulfillment_orders(self, request, response, **kw): """Returns a list of fulfillment orders fulfilled after (or at) a specified date or by fulfillment method. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Outbound', 30, 0.5) def list_all_fulfillment_orders_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipment items using the NextToken parameter. """ return self._post_request(request, kw, response) @requires(['SellerFulfillmentOrderId']) @api_action('Outbound', 30, 0.5) def cancel_fulfillment_order(self, request, response, **kw): """Requests that Amazon stop attempting to fulfill an existing fulfillment order. """ return self._post_request(request, kw, response) @api_action('Outbound', 2, 300, 'GetServiceStatus') def get_outbound_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Outbound API section. """ return self._post_request(request, kw, response) @requires(['CreatedAfter'], ['LastUpdatedAfter']) @requires(['MarketplaceId']) @exclusive(['CreatedAfter'], ['LastUpdatedAfter']) @dependent('CreatedBefore', ['CreatedAfter']) @exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId']) @dependent('LastUpdatedBefore', ['LastUpdatedAfter']) @exclusive(['CreatedAfter'], ['LastUpdatedBefore']) @structured_objects('OrderTotal', 'ShippingAddress', 'PaymentExecutionDetail') @structured_lists('MarketplaceId.Id', 'OrderStatus.Status', 'FulfillmentChannel.Channel', 'PaymentMethod.') @api_action('Orders', 6, 60) def list_orders(self, request, response, **kw): """Returns a list of orders created or updated during a time frame that you specify. 
""" toggle = set(('FulfillmentChannel.Channel.1', 'OrderStatus.Status.1', 'PaymentMethod.1', 'LastUpdatedAfter', 'LastUpdatedBefore')) for do, dont in { 'BuyerEmail': toggle.union(['SellerOrderId']), 'SellerOrderId': toggle.union(['BuyerEmail']), }.items(): if do in kw and any(i in dont for i in kw): message = "Don't include {0} when specifying " \ "{1}".format(' or '.join(dont), do) raise AssertionError(message) return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Orders', 6, 60) def list_orders_by_next_token(self, request, response, **kw): """Returns the next page of orders using the NextToken value that was returned by your previous request to either ListOrders or ListOrdersByNextToken. """ return self._post_request(request, kw, response) @requires(['AmazonOrderId']) @structured_lists('AmazonOrderId.Id') @api_action('Orders', 6, 60) def get_order(self, request, response, **kw): """Returns an order for each AmazonOrderId that you specify. """ return self._post_request(request, kw, response) @requires(['AmazonOrderId']) @api_action('Orders', 30, 2) def list_order_items(self, request, response, **kw): """Returns order item information for an AmazonOrderId that you specify. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Orders', 30, 2) def list_order_items_by_next_token(self, request, response, **kw): """Returns the next page of order items using the NextToken value that was returned by your previous request to either ListOrderItems or ListOrderItemsByNextToken. """ return self._post_request(request, kw, response) @api_action('Orders', 2, 300, 'GetServiceStatus') def get_orders_service_status(self, request, response, **kw): """Returns the operational status of the Orders API section. 
""" return self._post_request(request, kw, response) @requires(['MarketplaceId', 'Query']) @api_action('Products', 20, 20) def list_matching_products(self, request, response, **kw): """Returns a list of products and their attributes, ordered by relevancy, based on a search query that you specify. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 20) def get_matching_product(self, request, response, **kw): """Returns a list of products and their attributes, based on a list of ASIN values that you specify. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'IdType', 'IdList']) @structured_lists('IdList.Id') @api_action('Products', 20, 20) def get_matching_product_for_id(self, request, response, **kw): """Returns a list of products and their attributes, based on a list of Product IDs that you specify. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKUList']) @structured_lists('SellerSKUList.SellerSKU') @api_action('Products', 20, 10, 'GetCompetitivePricingForSKU') def get_competitive_pricing_for_sku(self, request, response, **kw): """Returns the current competitive pricing of a product, based on the SellerSKUs and MarketplaceId that you specify. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 10, 'GetCompetitivePricingForASIN') def get_competitive_pricing_for_asin(self, request, response, **kw): """Returns the current competitive pricing of a product, based on the ASINs and MarketplaceId that you specify. 
""" return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKUList']) @structured_lists('SellerSKUList.SellerSKU') @api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU') def get_lowest_offer_listings_for_sku(self, request, response, **kw): """Returns the lowest price offer listings for a specific product by item condition and SellerSKUs. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN') def get_lowest_offer_listings_for_asin(self, request, response, **kw): """Returns the lowest price offer listings for a specific product by item condition and ASINs. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKU']) @api_action('Products', 20, 20, 'GetProductCategoriesForSKU') def get_product_categories_for_sku(self, request, response, **kw): """Returns the product categories that a SellerSKU belongs to. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASIN']) @api_action('Products', 20, 20, 'GetProductCategoriesForASIN') def get_product_categories_for_asin(self, request, response, **kw): """Returns the product categories that an ASIN belongs to. """ return self._post_request(request, kw, response) @api_action('Products', 2, 300, 'GetServiceStatus') def get_products_service_status(self, request, response, **kw): """Returns the operational status of the Products API section. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKUList']) @structured_lists('SellerSKUList.SellerSKU') @api_action('Products', 20, 10, 'GetMyPriceForSKU') def get_my_price_for_sku(self, request, response, **kw): """Returns pricing information for your own offer listings, based on SellerSKU. 
""" return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 10, 'GetMyPriceForASIN') def get_my_price_for_asin(self, request, response, **kw): """Returns pricing information for your own offer listings, based on ASIN. """ return self._post_request(request, kw, response) @api_action('Sellers', 15, 60) def list_marketplace_participations(self, request, response, **kw): """Returns a list of marketplaces that the seller submitting the request can sell in, and a list of participations that include seller-specific information in that marketplace. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Sellers', 15, 60) def list_marketplace_participations_by_next_token(self, request, response, **kw): """Returns the next page of marketplaces and participations using the NextToken value that was returned by your previous request to either ListMarketplaceParticipations or ListMarketplaceParticipationsByNextToken. """ return self._post_request(request, kw, response) @requires(['MarketplaceId']) @api_action('Recommendations', 5, 2) def get_last_updated_time_for_recommendations(self, request, response, **kw): """Checks whether there are active recommendations for each category for the given marketplace, and if there are, returns the time when recommendations were last updated for each category. """ return self._post_request(request, kw, response) @requires(['MarketplaceId']) @structured_lists('CategoryQueryList.CategoryQuery') @api_action('Recommendations', 5, 2) def list_recommendations(self, request, response, **kw): """Returns your active recommendations for a specific category or for all categories for a specific marketplace. 
""" return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Recommendations', 5, 2) def list_recommendations_by_next_token(self, request, response, **kw): """Returns the next page of recommendations using the NextToken parameter. """ return self._post_request(request, kw, response) @api_action('Recommendations', 2, 300, 'GetServiceStatus') def get_recommendations_service_status(self, request, response, **kw): """Returns the operational status of the Recommendations API section. """ return self._post_request(request, kw, response) @api_action('CustomerInfo', 15, 12) def list_customers(self, request, response, **kw): """Returns a list of customer accounts based on search criteria that you specify. """ return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('CustomerInfo', 50, 3) def list_customers_by_next_token(self, request, response, **kw): """Returns the next page of customers using the NextToken parameter. """ return self._post_request(request, kw, response) @requires(['CustomerIdList']) @structured_lists('CustomerIdList.CustomerId') @api_action('CustomerInfo', 15, 12) def get_customers_for_customer_id(self, request, response, **kw): """Returns a list of customer accounts based on search criteria that you specify. """ return self._post_request(request, kw, response) @api_action('CustomerInfo', 2, 300, 'GetServiceStatus') def get_customerinfo_service_status(self, request, response, **kw): """Returns the operational status of the Customer Information API section. """ return self._post_request(request, kw, response) @requires(['DateRangeStart']) @api_action('CartInfo', 15, 12) def list_carts(self, request, response, **kw): """Returns a list of shopping carts in your Webstore that were last updated during the time range that you specify. 
""" return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('CartInfo', 50, 3) def list_carts_by_next_token(self, request, response, **kw): """Returns the next page of shopping carts using the NextToken parameter. """ return self._post_request(request, kw, response) @requires(['CartIdList']) @structured_lists('CartIdList.CartId') @api_action('CartInfo', 15, 12) def get_carts(self, request, response, **kw): """Returns shopping carts based on the CartId values that you specify. """ return self._post_request(request, kw, response) @api_action('CartInfo', 2, 300, 'GetServiceStatus') def get_cartinfo_service_status(self, request, response, **kw): """Returns the operational status of the Cart Information API section. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'Destination']) @structured_objects('Destination', members=True) @api_action('Subscriptions', 25, 0.5) def register_destination(self, request, response, **kw): """Specifies a new destination where you want to receive notifications. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'Destination']) @structured_objects('Destination', members=True) @api_action('Subscriptions', 25, 0.5) def deregister_destination(self, request, response, **kw): """Removes an existing destination from the list of registered destinations. """ return self._post_request(request, kw, response) @requires(['MarketplaceId']) @api_action('Subscriptions', 25, 0.5) def list_registered_destinations(self, request, response, **kw): """Lists all current destinations that you have registered. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'Destination']) @structured_objects('Destination', members=True) @api_action('Subscriptions', 25, 0.5) def send_test_notification_to_destination(self, request, response, **kw): """Sends a test notification to an existing destination. 
""" return self._post_request(request, kw, response) @requires(['MarketplaceId', 'Subscription']) @structured_objects('Subscription', members=True) @api_action('Subscriptions', 25, 0.5) def create_subscription(self, request, response, **kw): """Creates a new subscription for the specified notification type and destination. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'NotificationType', 'Destination']) @structured_objects('Destination', members=True) @api_action('Subscriptions', 25, 0.5) def get_subscription(self, request, response, **kw): """Gets the subscription for the specified notification type and destination. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'NotificationType', 'Destination']) @structured_objects('Destination', members=True) @api_action('Subscriptions', 25, 0.5) def delete_subscription(self, request, response, **kw): """Deletes the subscription for the specified notification type and destination. """ return self._post_request(request, kw, response) @requires(['MarketplaceId']) @api_action('Subscriptions', 25, 0.5) def list_subscriptions(self, request, response, **kw): """Returns a list of all your current subscriptions. """ return self._post_request(request, kw, response) @requires(['MarketplaceId', 'Subscription']) @structured_objects('Subscription', members=True) @api_action('Subscriptions', 25, 0.5) def update_subscription(self, request, response, **kw): """Updates the subscription for the specified notification type and destination. """ return self._post_request(request, kw, response) @api_action('Subscriptions', 2, 300, 'GetServiceStatus') def get_subscriptions_service_status(self, request, response, **kw): """Returns the operational status of the Subscriptions API section. 
""" return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes']) @structured_objects('OrderReferenceAttributes') @api_action('OffAmazonPayments', 10, 1) def set_order_reference_details(self, request, response, **kw): """Sets order reference details such as the order total and a description for the order. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId']) @api_action('OffAmazonPayments', 20, 2) def get_order_reference_details(self, request, response, **kw): """Returns details about the Order Reference object and its current state. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId']) @api_action('OffAmazonPayments', 10, 1) def confirm_order_reference(self, request, response, **kw): """Confirms that the order reference is free of constraints and all required information has been set on the order reference. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId']) @api_action('OffAmazonPayments', 10, 1) def cancel_order_reference(self, request, response, **kw): """Cancel an order reference; all authorizations associated with this order reference are also closed. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId']) @api_action('OffAmazonPayments', 10, 1) def close_order_reference(self, request, response, **kw): """Confirms that an order reference has been fulfilled (fully or partially) and that you do not expect to create any new authorizations on this order reference. """ return self._post_request(request, kw, response) @requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId', 'AuthorizationAmount']) @structured_objects('AuthorizationAmount') @api_action('OffAmazonPayments', 10, 1) def authorize(self, request, response, **kw): """Reserves a specified amount against the payment method(s) stored in the order reference. 
""" return self._post_request(request, kw, response) @requires(['AmazonAuthorizationId']) @api_action('OffAmazonPayments', 20, 2) def get_authorization_details(self, request, response, **kw): """Returns the status of a particular authorization and the total amount captured on the authorization. """ return self._post_request(request, kw, response) @requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount']) @structured_objects('CaptureAmount') @api_action('OffAmazonPayments', 10, 1) def capture(self, request, response, **kw): """Captures funds from an authorized payment instrument. """ return self._post_request(request, kw, response) @requires(['AmazonCaptureId']) @api_action('OffAmazonPayments', 20, 2) def get_capture_details(self, request, response, **kw): """Returns the status of a particular capture and the total amount refunded on the capture. """ return self._post_request(request, kw, response) @requires(['AmazonAuthorizationId']) @api_action('OffAmazonPayments', 10, 1) def close_authorization(self, request, response, **kw): """Closes an authorization. """ return self._post_request(request, kw, response) @requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount']) @structured_objects('RefundAmount') @api_action('OffAmazonPayments', 10, 1) def refund(self, request, response, **kw): """Refunds a previously captured amount. """ return self._post_request(request, kw, response) @requires(['AmazonRefundId']) @api_action('OffAmazonPayments', 20, 2) def get_refund_details(self, request, response, **kw): """Returns the status of a particular refund. """ return self._post_request(request, kw, response) @api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus') def get_offamazonpayments_service_status(self, request, response, **kw): """Returns the operational status of the Off-Amazon Payments API section. """ return self._post_request(request, kw, response)
apache-2.0
ucloud/uai-sdk
examples/caffe/train/faster-rcnn/code/tools/demo.py
10
5028
#!/usr/bin/env python

# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

"""
Demo script showing detections in sample images.

See README.md for installation instructions before running.

NOTE: this is a Python 2 script (print statements, xrange); it must be run
under the Python 2 / Caffe environment described in the repo README.
"""

import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse

# The 20 PASCAL VOC object categories; index 0 is the background class, so a
# class's index in this tuple matches its column block in the network output.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')

# CLI choice -> (model directory name under cfg.MODELS_DIR, caffemodel filename).
NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
                  'ZF_faster_rcnn_final.caffemodel')}


def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes.

    Opens a new matplotlib figure for this (image, class) pair; nothing is
    drawn (and no figure is created) if no detection reaches ``thresh``.

    ``im`` is the BGR image as loaded by OpenCV; ``dets`` rows are
    [x1, y1, x2, y2, score].
    """
    # Keep only detections at or above the confidence threshold.
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    # OpenCV loads images as BGR; flip channel order to RGB for matplotlib.
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        # Label each box with "<class> <score>" just above its top-left corner.
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    # draw() only updates the canvas; plt.show() at the bottom of the script
    # is what finally displays all accumulated figures.
    plt.draw()

def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals.

    Runs the network once on ``data/demo/<image_name>``, then applies
    per-class NMS and visualizes each class's surviving detections.
    """

    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    # scores: (num_proposals, num_classes); boxes: 4 columns per class
    # (class-specific regressed boxes laid out side by side).
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1 # because we skipped background
        # Slice out this class's 4 box columns and its score column.
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        # Stack into [x1, y1, x2, y2, score] rows as required by nms().
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH)

def parse_args():
    """Parse input arguments.

    Returns the argparse namespace with gpu_id, cpu_mode, and demo_net.
    """
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
                        choices=NETS.keys(), default='vgg16')

    args = parser.parse_args()

    return args

if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    # Model definition comes from the models tree; trained weights from the
    # data tree populated by the fetch script below.
    prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
                            'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS[args.demo_net][1])

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
    # (first forward pass triggers lazy allocations; presumably run twice to
    # get stable timings afterwards — TODO confirm)
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _= im_detect(net, im)

    im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
                '001763.jpg', '004545.jpg']
    for im_name in im_names:
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for data/demo/{}'.format(im_name)
        demo(net, im_name)

    plt.show()
apache-2.0
zuntrax/schedule-ng
fahrplan/datetime.py
1
1394
"""Parsing and formatting helpers for dates, times, and durations.

All ``parse_*`` functions raise :class:`FahrplanError` on malformed input so
callers can report schedule errors uniformly.
"""

import datetime as dt

from .exception import FahrplanError

# Wall-clock format used throughout the schedule, e.g. "18:30".
TIME_FORMAT = "%H:%M"


def format_datetime(datetime: dt.datetime) -> str:
    """Return *datetime* as an ISO 8601 string."""
    return datetime.isoformat()


def parse_datetime(date_string: str) -> dt.datetime:
    """Parse an ISO 8601 string into a :class:`datetime.datetime`."""
    return dt.datetime.fromisoformat(date_string)


def format_time(time: dt.time) -> str:
    """Format *time* as ``%H:%M`` (zero-padded, no seconds)."""
    return time.strftime(TIME_FORMAT)


def parse_time(time_string: str) -> dt.time:
    """Parse a ``%H:%M`` string into a :class:`datetime.time`.

    Raises:
        FahrplanError: if *time_string* is not of the form ``HH:MM``.
    """
    try:
        hours, _, minutes = time_string.partition(":")
        return dt.time(int(hours, 10), int(minutes, 10))
    except ValueError as err:
        # Chain the original error so the offending component stays visible
        # in the traceback instead of being silently discarded.
        raise FahrplanError(f"{time_string} is not in required format %H:%M") from err


def format_date(date: dt.date) -> str:
    """Return *date* as an ISO 8601 ``%Y-%m-%d`` string."""
    return str(date)


def parse_date(date_string: str) -> dt.date:
    """Parse a ``%Y-%m-%d`` string into a :class:`datetime.date`.

    Raises:
        FahrplanError: if the string does not split into valid
            year, month, and day integers.
    """
    try:
        items = [int(i, 10) for i in date_string.split("-")]
        return dt.date(*items)
    except (TypeError, ValueError) as err:
        raise FahrplanError(f"{date_string} is not in required format %Y-%m-%d") from err


def format_duration(duration: dt.timedelta) -> str:
    """Format *duration* as ``H:MM``.

    Relies on ``str(timedelta)`` producing ``H:MM:SS``; we just cut away
    the seconds part.
    """
    return str(duration)[:-3]


def parse_duration(duration_string: str) -> dt.timedelta:
    """Parse a ``%H:%M`` duration string into a :class:`datetime.timedelta`.

    Raises:
        FahrplanError: if *duration_string* is not of the form ``H:MM``.
    """
    try:
        hours, _, minutes = duration_string.partition(":")
        return dt.timedelta(hours=int(hours, 10), minutes=int(minutes, 10))
    except ValueError as err:
        raise FahrplanError(f"{duration_string} is not in required format %H:%M") from err
gpl-3.0
xgin/letsencrypt
letsencrypt-compatibility-test/letsencrypt_compatibility_test/configurators/common.py
39
5511
"""Provides a common base for configurator proxies""" import logging import os import shutil import tempfile import docker from letsencrypt import constants from letsencrypt_compatibility_test import errors from letsencrypt_compatibility_test import util logger = logging.getLogger(__name__) class Proxy(object): # pylint: disable=too-many-instance-attributes """A common base for compatibility test configurators""" _NOT_ADDED_ARGS = True @classmethod def add_parser_arguments(cls, parser): """Adds command line arguments needed by the plugin""" if Proxy._NOT_ADDED_ARGS: group = parser.add_argument_group("docker") group.add_argument( "--docker-url", default="unix://var/run/docker.sock", help="URL of the docker server") group.add_argument( "--no-remove", action="store_true", help="do not delete container on program exit") Proxy._NOT_ADDED_ARGS = False def __init__(self, args): """Initializes the plugin with the given command line args""" self._temp_dir = tempfile.mkdtemp() self.le_config = util.create_le_config(self._temp_dir) config_dir = util.extract_configs(args.configs, self._temp_dir) self._configs = [ os.path.join(config_dir, config) for config in os.listdir(config_dir)] self.args = args self._docker_client = docker.Client( base_url=self.args.docker_url, version="auto") self.http_port, self.https_port = util.get_two_free_ports() self._container_id = None def has_more_configs(self): """Returns true if there are more configs to test""" return bool(self._configs) def cleanup_from_tests(self): """Performs any necessary cleanup from running plugin tests""" self._docker_client.stop(self._container_id, 0) if not self.args.no_remove: self._docker_client.remove_container(self._container_id) def load_config(self): """Returns the next config directory to be tested""" shutil.rmtree(self.le_config.work_dir, ignore_errors=True) backup = os.path.join(self.le_config.work_dir, constants.BACKUP_DIR) os.makedirs(backup) return self._configs.pop() def start_docker(self, image_name, 
command): """Creates and runs a Docker container with the specified image""" logger.warning("Pulling Docker image. This may take a minute.") for line in self._docker_client.pull(image_name, stream=True): logger.debug(line) host_config = docker.utils.create_host_config( binds={self._temp_dir: {"bind": self._temp_dir, "mode": "rw"}}, port_bindings={ 80: ("127.0.0.1", self.http_port), 443: ("127.0.0.1", self.https_port)},) container = self._docker_client.create_container( image_name, command, ports=[80, 443], volumes=self._temp_dir, host_config=host_config) if container["Warnings"]: logger.warning(container["Warnings"]) self._container_id = container["Id"] self._docker_client.start(self._container_id) def check_call(self, command, *args, **kwargs): # pylint: disable=unused-argument """Simulates a call to check_call but executes the command in the running docker image """ if self.popen(command).returncode: raise errors.Error( "{0} exited with a nonzero value".format(command)) def popen(self, command, *args, **kwargs): # pylint: disable=unused-argument """Simulates a call to Popen but executes the command in the running docker image """ class SimplePopen(object): # pylint: disable=too-few-public-methods """Simplified Popen object""" def __init__(self, returncode, output): self.returncode = returncode self._stdout = output self._stderr = output def communicate(self): """Returns stdout and stderr""" return self._stdout, self._stderr if isinstance(command, list): command = " ".join(command) returncode, output = self.execute_in_docker(command) return SimplePopen(returncode, output) def execute_in_docker(self, command): """Executes command inside the running docker image""" logger.debug("Executing '%s'", command) exec_id = self._docker_client.exec_create(self._container_id, command) output = self._docker_client.exec_start(exec_id) returncode = self._docker_client.exec_inspect(exec_id)["ExitCode"] return returncode, output def copy_certs_and_keys(self, cert_path, key_path, 
chain_path=None): """Copies certs and keys into the temporary directory""" cert_and_key_dir = os.path.join(self._temp_dir, "certs_and_keys") if not os.path.isdir(cert_and_key_dir): os.mkdir(cert_and_key_dir) cert = os.path.join(cert_and_key_dir, "cert") shutil.copy(cert_path, cert) key = os.path.join(cert_and_key_dir, "key") shutil.copy(key_path, key) if chain_path: chain = os.path.join(cert_and_key_dir, "chain") shutil.copy(chain_path, chain) else: chain = None return cert, key, chain
apache-2.0
benfinkelcbt/CPD200
CPD200-Lab09-Python/pyasn1_modules/rfc4210.py
77
27391
# # Certificate Management Protocol structures as per RFC4210 # # Based on Alex Railean's work # from pyasn1.type import tag,namedtype,namedval,univ,constraint,char,useful from pyasn1_modules import rfc2459, rfc2511, rfc2314 MAX = 64 class KeyIdentifier(univ.OctetString): pass class CMPCertificate(rfc2459.Certificate): pass class OOBCert(CMPCertificate): pass class CertAnnContent(CMPCertificate): pass class PKIFreeText(univ.SequenceOf): """ PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String """ componentType = char.UTF8String() subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX) class PollRepContent(univ.SequenceOf): """ PollRepContent ::= SEQUENCE OF SEQUENCE { certReqId INTEGER, checkAfter INTEGER, -- time in seconds reason PKIFreeText OPTIONAL } """ class CertReq(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('certReqId', univ.Integer()), namedtype.NamedType('checkAfter', univ.Integer()), namedtype.OptionalNamedType('reason', PKIFreeText()) ) componentType = CertReq() class PollReqContent(univ.SequenceOf): """ PollReqContent ::= SEQUENCE OF SEQUENCE { certReqId INTEGER } """ class CertReq(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('certReqId', univ.Integer()) ) componentType = CertReq() class InfoTypeAndValue(univ.Sequence): """ InfoTypeAndValue ::= SEQUENCE { infoType OBJECT IDENTIFIER, infoValue ANY DEFINED BY infoType OPTIONAL }""" componentType = namedtype.NamedTypes( namedtype.NamedType('infoType', univ.ObjectIdentifier()), namedtype.OptionalNamedType('infoValue', univ.Any()) ) class GenRepContent(univ.SequenceOf): componentType = InfoTypeAndValue() class GenMsgContent(univ.SequenceOf): componentType = InfoTypeAndValue() class PKIConfirmContent(univ.Null): pass class CRLAnnContent(univ.SequenceOf): componentType = rfc2459.CertificateList() class CAKeyUpdAnnContent(univ.Sequence): """ CAKeyUpdAnnContent ::= SEQUENCE { oldWithNew CMPCertificate, newWithOld 
CMPCertificate, newWithNew CMPCertificate } """ componentType = namedtype.NamedTypes( namedtype.NamedType('oldWithNew', CMPCertificate()), namedtype.NamedType('newWithOld', CMPCertificate()), namedtype.NamedType('newWithNew', CMPCertificate()) ) class RevDetails(univ.Sequence): """ RevDetails ::= SEQUENCE { certDetails CertTemplate, crlEntryDetails Extensions OPTIONAL } """ componentType = namedtype.NamedTypes( namedtype.NamedType('certDetails', rfc2511.CertTemplate()), namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions()) ) class RevReqContent(univ.SequenceOf): componentType = RevDetails() class CertOrEncCert(univ.Choice): """ CertOrEncCert ::= CHOICE { certificate [0] CMPCertificate, encryptedCert [1] EncryptedValue } """ componentType = namedtype.NamedTypes( namedtype.NamedType('certificate', CMPCertificate().subtype( explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0) ) ), namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype( explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1) ) ) ) class CertifiedKeyPair(univ.Sequence): """ CertifiedKeyPair ::= SEQUENCE { certOrEncCert CertOrEncCert, privateKey [0] EncryptedValue OPTIONAL, publicationInfo [1] PKIPublicationInfo OPTIONAL } """ componentType = namedtype.NamedTypes( namedtype.NamedType('certOrEncCert', CertOrEncCert()), namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype( explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0) ) ), namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype( explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1) ) ) ) class POPODecKeyRespContent(univ.SequenceOf): componentType = univ.Integer() class Challenge(univ.Sequence): """ Challenge ::= SEQUENCE { owf AlgorithmIdentifier OPTIONAL, witness OCTET STRING, challenge OCTET STRING } """ componentType = namedtype.NamedTypes( namedtype.OptionalNamedType('owf', 
# ---------------------------------------------------------------------------
# ASN.1 structures from RFC 4210 (Certificate Management Protocol, CMP),
# expressed with pyasn1.  Relies on the pyasn1 univ/namedtype/namedval/tag/
# constraint/useful helpers and on the companion rfc2459/rfc2511/rfc2314
# modules; PKIFreeText, CMPCertificate, CertifiedKeyPair, Challenge, MAX,
# GenMsgContent, etc. are defined earlier in this file.
# NOTE(review): the next four lines are the tail of the `Challenge`
# SEQUENCE whose opening lies before this chunk; reproduced verbatim.
rfc2459.AlgorithmIdentifier()),
    namedtype.NamedType('witness', univ.OctetString()),
    namedtype.NamedType('challenge', univ.OctetString())
)


class PKIStatus(univ.Integer):
    """
    PKIStatus ::= INTEGER {
        accepted               (0),
        grantedWithMods        (1),
        rejection              (2),
        waiting                (3),
        revocationWarning      (4),
        revocationNotification (5),
        keyUpdateWarning       (6)
    }
    """
    namedValues = namedval.NamedValues(
        ('accepted', 0),
        ('grantedWithMods', 1),
        ('rejection', 2),
        ('waiting', 3),
        ('revocationWarning', 4),
        ('revocationNotification', 5),
        ('keyUpdateWarning', 6)
    )


class PKIFailureInfo(univ.BitString):
    """
    PKIFailureInfo ::= BIT STRING {
        badAlg              (0),  badMessageCheck     (1),
        badRequest          (2),  badTime             (3),
        badCertId           (4),  badDataFormat       (5),
        wrongAuthority      (6),  incorrectData       (7),
        missingTimeStamp    (8),  badPOP              (9),
        certRevoked         (10), certConfirmed       (11),
        wrongIntegrity      (12), badRecipientNonce   (13),
        timeNotAvailable    (14), unacceptedPolicy    (15),
        unacceptedExtension (16), addInfoNotAvailable (17),
        badSenderNonce      (18), badCertTemplate     (19),
        signerNotTrusted    (20), transactionIdInUse  (21),
        unsupportedVersion  (22), notAuthorized       (23),
        systemUnavail       (24), systemFailure       (25),
        duplicateCertReq    (26)
    """
    namedValues = namedval.NamedValues(
        ('badAlg', 0),
        ('badMessageCheck', 1),
        ('badRequest', 2),
        ('badTime', 3),
        ('badCertId', 4),
        ('badDataFormat', 5),
        ('wrongAuthority', 6),
        ('incorrectData', 7),
        ('missingTimeStamp', 8),
        ('badPOP', 9),
        ('certRevoked', 10),
        ('certConfirmed', 11),
        ('wrongIntegrity', 12),
        ('badRecipientNonce', 13),
        ('timeNotAvailable', 14),
        ('unacceptedPolicy', 15),
        ('unacceptedExtension', 16),
        ('addInfoNotAvailable', 17),
        ('badSenderNonce', 18),
        ('badCertTemplate', 19),
        ('signerNotTrusted', 20),
        ('transactionIdInUse', 21),
        ('unsupportedVersion', 22),
        ('notAuthorized', 23),
        ('systemUnavail', 24),
        ('systemFailure', 25),
        ('duplicateCertReq', 26)
    )


class PKIStatusInfo(univ.Sequence):
    """
    PKIStatusInfo ::= SEQUENCE {
        status       PKIStatus,
        statusString PKIFreeText     OPTIONAL,
        failInfo     PKIFailureInfo  OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatus()),
        namedtype.OptionalNamedType('statusString', PKIFreeText()),
        namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
    )


class ErrorMsgContent(univ.Sequence):
    """
    ErrorMsgContent ::= SEQUENCE {
        pKIStatusInfo PKIStatusInfo,
        errorCode     INTEGER     OPTIONAL,
        -- implementation-specific error codes
        errorDetails  PKIFreeText OPTIONAL
        -- implementation-specific error details
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
        namedtype.OptionalNamedType('errorCode', univ.Integer()),
        namedtype.OptionalNamedType('errorDetails', PKIFreeText())
    )


class CertStatus(univ.Sequence):
    """
    CertStatus ::= SEQUENCE {
        certHash   OCTET STRING,
        certReqId  INTEGER,
        statusInfo PKIStatusInfo OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certHash', univ.OctetString()),
        namedtype.NamedType('certReqId', univ.Integer()),
        namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
    )


class CertConfirmContent(univ.SequenceOf):
    # CertConfirmContent ::= SEQUENCE OF CertStatus
    componentType = CertStatus()


class RevAnnContent(univ.Sequence):
    """
    RevAnnContent ::= SEQUENCE {
        status          PKIStatus,
        certId          CertId,
        willBeRevokedAt GeneralizedTime,
        badSinceDate    GeneralizedTime,
        crlDetails      Extensions OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatus()),
        namedtype.NamedType('certId', rfc2511.CertId()),
        namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
        namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
        namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
    )


class RevRepContent(univ.Sequence):
    """
    RevRepContent ::= SEQUENCE {
        status   SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
        revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId OPTIONAL,
        crls     [1] SEQUENCE SIZE (1..MAX) OF CertificateList OPTIONAL
    """
    # NOTE(review): the docstring declares `status` as SEQUENCE SIZE (1..MAX)
    # OF PKIStatusInfo, but the code below registers a single PKIStatusInfo.
    # Left as-is (changing it would alter the wire encoding) — confirm
    # against the deployments this module must interoperate with.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatusInfo()),
        namedtype.OptionalNamedType('revCerts', univ.SequenceOf(
                componentType=rfc2511.CertId()
            ).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
            )
        ),
        namedtype.OptionalNamedType('crls', univ.SequenceOf(
                componentType=rfc2459.CertificateList()
            ).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
            )
        )
    )


class KeyRecRepContent(univ.Sequence):
    """
    KeyRecRepContent ::= SEQUENCE {
        status      PKIStatusInfo,
        newSigCert  [0] CMPCertificate                         OPTIONAL,
        caCerts     [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate OPTIONAL,
        keyPairHist [2] SEQUENCE SIZE (1..MAX) OF CertifiedKeyPair OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatusInfo()),
        namedtype.OptionalNamedType('newSigCert', CMPCertificate().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
            )
        ),
        namedtype.OptionalNamedType('caCerts', univ.SequenceOf(
                componentType=CMPCertificate()
            ).subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
            )
        ),
        namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(
                componentType=CertifiedKeyPair()
            ).subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
            )
        )
    )


class CertResponse(univ.Sequence):
    """
    CertResponse ::= SEQUENCE {
        certReqId        INTEGER,
        status           PKIStatusInfo,
        certifiedKeyPair CertifiedKeyPair OPTIONAL,
        rspInfo          OCTET STRING     OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certReqId', univ.Integer()),
        namedtype.NamedType('status', PKIStatusInfo()),
        namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
        namedtype.OptionalNamedType('rspInfo', univ.OctetString())
    )


class CertRepMessage(univ.Sequence):
    """
    CertRepMessage ::= SEQUENCE {
        caPubs   [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate OPTIONAL,
        response SEQUENCE OF CertResponse
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('caPubs', univ.SequenceOf(
                componentType=CMPCertificate()
            ).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
            )
        ),
        namedtype.NamedType('response', univ.SequenceOf(
                componentType=CertResponse())
        )
    )


class POPODecKeyChallContent(univ.SequenceOf):
    # POPODecKeyChallContent ::= SEQUENCE OF Challenge
    componentType = Challenge()


class OOBCertHash(univ.Sequence):
    """
    OOBCertHash ::= SEQUENCE {
        hashAlg [0] AlgorithmIdentifier OPTIONAL,
        certId  [1] CertId              OPTIONAL,
        hashVal BIT STRING
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('hashAlg', rfc2459.AlgorithmIdentifier().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
            )
        ),
        namedtype.OptionalNamedType('certId', rfc2511.CertId().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
            )
        ),
        namedtype.NamedType('hashVal', univ.BitString())
    )


# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
class NestedMessageContent(univ.SequenceOf):
    """
    NestedMessageContent ::= PKIMessages
    """
    # Placeholder; re-pointed to PKIMessages() at the bottom of the module
    # once PKIMessages exists.
    componentType = univ.Any()


class DHBMParameter(univ.Sequence):
    """
    DHBMParameter ::= SEQUENCE {
        owf AlgorithmIdentifier,  -- AlgId for a One-Way Function (SHA-1 recommended)
        mac AlgorithmIdentifier   -- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
    }                             -- or HMAC [RFC2104, RFC2202])
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
    )


id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')


class PBMParameter(univ.Sequence):
    """
    PBMParameter ::= SEQUENCE {
        salt           OCTET STRING,
        owf            AlgorithmIdentifier,
        iterationCount INTEGER,
        mac            AlgorithmIdentifier
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('salt', univ.OctetString().subtype(
                subtypeSpec=constraint.ValueSizeConstraint(0, 128)
            )
        ),
        namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('iterationCount', univ.Integer()),
        namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
    )


id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')


class PKIProtection(univ.BitString):
    # PKIProtection ::= BIT STRING (MAC or signature over ProtectedPart)
    pass


# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
nestedMessageContent = NestedMessageContent().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 20))


class PKIBody(univ.Choice):
    """
    PKIBody ::= CHOICE {       -- message-specific body elements
        ir       [0]  CertReqMessages,        --Initialization Request
        ip       [1]  CertRepMessage,         --Initialization Response
        cr       [2]  CertReqMessages,        --Certification Request
        cp       [3]  CertRepMessage,         --Certification Response
        p10cr    [4]  CertificationRequest,   --imported from [PKCS10]
        popdecc  [5]  POPODecKeyChallContent, --pop Challenge
        popdecr  [6]  POPODecKeyRespContent,  --pop Response
        kur      [7]  CertReqMessages,        --Key Update Request
        kup      [8]  CertRepMessage,         --Key Update Response
        krr      [9]  CertReqMessages,        --Key Recovery Request
        krp      [10] KeyRecRepContent,       --Key Recovery Response
        rr       [11] RevReqContent,          --Revocation Request
        rp       [12] RevRepContent,          --Revocation Response
        ccr      [13] CertReqMessages,        --Cross-Cert. Request
        ccp      [14] CertRepMessage,         --Cross-Cert. Response
        ckuann   [15] CAKeyUpdAnnContent,     --CA Key Update Ann.
        cann     [16] CertAnnContent,         --Certificate Ann.
        rann     [17] RevAnnContent,          --Revocation Ann.
        crlann   [18] CRLAnnContent,          --CRL Announcement
        pkiconf  [19] PKIConfirmContent,      --Confirmation
        nested   [20] NestedMessageContent,   --Nested Message
        genm     [21] GenMsgContent,          --General Message
        genp     [22] GenRepContent,          --General Response
        error    [23] ErrorMsgContent,        --Error Message
        certConf [24] CertConfirmContent,     --Certificate confirm
        pollReq  [25] PollReqContent,         --Polling request
        pollRep  [26] PollRepContent          --Polling response
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('ir', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
            )
        ),
        namedtype.NamedType('ip', CertRepMessage().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
            )
        ),
        namedtype.NamedType('cr', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
            )
        ),
        namedtype.NamedType('cp', CertRepMessage().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
            )
        ),
        namedtype.NamedType('p10cr', rfc2314.CertificationRequest().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
            )
        ),
        namedtype.NamedType('popdecc', POPODecKeyChallContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
            )
        ),
        namedtype.NamedType('popdecr', POPODecKeyRespContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
            )
        ),
        namedtype.NamedType('kur', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
            )
        ),
        namedtype.NamedType('kup', CertRepMessage().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
            )
        ),
        namedtype.NamedType('krr', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)
            )
        ),
        namedtype.NamedType('krp', KeyRecRepContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10)
            )
        ),
        namedtype.NamedType('rr', RevReqContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 11)
            )
        ),
        namedtype.NamedType('rp', RevRepContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 12)
            )
        ),
        namedtype.NamedType('ccr', rfc2511.CertReqMessages().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 13)
            )
        ),
        namedtype.NamedType('ccp', CertRepMessage().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 14)
            )
        ),
        namedtype.NamedType('ckuann', CAKeyUpdAnnContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 15)
            )
        ),
        namedtype.NamedType('cann', CertAnnContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 16)
            )
        ),
        namedtype.NamedType('rann', RevAnnContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 17)
            )
        ),
        namedtype.NamedType('crlann', CRLAnnContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 18)
            )
        ),
        namedtype.NamedType('pkiconf', PKIConfirmContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 19)
            )
        ),
        # Recursive alternative; uses the pre-tagged module-level singleton
        # (see nestedMessageContent above) instead of subtyping here.
        namedtype.NamedType('nested', nestedMessageContent),
        # namedtype.NamedType('nested', NestedMessageContent().subtype(
        #     explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
        #     )
        # ),
        namedtype.NamedType('genm', GenMsgContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 21)
            )
        ),
        # NOTE(review): the [22] alternative is named 'gen' here, while the
        # ASN.1 in the docstring (and upstream pyasn1-modules rfc4210) calls
        # it 'genp'.  Renaming would change the component name callers use,
        # so only flagging it.
        namedtype.NamedType('gen', GenRepContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 22)
            )
        ),
        namedtype.NamedType('error', ErrorMsgContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 23)
            )
        ),
        namedtype.NamedType('certConf', CertConfirmContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 24)
            )
        ),
        namedtype.NamedType('pollReq', PollReqContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 25)
            )
        ),
        namedtype.NamedType('pollRep', PollRepContent().subtype(
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 26)
            )
        )
    )


class PKIHeader(univ.Sequence):
    """
    PKIHeader ::= SEQUENCE {
        pvno          INTEGER { cmp1999(1), cmp2000(2) },
        sender        GeneralName,
        recipient     GeneralName,
        messageTime   [0] GeneralizedTime     OPTIONAL,
        protectionAlg [1] AlgorithmIdentifier OPTIONAL,
        senderKID     [2] KeyIdentifier       OPTIONAL,
        recipKID      [3] KeyIdentifier       OPTIONAL,
        transactionID [4] OCTET STRING        OPTIONAL,
        senderNonce   [5] OCTET STRING        OPTIONAL,
        recipNonce    [6] OCTET STRING        OPTIONAL,
        freeText      [7] PKIFreeText         OPTIONAL,
        generalInfo   [8] SEQUENCE SIZE (1..MAX) OF InfoTypeAndValue OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('pvno', univ.Integer(
                namedValues=namedval.NamedValues(
                    ('cmp1999', 1),
                    ('cmp2000', 2)
                )
            )
        ),
        namedtype.NamedType('sender', rfc2459.GeneralName()),
        namedtype.NamedType('recipient', rfc2459.GeneralName()),
        namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
        namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
        namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
        namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
        namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
        # NOTE(review): the [8] tag and the SIZE constraint are applied to the
        # InfoTypeAndValue *component* rather than to the SequenceOf itself,
        # unlike every other tagged field above — verify against RFC 4210
        # before touching.
        namedtype.OptionalNamedType('generalInfo', univ.SequenceOf(
                componentType=InfoTypeAndValue().subtype(
                    subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                    explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)
                )
            )
        )
    )


class ProtectedPart(univ.Sequence):
    """
    ProtectedPart ::= SEQUENCE {
        header PKIHeader,
        body   PKIBody
    }
    """
    # NOTE(review): the second component is named 'infoValue' although the
    # ASN.1 calls it 'body'; callers may depend on the existing name, so it
    # is left unchanged.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PKIHeader()),
        namedtype.NamedType('infoValue', PKIBody())
    )


class PKIMessage(univ.Sequence):
    """
    PKIMessage ::= SEQUENCE {
        header     PKIHeader,
        body       PKIBody,
        protection [0] PKIProtection OPTIONAL,
        extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate OPTIONAL
    }"""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PKIHeader()),
        namedtype.NamedType('body', PKIBody()),
        namedtype.OptionalNamedType('protection', PKIProtection().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('extraCerts', univ.SequenceOf(
                componentType=CMPCertificate()
            ).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
            )
        )
    )


class PKIMessages(univ.SequenceOf):
    """
    PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
    """
    componentType = PKIMessage()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)


# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
NestedMessageContent.componentType = PKIMessages()
nestedMessageContent.componentType = PKIMessages()
gpl-3.0
tonyyarusso/ansible-modules-core
cloud/docker/docker.py
9
55665
#!/usr/bin/python # (c) 2013, Cove Schneider # (c) 2014, Joshua Conner <joshua.conner@gmail.com> # (c) 2014, Pavel Antonov <antonov@adwz.ru> # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ###################################################################### DOCUMENTATION = ''' --- module: docker version_added: "1.4" short_description: manage docker containers description: - Manage the life cycle of docker containers. options: count: description: - Number of matching containers that should be in the desired state. default: 1 image: description: - Container image used to match and launch containers. required: true pull: description: - Control when container images are updated from the C(docker_url) registry. If "missing," images will be pulled only when missing from the host; if '"always," the registry will be checked for a newer version of the image' each time the task executes. default: missing choices: [ "missing", "always" ] version_added: "1.9" command: description: - Command used to match and launch containers. default: null name: description: - Name used to match and uniquely name launched containers. Explicit names are used to uniquely identify a single container or to link among containers. Mutually exclusive with a "count" other than "1". default: null version_added: "1.5" ports: description: - List containing private to public port mapping specification. 
Use docker - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' - where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is - a host interface. default: null version_added: "1.5" expose: description: - List of additional container ports to expose for port mappings or links. If the port is already exposed using EXPOSE in a Dockerfile, you don't need to expose it again. default: null version_added: "1.5" publish_all_ports: description: - Publish all exposed ports to the host interfaces. default: false version_added: "1.5" volumes: description: - List of volumes to mount within the container using docker CLI-style - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' default: null volumes_from: description: - List of names of containers to mount volumes from. default: null links: description: - List of other containers to link within this container with an optional - 'alias. Use docker CLI-style syntax: C(redis:myredis).' default: null version_added: "1.5" memory_limit: description: - RAM allocated to the container as a number of bytes or as a human-readable string like "512MB". Leave as "0" to specify no limit. default: 0 docker_url: description: - URL of the host running the docker daemon. This will default to the env var DOCKER_HOST if unspecified. default: ${DOCKER_HOST} or unix://var/run/docker.sock use_tls: description: - Whether to use tls to connect to the docker server. "no" means not to use tls (and ignore any other tls related parameters). "encrypt" means to use tls to encrypt the connection to the server. "verify" means to also verify that the server's certificate is valid for the server (this both verifies the certificate against the CA and that the certificate was issued for that host. If this is unspecified, tls will only be used if one of the other tls options require it. 
choices: [ "no", "encrypt", "verify" ] version_added: "1.9" tls_client_cert: description: - Path to the PEM-encoded certificate used to authenticate docker client. If specified tls_client_key must be valid default: ${DOCKER_CERT_PATH}/cert.pem version_added: "1.9" tls_client_key: description: - Path to the PEM-encoded key used to authenticate docker client. If specified tls_client_cert must be valid default: ${DOCKER_CERT_PATH}/key.pem version_added: "1.9" tls_ca_cert: description: - Path to a PEM-encoded certificate authority to secure the Docker connection. This has no effect if use_tls is encrypt. default: ${DOCKER_CERT_PATH}/ca.pem version_added: "1.9" tls_hostname: description: - A hostname to check matches what's supplied in the docker server's certificate. If unspecified, the hostname is taken from the docker_url. default: Taken from docker_url version_added: "1.9" docker_api_version: description: - Remote API version to use. This defaults to the current default as specified by docker-py. default: docker-py default remote API version version_added: "1.8" username: description: - Remote API username. default: null password: description: - Remote API password. default: null email: description: - Remote API email. default: null hostname: description: - Container hostname. default: null domainname: description: - Container domain name. default: null env: description: - Pass a dict of environment variables to the container. default: null dns: description: - List of custom DNS servers for the container. required: false default: null detach: description: - Enable detached mode to leave the container running in background. default: true state: description: - Assert the container's desired state. "present" only asserts that the matching containers exist. "started" asserts that the matching containers both exist and are running, but takes no action if any configuration has changed. 
"reloaded" (added in Ansible 1.9) asserts that all matching containers are running and restarts any that have any images or configuration out of date. "restarted" unconditionally restarts (or starts) the matching containers. "stopped" and '"killed" stop and kill all matching containers. "absent" stops and then' removes any matching containers. required: false default: started choices: - present - started - reloaded - restarted - stopped - killed - absent privileged: description: - Whether the container should run in privileged mode or not. default: false lxc_conf: description: - LXC configuration parameters, such as C(lxc.aa_profile:unconfined). default: null stdin_open: description: - Keep stdin open after a container is launched. default: false version_added: "1.6" tty: description: - Allocate a pseudo-tty within the container. default: false version_added: "1.6" net: description: - 'Network mode for the launched container: bridge, none, container:<name|id>' - or host. Requires docker >= 0.11. default: false version_added: "1.8" pid: description: - Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.5.0 required: false default: None aliases: [] version_added: "1.9" registry: description: - Remote registry URL to pull images from. default: DockerHub aliases: [] version_added: "1.8" restart_policy: description: - Container restart policy. choices: ["no", "on-failure", "always"] default: null version_added: "1.9" restart_policy_retry: description: - Maximum number of times to restart a container. Leave as "0" for unlimited retries. default: 0 version_added: "1.9" insecure_registry: description: - Use insecure private registry by HTTP instead of HTTPS. Needed for docker-py >= 0.5.0. 
default: false version_added: "1.9" author: Cove Schneider, Joshua Conner, Pavel Antonov, Ash Wilson requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] ''' EXAMPLES = ''' # Containers are matched either by name (if provided) or by an exact match of # the image they were launched with and the command they're running. The module # can accept either a name to target a container uniquely, or a count to operate # on multiple containers at once when it makes sense to do so. # Ensure that a data container with the name "mydata" exists. If no container # by this name exists, it will be created, but not started. - name: data container docker: name: mydata image: busybox state: present volumes: - /data # Ensure that a Redis server is running, using the volume from the data # container. Expose the default Redis port. - name: redis container docker: name: myredis image: redis command: redis-server --appendonly yes state: started expose: - 6379 volumes_from: - mydata # Ensure that a container of your application server is running. This will: # - pull the latest version of your application image from DockerHub. # - ensure that a container is running with the specified name and exact image. # If any configuration options have changed, the existing container will be # stopped and removed, and a new one will be launched in its place. # - link this container to the existing redis container launched above with # an alias. # - bind TCP port 9000 within the container to port 8080 on all interfaces # on the host. # - bind UDP port 9001 within the container to port 8081 on the host, only # listening on localhost. # - set the environment variable SECRET_KEY to "ssssh". 
- name: application container docker: name: myapplication image: someuser/appimage state: reloaded pull: always links: - "myredis:aliasedredis" ports: - "8080:9000" - "127.0.0.1:8081:9001/udp" env: SECRET_KEY: ssssh # Ensure that exactly five containers of another server are running with this # exact image and command. If fewer than five are running, more will be launched; # if more are running, the excess will be stopped. - name: load-balanced containers docker: state: reloaded count: 5 image: someuser/anotherappimage command: sleep 1d # Unconditionally restart a service container. This may be useful within a # handler, for example. - name: application service docker: name: myservice image: someuser/serviceimage state: restarted # Stop all containers running the specified image. - name: obsolete container docker: image: someuser/oldandbusted state: stopped # Stop and remove a container with the specified name. - name: obsolete container docker: name: ohno image: someuser/oldandbusted state: absent ''' HAS_DOCKER_PY = True import sys import json import os import shlex from urlparse import urlparse try: import docker.client import docker.utils from requests.exceptions import RequestException except ImportError: HAS_DOCKER_PY = False if HAS_DOCKER_PY: try: from docker.errors import APIError as DockerAPIError except ImportError: from docker.client import APIError as DockerAPIError def _human_to_bytes(number): suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] if isinstance(number, int): return number if number[-1] == suffixes[0] and number[-2].isdigit(): return number[:-1] i = 1 for each in suffixes[1:]: if number[-len(each):] == suffixes[i]: return int(number[:-len(each)]) * (1024 ** i) i = i + 1 print "failed=True msg='Could not convert %s to integer'" % (number) sys.exit(1) def _ansible_facts(container_list): return {"docker_containers": container_list} def _docker_id_quirk(inspect): # XXX: some quirk in docker if 'ID' in inspect: inspect['Id'] = inspect['ID'] del 
inspect['ID']  # completes `del inspect['ID']` — the `del` sits on the previous line of this chunk
    return inspect


def get_split_image_tag(image):
    """Split a Docker image reference into a ``(repository, tag)`` pair.

    The tag defaults to ``"latest"`` when the reference carries none; a
    registry/organisation prefix (anything before the last ``/``) is kept
    attached to the repository part.
    """
    # If image contains a host or org name, omit that from our check
    if '/' in image:
        registry, resource = image.rsplit('/', 1)
    else:
        registry, resource = None, image

    # now we can determine if image has a tag
    if ':' in resource:
        resource, tag = resource.split(':', 1)
        if registry:
            resource = '/'.join((registry, resource))
    else:
        tag = "latest"
        resource = image

    return resource, tag


def normalize_image(image):
    """
    Normalize a Docker image name to include the implied :latest tag.
    """
    return ":".join(get_split_image_tag(image))


def is_running(container):
    '''Return True if an inspected container is in a state we consider "running."'''
    # 'Ghost' is only present in older Docker inspect payloads; .get() keeps
    # this safe when the key is absent.
    return container['State']['Running'] == True and not container['State'].get('Ghost', False)


def get_docker_py_versioninfo():
    """Return the installed docker-py version as a comparable tuple.

    Numeric components become ints; a trailing non-numeric suffix (e.g.
    ``1.2.0rc1`` -> ``(1, 2, 0, 'rc1')``) is kept as a string so tuple
    comparison still orders pre-releases after their numeric prefix.
    """
    if hasattr(docker, '__version__'):
        # a '__version__' attribute was added to the module but not until
        # after 0.3.0 was pushed to pypi. If it's there, use it.
        version = []
        for part in docker.__version__.split('.'):
            try:
                version.append(int(part))
            except ValueError:
                # NOTE(review): nesting below is reconstructed from a
                # whitespace-mangled source; `digit`/`nondigit` are only
                # bound when the part has a non-digit char — confirm against
                # upstream ansible-modules-core cloud/docker/docker.py.
                for idx, char in enumerate(part):
                    if not char.isdigit():
                        nondigit = part[idx:]
                        digit = part[:idx]
                if digit:
                    version.append(int(digit))
                if nondigit:
                    version.append(nondigit)
    elif hasattr(docker.Client, '_get_raw_response_socket'):
        # HACK: if '__version__' isn't there, we check for the existence of
        # `_get_raw_response_socket` in the docker.Client class, which was
        # added in 0.3.0
        version = (0, 3, 0)
    else:
        # This is untrue but this module does not function with a version less
        # than 0.3.0 so it's okay to lie here.
        version = (0,)

    return tuple(version)


def check_dependencies(module):
    """
    Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
    helpful error message if it isn't.
    """
    if not HAS_DOCKER_PY:
        module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
    else:
        versioninfo = get_docker_py_versioninfo()
        if versioninfo < (0, 3, 0):
            module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")


class DockerManager(object):
    # NOTE(review): these are *class-level* mutable attributes, shared by
    # every DockerManager instance in the process; this is only safe because
    # each module run builds a single manager — confirm before reusing.
    counters = dict(
        created=0, started=0, stopped=0, killed=0,
        removed=0, restarted=0, pulled=0
    )
    reload_reasons = []
    _capabilities = set()

    # Map optional parameters to minimum (docker-py version, server APIVersion)
    # docker-py version is a tuple of ints because we have to compare them
    # server APIVersion is passed to a docker-py function that takes strings
    _cap_ver_req = {
        'dns': ((0, 3, 0), '1.10'),
        'volumes_from': ((0, 3, 0), '1.10'),
        'restart_policy': ((0, 5, 0), '1.14'),
        'pid': ((1, 0, 0), '1.17'),
        # Clientside only
        'insecure_registry': ((0, 5, 0), '0.0')
    }

    def __init__(self, module):
        self.module = module

        # Parse the `volumes` parameter into docker-py's two shapes: the
        # container-side volume dict and the host->container bind map.
        self.binds = None
        self.volumes = None
        if self.module.params.get('volumes'):
            self.binds = {}
            self.volumes = {}
            vols = self.module.params.get('volumes')
            for vol in vols:
                parts = vol.split(":")
                # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
                if len(parts) == 2:
                    self.volumes[parts[1]] = {}
                    self.binds[parts[0]] = parts[1]
                # with bind mode
                elif len(parts) == 3:
                    if parts[2] not in ['ro', 'rw']:
                        self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
                    ro = parts[2] == 'ro'
                    self.volumes[parts[1]] = {}
                    self.binds[parts[0]] = {'bind': parts[1], 'ro': ro}
                # docker mount (e.g.
/www, mounts a docker volume /www on the container at the same location) else: self.volumes[parts[0]] = {} self.lxc_conf = None if self.module.params.get('lxc_conf'): self.lxc_conf = [] options = self.module.params.get('lxc_conf') for option in options: parts = option.split(':') self.lxc_conf.append({"Key": parts[0], "Value": parts[1]}) self.exposed_ports = None if self.module.params.get('expose'): self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose')) self.port_bindings = None if self.module.params.get('ports'): self.port_bindings = self.get_port_bindings(self.module.params.get('ports')) self.links = None if self.module.params.get('links'): self.links = self.get_links(self.module.params.get('links')) self.env = self.module.params.get('env', None) # Connect to the docker server using any configured host and TLS settings. env_host = os.getenv('DOCKER_HOST') env_docker_verify = os.getenv('DOCKER_TLS_VERIFY') env_cert_path = os.getenv('DOCKER_CERT_PATH') env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME') docker_url = module.params.get('docker_url') if not docker_url: if env_host: docker_url = env_host else: docker_url = 'unix://var/run/docker.sock' docker_api_version = module.params.get('docker_api_version') if not docker_api_version: docker_api_version=docker.client.DEFAULT_DOCKER_API_VERSION tls_client_cert = module.params.get('tls_client_cert', None) if not tls_client_cert and env_cert_path: tls_client_cert = os.path.join(env_cert_path, 'cert.pem') tls_client_key = module.params.get('tls_client_key', None) if not tls_client_key and env_cert_path: tls_client_key = os.path.join(env_cert_path, 'key.pem') tls_ca_cert = module.params.get('tls_ca_cert') if not tls_ca_cert and env_cert_path: tls_ca_cert = os.path.join(env_cert_path, 'ca.pem') tls_hostname = module.params.get('tls_hostname') if tls_hostname is None: if env_docker_hostname: tls_hostname = env_docker_hostname else: parsed_url = urlparse(docker_url) if ':' in parsed_url.netloc: 
tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')] else: tls_hostname = parsed_url if not tls_hostname: tls_hostname = True # use_tls can be one of four values: # no: Do not use tls # encrypt: Use tls. We may do client auth. We will not verify the server # verify: Use tls. We may do client auth. We will verify the server # None: Only use tls if the parameters for client auth were specified # or tls_ca_cert (which requests verifying the server with # a specific ca certificate) use_tls = module.params.get('use_tls') if use_tls is None and env_docker_verify is not None: use_tls = 'verify' tls_config = None if use_tls != 'no': params = {} # Setup client auth if tls_client_cert and tls_client_key: params['client_cert'] = (tls_client_cert, tls_client_key) # We're allowed to verify the connection to the server if use_tls == 'verify' or (use_tls is None and tls_ca_cert): if tls_ca_cert: params['ca_cert'] = tls_ca_cert params['verify'] = True params['assert_hostname'] = tls_hostname else: params['verify'] = True params['assert_hostname'] = tls_hostname elif use_tls == 'encrypt': params['verify'] = False if params: # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 docker_url = docker_url.replace('tcp://', 'https://') tls_config = docker.tls.TLSConfig(**params) self.client = docker.Client(base_url=docker_url, version=docker_api_version, tls=tls_config) self.docker_py_versioninfo = get_docker_py_versioninfo() def _check_capabilties(self): """ Create a list of available capabilities """ api_version = self.client.version()['ApiVersion'] for cap, req_vers in self._cap_ver_req.items(): if (self.docker_py_versioninfo >= req_vers[0] and docker.utils.compare_version(req_vers[1], api_version) >= 0): self._capabilities.add(cap) def ensure_capability(self, capability, fail=True): """ Some of the functionality this ansible module implements are only available in newer versions of docker. Ensure that the capability is available here. 
If fail is set to False then return True or False depending on whether we have the capability. Otherwise, simply fail and exit the module if we lack the capability. """ if not self._capabilities: self._check_capabilties() if capability in self._capabilities: return True if not fail: return False api_version = self.client.version()['ApiVersion'] self.module.fail_json(msg='Specifying the `%s` parameter requires' ' docker-py: %s, docker server apiversion %s; found' ' docker-py: %s, server: %s' % ( capability, '.'.join(map(str, self._cap_ver_req[capability][0])), self._cap_ver_req[capability][1], '.'.join(map(str, self.docker_py_versioninfo)), api_version)) def get_links(self, links): """ Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link """ processed_links = {} for link in links: parsed_link = link.split(':', 1) if(len(parsed_link) == 2): processed_links[parsed_link[0]] = parsed_link[1] else: processed_links[parsed_link[0]] = parsed_link[0] return processed_links def get_exposed_ports(self, expose_list): """ Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. """ if expose_list: exposed = [] for port in expose_list: port = str(port).strip() if port.endswith('/tcp') or port.endswith('/udp'): port_with_proto = tuple(port.split('/')) else: # assume tcp protocol if not specified port_with_proto = (port, 'tcp') exposed.append(port_with_proto) return exposed else: return None def get_port_bindings(self, ports): """ Parse the `ports` string into a port bindings dict for the `start_container` call. 
""" binds = {} for port in ports: # ports could potentially be an array like [80, 443], so we make sure they're strings # before splitting parts = str(port).split(':') container_port = parts[-1] if '/' not in container_port: container_port = int(parts[-1]) p_len = len(parts) if p_len == 1: # Bind `container_port` of the container to a dynamically # allocated TCP port on all available interfaces of the host # machine. bind = ('0.0.0.0',) elif p_len == 2: # Bind `container_port` of the container to port `parts[0]` on # all available interfaces of the host machine. bind = ('0.0.0.0', int(parts[0])) elif p_len == 3: # Bind `container_port` of the container to port `parts[1]` on # IP `parts[0]` of the host machine. If `parts[1]` empty bind # to a dynamically allocacted port of IP `parts[0]`. bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) if container_port in binds: old_bind = binds[container_port] if isinstance(old_bind, list): # append to list if it already exists old_bind.append(bind) else: # otherwise create list that contains the old and new binds binds[container_port] = [binds[container_port], bind] else: binds[container_port] = bind return binds def get_summary_message(self): ''' Generate a message that briefly describes the actions taken by this task, in English. ''' parts = [] for k, v in self.counters.iteritems(): if v == 0: continue if v == 1: plural = "" else: plural = "s" parts.append("%s %d container%s" % (k, v, plural)) if parts: return ", ".join(parts) + "." else: return "No action taken." def get_reload_reason_message(self): ''' Generate a message describing why any reloaded containers were reloaded. 
''' if self.reload_reasons: return ", ".join(self.reload_reasons) else: return None def get_summary_counters_msg(self): msg = "" for k, v in self.counters.iteritems(): msg = msg + "%s %d " % (k, v) return msg def increment_counter(self, name): self.counters[name] = self.counters[name] + 1 def has_changed(self): for k, v in self.counters.iteritems(): if v > 0: return True return False def get_inspect_image(self): try: return self.client.inspect_image(self.module.params.get('image')) except DockerAPIError as e: if e.response.status_code == 404: return None else: raise e def get_image_repo_tags(self): image, tag = get_split_image_tag(self.module.params.get('image')) if tag is None: tag = 'latest' resource = '%s:%s' % (image, tag) for image in self.client.images(name=image): if resource in image.get('RepoTags', []): return image['RepoTags'] return None def get_inspect_containers(self, containers): inspect = [] for i in containers: details = self.client.inspect_container(i['Id']) details = _docker_id_quirk(details) inspect.append(details) return inspect def get_differing_containers(self): """ Inspect all matching, running containers, and return those that were started with parameters that differ from the ones that are provided during this module run. A list containing the differing containers will be returned, and a short string describing the specific difference encountered in each container will be appended to reload_reasons. This generates the set of containers that need to be stopped and started with new parameters with state=reloaded. """ running = self.get_running_containers() current = self.get_inspect_containers(running) image = self.get_inspect_image() if image is None: # The image isn't present. Assume that we're about to pull a new # tag and *everything* will be restarted. # # This will give false positives if you untag an image on the host # and there's nothing more to pull. 
return current differing = [] for container in current: # IMAGE # Compare the image by ID rather than name, so that containers # will be restarted when new versions of an existing image are # pulled. if container['Image'] != image['Id']: self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id'])) differing.append(container) continue # COMMAND expected_command = self.module.params.get('command') if expected_command: expected_command = shlex.split(expected_command) actual_command = container["Config"]["Cmd"] if actual_command != expected_command: self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command)) differing.append(container) continue # EXPOSED PORTS expected_exposed_ports = set((image['ContainerConfig']['ExposedPorts'] or {}).keys()) for p in (self.exposed_ports or []): expected_exposed_ports.add("/".join(p)) actually_exposed_ports = set((container["Config"]["ExposedPorts"] or {}).keys()) if actually_exposed_ports != expected_exposed_ports: self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports)) differing.append(container) continue # VOLUMES expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) if self.volumes: expected_volume_keys.update(self.volumes.keys()) actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) if actual_volume_keys != expected_volume_keys: self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys)) differing.append(container) continue # MEM_LIMIT expected_mem = _human_to_bytes(self.module.params.get('memory_limit')) actual_mem = container['Config']['Memory'] if expected_mem and actual_mem != expected_mem: self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem)) differing.append(container) continue # ENVIRONMENT # actual_env is likely to include environment variables injected by # the Dockerfile. 
expected_env = {} for image_env in image['ContainerConfig']['Env'] or []: name, value = image_env.split('=', 1) expected_env[name] = value if self.env: for name, value in self.env.iteritems(): expected_env[name] = str(value) actual_env = {} for container_env in container['Config']['Env'] or []: name, value = container_env.split('=', 1) actual_env[name] = value if actual_env != expected_env: # Don't include the environment difference in the output. self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env)) differing.append(container) continue # HOSTNAME expected_hostname = self.module.params.get('hostname') actual_hostname = container['Config']['Hostname'] if expected_hostname and actual_hostname != expected_hostname: self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname)) differing.append(container) continue # DOMAINNAME expected_domainname = self.module.params.get('domainname') actual_domainname = container['Config']['Domainname'] if expected_domainname and actual_domainname != expected_domainname: self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname)) differing.append(container) continue # DETACH # We don't have to check for undetached containers. If it wasn't # detached, it would have stopped before the playbook continued! # NAME # We also don't have to check name, because this is one of the # criteria that's used to determine which container(s) match in # the first place. 
# STDIN_OPEN expected_stdin_open = self.module.params.get('stdin_open') actual_stdin_open = container['Config']['AttachStdin'] if actual_stdin_open != expected_stdin_open: self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open)) differing.append(container) continue # TTY expected_tty = self.module.params.get('tty') actual_tty = container['Config']['Tty'] if actual_tty != expected_tty: self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty)) differing.append(container) continue # -- "start" call differences -- # LXC_CONF if self.lxc_conf: expected_lxc = set(self.lxc_conf) actual_lxc = set(container['HostConfig']['LxcConf'] or []) if actual_lxc != expected_lxc: self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc)) differing.append(container) continue # BINDS expected_binds = set() if self.binds: for host_path, config in self.binds.iteritems(): if isinstance(config, dict): container_path = config['bind'] if config['ro']: mode = 'ro' else: mode = 'rw' else: container_path = config mode = 'rw' expected_binds.add("{0}:{1}:{2}".format(host_path, container_path, mode)) actual_binds = set() for bind in (container['HostConfig']['Binds'] or []): if len(bind.split(':')) == 2: actual_binds.add(bind + ":rw") else: actual_binds.add(bind) if actual_binds != expected_binds: self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds)) differing.append(container) continue # PORT BINDINGS expected_bound_ports = {} if self.port_bindings: for container_port, config in self.port_bindings.iteritems(): if isinstance(container_port, int): container_port = "{0}/tcp".format(container_port) bind = {} if len(config) == 1: bind['HostIp'] = "0.0.0.0" bind['HostPort'] = "" else: bind['HostIp'] = config[0] bind['HostPort'] = str(config[1]) expected_bound_ports[container_port] = [bind] actual_bound_ports = container['HostConfig']['PortBindings'] or {} if actual_bound_ports != 
expected_bound_ports: self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports)) differing.append(container) continue # PUBLISHING ALL PORTS # What we really care about is the set of ports that is actually # published. That should be caught above. # PRIVILEGED expected_privileged = self.module.params.get('privileged') actual_privileged = container['HostConfig']['Privileged'] if actual_privileged != expected_privileged: self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged)) differing.append(container) continue # LINKS expected_links = set() for link, alias in (self.links or {}).iteritems(): expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias)) actual_links = set(container['HostConfig']['Links'] or []) if actual_links != expected_links: self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links)) differing.append(container) continue # NETWORK MODE expected_netmode = self.module.params.get('net') or '' actual_netmode = container['HostConfig']['NetworkMode'] if actual_netmode != expected_netmode: self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) differing.append(container) continue # DNS expected_dns = set(self.module.params.get('dns') or []) actual_dns = set(container['HostConfig']['Dns'] or []) if actual_dns != expected_dns: self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns)) differing.append(container) continue # VOLUMES_FROM expected_volumes_from = set(self.module.params.get('volumes_from') or []) actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or []) if actual_volumes_from != expected_volumes_from: self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) differing.append(container) return differing def get_deployed_containers(self): """ Return any matching containers that are already present. 
""" command = self.module.params.get('command') if command: command = command.strip() name = self.module.params.get('name') if name and not name.startswith('/'): name = '/' + name deployed = [] # "images" will be a collection of equivalent "name:tag" image names # that map to the same Docker image. inspected = self.get_inspect_image() if inspected: repo_tags = self.get_image_repo_tags() else: repo_tags = [normalize_image(self.module.params.get('image'))] for i in self.client.containers(all=True): details = None if name: matches = name in i.get('Names', []) else: details = self.client.inspect_container(i['Id']) details = _docker_id_quirk(details) running_image = normalize_image(details['Config']['Image']) running_command = i['Command'].strip() image_matches = running_image in repo_tags # if a container has an entrypoint, `command` will actually equal # '{} {}'.format(entrypoint, command) command_matches = (not command or running_command.endswith(command)) matches = image_matches and command_matches if matches: if not details: details = self.client.inspect_container(i['Id']) details = _docker_id_quirk(details) deployed.append(details) return deployed def get_running_containers(self): return [c for c in self.get_deployed_containers() if is_running(c)] def pull_image(self): extra_params = {} if self.module.params.get('insecure_registry'): if self.ensure_capability('insecure_registry', fail=False): extra_params['insecure_registry'] = self.module.params.get('insecure_registry') resource = self.module.params.get('image') image, tag = get_split_image_tag(resource) if self.module.params.get('username'): try: self.client.login( self.module.params.get('username'), password=self.module.params.get('password'), email=self.module.params.get('email'), registry=self.module.params.get('registry') ) except Exception as e: self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", error=repr(e)) try: changes = list(self.client.pull(image, 
tag=tag, stream=True, **extra_params)) try: last = changes[-1] except IndexError: last = '{}' status = json.loads(last).get('status', '') if status.startswith('Status: Image is up to date for'): # Image is already up to date. Don't increment the counter. pass elif (status.startswith('Status: Downloaded newer image for') or status.startswith('Download complete')): # Image was updated. Increment the pull counter. self.increment_counter('pulled') else: # Unrecognized status string. self.module.fail_json(msg="Unrecognized status from pull.", status=status, changes=changes) except Exception as e: self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) def create_containers(self, count=1): params = {'image': self.module.params.get('image'), 'command': self.module.params.get('command'), 'ports': self.exposed_ports, 'volumes': self.volumes, 'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')), 'environment': self.env, 'hostname': self.module.params.get('hostname'), 'domainname': self.module.params.get('domainname'), 'detach': self.module.params.get('detach'), 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), } def do_create(count, params): results = [] for _ in range(count): result = self.client.create_container(**params) self.increment_counter('created') results.append(result) return results try: containers = do_create(count, params) except: self.pull_image() containers = do_create(count, params) return containers def start_containers(self, containers): params = { 'lxc_conf': self.lxc_conf, 'binds': self.binds, 'port_bindings': self.port_bindings, 'publish_all_ports': self.module.params.get('publish_all_ports'), 'privileged': self.module.params.get('privileged'), 'links': self.links, 'network_mode': self.module.params.get('net'), } optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry', 'pid'): 
optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: self.ensure_capability('dns') params['dns'] = optionals['dns'] if optionals['volumes_from'] is not None: self.ensure_capability('volumes_from') params['volumes_from'] = optionals['volumes_from'] if optionals['restart_policy'] is not None: self.ensure_capability('restart_policy') params['restart_policy'] = { 'Name': optionals['restart_policy'] } if params['restart_policy']['Name'] == 'on-failure': params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] if optionals['pid'] is not None: self.ensure_capability('pid') params['pid_mode'] = optionals['pid'] for i in containers: self.client.start(i['Id'], **params) self.increment_counter('started') def stop_containers(self, containers): for i in containers: self.client.stop(i['Id']) self.increment_counter('stopped') return [self.client.wait(i['Id']) for i in containers] def remove_containers(self, containers): for i in containers: self.client.remove_container(i['Id']) self.increment_counter('removed') def kill_containers(self, containers): for i in containers: self.client.kill(i['Id']) self.increment_counter('killed') def restart_containers(self, containers): for i in containers: self.client.restart(i['Id']) self.increment_counter('restarted') class ContainerSet: def __init__(self, manager): self.manager = manager self.running = [] self.deployed = [] self.changed = [] def refresh(self): ''' Update our view of the matching containers from the Docker daemon. ''' self.deployed = self.manager.get_deployed_containers() self.running = [c for c in self.deployed if is_running(c)] def notice_changed(self, containers): ''' Record a collection of containers as "changed". 
''' self.changed.extend(containers) def present(manager, containers, count, name): '''Ensure that exactly `count` matching containers exist in any state.''' containers.refresh() delta = count - len(containers.deployed) if delta > 0: containers.notice_changed(manager.create_containers(delta)) if delta < 0: # If both running and stopped containers exist, remove # stopped containers first. containers.deployed.sort(lambda cx, cy: cmp(is_running(cx), is_running(cy))) to_stop = [] to_remove = [] for c in containers.deployed[0:-delta]: if is_running(c): to_stop.append(c) to_remove.append(c) manager.stop_containers(to_stop) manager.remove_containers(to_remove) containers.notice_changed(to_remove) def started(manager, containers, count, name): '''Ensure that exactly `count` matching containers exist and are running.''' containers.refresh() delta = count - len(containers.running) if delta > 0: if name and containers.deployed: # A stopped container exists with the requested name. # Clean it up before attempting to start a new one. manager.remove_containers(containers.deployed) created = manager.create_containers(delta) manager.start_containers(created) containers.notice_changed(created) if delta < 0: excess = containers.running[0:-delta] manager.stop_containers(excess) manager.remove_containers(excess) containers.notice_changed(excess) def reloaded(manager, containers, count, name): ''' Ensure that exactly `count` matching containers exist and are running. If any associated settings have been changed (volumes, ports or so on), restart those containers. ''' containers.refresh() for container in manager.get_differing_containers(): manager.stop_containers([container]) manager.remove_containers([container]) started(manager, containers, count, name) def restarted(manager, containers, count, name): ''' Ensure that exactly `count` matching containers exist and are running. Unconditionally restart any that were already running. 
''' containers.refresh() manager.restart_containers(containers.running) started(manager, containers, count, name) def stopped(manager, containers, count, name): '''Stop any matching containers that are running.''' containers.refresh() manager.stop_containers(containers.running) containers.notice_changed(containers.running) def killed(manager, containers, count, name): '''Kill any matching containers that are running.''' containers.refresh() manager.kill_containers(containers.running) containers.notice_changed(containers.running) def absent(manager, containers, count, name): '''Stop and remove any matching containers.''' containers.refresh() manager.stop_containers(containers.running) manager.remove_containers(containers.deployed) containers.notice_changed(containers.deployed) def main(): module = AnsibleModule( argument_spec = dict( count = dict(default=1), image = dict(required=True), pull = dict(required=False, default='missing', choices=['missing', 'always']), command = dict(required=False, default=None), expose = dict(required=False, default=None, type='list'), ports = dict(required=False, default=None, type='list'), publish_all_ports = dict(default=False, type='bool'), volumes = dict(default=None, type='list'), volumes_from = dict(default=None), links = dict(default=None, type='list'), memory_limit = dict(default=0), memory_swap = dict(default=0), docker_url = dict(), use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']), tls_client_cert = dict(required=False, default=None, type='str'), tls_client_key = dict(required=False, default=None, type='str'), tls_ca_cert = dict(required=False, default=None, type='str'), tls_hostname = dict(required=False, type='str', default=None), docker_api_version = dict(), username = dict(default=None), password = dict(), email = dict(), registry = dict(), hostname = dict(default=None), domainname = dict(default=None), env = dict(type='dict'), dns = dict(), detach = dict(default=True, type='bool'), state = 
dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']), restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), restart_policy_retry = dict(default=0, type='int'), debug = dict(default=False, type='bool'), privileged = dict(default=False, type='bool'), stdin_open = dict(default=False, type='bool'), tty = dict(default=False, type='bool'), lxc_conf = dict(default=None, type='list'), name = dict(default=None), net = dict(default=None), pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], ), ) check_dependencies(module) try: manager = DockerManager(module) count = int(module.params.get('count')) name = module.params.get('name') pull = module.params.get('pull') state = module.params.get('state') if state == 'running': # Renamed running to started in 1.9 state = 'started' if count < 0: module.fail_json(msg="Count must be greater than zero") if count > 1 and name: module.fail_json(msg="Count and name must not be used together") # Explicitly pull new container images, if requested. # Do this before noticing running and deployed containers so that the image names will differ # if a newer image has been pulled. if pull == "always": manager.pull_image() containers = ContainerSet(manager) if state == 'present': present(manager, containers, count, name) elif state == 'started': started(manager, containers, count, name) elif state == 'reloaded': reloaded(manager, containers, count, name) elif state == 'restarted': restarted(manager, containers, count, name) elif state == 'stopped': stopped(manager, containers, count, name) elif state == 'killed': killed(manager, containers, count, name) elif state == 'absent': absent(manager, containers, count, name) else: module.fail_json(msg='Unrecognized state %s. Must be one of: ' 'present; started; reloaded; restarted; ' 'stopped; killed; absent.' 
% state) module.exit_json(changed=manager.has_changed(), msg=manager.get_summary_message(), summary=manager.counters, containers=containers.changed, reload_reasons=manager.get_reload_reason_message(), ansible_facts=_ansible_facts(containers.changed)) except DockerAPIError as e: module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) except RequestException as e: module.fail_json(changed=manager.has_changed(), msg=repr(e)) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
kumarkrishna/sympy
sympy/sets/conditionset.py
2
1062
from __future__ import print_function, division

from sympy.core.basic import Basic
from sympy.logic.boolalg import And
from sympy.sets.sets import (Set, Interval, Intersection, EmptySet, Union,
                             FiniteSet)


class ConditionSet(Set):
    """
    Set of elements which satisfies a given condition.

    {x | condition(x) is True for x in S}

    Examples
    ========

    >>> from sympy import Symbol, S, ConditionSet, Lambda, pi, Eq, sin, Interval
    >>> x = Symbol('x')
    >>> sin_sols = ConditionSet(Lambda(x, Eq(sin(x), 0)), Interval(0, 2*pi))
    >>> 2*pi in sin_sols
    True
    >>> pi/2 in sin_sols
    False
    >>> 3*pi in sin_sols
    False
    >>> 5 in ConditionSet(Lambda(x, x**2 > 4), S.Reals)
    True
    """
    def __new__(cls, condition, base_set):
        """Build the set from a callable ``condition`` (e.g. a Lambda) and a
        ``base_set`` to filter.

        NOTE(review): goes through ``Basic.__new__`` directly, bypassing any
        ``Set.__new__`` processing — presumably intentional so the condition
        is stored unevaluated; no validation of the arguments is performed.
        """
        return Basic.__new__(cls, condition, base_set)

    # Stored positionally in self.args: args[0] is the condition callable,
    # args[1] is the base set being filtered.
    condition = property(lambda self: self.args[0])
    base_set = property(lambda self: self.args[1])

    def contains(self, other):
        """Return the membership condition for ``other``.

        Membership requires both that the condition holds when applied to
        ``other`` and that ``other`` belongs to the base set; the two parts
        are combined with a logical ``And`` (which may stay symbolic).
        """
        return And(self.condition(other), self.base_set.contains(other))
bsd-3-clause
shownomercy/django
tests/middleware/test_security.py
291
7781
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import override_settings


class SecurityMiddlewareTest(SimpleTestCase):
    """Tests for django.middleware.security.SecurityMiddleware.

    All middleware behavior is driven by the ``SECURE_*`` settings; each
    test overrides exactly the settings it exercises.  (Fix: several
    ``override_settings`` calls previously used wrong names — missing the
    ``SECURE_`` prefix or misspelling ``NOSNIFF`` — so they overrode unused
    settings and the tests only passed because the defaults happened to
    match.)
    """

    @property
    def middleware(self):
        # Imported lazily so merely importing this module does not require
        # middleware-related settings to be configured.
        from django.middleware.security import SecurityMiddleware
        return SecurityMiddleware()

    @property
    def secure_request_kwargs(self):
        # Extra WSGI environ needed to make RequestFactory produce an
        # https (is_secure() == True) request.
        return {"wsgi.url_scheme": "https"}

    def response(self, *args, **kwargs):
        """Build an HttpResponse, applying any ``headers`` dict passed in."""
        headers = kwargs.pop("headers", {})
        response = HttpResponse(*args, **kwargs)
        for k, v in headers.items():
            response[k] = v
        return response

    def process_response(self, *args, **kwargs):
        """Run a request/response pair through the middleware.

        If process_request short-circuits (e.g. an SSL redirect), return
        that response instead of calling process_response.
        """
        request_kwargs = {}
        if kwargs.pop("secure", False):
            request_kwargs.update(self.secure_request_kwargs)
        request = (kwargs.pop("request", None) or
                   self.request.get("/some/url", **request_kwargs))
        ret = self.middleware.process_request(request)
        if ret:
            return ret
        return self.middleware.process_response(
            request, self.response(*args, **kwargs))

    request = RequestFactory()

    def process_request(self, method, *args, **kwargs):
        """Run only the request phase of the middleware and return its result."""
        if kwargs.pop("secure", False):
            kwargs.update(self.secure_request_kwargs)
        req = getattr(self.request, method.lower())(*args, **kwargs)
        return self.middleware.process_request(req)

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_on(self):
        """
        With HSTS_SECONDS=3600, the middleware adds
        "strict-transport-security: max-age=3600" to the response.
        """
        self.assertEqual(
            self.process_response(secure=True)["strict-transport-security"],
            "max-age=3600")

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_already_present(self):
        """
        The middleware will not override a "strict-transport-security" header
        already present in the response.
        """
        response = self.process_response(
            secure=True,
            headers={"strict-transport-security": "max-age=7200"})
        self.assertEqual(response["strict-transport-security"], "max-age=7200")

    # Fix: was HSTS_SECONDS=3600 (no SECURE_ prefix), which left the real
    # setting at its default of 0 and made this test vacuous.
    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_only_if_secure(self):
        """
        The "strict-transport-security" header is not added to responses going
        over an insecure connection.
        """
        self.assertNotIn("strict-transport-security",
                         self.process_response(secure=False))

    # Fix: was HSTS_SECONDS=0 (no SECURE_ prefix).
    @override_settings(SECURE_HSTS_SECONDS=0)
    def test_sts_off(self):
        """
        With HSTS_SECONDS of 0, the middleware does not add a
        "strict-transport-security" header to the response.
        """
        self.assertNotIn("strict-transport-security",
                         self.process_response(secure=True))

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
    def test_sts_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS True, the
        middleware adds a "strict-transport-security" header with the
        "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(
            response["strict-transport-security"],
            "max-age=600; includeSubDomains",
        )

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
    def test_sts_no_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS False, the
        middleware adds a "strict-transport-security" header without the
        "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(response["strict-transport-security"], "max-age=600")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_on(self):
        """
        With CONTENT_TYPE_NOSNIFF set to True, the middleware adds
        "x-content-type-options: nosniff" header to the response.
        """
        self.assertEqual(self.process_response()["x-content-type-options"],
                         "nosniff")

    # Fix: was SECURE_CONTENT_TYPE_NO_SNIFF (misspelled setting name).
    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_already_present(self):
        """
        The middleware will not override an "x-content-type-options" header
        already present in the response.
        """
        response = self.process_response(
            secure=True, headers={"x-content-type-options": "foo"})
        self.assertEqual(response["x-content-type-options"], "foo")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
    def test_content_type_off(self):
        """
        With CONTENT_TYPE_NOSNIFF False, the middleware does not add an
        "x-content-type-options" header to the response.
        """
        self.assertNotIn("x-content-type-options", self.process_response())

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_on(self):
        """
        With BROWSER_XSS_FILTER set to True, the middleware adds
        "x-xss-protection: 1; mode=block" header to the response.
        """
        self.assertEqual(
            self.process_response()["x-xss-protection"],
            "1; mode=block")

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_already_present(self):
        """
        The middleware will not override an "x-xss-protection" header
        already present in the response.
        """
        response = self.process_response(
            secure=True, headers={"x-xss-protection": "foo"})
        self.assertEqual(response["x-xss-protection"], "foo")

    # Fix: was BROWSER_XSS_FILTER=False (no SECURE_ prefix).
    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
    def test_xss_filter_off(self):
        """
        With BROWSER_XSS_FILTER set to False, the middleware does not add an
        "x-xss-protection" header to the response.
        """
        self.assertNotIn("x-xss-protection", self.process_response())

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_ssl_redirect_on(self):
        """
        With SSL_REDIRECT True, the middleware redirects any non-secure
        requests to the https:// version of the same URL.
        """
        ret = self.process_request("get", "/some/url?query=string")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(
            ret["Location"], "https://testserver/some/url?query=string")

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_no_redirect_ssl(self):
        """
        The middleware does not redirect secure requests.
        """
        ret = self.process_request("get", "/some/url", secure=True)
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
    def test_redirect_exempt(self):
        """
        The middleware does not redirect requests with URL path matching an
        exempt pattern.
        """
        ret = self.process_request("get", "/insecure/page")
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
    def test_redirect_ssl_host(self):
        """
        The middleware redirects to SSL_HOST if given.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(ret["Location"], "https://secure.example.com/some/url")

    @override_settings(SECURE_SSL_REDIRECT=False)
    def test_ssl_redirect_off(self):
        """
        With SSL_REDIRECT False, the middleware does no redirect.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret, None)
bsd-3-clause
kifcaliph/odoo
addons/point_of_sale/report/pos_invoice.py
317
2393
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import osv
from openerp.tools.translate import _


class PosInvoiceReport(osv.AbstractModel):
    """QWeb report that renders the invoices linked to POS orders by
    delegating to the standard ``account.report_invoice`` report."""
    _name = 'report.point_of_sale.report_invoice'

    def render_html(self, cr, uid, ids, data=None, context=None):
        """Render ``account.report_invoice`` for the invoices of the POS
        orders in ``ids``.

        :param ids: ids of ``pos.order`` records to print
        :raises osv.except_osv: if any selected order has no linked invoice
        :return: the rendered HTML report
        """
        report_obj = self.pool['report']
        posorder_obj = self.pool['pos.order']
        report = report_obj._get_report_from_name(cr, uid, 'account.report_invoice')
        selected_orders = posorder_obj.browse(cr, uid, ids, context=context)

        # Collect the invoice ids to print and remember which orders are
        # actually invoiced so we can report the ones that are not.
        ids_to_print = []
        invoiced_posorders_ids = []
        for order in selected_orders:
            if order.invoice_id:
                ids_to_print.append(order.invoice_id.id)
                invoiced_posorders_ids.append(order.id)

        not_invoiced_orders_ids = list(set(ids) - set(invoiced_posorders_ids))
        if not_invoiced_orders_ids:
            not_invoiced_posorders = posorder_obj.browse(cr, uid, not_invoiced_orders_ids, context=context)
            not_invoiced_orders_names = [order.name for order in not_invoiced_posorders]
            # Fix: interpolate AFTER translation.  The original code did
            # _('... %s.' % names), which passed an already-interpolated,
            # dynamic string to _() so the gettext catalog lookup could
            # never match and the message was never translated.
            raise osv.except_osv(_('Error!'),
                                 _('No link to an invoice for %s.') % ', '.join(not_invoiced_orders_names))

        docargs = {
            'doc_ids': ids_to_print,
            'doc_model': report.model,
            'docs': selected_orders,
        }
        return report_obj.render(cr, uid, ids, 'account.report_invoice', docargs, context=context)
agpl-3.0
mrfuxi/docker-py
docker/utils/types.py
43
2304
import six


class LogConfigTypesEnum(object):
    """Known values for a container log driver ``Type``."""
    _values = (
        'json-file',
        'syslog',
        'journald',
        'gelf',
        'fluentd',
        'none'
    )
    JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values


class DictType(dict):
    """A dict subclass initialised from another mapping; base class for the
    typed API payload objects below (LogConfig, Ulimit)."""
    def __init__(self, init):
        for k, v in six.iteritems(init):
            self[k] = v


class LogConfig(DictType):
    """Log configuration payload, serialised as ``{'Type': ..., 'Config': {...}}``.

    Accepts both lowercase and API-cased keyword names (``type``/``Type``,
    ``config``/``Config``).
    """
    types = LogConfigTypesEnum

    def __init__(self, **kwargs):
        log_driver_type = kwargs.get('type', kwargs.get('Type'))
        config = kwargs.get('config', kwargs.get('Config')) or {}

        if config and not isinstance(config, dict):
            raise ValueError("LogConfig.config must be a dictionary")

        super(LogConfig, self).__init__({
            'Type': log_driver_type,
            'Config': config
        })

    @property
    def type(self):
        return self['Type']

    @type.setter
    def type(self, value):
        self['Type'] = value

    @property
    def config(self):
        return self['Config']

    def set_config_value(self, key, value):
        """Set a single driver-specific option."""
        self.config[key] = value

    def unset_config(self, key):
        """Remove a driver-specific option if present (no-op otherwise)."""
        if key in self.config:
            del self.config[key]


class Ulimit(DictType):
    """A single ulimit entry, serialised as ``{'Name': ..., 'Soft': ..., 'Hard': ...}``.

    Accepts both lowercase and API-cased keyword names.

    :raises ValueError: if ``name`` is not a string, or ``soft``/``hard`` is
        supplied but is not an integer
    """
    def __init__(self, **kwargs):
        name = kwargs.get('name', kwargs.get('Name'))
        soft = kwargs.get('soft', kwargs.get('Soft'))
        hard = kwargs.get('hard', kwargs.get('Hard'))
        if not isinstance(name, six.string_types):
            raise ValueError("Ulimit.name must be a string")
        # Compare against None rather than truthiness: 0 is a legitimate
        # ulimit value, and the previous `if soft and ...` test silently
        # skipped type validation whenever soft/hard was 0.
        if soft is not None and not isinstance(soft, int):
            raise ValueError("Ulimit.soft must be an integer")
        if hard is not None and not isinstance(hard, int):
            raise ValueError("Ulimit.hard must be an integer")
        super(Ulimit, self).__init__({
            'Name': name,
            'Soft': soft,
            'Hard': hard
        })

    @property
    def name(self):
        return self['Name']

    @name.setter
    def name(self, value):
        self['Name'] = value

    @property
    def soft(self):
        return self.get('Soft')

    @soft.setter
    def soft(self, value):
        self['Soft'] = value

    @property
    def hard(self):
        return self.get('Hard')

    @hard.setter
    def hard(self, value):
        self['Hard'] = value
apache-2.0
abdellatifkarroum/odoo
openerp/conf/deprecation.py
380
2602
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ Regroup variables for deprecated features. To keep the OpenERP server backward compatible with older modules, some additional code is needed throughout the core library. This module keeps track of those specific measures by providing variables that can be unset by the user to check if her code is future proof. In a perfect world, all these variables are set to False, the corresponding code removed, and thus these variables made unnecessary. """ # If True, the Python modules inside the openerp namespace are made available # without the 'openerp.' prefix. E.g. openerp.osv.osv and osv.osv refer to the # same module. # Introduced around 2011.02. # Change to False around 2013.02. open_openerp_namespace = False # If True, openerp.netsvc.LocalService() can be used to lookup reports or to # access openerp.workflow. # Introduced around 2013.03. # Among the related code: # - The openerp.netsvc.LocalService() function. # - The openerp.report.interface.report_int._reports dictionary. 
# - The register attribute in openerp.report.interface.report_int (and in its # - auto column in ir.actions.report.xml. # inheriting classes). allow_local_service = True # Applies for the register attribute in openerp.report.interface.report_int. # See comments for allow_local_service above. # Introduced around 2013.03. allow_report_int_registration = True # If True, the functions in openerp.pooler can be used. # Introduced around 2013.03 (actually they are deprecated since much longer # but no warning was dispayed in the logs). openerp_pooler = True # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ahmadpriatama/Flask-Simple-Ecommerce
appname/controllers/main.py
1
1053
from flask import Blueprint, render_template, flash, request, redirect, url_for
from flask.ext.login import login_user, logout_user, login_required

from appname.extensions import cache
from appname.forms import LoginForm
from appname.models import User

main = Blueprint('main', __name__)


@main.route('/')
@cache.cached(timeout=1000)
def home():
    """Render the (cached) landing page."""
    return render_template('index.html')


@main.route("/login", methods=["GET", "POST"])
def login():
    """Log a user in by username.

    Renders the login form on GET (or on validation failure); on success,
    logs the user in and redirects to ``?next=`` or the home page.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # Use .first() instead of .one(): .one() raises NoResultFound for an
        # unknown username, which surfaced to the visitor as a 500 error
        # instead of a friendly form message.
        user = User.query.filter_by(username=form.username.data).first()
        if user is None:
            flash("Invalid username.", "danger")
        else:
            # NOTE(review): no password verification happens here; presumably
            # LoginForm validates credentials -- confirm before relying on
            # this in production.
            login_user(user)
            flash("Logged in successfully.", "success")
            # NOTE(review): redirecting to an unvalidated ?next= parameter is
            # an open-redirect risk; consider restricting it to local URLs.
            return redirect(request.args.get("next") or url_for(".home"))
    return render_template("login.html", form=form)


@main.route("/logout")
def logout():
    """Log the current user out and return to the home page."""
    logout_user()
    flash("You have been logged out.", "success")
    return redirect(url_for(".home"))


@main.route("/restricted")
@login_required
def restricted():
    """Demo page visible only to authenticated users."""
    return "You can only see this if you are logged in!", 200
bsd-2-clause
Julien-Blanc/djangocms-cascade
cmsplugin_cascade/link/plugin_base.py
3
3089
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models import get_model
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from cms.utils.compat.dj import python_2_unicode_compatible
from cmsplugin_cascade.fields import PartialFormField
from cmsplugin_cascade.plugin_base import CascadePluginBase
from cmsplugin_cascade.utils import resolve_dependencies
from .forms import LinkForm


class LinkPluginBase(CascadePluginBase):
    """Common base class for Cascade plugins that render an ``<a>`` element."""
    text_enabled = True
    allow_children = False
    parent_classes = []
    require_parent = False
    # Extra per-instance settings, stored in the plugin's glossary and edited
    # through the plugin change form.
    glossary_fields = (
        PartialFormField('target',
            widgets.RadioSelect(choices=(('', _("Same Window")), ('_blank', _("New Window")),
                                         ('_parent', _("Parent Window")), ('_top', _("Topmost Frame")),)),
            initial='',
            label=_("Link Target"),
            help_text=_("Open Link in other target.")
        ),
        PartialFormField('title',
            widgets.TextInput(),
            label=_("Title"),
            help_text=_("Link's Title")
        ),
    )
    # Glossary keys copied verbatim onto the rendered HTML tag.
    html_tag_attributes = {'title': 'title', 'target': 'target'}
    # map field from glossary to these form fields
    glossary_field_map = {'link': ('link_type', 'cms_page', 'ext_url', 'mail_to',)}

    class Media:
        js = resolve_dependencies('cascade/js/admin/linkpluginbase.js')

    @classmethod
    def get_link(cls, obj):
        """Return the URL stored in ``obj``'s glossary.

        Handles external URLs ('exturl'), mail links ('email'), or a
        referenced model instance; implicitly returns None when the link
        cannot be resolved.
        """
        link = obj.glossary.get('link', {})
        linktype = link.get('type')
        if linktype == 'exturl':
            return '{url}'.format(**link)
        if linktype == 'email':
            return 'mailto:{email}'.format(**link)
        # otherwise try to resolve by model
        if 'model' in link and 'pk' in link:
            if not hasattr(obj, '_link_model'):
                # Cache the looked-up instance (or None) on the element so
                # repeated calls during one rendering pass hit the database
                # only once.
                Model = get_model(*link['model'].split('.'))
                try:
                    obj._link_model = Model.objects.get(pk=link['pk'])
                except Model.DoesNotExist:
                    obj._link_model = None
            if obj._link_model:
                return obj._link_model.get_absolute_url()

    def get_ring_bases(self):
        # Expose this class to the client-side (ring.js) class hierarchy.
        bases = super(LinkPluginBase, self).get_ring_bases()
        bases.append('LinkPluginBase')
        return bases

    def get_form(self, request, obj=None, **kwargs):
        # Default to the specialised LinkForm unless a caller supplied one.
        kwargs.setdefault('form', LinkForm.get_form_class())
        return super(LinkPluginBase, self).get_form(request, obj, **kwargs)


@python_2_unicode_compatible
class LinkElementMixin(object):
    """
    A mixin class to convert a CascadeElement into a proxy model for rendering
    the ``<a>`` element.
    Please note that a Link inside the Text Editor Plugin is rendered as
    `str(instance)` rather than `instance.content`.
    """
    def __str__(self):
        return self.content

    @property
    def link(self):
        # Delegate URL resolution to the owning plugin class.
        return self.plugin_class.get_link(self)

    @property
    def content(self):
        # The link's inner text, marked safe for direct template output.
        return mark_safe(self.glossary.get('link_content', ''))
mit
artscoop/django-treemenus-plus
treemenusplus/templatetags/tree_menu_tags.py
1
2416
import django
from django import template
from django.template.defaulttags import url
from django.template import Node, TemplateSyntaxError
from treemenusplus.models import Menu, MenuItem
from treemenusplus.config import APP_LABEL

register = template.Library()


@register.simple_tag
def get_treemenus_static_prefix():
    """Return the URL prefix under which the treemenus images are served,
    accounting for the Django 1.3 move from ADMIN_MEDIA to STATIC_URL."""
    if django.VERSION >= (1, 3):
        from django.templatetags.static import PrefixNode
        return PrefixNode.handle_simple("STATIC_URL") + 'img/treemenusplus'
    else:
        from django.contrib.admin.templatetags.adminmedia import admin_media_prefix
        return admin_media_prefix() + 'img/admin/'


def show_menu(context, menu_name, menu_type=None):
    """Inclusion tag: push the named Menu into the template context.

    Raises Menu.DoesNotExist if no menu with that name exists.
    """
    menu = Menu.objects.get(name=menu_name)
    context['menu'] = menu
    context['menu_name'] = menu_name
    if menu_type:
        context['menu_type'] = menu_type
    return context
register.inclusion_tag('%s/menu.html' % APP_LABEL, takes_context=True)(show_menu)


def show_menu_item(context, menu_item):
    """Inclusion tag: push a single MenuItem into the template context."""
    if not isinstance(menu_item, MenuItem):
        raise template.TemplateSyntaxError('Given argument must be a MenuItem object.')
    context['menu_item'] = menu_item
    return context
register.inclusion_tag('%s/menu_item.html' % APP_LABEL, takes_context=True)(show_menu_item)


class ReverseNamedURLNode(Node):
    """Template node that resolves its argument at render time and then
    delegates to the built-in {% url %} tag, allowing the url name (and its
    arguments) to come from a template variable."""
    def __init__(self, named_url, parser):
        self.named_url = named_url
        self.parser = parser

    def render(self, context):
        from django.template.base import TOKEN_BLOCK, Token
        resolved_named_url = self.named_url.resolve(context)
        # Rebuild a synthetic "{% url ... %}" token from the resolved value.
        # Django >= 1.3 expects the url name to be quoted, so split off the
        # name and re-quote it; older versions take it bare.
        if django.VERSION >= (1, 3):
            tokens = resolved_named_url.split(' ')
            base = tokens[0]
            args = tokens[1:]
            contents = u'url "{0}" {1}'.format(base, ' '.join(args))
        else:
            contents = u'url {0}'.format(resolved_named_url)
        # Hand the synthetic token to the stock url tag and render its node.
        urlNode = url(self.parser, Token(token_type=TOKEN_BLOCK, contents=contents))
        return urlNode.render(context)


def reverse_named_url(parser, token):
    """Tag compiler for {% reverse_named_url <name-expression> %}."""
    bits = token.contents.split(' ', 2)
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' takes only one argument"
                                  " (named url)" % bits[0])
    named_url = parser.compile_filter(bits[1])
    return ReverseNamedURLNode(named_url, parser)
reverse_named_url = register.tag(reverse_named_url)
bsd-3-clause
theanalyst/cinder
cinder/api/versions.py
3
8253
# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from lxml import etree from oslo.config import cfg from cinder.api.openstack import wsgi from cinder.api.views import versions as views_versions from cinder.api import xmlutil CONF = cfg.CONF _KNOWN_VERSIONS = { "v2.0": { "id": "v2.0", "status": "CURRENT", "updated": "2012-11-21T11:33:21Z", "links": [ { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/", }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.volume+xml;version=1", }, { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=1", } ], }, "v1.0": { "id": "v1.0", "status": "CURRENT", "updated": "2012-01-04T11:33:21Z", "links": [ { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/", }, ], "media-types": [ { "base": "application/xml", "type": "application/vnd.openstack.volume+xml;version=1", }, { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=1", } ], } } def get_supported_versions(): versions = {} if CONF.enable_v1_api: versions['v1.0'] = _KNOWN_VERSIONS['v1.0'] if CONF.enable_v2_api: versions['v2.0'] = _KNOWN_VERSIONS['v2.0'] return versions class MediaTypesTemplateElement(xmlutil.TemplateElement): def will_render(self, datum): return 'media-types' in datum def make_version(elem): elem.set('id') elem.set('status') elem.set('updated') mts = 
MediaTypesTemplateElement('media-types') elem.append(mts) mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') mt.set('base') mt.set('type') xmlutil.make_links(elem, 'links') version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} class VersionTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('version', selector='version') make_version(root) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class VersionsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('versions') elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') make_version(elem) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class ChoicesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('choices') elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') make_version(elem) return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) class AtomSerializer(wsgi.XMLDictSerializer): NSMAP = {None: xmlutil.XMLNS_ATOM} def __init__(self, metadata=None, xmlns=None): self.metadata = metadata or {} if not xmlns: self.xmlns = wsgi.XMLNS_ATOM else: self.xmlns = xmlns def _get_most_recent_update(self, versions): recent = None for version in versions: updated = datetime.datetime.strptime(version['updated'], '%Y-%m-%dT%H:%M:%SZ') if not recent: recent = updated elif updated > recent: recent = updated return recent.strftime('%Y-%m-%dT%H:%M:%SZ') def _get_base_url(self, link_href): # Make sure no trailing / link_href = link_href.rstrip('/') return link_href.rsplit('/', 1)[0] + '/' def _create_feed(self, versions, feed_title, feed_id): feed = etree.Element('feed', nsmap=self.NSMAP) title = etree.SubElement(feed, 'title') title.set('type', 'text') title.text = feed_title # Set this updated to the most recently updated version recent = self._get_most_recent_update(versions) etree.SubElement(feed, 'updated').text = recent 
etree.SubElement(feed, 'id').text = feed_id link = etree.SubElement(feed, 'link') link.set('rel', 'self') link.set('href', feed_id) author = etree.SubElement(feed, 'author') etree.SubElement(author, 'name').text = 'Rackspace' etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' for version in versions: feed.append(self._create_version_entry(version)) return feed def _create_version_entry(self, version): entry = etree.Element('entry') etree.SubElement(entry, 'id').text = version['links'][0]['href'] title = etree.SubElement(entry, 'title') title.set('type', 'text') title.text = 'Version %s' % version['id'] etree.SubElement(entry, 'updated').text = version['updated'] for link in version['links']: link_elem = etree.SubElement(entry, 'link') link_elem.set('rel', link['rel']) link_elem.set('href', link['href']) if 'type' in link: link_elem.set('type', link['type']) content = etree.SubElement(entry, 'content') content.set('type', 'text') content.text = 'Version %s %s (%s)' % (version['id'], version['status'], version['updated']) return entry class VersionsAtomSerializer(AtomSerializer): def default(self, data): versions = data['versions'] feed_id = self._get_base_url(versions[0]['links'][0]['href']) feed = self._create_feed(versions, 'Available API Versions', feed_id) return self._to_xml(feed) class VersionAtomSerializer(AtomSerializer): def default(self, data): version = data['version'] feed_id = version['links'][0]['href'] feed = self._create_feed([version], 'About This Version', feed_id) return self._to_xml(feed) class Versions(wsgi.Resource): def __init__(self): super(Versions, self).__init__(None) @wsgi.serializers(xml=VersionsTemplate, atom=VersionsAtomSerializer) def index(self, req): """Return all versions.""" builder = views_versions.get_view_builder(req) return builder.build_versions(get_supported_versions()) @wsgi.serializers(xml=ChoicesTemplate) @wsgi.response(300) def multi(self, req): """Return multiple choices.""" builder = 
views_versions.get_view_builder(req) return builder.build_choices(get_supported_versions(), req) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" args = {} if request_environment['PATH_INFO'] == '/': args['action'] = 'index' else: args['action'] = 'multi' return args class VolumeVersionV1(object): @wsgi.serializers(xml=VersionTemplate, atom=VersionAtomSerializer) def show(self, req): builder = views_versions.get_view_builder(req) return builder.build_version(_KNOWN_VERSIONS['v1.0']) def create_resource(): return wsgi.Resource(VolumeVersionV1())
apache-2.0
marc-sensenich/ansible
lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py
29
9626
#!/usr/bin/python # # Copyright (c) 2017 Julien Stroheker, <juliens@microsoft.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_availabilityset version_added: "2.4" short_description: Manage Azure availability set. description: - Create, update and delete Azure availability set. An availability set cannot be updated, you will have to recreate one instead. The only update operation will be for the tags. options: resource_group: description: - Name of a resource group where the availability set exists or will be created. required: true name: description: - Name of the availability set. required: true state: description: - Assert the state of the availability set. Use 'present' to create or update a availability set and 'absent' to delete a availability set. default: present choices: - absent - present location: description: - Valid azure location. Defaults to location of the resource group. platform_update_domain_count: description: - Update domains indicate groups of virtual machines and underlying physical hardware that can be rebooted at the same time. Default is 5. default: 5 platform_fault_domain_count: description: - Fault domains define the group of virtual machines that share a common power source and network switch. Should be between 1 and 3. Default is 3 default: 3 sku: description: - Define if the availability set supports managed disks. 
default: Classic choices: - Classic - Aligned extends_documentation_fragment: - azure - azure_tags author: - "Julien Stroheker (@julienstroheker)" ''' EXAMPLES = ''' - name: Create an availability set with default options azure_rm_availabilityset: name: myavailabilityset location: eastus resource_group: Testing - name: Create an availability set with advanced options azure_rm_availabilityset: name: myavailabilityset location: eastus resource_group: Testing platform_update_domain_count: 5 platform_fault_domain_count: 3 sku: Aligned - name: Delete an availability set azure_rm_availabilityset: name: myavailabilityset location: eastus resource_group: Testing state: absent ''' RETURN = ''' state: description: Current state of the availability set returned: always type: dict changed: description: Whether or not the resource has changed returned: always type: bool ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError except ImportError: # This is handled in azure_rm_common pass def availability_set_to_dict(avaset): ''' Serializing the availability set from the API to Dict :return: dict ''' return dict( id=avaset.id, name=avaset.name, location=avaset.location, platform_update_domain_count=avaset.platform_update_domain_count, platform_fault_domain_count=avaset.platform_fault_domain_count, tags=avaset.tags, sku=avaset.sku.name ) class AzureRMAvailabilitySet(AzureRMModuleBase): """Configuration class for an Azure RM availability set resource""" def __init__(self): self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), name=dict( type='str', required=True ), state=dict( type='str', default='present', choices=['present', 'absent'] ), location=dict( type='str' ), platform_update_domain_count=dict( type='int', default=5 ), platform_fault_domain_count=dict( type='int', default=3 ), sku=dict( type='str', default='Classic', choices=['Classic', 'Aligned'] ) ) self.resource_group = None 
self.name = None self.location = None self.tags = None self.platform_update_domain_count = None self.platform_fault_domain_count = None self.sku = None self.state = None self.warning = False self.results = dict(changed=False, state=dict()) super(AzureRMAvailabilitySet, self).__init__(derived_arg_spec=self.module_arg_spec, supports_check_mode=True, supports_tags=True) def exec_module(self, **kwargs): """Main module execution method""" for key in list(self.module_arg_spec.keys()) + ['tags']: setattr(self, key, kwargs[key]) resource_group = None response = None to_be_updated = False resource_group = self.get_resource_group(self.resource_group) if not self.location: self.location = resource_group.location # Check if the AS already present in the RG if self.state == 'present': response = self.get_availabilityset() self.results['state'] = response if not response: to_be_updated = True else: update_tags, response['tags'] = self.update_tags(response['tags']) if update_tags: self.log("Tags has to be updated") to_be_updated = True if response['platform_update_domain_count'] != self.platform_update_domain_count: self.faildeploy('platform_update_domain_count') if response['platform_fault_domain_count'] != self.platform_fault_domain_count: self.faildeploy('platform_fault_domain_count') if response['sku'] != self.sku: self.faildeploy('sku') if self.check_mode: return self.results if to_be_updated: self.results['state'] = self.create_or_update_availabilityset() self.results['changed'] = True elif self.state == 'absent': self.delete_availabilityset() self.results['changed'] = True return self.results def faildeploy(self, param): ''' Helper method to push fail message in the console. Useful to notify that the users cannot change some values in a Availability Set :param: variable's name impacted :return: void ''' self.fail("You tried to change {0} but is was unsuccessful. 
An Availability Set is immutable, except tags".format(str(param))) def create_or_update_availabilityset(self): ''' Method calling the Azure SDK to create or update the AS. :return: void ''' self.log("Creating availabilityset {0}".format(self.name)) try: params_sku = self.compute_models.Sku( name=self.sku ) params = self.compute_models.AvailabilitySet( location=self.location, tags=self.tags, platform_update_domain_count=self.platform_update_domain_count, platform_fault_domain_count=self.platform_fault_domain_count, sku=params_sku ) response = self.compute_client.availability_sets.create_or_update(self.resource_group, self.name, params) except CloudError as e: self.log('Error attempting to create the availability set.') self.fail("Error creating the availability set: {0}".format(str(e))) return availability_set_to_dict(response) def delete_availabilityset(self): ''' Method calling the Azure SDK to delete the AS. :return: void ''' self.log("Deleting availabilityset {0}".format(self.name)) try: response = self.compute_client.availability_sets.delete(self.resource_group, self.name) except CloudError as e: self.log('Error attempting to delete the availability set.') self.fail("Error deleting the availability set: {0}".format(str(e))) return True def get_availabilityset(self): ''' Method calling the Azure SDK to get an AS. :return: void ''' self.log("Checking if the availabilityset {0} is present".format(self.name)) found = False try: response = self.compute_client.availability_sets.get(self.resource_group, self.name) found = True except CloudError as e: self.log('Did not find the Availability set.') if found is True: return availability_set_to_dict(response) else: return False def main(): """Main execution""" AzureRMAvailabilitySet() if __name__ == '__main__': main()
gpl-3.0
aplicatii-romanesti/allinclusive-kodi-pi
.kodi/addons/plugin.video.salts/scrapers/xmovies8_scraper.py
1
3356
""" SALTS XBMC Addon Copyright (C) 2014 tknorris This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import scraper import urllib import urlparse import re import xbmcaddon from salts_lib import dom_parser from salts_lib.constants import VIDEO_TYPES BASE_URL = 'http://xmovies8.tv' class XMovies8_Scraper(scraper.Scraper): base_url = BASE_URL def __init__(self, timeout=scraper.DEFAULT_TIMEOUT): self.timeout = timeout self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name())) @classmethod def provides(cls): return frozenset([VIDEO_TYPES.MOVIE]) @classmethod def get_name(cls): return 'xmovies8' def resolve_link(self, link): return link def format_source_label(self, item): return '[%s] %s' % (item['quality'], item['host']) def get_sources(self, video): source_url = self.get_url(video) hosters = [] if source_url: url = urlparse.urljoin(self.base_url, source_url) html = self._http_get(url, cache_limit=.5) for match in re.finditer('href="([^"]+)[^>]*>(\d+)x(\d+)', html): stream_url, width, _ = match.groups() hoster = {'multi-part': False, 'host': self._get_direct_hostname(stream_url), 'class': self, 'quality': self._width_get_quality(width), 'views': None, 'rating': None, 'url': stream_url, 'direct': True} hosters.append(hoster) return hosters def get_url(self, video): return super(XMovies8_Scraper, self)._default_get_url(video) def search(self, video_type, title, year): search_url = 
urlparse.urljoin(self.base_url, '/?s=%s' % urllib.quote_plus(title)) html = self._http_get(search_url, cache_limit=.25) results = [] for result in dom_parser.parse_dom(html, 'h2'): match = re.search('href="([^"]+)"[^>]*>([^<]+)', result) if match: url, match_title_year = match.groups() match = re.search('(.*?)\s+\((\d{4})\)', match_title_year) if match: match_title, match_year = match.groups() else: match_title = match_title_year match_year = '' if not year or not match_year or year == match_year: result = {'url': url.replace(self.base_url, ''), 'title': match_title, 'year': match_year} results.append(result) return results def _http_get(self, url, cookies=None, data=None, cache_limit=8): return super(XMovies8_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, cookies=cookies, data=data, cache_limit=cache_limit)
apache-2.0
KaranToor/MA450
google-cloud-sdk/.install/.backup/lib/third_party/oauth2client/contrib/django_util/signals.py
59
1050
# Copyright 2015 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Signals for Google OAuth2 Helper. This module contains signals for Google OAuth2 Helper. Currently it only contains one, which fires when an OAuth2 authorization flow has completed. """ import django.dispatch """Signal that fires when OAuth2 Flow has completed. It passes the Django request object and the OAuth2 credentials object to the receiver. """ oauth2_authorized = django.dispatch.Signal( providing_args=["request", "credentials"])
apache-2.0
mick-d/nipype_source
nipype/workflows/dmri/connectivity/nx.py
15
6374
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.cmtk as cmtk
import nipype.algorithms.misc as misc
from .group_connectivity import pullnodeIDs
from nipype.algorithms.misc import remove_identical_paths


def add_global_to_filename(in_file):
    # Insert '_global' before the extension, e.g. 'subj1.pck' -> 'subj1_global.pck'.
    # Used as a connect() modifier to name the global-measures CSV output.
    from nipype.utils.filemanip import split_filename
    path, name, ext = split_filename(in_file)
    return name + '_global' + ext


def add_nodal_to_filename(in_file):
    # Insert '_nodal' before the extension, e.g. 'subj1.pck' -> 'subj1_nodal.pck'.
    # Used as a connect() modifier to name the nodal-measures CSV output.
    from nipype.utils.filemanip import split_filename
    path, name, ext = split_filename(in_file)
    return name + '_nodal' + ext


def create_networkx_pipeline(name="networkx", extra_column_heading="subject"):
    """Creates a workflow to calculate various graph measures (via NetworkX) on
    an input network. The output measures are then converted to comma-separated
    value text files, and an extra column / field is also added. Typically, the
    user would connect the subject name to this field.

    Example
    -------

    >>> from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline
    >>> nx = create_networkx_pipeline("networkx", "subject_id")
    >>> nx.inputs.inputnode.extra_field = 'subj1'
    >>> nx.inputs.inputnode.network_file = 'subj1.pck'
    >>> nx.run()                 # doctest: +SKIP

    Inputs::

        inputnode.extra_field
        inputnode.network_file

    Outputs::

        outputnode.network_files
        outputnode.csv_files
        outputnode.matlab_files
    """
    # Identity node exposing the workflow's two external inputs.
    inputnode = pe.Node(interface = util.IdentityInterface(fields=["extra_field", "network_file"]), name="inputnode")
    pipeline = pe.Workflow(name=name)
    # Computes the NetworkX graph measures on the input network.
    ntwkMetrics = pe.Node(interface=cmtk.NetworkXMetrics(), name="NetworkXMetrics")
    # One Matlab2CSV/MergeCSVFiles pair per measure family: '_node' handles the
    # per-node measures, '_global' (a clone) handles the graph-wide measures.
    Matlab2CSV_node = pe.Node(interface=misc.Matlab2CSV(), name="Matlab2CSV_node")
    MergeCSVFiles_node = pe.Node(interface=misc.MergeCSVFiles(), name="MergeCSVFiles_node")
    MergeCSVFiles_node.inputs.extra_column_heading = extra_column_heading
    Matlab2CSV_global = Matlab2CSV_node.clone(name="Matlab2CSV_global")
    MergeCSVFiles_global = MergeCSVFiles_node.clone(name="MergeCSVFiles_global")
    MergeCSVFiles_global.inputs.extra_column_heading = extra_column_heading

    # Merge nodes collecting (input network + pickled metric networks) and
    # (nodal CSV + global CSV) into single output lists.
    mergeNetworks = pe.Node(interface=util.Merge(2), name="mergeNetworks")
    mergeCSVs = mergeNetworks.clone("mergeCSVs")

    pipeline.connect([(inputnode, ntwkMetrics,[("network_file","in_file")])])
    pipeline.connect([(ntwkMetrics, Matlab2CSV_node,[("node_measures_matlab","in_file")])])
    pipeline.connect([(ntwkMetrics, Matlab2CSV_global,[("global_measures_matlab","in_file")])])

    # Nodal branch: convert MATLAB measures to CSVs, then merge them with the
    # subject field in the extra column and node IDs as row headings.
    pipeline.connect([(Matlab2CSV_node, MergeCSVFiles_node,[("csv_files","in_files")])])
    pipeline.connect([(inputnode, MergeCSVFiles_node, [(("extra_field", add_nodal_to_filename), "out_file")])])
    pipeline.connect([(inputnode, MergeCSVFiles_node,[("extra_field","extra_field")])])
    pipeline.connect([(inputnode, MergeCSVFiles_node, [(("network_file", pullnodeIDs), "row_headings")])])

    # Global branch: same conversion; column headings are derived from the CSV
    # filenames (deduplicated via remove_identical_paths).
    pipeline.connect([(Matlab2CSV_global, MergeCSVFiles_global,[("csv_files","in_files")])])
    pipeline.connect([(Matlab2CSV_global, MergeCSVFiles_global, [(("csv_files", remove_identical_paths), "column_headings")])])
    #MergeCSVFiles_global.inputs.row_heading_title = 'metric'
    #MergeCSVFiles_global.inputs.column_headings = ['average']
    pipeline.connect([(inputnode, MergeCSVFiles_global, [(("extra_field", add_global_to_filename), "out_file")])])
    pipeline.connect([(inputnode, MergeCSVFiles_global,[("extra_field","extra_field")])])

    # Collect the original network and the gpickled metric networks together.
    pipeline.connect([(inputnode, mergeNetworks,[("network_file","in1")])])
    pipeline.connect([(ntwkMetrics, mergeNetworks,[("gpickled_network_files","in2")])])

    # Identity node exposing the workflow's external outputs.
    outputnode = pe.Node(interface = util.IdentityInterface(fields=["network_files", "csv_files", "matlab_files", "node_csv", "global_csv"]), name="outputnode")

    pipeline.connect([(MergeCSVFiles_node, outputnode, [("csv_file", "node_csv")])])
    pipeline.connect([(MergeCSVFiles_global, outputnode, [("csv_file", "global_csv")])])
    pipeline.connect([(MergeCSVFiles_node, mergeCSVs, [("csv_file", "in1")])])
    pipeline.connect([(MergeCSVFiles_global, mergeCSVs, [("csv_file", "in2")])])
    pipeline.connect([(mergeNetworks, outputnode, [("out", "network_files")])])
    pipeline.connect([(mergeCSVs, outputnode, [("out", "csv_files")])])
    pipeline.connect([(ntwkMetrics, outputnode,[("matlab_matrix_files", "matlab_files")])])
    return pipeline


def create_cmats_to_csv_pipeline(name="cmats_to_csv", extra_column_heading="subject"):
    """Creates a workflow to convert the outputs from CreateMatrix into a single
    comma-separated value text file. An extra column / field is also added to the
    text file. Typically, the user would connect the subject name to this field.

    Example
    -------

    >>> from nipype.workflows.dmri.connectivity.nx import create_cmats_to_csv_pipeline
    >>> csv = create_cmats_to_csv_pipeline("cmats_to_csv", "subject_id")
    >>> csv.inputs.inputnode.extra_field = 'subj1'
    >>> csv.inputs.inputnode.matlab_matrix_files = ['subj1_cmatrix.mat', 'subj1_mean_fiber_length.mat', 'subj1_median_fiber_length.mat', 'subj1_fiber_length_std.mat']
    >>> csv.run()                 # doctest: +SKIP

    Inputs::

        inputnode.extra_field
        inputnode.matlab_matrix_files

    Outputs::

        outputnode.csv_file
    """
    # Identity node exposing the workflow's two external inputs.
    inputnode = pe.Node(interface = util.IdentityInterface(fields=["extra_field", "matlab_matrix_files"]), name="inputnode")
    pipeline = pe.Workflow(name=name)
    # MapNode: one Matlab2CSV conversion per input .mat file.
    Matlab2CSV = pe.MapNode(interface=misc.Matlab2CSV(), name="Matlab2CSV", iterfield=["in_file"])
    # Merge the per-matrix CSVs into one file, adding the extra column.
    MergeCSVFiles = pe.Node(interface=misc.MergeCSVFiles(), name="MergeCSVFiles")
    MergeCSVFiles.inputs.extra_column_heading = extra_column_heading
    pipeline.connect([(inputnode, Matlab2CSV,[("matlab_matrix_files","in_file")])])
    pipeline.connect([(Matlab2CSV, MergeCSVFiles,[("csv_files","in_files")])])
    pipeline.connect([(inputnode, MergeCSVFiles,[("extra_field","extra_field")])])
    # Identity node exposing the single merged CSV as the workflow output.
    outputnode = pe.Node(interface = util.IdentityInterface(fields=["csv_file"]), name="outputnode")
    pipeline.connect([(MergeCSVFiles, outputnode, [("csv_file", "csv_file")])])
    return pipeline
bsd-3-clause
40323155/2016springcd_aG6
static/plugin/liquid_tags/graphviz.py
245
3198
""" GraphViz Tag --------- This implements a Liquid-style graphviz tag for Pelican. You can use different Graphviz programs like dot, neato, twopi etc. [1] [1] http://www.graphviz.org/ Syntax ------ {% graphviz <program> { <DOT code> } %} Examples -------- {% graphviz dot { digraph graphname { a -> b -> c; b -> d; } } %} {% graphviz twopi { <code goes here> } %} {% graphviz neato { <code goes here> } %} ... Output ------ <div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div> """ import base64 import re from .mdx_liquid_tags import LiquidTags SYNTAX = '{% dot graphviz [program] [dot code] %}' DOT_BLOCK_RE = re.compile(r'^\s*(?P<program>\w+)\s*\{\s*(?P<code>.*\})\s*\}$', re.MULTILINE | re.DOTALL) def run_graphviz(program, code, options=[], format='png'): """ Runs graphviz programs and returns image data Copied from https://github.com/tkf/ipython-hierarchymagic/blob/master/hierarchymagic.py """ import os from subprocess import Popen, PIPE dot_args = [program] + options + ['-T', format] if os.name == 'nt': # Avoid opening shell window. 
# * https://github.com/tkf/ipython-hierarchymagic/issues/1 # * http://stackoverflow.com/a/2935727/727827 p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, creationflags=0x08000000) else: p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE) wentwrong = False try: # Graphviz may close standard input when an error occurs, # resulting in a broken pipe on communicate() stdout, stderr = p.communicate(code.encode('utf-8')) except (OSError, IOError) as err: if err.errno != EPIPE: raise wentwrong = True except IOError as err: if err.errno != EINVAL: raise wentwrong = True if wentwrong: # in this case, read the standard output and standard error streams # directly, to get the error message(s) stdout, stderr = p.stdout.read(), p.stderr.read() p.wait() if p.returncode != 0: raise RuntimeError('dot exited with error:\n[stderr]\n{0}'.format(stderr.decode('utf-8'))) return stdout @LiquidTags.register('graphviz') def graphviz_parser(preprocessor, tag, markup): """ Simple Graphviz parser """ # Parse the markup string m = DOT_BLOCK_RE.search(markup) if m: # Get program and DOT code code = m.group('code') program = m.group('program').strip() # Run specified program with our markup output = run_graphviz(program, code) # Return Base64 encoded image return '<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,%s"></div>' % base64.b64encode(output) else: raise ValueError('Error processing input. ' 'Expected syntax: {0}'.format(SYNTAX)) #---------------------------------------------------------------------- # This import allows image tag to be a Pelican plugin from .liquid_tags import register
agpl-3.0
LIKAIMO/MissionPlanner
Lib/pydoc_data/topics.py
42
429921
# Autogenerated by Sphinx on Sat Jul 3 08:52:04 2010 topics = {'assert': u'\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that ``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. 
The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets. 
(This rule is relaxed as of\n Python 1.5; in earlier versions, the object had to be a tuple.\n Since strings are sequences, an assignment like ``a, b = "xy"`` is\n now legal as long as the string has the right length.)\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. 
The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, ``IndexError`` is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. 
Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to (small) integers. If either\n bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. 
See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. If the class name\nconsists only of underscores, no transformation is done.\n', 'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section *Literals* for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. 
Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. 
For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in the\nclass dictionary of another new-style class, known as the *owner*\nclass. In the examples below, "the attribute" refers to the attribute\nwhose name is the key of the property in the owner class\'\n``__dict__``. 
Descriptors can only be implemented as new-style\nclasses themselves.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. 
Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, A)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. 
If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n', 'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n``AttributeError`` is raised. Otherwise, the type and value of the\nobject produced is determined by the object. Multiple evaluations of\nthe same attribute reference may yield different objects.\n', 'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. 
In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe ``*`` (multiplication) operator yields the product of its\narguments. The arguments must either both be numbers, or one argument\nmust be an integer (plain or long) and the other must be a sequence.\nIn the former case, the numbers are converted to a common type and\nthen multiplied together. In the latter case, sequence repetition is\nperformed; a negative repetition factor yields an empty sequence.\n\nThe ``/`` (division) and ``//`` (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. 
Plain or long integer division yields an\ninteger of the same type; the result is that of mathematical division\nwith the \'floor\' function applied to the result. Division by zero\nraises the ``ZeroDivisionError`` exception.\n\nThe ``%`` (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n``ZeroDivisionError`` exception. The arguments may be floating point\nnumbers, e.g., ``3.14%0.7`` equals ``0.34`` (since ``3.14`` equals\n``4*0.7 + 0.34``.) The modulo operator always yields a result with\nthe same sign as its second operand (or zero); the absolute value of\nthe result is strictly smaller than the absolute value of the second\noperand [2].\n\nThe integer division and modulo operators are connected by the\nfollowing identity: ``x == (x/y)*y + (x%y)``. Integer division and\nmodulo are also connected with the built-in function ``divmod()``:\n``divmod(x, y) == (x/y, x%y)``. These identities don\'t hold for\nfloating point numbers; there similar identities hold approximately\nwhere ``x/y`` is replaced by ``floor(x/y)`` or ``floor(x/y) - 1`` [3].\n\nIn addition to performing the modulo operation on numbers, the ``%``\noperator is also overloaded by string and unicode objects to perform\nstring formatting (also known as interpolation). The syntax for string\nformatting is described in the Python Library Reference, section\n*String Formatting Operations*.\n\nDeprecated since version 2.3: The floor division operator, the modulo\noperator, and the ``divmod()`` function are no longer defined for\ncomplex numbers. Instead, convert to a floating point number using\nthe ``abs()`` function if appropriate.\n\nThe ``+`` (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. 
In the latter case, the sequences are\nconcatenated.\n\nThe ``-`` (subtraction) operator yields the difference of its\narguments. The numeric arguments are first converted to a common\ntype.\n', 'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe ``&`` operator yields the bitwise AND of its arguments, which must\nbe plain or long integers. The arguments are converted to a common\ntype.\n\nThe ``^`` operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers. The arguments are\nconverted to a common type.\n\nThe ``|`` operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers. The arguments are converted to\na common type.\n', 'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin ``compile()`` function and can be extracted from function objects\nthrough their ``func_code`` attribute. See also the ``code`` module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the ``exec`` statement or the built-in ``eval()``\nfunction.\n\nSee *The standard type hierarchy* for more information.\n', 'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see *Slicings*). It\nsupports no special operations. 
There is exactly one ellipsis object,\nnamed ``Ellipsis`` (a built-in name).\n\nIt is written as ``Ellipsis``.\n', 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s ``stdio`` package and can be\ncreated with the built-in ``open()`` function. File objects are also\nreturned by some other built-in functions and methods, such as\n``os.popen()`` and ``os.fdopen()`` and the ``makefile()`` method of\nsocket objects. Temporary files can be created using the ``tempfile``\nmodule, and high-level file operations such as copying, moving, and\ndeleting files and directories can be achieved with the ``shutil``\nmodule.\n\nWhen a file operation fails for an I/O-related reason, the exception\n``IOError`` is raised. This includes situations where the operation\nis not defined for some reason, like ``seek()`` on a tty device or\nwriting a file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n Close the file. A closed file cannot be read or written any more.\n Any operation which requires that the file be open will raise a\n ``ValueError`` after the file has been closed. Calling ``close()``\n more than once is allowed.\n\n As of Python 2.5, you can avoid having to call this method\n explicitly if you use the ``with`` statement. For example, the\n following code will automatically close *f* when the ``with`` block\n is exited:\n\n from __future__ import with_statement # This isn\'t required in Python 2.6\n\n with open("hello.txt") as f:\n for line in f:\n print line\n\n In older versions of Python, you would have needed to do this to\n get the same effect:\n\n f = open("hello.txt")\n try:\n for line in f:\n print line\n finally:\n f.close()\n\n Note: Not all "file-like" types in Python support use as a context\n manager for the ``with`` statement. 
If your code is intended to\n work with any file-like object, you can use the function\n ``contextlib.closing()`` instead of using the object directly.\n\nfile.flush()\n\n Flush the internal buffer, like ``stdio``\'s ``fflush()``. This may\n be a no-op on some file-like objects.\n\n Note: ``flush()`` does not necessarily write the file\'s data to disk.\n Use ``flush()`` followed by ``os.fsync()`` to ensure this\n behavior.\n\nfile.fileno()\n\n Return the integer "file descriptor" that is used by the underlying\n implementation to request I/O operations from the operating system.\n This can be useful for other, lower level interfaces that use file\n descriptors, such as the ``fcntl`` module or ``os.read()`` and\n friends.\n\n Note: File-like objects which do not have a real file descriptor should\n *not* provide this method!\n\nfile.isatty()\n\n Return ``True`` if the file is connected to a tty(-like) device,\n else ``False``.\n\n Note: If a file-like object is not associated with a real file, this\n method should *not* be implemented.\n\nfile.next()\n\n A file object is its own iterator, for example ``iter(f)`` returns\n *f* (unless *f* is closed). When a file is used as an iterator,\n typically in a ``for`` loop (for example, ``for line in f: print\n line``), the ``next()`` method is called repeatedly. This method\n returns the next input line, or raises ``StopIteration`` when EOF\n is hit when the file is open for reading (behavior is undefined\n when the file is open for writing). In order to make a ``for``\n loop the most efficient way of looping over the lines of a file (a\n very common operation), the ``next()`` method uses a hidden read-\n ahead buffer. As a consequence of using a read-ahead buffer,\n combining ``next()`` with other file methods (like ``readline()``)\n does not work right. 
However, using ``seek()`` to reposition the\n file to an absolute position will flush the read-ahead buffer.\n\n New in version 2.3.\n\nfile.read([size])\n\n Read at most *size* bytes from the file (less if the read hits EOF\n before obtaining *size* bytes). If the *size* argument is negative\n or omitted, read all data until EOF is reached. The bytes are\n returned as a string object. An empty string is returned when EOF\n is encountered immediately. (For certain files, like ttys, it\n makes sense to continue reading after an EOF is hit.) Note that\n this method may call the underlying C function ``fread()`` more\n than once in an effort to acquire as close to *size* bytes as\n possible. Also note that when in non-blocking mode, less data than\n was requested may be returned, even if no *size* parameter was\n given.\n\n Note: This function is simply a wrapper for the underlying ``fread()``\n C function, and will behave the same in corner cases, such as\n whether the EOF value is cached.\n\nfile.readline([size])\n\n Read one entire line from the file. A trailing newline character\n is kept in the string (but may be absent when a file ends with an\n incomplete line). [5] If the *size* argument is present and non-\n negative, it is a maximum byte count (including the trailing\n newline) and an incomplete line may be returned. An empty string is\n returned *only* when EOF is encountered immediately.\n\n Note: Unlike ``stdio``\'s ``fgets()``, the returned string contains null\n characters (``\'\\0\'``) if they occurred in the input.\n\nfile.readlines([sizehint])\n\n Read until EOF using ``readline()`` and return a list containing\n the lines thus read. If the optional *sizehint* argument is\n present, instead of reading up to EOF, whole lines totalling\n approximately *sizehint* bytes (possibly after rounding up to an\n internal buffer size) are read. 
Objects implementing a file-like\n interface may choose to ignore *sizehint* if it cannot be\n implemented, or cannot be implemented efficiently.\n\nfile.xreadlines()\n\n This method returns the same thing as ``iter(f)``.\n\n New in version 2.1.\n\n Deprecated since version 2.3: Use ``for line in file`` instead.\n\nfile.seek(offset[, whence])\n\n Set the file\'s current position, like ``stdio``\'s ``fseek()``. The\n *whence* argument is optional and defaults to ``os.SEEK_SET`` or\n ``0`` (absolute file positioning); other values are ``os.SEEK_CUR``\n or ``1`` (seek relative to the current position) and\n ``os.SEEK_END`` or ``2`` (seek relative to the file\'s end). There\n is no return value.\n\n For example, ``f.seek(2, os.SEEK_CUR)`` advances the position by\n two and ``f.seek(-3, os.SEEK_END)`` sets the position to the third\n to last.\n\n Note that if the file is opened for appending (mode ``\'a\'`` or\n ``\'a+\'``), any ``seek()`` operations will be undone at the next\n write. If the file is only opened for writing in append mode (mode\n ``\'a\'``), this method is essentially a no-op, but it remains useful\n for files opened in append mode with reading enabled (mode\n ``\'a+\'``). If the file is opened in text mode (without ``\'b\'``),\n only offsets returned by ``tell()`` are legal. Use of other\n offsets causes undefined behavior.\n\n Note that not all file objects are seekable.\n\n Changed in version 2.6: Passing float values as offset has been\n deprecated.\n\nfile.tell()\n\n Return the file\'s current position, like ``stdio``\'s ``ftell()``.\n\n Note: On Windows, ``tell()`` can return illegal values (after an\n ``fgets()``) when reading files with Unix-style line-endings. Use\n binary mode (``\'rb\'``) to circumvent this problem.\n\nfile.truncate([size])\n\n Truncate the file\'s size. If the optional *size* argument is\n present, the file is truncated to (at most) that size. The size\n defaults to the current position. 
The current file position is not\n changed. Note that if a specified size exceeds the file\'s current\n size, the result is platform-dependent: possibilities include that\n the file may remain unchanged, increase to the specified size as if\n zero-filled, or increase to the specified size with undefined new\n content. Availability: Windows, many Unix variants.\n\nfile.write(str)\n\n Write a string to the file. There is no return value. Due to\n buffering, the string may not actually show up in the file until\n the ``flush()`` or ``close()`` method is called.\n\nfile.writelines(sequence)\n\n Write a sequence of strings to the file. The sequence can be any\n iterable object producing strings, typically a list of strings.\n There is no return value. (The name is intended to match\n ``readlines()``; ``writelines()`` does not add line separators.)\n\nFiles support the iterator protocol. Each iteration returns the same\nresult as ``file.readline()``, and iteration ends when the\n``readline()`` method returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n bool indicating the current state of the file object. This is a\n read-only attribute; the ``close()`` method changes the value. It\n may not be available on all file-like objects.\n\nfile.encoding\n\n The encoding that this file uses. When Unicode strings are written\n to a file, they will be converted to byte strings using this\n encoding. In addition, when the file is connected to a terminal,\n the attribute gives the encoding that the terminal is likely to use\n (that information might be incorrect if the user has misconfigured\n the terminal). The attribute is read-only and may not be present\n on all file-like objects. 
It may also be ``None``, in which case\n the file uses the system default encoding for converting Unicode\n strings.\n\n New in version 2.3.\n\nfile.errors\n\n The Unicode error handler used along with the encoding.\n\n New in version 2.6.\n\nfile.mode\n\n The I/O mode for the file. If the file was created using the\n ``open()`` built-in function, this will be the value of the *mode*\n parameter. This is a read-only attribute and may not be present on\n all file-like objects.\n\nfile.name\n\n If the file object was created using ``open()``, the name of the\n file. Otherwise, some string that indicates the source of the file\n object, of the form ``<...>``. This is a read-only attribute and\n may not be present on all file-like objects.\n\nfile.newlines\n\n If Python was built with the *--with-universal-newlines* option to\n **configure** (the default) this read-only attribute exists, and\n for files opened in universal newline read mode it keeps track of\n the types of newlines encountered while reading the file. The\n values it can take are ``\'\\r\'``, ``\'\\n\'``, ``\'\\r\\n\'``, ``None``\n (unknown, no newlines read yet) or a tuple containing all the\n newline types seen, to indicate that multiple newline conventions\n were encountered. For files not opened in universal newline read\n mode the value of this attribute will be ``None``.\n\nfile.softspace\n\n Boolean that indicates whether a space character needs to be\n printed before another value when using the ``print`` statement.\n Classes that are trying to simulate a file object should also have\n a writable ``softspace`` attribute, which should be initialized to\n zero. 
This will be automatic for most classes implemented in\n Python (care may be needed for objects that override attribute\n access); types implemented in C will have to provide a writable\n ``softspace`` attribute.\n\n Note: This attribute is not used to control the ``print`` statement,\n but to allow the implementation of ``print`` to keep track of its\n internal state.\n', 'bltin-null-object': u"\nThe Null Object\n***************\n\nThis object is returned by functions that don't explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named ``None`` (a built-in name).\n\nIt is written as ``None``.\n", 'bltin-type-objects': u"\nType Objects\n************\n\nType objects represent the various object types. An object's type is\naccessed by the built-in function ``type()``. There are no special\noperations on types. The standard module ``types`` defines names for\nall standard built-in types.\n\nTypes are written like this: ``<type 'int'>``.\n", 'booleans': u'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: ``False``, ``None``, numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. 
(See the ``__nonzero__()`` special method for a way to\nchange this.)\n\nThe operator ``not`` yields ``True`` if its argument is false,\n``False`` otherwise.\n\nThe expression ``x and y`` first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression ``x or y`` first evaluates *x*; if *x* is true, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\n(Note that neither ``and`` nor ``or`` restrict the value and type they\nreturn to ``False`` and ``True``, but rather return the last evaluated\nargument. This is sometimes useful, e.g., if ``s`` is a string that\nshould be replaced by a default value if it is empty, the expression\n``s or \'foo\'`` yields the desired value. Because ``not`` has to\ninvent a value anyway, it does not bother to return a value of the\nsame type as its argument, so e.g., ``not \'foo\'`` yields ``False``,\nnot ``\'\'``.)\n', 'break': u'\nThe ``break`` statement\n***********************\n\n break_stmt ::= "break"\n\n``break`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition\nwithin that loop.\n\nIt terminates the nearest enclosing loop, skipping the optional\n``else`` clause if the loop has one.\n\nIf a ``for`` loop is terminated by ``break``, the loop control target\nkeeps its current value.\n\nWhen ``break`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the loop.\n', 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n', 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a function) with a possibly\nempty series of arguments:\n\n call 
::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," "*" expression] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section *Function definitions* for the\nsyntax of formal parameter lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. 
(Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. Otherwise, the list of filled\nslots is used as the argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use ``PyArg_ParseTuple()`` to\nparse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to a sequence. 
Elements from this\nsequence are treated as if they were additional positional arguments;\nif there are positional arguments *x1*,..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax ``(sublist)`` cannot be\nused as keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. 
The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n', 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. 
To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'coercion-rules': u"\nCoercion rules\n**************\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. 
In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don't define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. For example, for the\n operator '``+``', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base's ``__rop__()`` method, the right operand's ``__rop__()``\n method is tried *before* the left operand's ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. 
Otherwise, the left operand's ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type's ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like '``+=``') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operator is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n", 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-built-in types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. 
In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmakes sense for many other object types to support membership tests\nwithout being a sequence. 
In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n', 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. 
In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements. Function and class\ndefinitions are also syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print`` statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. 
Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. 
A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. 
When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. 
An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. 
Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). 
This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. 
It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. 
The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack only if there\n is no ``finally`` clause that negates the exception.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'continue': u'\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n', 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at *Coercion rules*. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). 
Extensions can define their own\ncoercions.\n', 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. 
As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. 
Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. 
If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. 
However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. 
(Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. 
by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n', 'debugger': u'\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. 
It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``c`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type ``continue``, or you can step through the\n statement using ``step`` or ``next`` (all these commands are\n explained below). The optional *globals* and *locals* arguments\n specify the environment in which the code is executed; by default\n the dictionary of the module ``__main__`` is used. (See the\n explanation of the ``exec`` statement or the ``eval()`` built-in\n function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When ``runeval()`` returns, it returns the value of the\n expression. Otherwise this function is similar to ``run()``.\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. 
when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run_*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n', 'del': u'\nThe ``del`` statement\n*********************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather that spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. 
If the name is unbound, a\n``NameError`` exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n', 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) 
Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. 
[1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n', 'else': u'\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. 
The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exec': u'\nThe ``exec`` statement\n**********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. 
The first\nexpression should evaluate to either a string, an open file object, or\na code object. If it is a string, the string is parsed as a suite of\nPython statements which is then executed (unless a syntax error\noccurs). [1] If it is an open file, the file is parsed until EOF and\nexecuted. If it is a code object, it is simply executed. In all\ncases, the code that\'s executed is expected to be valid as file input\n(see section *File input*). Be aware that the ``return`` and\n``yield`` statements may not be used outside of function definitions\neven within the context of code passed to the ``exec`` statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after ``in`` is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. If\nprovided, *locals* can be any mapping object.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module\n``__builtin__`` under the key ``__builtins__`` (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function ``eval()``. The built-in functions\n``globals()`` and ``locals()`` return the current global and local\ndictionary, respectively, which may be useful to pass around for use\nby ``exec``.\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n convention. 
If you are reading the code from a file, make sure to\n use universal newline mode to convert Windows or Mac-style\n newlines.\n', 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. 
This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. 
This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no \'s\'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe global statement has the same scope as a name binding operation in\nthe same block. 
If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. 
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. 
Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n', 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, ``077e010`` is legal, and denotes the same number as\n``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. 
.001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n', 'for': u'\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). 
An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. 
The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either\neither a number or a keyword. If it\'s a number, it refers to a\npositional argument, and if it\'s a keyword, it refers to a named\nkeyword argument. If the numerical arg_names in a format string are\n0, 1, 2, ... in sequence, they can all be omitted (not just some) and\nthe numbers 0, 1, 2, ... will be automatically inserted in that order.\nThe *arg_name* can be followed by any number of index or attribute\nexpressions. An expression of the form ``\'.name\'`` selects the named\nattribute using ``getattr()``, while an expression of the form\n``\'[index]\'`` does an index lookup using ``__getitem__()``.\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. 
By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nTwo conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, and ``\'!r\'`` which calls ``repr()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. 
A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <a character other than \'}\'>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'}\' (which\nsignifies the end of the field). The presence of a fill character is\nsignaled by the *next* character, which must be one of the alignment\noptions. If the second character of *format_spec* is not a valid\nalignment option, then it is assumed that both the fill character and\nthe alignment option are absent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. 
|\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by ``\'0b\'``, ``\'0o\'``, or ``\'0x\'``,\nrespectively.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 2.7: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nIf the *width* field is preceded by a zero (``\'0\'``) character, this\nenables zero-padding. 
This is equivalent to an *alignment* type of\n``\'=\'`` and a *fill* character of ``\'0\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. 
|\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``. 
|\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. Postive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. 
|\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'g\'``. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}.\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, 
text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{align}{fill}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. 
The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that that same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. 
If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n', 'global': u'\nThe ``global`` statement\n************************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. 
It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the ``global`` is a directive to the parser.\nIt applies only to code parsed at the same time as the ``global``\nstatement. In particular, a ``global`` statement contained in an\n``exec`` statement does not affect the code block *containing* the\n``exec`` statement, and code contained in an ``exec`` statement is\nunaffected by ``global`` statements in the code containing the\n``exec`` statement. The same applies to the ``eval()``,\n``execfile()`` and ``compile()`` functions.\n', 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. 
See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library);\n applications should not expect to define additional names using\n this convention. The set of names of this class defined by Python\n may be extended in future versions. See section *Special method\n names*.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: ``None`` became a constant and is now\nrecognized by the compiler as a name for the built-in object ``None``.\nAlthough it is not a keyword, you cannot assign a different object to\nit.\n\nChanged in version 2.5: Both ``as`` and ``with`` are only recognized\nwhen the ``with_statement`` future feature has been enabled. 
It will\nalways be enabled in Python 2.6. See section *The with statement* for\ndetails. Note that using ``as`` and ``with`` as identifiers will\nalways issue a warning, even when the ``with_statement`` future\ndirective is not in effect.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library);\n applications should not expect to define additional names using\n this convention. The set of names of this class defined by Python\n may be extended in future versions. See section *Special method\n names*.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. 
See section *Identifiers (Names)*.\n', 'if': u'\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., ``(3+4j)``. Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', 'import': u'\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the ``import`` statement occurs). The\nstatement comes in two forms differing on whether it uses the ``from``\nkeyword. 
The first form (without ``from``) repeats these steps for\neach identifier in the list. The form with ``from`` performs step (1)\nonce, and then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files. The\noriginal specification for packages is still available to read,\nalthough minor details have changed since the writing of that\ndocument.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n``sys.modules``, the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then ``sys.meta_path`` is\nsearched (the specification for ``sys.meta_path`` can be found in\n**PEP 302**). The object is a list of *finder* objects which are\nqueried in order as to whether they know how to load the module by\ncalling their ``find_module()`` method with the name of the module. If\nthe module happens to be contained within a package (as denoted by the\nexistence of a dot in the name), then a second argument to\n``find_module()`` is given as the value of the ``__path__`` attribute\nfrom the parent package (everything up to the last dot in the name of\nthe module being imported). 
If a finder can find the module it returns\na *loader* (discussed later) or returns ``None``.\n\nIf none of the finders on ``sys.meta_path`` are able to find the\nmodule then some implicitly defined finders are queried.\nImplementations of Python vary in what implicit meta path finders are\ndefined. The one they all do define, though, is one that handles\n``sys.path_hooks``, ``sys.path_importer_cache``, and ``sys.path``.\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to ``find_module()``,\n``__path__`` on the parent package, is used as the source of paths. If\nthe module is not contained in a package then ``sys.path`` is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n``sys.path_importer_cache`` caches finders for paths and is checked\nfor a finder. If the path does not have a finder cached then\n``sys.path_hooks`` is searched by calling each object in the list with\na single argument of the path, returning a finder or raises\n``ImportError``. If a finder is returned then it is cached in\n``sys.path_importer_cache`` and then used for that path entry. If no\nfinder can be found but the path exists then a value of ``None`` is\nstored in ``sys.path_importer_cache`` to signify that an implicit,\nfile-based finder that handles modules stored as individual files\nshould be used for that path. If the path does not exist then a finder\nwhich always returns ``None`` is placed in the cache for the path.\n\nIf no finder can find the module then ``ImportError`` is raised.\nOtherwise some finder returned a loader whose ``load_module()`` method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). 
A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin ``sys.modules`` (a possibility if the loader is called outside of\nthe import machinery) then it is to use that module for initialization\nand not a new module. But if the module does not exist in\n``sys.modules`` then it is to be added to that dict before\ninitialization begins. If an error occurs during loading of the module\nand it was added to ``sys.modules`` it is to be removed from the dict.\nIf an error occurs but the module was already in ``sys.modules`` it is\nleft in the dict.\n\nThe loader must set several attributes on the module. ``__name__`` is\nto be set to the name of the module. ``__file__`` is to be the "path"\nto the file unless the module is built-in (and thus listed in\n``sys.builtin_module_names``) in which case the attribute is not set.\nIf what is being imported is a package then ``__path__`` is to be set\nto a list of paths to be searched when looking for modules and\npackages contained within the package being imported. ``__package__``\nis optional but should be set to the name of package that contains the\nmodule or package (the empty string is used for module not contained\nin a package). ``__loader__`` is also optional but should be set to\nthe loader object that is loading the module.\n\nIf an error occurs during loading then the loader raises\n``ImportError`` if some other exception is not already being\npropagated. Otherwise the loader returns the module that was loaded\nand initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of ``import`` statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. 
If the module name is followed by ``as``,\nthe name following ``as`` is used as the local name for the module.\n\nThe ``from`` form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of ``import``, an alternate local name\ncan be supplied by specifying "``as`` localname". If a name is not\nfound, ``ImportError`` is raised. If the list of identifiers is\nreplaced by a star (``\'*\'``), all public names defined in the module\nare bound in the local namespace of the ``import`` statement.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope. If the\nwild card form of import --- ``import *`` --- is used in a function\nand the function contains or is a nested block with free variables,\nthe compiler will raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. 
By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. If you execute ``from ..subpkg2\nimport mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are ``unicode_literals``,\n``print_function``, ``absolute_import``, ``division``, ``generators``,\n``nested_scopes`` and ``with_statement``. 
``generators``,\n``with_statement``, ``nested_scopes`` are redundant in Python version\n2.6 and above because they are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an ``exec`` statement or calls to the built-in\nfunctions ``compile()`` and ``execfile()`` that occur in a module\n``M`` containing a future statement will, by default, use the new\nsyntax or semantics associated with the future statement. This can,\nstarting with Python 2.2 be controlled by optional arguments to\n``compile()`` --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n', 'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. 
Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-built-in types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. 
[6]\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmakes sense for many other object types to support membership tests\nwithout being a sequence. In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. 
If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n', 'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n longinteger ::= integer ("l" | "L")\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"\n octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n nonzerodigit ::= "1"..."9"\n octdigit ::= "0"..."7"\n bindigit ::= "0" | "1"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case ``\'l\'`` and upper case ``\'L\'`` are allowed as\nsuffix for long integers, it is strongly recommended to always use\n``\'L\'``, since the letter ``\'l\'`` looks too much like the digit\n``\'1\'``.\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. 
[1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n 7 2147483647 0177\n 3L 79228162514264337593543950336L 0377L 0x100000000L\n 79228162514264337593543950336 0xdeadbeef\n', 'lambda': u'\nLambdas\n*******\n\n lambda_form ::= "lambda" [parameter_list]: expression\n old_lambda_form ::= "lambda" [parameter_list]: old_expression\n\nLambda forms (lambda expressions) have the same syntactic position as\nexpressions. They are a shorthand to create anonymous functions; the\nexpression ``lambda arguments: expression`` yields a function object.\nThe unnamed object behaves like a function object defined with\n\n def name(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda forms cannot contain\nstatements.\n', 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n old_expression ::= or_test | old_lambda_form\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one ``for`` clause and zero or\nmore ``for`` or ``if`` clauses. 
In this case, the elements of the new\nlist are those that would be produced by considering each of the\n``for`` or ``if`` clauses a block, nesting from left to right, and\nevaluating the expression to produce a list element each time the\ninnermost block is reached [1].\n', 'naming': u"\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the '**-c**' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. 
The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). 
It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no 's'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. 
``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no 's') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe global statement has the same scope as a name binding operation in\nthe same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. 
(In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n", 'numbers': u"\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator '``-``' and\nthe literal ``1``.\n", 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). 
For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. 
[2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. 
If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n', 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. 
An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The ``type()`` function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. 
See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change.\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. The\n\'``try``...``finally``\' statement provides a convenient way to do\nthis.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. 
E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n', 'operator-summary': u'\nSummary\n*******\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` *x* | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not`` ``in``, ``is``, ``is not``, | Comparisons, including membership |\n| ``<``, ``<=``, ``>``, ``>=``, ``<>``, ``!=``, | tests and identity tests, |\n| ``==`` | 
|\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [8] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key:datum...}``, ```expressions...``` | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks" the\n control variables of each ``for`` it contains into the containing\n scope. 
However, this behavior is deprecated, and relying on it\n will not work in Python 3.0\n\n[2] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. Function\n ``fmod()`` in the ``math`` module returns a result whose sign\n matches the sign of the first argument instead, and so returns\n ``-1e-100`` in this case. Which approach is more appropriate\n depends on the application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for ``floor(x/y)`` to be one larger than ``(x-x%y)/y``\n due to rounding. In such cases, Python returns the latter result,\n in order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[4] While comparisons between unicode strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ``u"\\u00C7"`` and ``u"\\u0043\\u0327"`` compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[5] The implementation computes this efficiently, without constructing\n lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of the\n sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. 
An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to ``{}``.\n\n[7] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[8] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n', 'pass': u'\nThe ``pass`` statement\n**********************\n\n pass_stmt ::= "pass"\n\n``pass`` is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): ``-1**2`` results in ``-1``.\n\nThe power operator has the same semantics as the built-in ``pow()``\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. 
For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, ``10**2`` returns ``100``, but\n``10**-2`` returns ``0.01``. (This last feature was added in Python\n2.2. In Python 2.1 and before, if both arguments were of integer types\nand the second argument was negative, an exception was raised).\n\nRaising ``0.0`` to a negative power results in a\n``ZeroDivisionError``. Raising a negative number to a fractional power\nresults in a ``ValueError``.\n', 'print': u'\nThe ``print`` statement\n***********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n``print`` evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is a whitespace character except ``\' \'``, or (3) when the last\nwrite operation on standard output was not a ``print`` statement. (In\nsome cases it may be functional to write an empty string to standard\noutput for this reason.)\n\nNote: Objects which act like file objects but which are not the built-in\n file objects often do not properly emulate this aspect of the file\n object\'s behavior, so it is best not to rely on this.\n\nA ``\'\\n\'`` character is written at the end, unless the ``print``\nstatement ends with a comma. 
This is the only action if the statement\ncontains just the keyword ``print``.\n\nStandard output is defined as the file object named ``stdout`` in the\nbuilt-in module ``sys``. If no such object exists, or if it does not\nhave a ``write()`` method, a ``RuntimeError`` exception is raised.\n\n``print`` also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n"``print`` chevron." In this form, the first expression after the\n``>>`` must evaluate to a "file-like" object, specifically an object\nthat has a ``write()`` method as described above. With this extended\nform, the subsequent expressions are printed to this file object. If\nthe first expression evaluates to ``None``, then ``sys.stdout`` is\nused as the file for output.\n', 'raise': u'\nThe ``raise`` statement\n***********************\n\n raise_stmt ::= "raise" [expression ["," expression ["," expression]]]\n\nIf no expressions are present, ``raise`` re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a ``TypeError`` exception is raised indicating that\nthis is an error (if running under IDLE, a ``Queue.Empty`` exception\nis raised instead).\n\nOtherwise, ``raise`` evaluates the expressions to get three objects,\nusing ``None`` as the value of omitted expressions. The first two\nobjects are used to determine the *type* and *value* of the exception.\n\nIf the first object is an instance, the type of the exception is the\nclass of the instance, the instance itself is the value, and the\nsecond object must be ``None``.\n\nIf the first object is a class, it becomes the type of the exception.\nThe second object is used to determine the exception value: If it is\nan instance of the class, the instance becomes the exception value. 
If\nthe second object is a tuple, it is used as the argument list for the\nclass constructor; if it is ``None``, an empty argument list is used,\nand any other object is treated as a single argument to the\nconstructor. The instance so created by calling the constructor is\nused as the exception value.\n\nIf a third object is present and not ``None``, it must be a traceback\nobject (see section *The standard type hierarchy*), and it is\nsubstituted instead of the current location as the place where the\nexception occurred. If the third object is present and not a\ntraceback object or ``None``, a ``TypeError`` exception is raised.\nThe three-expression form of ``raise`` is useful to re-raise an\nexception transparently in an except clause, but ``raise`` with no\nexpressions should be preferred if the exception to be re-raised was\nthe most recently active exception in the current scope.\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', 'return': u'\nThe ``return`` statement\n************************\n\n return_stmt ::= "return" [expression_list]\n\n``return`` may only occur syntactically nested in a function\ndefinition, not within a nested class definition.\n\nIf an expression list is present, it is evaluated, else ``None`` is\nsubstituted.\n\n``return`` leaves the current function call with the expression list\n(or ``None``) as return value.\n\nWhen ``return`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the function.\n\nIn a generator function, the ``return`` statement is not allowed to\ninclude an **expression_list**. 
In that context, a bare ``return``\nindicates that the generator is done and will cause ``StopIteration``\nto be raised.\n', 'sequence-methods': u'\nAdditional methods for emulation of sequence types\n**************************************************\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created instead, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. 
This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. 
However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. Calling ``max(0,\ni)`` conveniently returns the proper value.\n', 'sequence-types': u"\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python's\nstandard dictionary objects. The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. 
Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn't define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. 
For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). 
Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don't define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n", 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by ``pow(2, n)``. A\nleft shift by *n* bits is defined as multiplication with ``pow(2,\nn)``. Negative shift counts raise a ``ValueError`` exception.\n\nNote: In the current implementation, the right-hand operand is required to\n be at most ``sys.maxsize``. If the right-hand operand is larger\n than ``sys.maxsize`` an ``OverflowError`` exception is raised.\n', 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). 
Slicings may be used as expressions or as\ntargets in assignment or ``del`` statements. The syntax for a\nslicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n``sys.maxint``, respectively. If either bound is negative, the\nsequence\'s length is added to it. The slicing now selects all items\nwith index *k* such that ``i <= k < j`` where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. 
The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in ``Ellipsis`` object. The conversion of a proper\nslice is a slice object (see section *The standard type hierarchy*)\nwhose ``start``, ``stop`` and ``step`` attributes are the values of\nthe expressions given as lower bound, upper bound and stride,\nrespectively, substituting ``None`` for missing expressions.\n', 'specialattrs': u"\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the ``dir()`` built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object's\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object's attributes. This attribute is no\n longer available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object's attributes. This attribute is no\n longer available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. 
It is called at class\n instantiation, and its result is stored in ``__mro__``.\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. Example:\n\n >>> int.__subclasses__()\n [<type 'bool'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list ``[1, 2]`` is considered equal to\n ``[1.0, 2.0]``, and similarly for tuples.\n\n[3] They must have since the parser can't tell the type of the\n operands.\n\n[4] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n\n[5] The advantage of leaving the newline on is that returning an empty\n string is then an unambiguous EOF indication. It is also possible\n (in cases where it might matter, for example, if you want to make\n an exact copy of a file while scanning its lines) to tell whether\n the last line of a file ended in a newline or not (yes this\n happens!).\n", 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``x.__getitem__(i)`` for\nold-style classes and ``type(x).__getitem__(x, i)`` for new-style\nclasses. 
Except where mentioned, attempts to execute an operation\nraise an exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. 
It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. 
Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. 
Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. 
The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. 
Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. 
If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. 
If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). 
*name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. 
See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in the\nclass dictionary of another new-style class, known as the *owner*\nclass. In the examples below, "the attribute" refers to the attribute\nwhose name is the key of the property in the owner class\'\n``__dict__``. Descriptors can only be implemented as new-style\nclasses themselves.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, A)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. 
Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. 
As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using ``type()``. A\nclass definition is read into a separate namespace and the value of\nclass name is bound to the result of ``type(name, bases, dict)``.\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of ``type()``. This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing the\n role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s ``__new__()``\nmethod -- ``type.__new__()`` can then be called from this method to\ncreate a class with different properties. 
This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom ``__call__()`` method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for ``name``,\n ``bases``, and ``dict``. Upon class creation, the callable is used\n instead of the built-in ``type()``.\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If ``dict[\'__metaclass__\']`` exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. 
Some ideas that have\nbeen explored including logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing ``isinstance()`` and\n ``issubclass()`` behavior through ``__instancecheck__()`` and\n ``__subclasscheck__()``, with motivation for this functionality\n in the context of adding Abstract Base Classes (see the ``abc``\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python\'s\nstandard dictionary objects. 
The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. 
If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. 
It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. 
The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created instead, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. 
Calling ``max(0,\ni)`` conveniently returns the proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. 
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. 
For example, for the\n operator \'``+``\', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s ``__rop__()`` method, the right operand\'s ``__rop__()``\n method is tried *before* the left operand\'s ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type\'s ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like \'``+=``\') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. 
When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operator is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. 
The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n``x.__getitem__(i)`` or implicitly as in ``x[i]``.\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... 
pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n', 'string-conversions': u'\nString conversions\n******************\n\nA string conversion is an expression list enclosed in reverse (a.k.a.\nbackward) quotes:\n\n string_conversion ::= "\'" expression_list "\'"\n\nA string conversion evaluates the contained expression list and\nconverts the resulting object into a string according to rules\nspecific to its type.\n\nIf the object is a string, a number, ``None``, or a tuple, list or\ndictionary containing only objects whose type is one of these, the\nresulting string is a valid Python expression which can be passed to\nthe built-in function ``eval()`` to yield an expression with the same\nvalue (or an approximation, if floating point numbers are involved).\n\n(In particular, converting a string adds quotes around it and converts\n"funny" characters to escape sequences that are safe to print.)\n\nRecursive objects (for example, lists or dictionaries that contain a\nreference to themselves, 
directly or indirectly) use ``...`` to\nindicate a recursive reference, and the result cannot be passed to\n``eval()`` to get an equal value (``SyntaxError`` will be raised\ninstead).\n\nThe built-in function ``repr()`` performs exactly the same conversion\nin its argument as enclosing it in parentheses and reverse quotes\ndoes. The built-in function ``str()`` performs a similar but more\nuser-friendly conversion.\n', 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbuffer, xrange* section. To output formatted strings use template\nstrings or the ``%`` operator described in the *String Formatting\nOperations* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with only its first character\n capitalized.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. 
The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. 
This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there 
is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that that can be used to form decimal-radix\n numbers, e.g. 
U+0660, ARABIC-INDIC DIGIT ZERO.\n', 'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the **stringprefix** and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section *Encoding declarations*.\n\nIn plain English: String literals can be enclosed in matching single\nquotes (``\'``) or double quotes (``"``). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash (``\\``)\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter ``\'r\'`` or\n``\'R\'``; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of ``\'u\'`` or\n``\'U\'`` makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. 
The two prefix characters may be\ncombined; in this case, ``\'u\'`` must appear before ``\'r\'``.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\N{name}`` | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | 
(1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default). Individual code units which form parts of a surrogate\n pair can be encoded using this escape sequence.\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the byte\n with the given value; it is not necessary that the byte encodes a\n character in the source character set. In a Unicode literal, these\n escapes denote a Unicode character with the given value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n``r"\\n"`` consists of two characters: a backslash and a lowercase\n``\'n\'``. String quotes can be escaped with a backslash, but the\nbackslash remains in the string; for example, ``r"\\""`` is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; ``r"\\"`` is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is used in conjunction with a\n``\'u\'`` or ``\'U\'`` prefix, then the ``\\uXXXX`` and ``\\UXXXXXXXX``\nescape sequences are processed while *all other backslashes are left\nin the string*. For example, the string literal ``ur"\\u0062\\n"``\nconsists of three Unicode characters: \'LATIN SMALL LETTER B\', \'REVERSE\nSOLIDUS\', and \'LATIN SMALL LETTER N\'. Backslashes can be escaped with\na preceding backslash; however, both remain in the string. 
As a\nresult, ``\\uXXXX`` escape sequences are only recognized when there are\nan odd number of backslashes.\n', 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., ``x[-1]`` selects the last item of\n``x``.) The resulting value must be a nonnegative integer less than\nthe number of items in the sequence, and the subscription selects the\nitem whose index is that value (counting from zero).\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': u"\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an ``if`` or\n``while`` condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* ``None``\n\n* ``False``\n\n* zero of any numeric type, for example, ``0``, ``0L``, ``0.0``,\n ``0j``.\n\n* any empty sequence, for example, ``''``, ``()``, ``[]``.\n\n* any empty mapping, for example, ``{}``.\n\n* instances of user-defined classes, if the class defines a\n ``__nonzero__()`` or ``__len__()`` method, when that method returns\n the integer zero or ``bool`` value ``False``. 
[1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn ``0`` or ``False`` for false and ``1`` or ``True`` for true,\nunless otherwise stated. (Important exception: the Boolean operations\n``or`` and ``and`` always return one of their operands.)\n", 'try': u'\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. 
An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. 
Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n', 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. 
Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``Ellipsis``. It is used to indicate the presence of the ``...``\n syntax in a slice. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception ``OverflowError`` is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. 
The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex``\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. 
Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions ``chr()`` and ``ord()`` convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions ``chr()`` and ``ord()`` implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. 
A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in ``sys.maxunicode``, and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions ``unichr()`` and\n ``ord()`` convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method ``encode()`` and the\n built-in function ``unicode()``.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. 
Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. 
The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm``, ``gdbm``, and ``bsddb`` provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | ``func_doc`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +-------------------------+---------------------------------+-------------+\n | ``__doc__`` | Another way of spelling | Writable |\n | | ``func_doc`` | |\n +-------------------------+---------------------------------+-------------+\n | ``func_name`` | The function\'s name | Writable |\n +-------------------------+---------------------------------+-------------+\n | ``__name__`` | Another way of spelling | Writable |\n | | ``func_name`` | |\n +-------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the 
module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_defaults`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +-------------------------+---------------------------------+-------------+\n | ``func_code`` | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_globals`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_dict`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_closure`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: ``func_name`` is now writable.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. 
Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or ``None``) and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: ``im_self`` is the class instance\n object, ``im_func`` is the function object; ``im_class`` is the\n class of ``im_self`` for bound methods or the class that asked\n for the method for unbound methods; ``__doc__`` is the method\'s\n documentation (same as ``im_func.__doc__``); ``__name__`` is the\n method name (same as ``im_func.__name__``); ``__module__`` is\n the name of the module the method was defined in, or ``None`` if\n unavailable.\n\n Changed in version 2.2: ``im_self`` used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For 3.0 forward-compatibility,\n ``im_func`` is also available as ``__func__``, and ``im_self``\n as ``__self__``.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its ``im_self``\n attribute is ``None`` and the method object is said to be\n unbound. 
When one is created by retrieving a user-defined\n function object from a class via one of its instances, its\n ``im_self`` attribute is the instance, and the method object is\n said to be bound. In either case, the new method\'s ``im_class``\n attribute is the class from which the retrieval takes place, and\n its ``im_func`` attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``im_func``\n attribute of the new instance is not the original method object\n but its ``im_func`` attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its ``im_self``\n attribute is the class itself (the same as the ``im_class``\n attribute), and its ``im_func`` attribute is the function object\n underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function (``im_func``) is called, with the\n restriction that the first argument must be an instance of the\n proper class (``im_class``) or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function (``im_func``) is called, inserting the class\n instance (``im_self``) in front of the argument list. 
For\n instance, when ``C`` is a class which contains a definition for\n a function ``f()``, and ``x`` is an instance of ``C``, calling\n ``x.f(1)`` is equivalent to calling ``C.f(x, 1)``.\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in ``im_self`` will actually\n be the class itself, so that calling either ``x.f(1)`` or\n ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``next()`` method will cause the function to\n execute until it provides a value using the ``yield`` statement.\n When the function executes a ``return`` statement or falls off\n the end, a ``StopIteration`` exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). 
The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *list*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override ``__new__()``. The arguments of the call are passed to\n ``__new__()`` and, in the typical case, to ``__init__()`` to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s ``__init__()``\n method if it has one. Any arguments are passed on to the\n ``__init__()`` method. If there is no ``__init__()`` method,\n the class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a ``__call__()`` method;\n ``x(arguments)`` is a shorthand for ``x.__call__(arguments)``.\n\nModules\n Modules are imported by the ``import`` statement (see section *The\n import statement*). 
A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., ``m.x`` is equivalent to ``m.__dict__["x"]``. A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute is not present for C modules that are\n statically linked into the interpreter; for extension modules\n loaded dynamically from a shared library, it is the pathname of the\n shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section *Class definitions*). A class has a namespace\n implemented by a dictionary object. Class attribute references are\n translated to lookups in this dictionary, e.g., ``C.x`` is\n translated to ``C.__dict__["x"]`` (although for new-style classes\n in particular there are a number of hooks which allow for other\n means of locating attributes). When the attribute name is not found\n there, the attribute search continues in the base classes. For\n old-style classes, the search is depth-first, left-to-right in the\n order of occurrence in the base class list. 
New-style classes use\n the more complex C3 method resolution order which behaves correctly\n even in the presence of \'diamond\' inheritance structures where\n there are multiple inheritance paths leading back to a common\n ancestor. Additional details on the C3 MRO used by new-style\n classes can be found in the documentation accompanying the 2.3\n release at http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a user-defined function object or an unbound user-defined method\n object whose associated class is either ``C`` or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose ``im_class`` attribute is ``C``. When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose ``im_class`` and ``im_self`` attributes are\n both ``C``. When it would yield a static method object, it is\n transformed into the object wrapped by the static method object.\n See section *Implementing Descriptors* for another way in which\n attributes retrieved from a class may differ from those actually\n contained in its ``__dict__`` (note that only new-style classes\n support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary 
which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it ``C``) of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n ``im_class`` attribute is ``C`` and whose ``im_self`` attribute is\n the instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class ``C``; see\n above under "Classes". See section *Implementing Descriptors* for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s ``__dict__``. If no class attribute is found, and the\n object\'s class has a ``__getattr__()`` method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the ``open()`` built-in function, and also by ``os.popen()``,\n ``os.fdopen()``, and the ``makefile()`` method of socket objects\n (and perhaps by other functions or methods provided by extension\n modules). 
The objects ``sys.stdin``, ``sys.stdout`` and\n ``sys.stderr`` are initialized to file objects corresponding to the\n interpreter\'s standard input, output and error streams. See *File\n Objects* for complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string 
encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. 
They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_restricted`` is a flag indicating whether the function is\n executing in restricted execution mode; ``f_lasti`` gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_exc_type``, ``f_exc_value``,\n ``f_exc_traceback`` represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); ``f_lineno``\n is the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as ``sys.exc_traceback``,\n and also as the third item of the tuple returned by\n ``sys.exc_info()``. The latter is the preferred interface,\n since it works correctly when the program is using multiple\n threads. 
When the program contains no suitable handler, the\n stack trace is written (nicely formatted) to the standard error\n stream; if the interpreter is interactive, it is also made\n available to the user as ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., ``a[i:j:step]``,\n ``a[i:j, k:l]``, or ``a[..., i:j]``. They are also created by\n the built-in ``slice()`` function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. 
A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n', 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', 'typesmapping': u'\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. 
Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict([arg])\n\n Return a new dictionary initialized from an optional positional\n argument or from a set of keyword arguments. If no arguments are\n given, return a new empty dictionary. If the positional argument\n *arg* is a mapping object, return a dictionary mapping the same\n keys to the same values as does the mapping object. Otherwise the\n positional argument must be a sequence, a container that supports\n iteration, or an iterator object. The elements of the argument\n must each also be of one of those kinds, and each must in turn\n contain exactly two objects. The first is used as a key in the new\n dictionary, and the second as the key\'s value. If a given key is\n seen more than once, the last value associated with it is retained\n in the new dictionary.\n\n If keyword arguments are given, the keywords themselves with their\n associated values are added as items to the dictionary. If a key is\n specified both in the positional argument and as a keyword\n argument, the value associated with the keyword is retained in the\n dictionary. 
For example, these all return a dictionary equal to\n ``{"one": 2, "two": 3}``:\n\n * ``dict(one=2, two=3)``\n\n * ``dict({\'one\': 2, \'two\': 3})``\n\n * ``dict(zip((\'one\', \'two\'), (2, 3)))``\n\n * ``dict([[\'two\', 3], [\'one\', 2]])``\n\n The first example only works for keys that are valid Python\n identifiers; the others work with any valid keys.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n New in version 2.5: If a subclass of dict defines a method\n ``__missing__()``, if the key *key* is not present, the\n ``d[key]`` operation calls that method with the key *key* as\n argument. The ``d[key]`` operation then returns or raises\n whatever is returned or raised by the ``__missing__(key)`` call\n if the key is not present. No other operations or methods invoke\n ``__missing__()``. If ``__missing__()`` is not defined,\n ``KeyError`` is raised. ``__missing__()`` must be a method; it\n cannot be an instance variable. For an example, see\n ``collections.defaultdict``.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. 
This is a\n shortcut for ``iterkeys()``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. ``has_key()``\n is deprecated in favor of ``key in d``.\n\n items()\n\n Return a copy of the dictionary\'s list of ``(key, value)``\n pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If ``items()``, ``keys()``, ``values()``, ``iteritems()``,\n ``iterkeys()``, and ``itervalues()`` are called with no\n intervening modifications to the dictionary, the lists will\n directly correspond. This allows the creation of ``(value,\n key)`` pairs using ``zip()``: ``pairs = zip(d.values(),\n d.keys())``. The same relationship holds for the ``iterkeys()``\n and ``itervalues()`` methods: ``pairs = zip(d.itervalues(),\n d.iterkeys())`` provides the same value for ``pairs``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.iteritems()]``.\n\n iteritems()\n\n Return an iterator over the dictionary\'s ``(key, value)`` pairs.\n See the note for ``dict.items()``.\n\n Using ``iteritems()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. 
See the note for\n ``dict.items()``.\n\n Using ``iterkeys()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for ``dict.items()``.\n\n Using ``itervalues()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for ``dict.items()``.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as a tuple or other iterable of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. 
See the note\n for ``dict.items()``.\n\n viewitems()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.viewkeys()``, ``dict.viewvalues()`` and\n``dict.viewitems()`` are *view objects*. They provide a dynamic view\non the dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. 
Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... 
n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n', 'typesmethods': u"\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: ``m.im_self`` is the object on which the method\noperates, and ``m.im_func`` is the function implementing the method.\nCalling ``m(arg-1, arg-2, ..., arg-n)`` is completely equivalent to\ncalling ``m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)``.\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its ``im_self`` attribute\nwill be ``None`` and if called, an explicit ``self`` object must be\npassed as the first argument. In this case, ``self`` must be an\ninstance of the unbound method's class (or a subclass of that class),\notherwise a ``TypeError`` is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object (``meth.im_func``), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set a method attribute results in a ``TypeError`` being\nraised. 
In order to set a method attribute, you need to explicitly\nset it on the underlying function object:\n\n class C:\n def method(self):\n pass\n\n c = C()\n c.method.im_func.whoami = 'my name is c'\n\nSee *The standard type hierarchy* for more information.\n", 'typesmodules': u"\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. (Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special member of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ``<module\n'sys' (built-in)>``. If loaded from a file, they are written as\n``<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>``.\n", 'typesseq': u'\nSequence Types --- ``str``, ``unicode``, ``list``, ``tuple``, ``buffer``, ``xrange``\n************************************************************************************\n\nThere are six sequence types: strings, Unicode strings, lists, tuples,\nbuffers, and xrange objects.\n\nFor other containers see the built in ``dict`` and ``set`` classes,\nand the ``collections`` module.\n\nString literals are written in single or double quotes: ``\'xyzzy\'``,\n``"frobozz"``. 
See *String literals* for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding ``\'u\'`` character: ``u\'abc\'``, ``u"def"``. In\naddition to the functionality described here, there are also string-\nspecific methods described in the *String Methods* section. Lists are\nconstructed with square brackets, separating items with commas: ``[a,\nb, c]``. Tuples are constructed by the comma operator (not within\nsquare brackets), with or without enclosing parentheses, but an empty\ntuple must have the enclosing parentheses, such as ``a, b, c`` or\n``()``. A single item tuple must have a trailing comma, such as\n``(d,)``.\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function ``buffer()``. They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n``xrange()`` function. They don\'t support slicing, concatenation or\nrepetition, and using ``in``, ``not in``, ``min()`` or ``max()`` on\nthem is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). 
In the table,\n*s* and *t* are sequences of the same type; *n*, *i* and *j* are\nintegers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*\'th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. 
When *s* is a string or Unicode string object the ``in`` and ``not\n in`` operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). 
Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. **CPython implementation detail:** If *s* and *t* are both strings,\n some Python implementations such as CPython can usually perform an\n in-place optimization for assignments of the form ``s = s + t`` or\n ``s += t``. When applicable, this optimization makes quadratic\n run-time much less likely. This optimization is both version and\n implementation dependent. For performance sensitive code, it is\n preferable to use the ``str.join()`` method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbuffer, xrange* section. To output formatted strings use template\nstrings or the ``%`` operator described in the *String Formatting\nOperations* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with only its first character\n capitalized.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. 
*errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. 
This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters in the string are lowercase and\n there 
is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters in the string are uppercase and\n there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than\n ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string converted to uppercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that that can be used to form decimal-radix\n numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the\n``%`` operator (modulo). This is also known as the string\n*formatting* or *interpolation* operator. Given ``format % values``\n(where *format* is a string or Unicode object), ``%`` conversion\nspecifications in *format* are replaced with zero or more elements of\n*values*. The effect is similar to the using ``sprintf()`` in the C\nlanguage. If *format* is a Unicode object, or if any of the objects\nbeing converted using the ``%s`` conversion are Unicode objects, the\nresult will also be a Unicode object.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. 
[4] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(#)03d quote types.\' % \\\n... {\'language\': "Python", "#": 2}\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). 
|\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). 
| (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any Python object using ``repr()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any Python object using ``str()``). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. 
The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The ``%r`` conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a ``unicode`` string, the\n resulting string will also be ``unicode``.\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nChanged in version 2.7: ``%f`` conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by ``%g`` conversions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nXRange Type\n===========\n\nThe ``xrange`` type is an immutable sequence which is commonly used\nfor looping. The advantage of the ``xrange`` type is that an\n``xrange`` object will always take the same amount of memory, no\nmatter the size of the range it represents. 
There are no consistent\nperformance advantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the ``len()`` function.\n\n\nMutable Sequence Types\n======================\n\nList objects support additional operations that allow in-place\nmodification of the object. Other mutable sequence types (when added\nto the language) should also support these operations. Strings and\ntuples are immutable sequence types: such objects cannot be modified\nonce created. The following operations are defined on mutable sequence\ntypes (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. 
If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn\'t have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. 
Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n', 'typesseq-mutable': u"\nMutable Sequence Types\n**********************\n\nList objects support additional operations that allow in-place\nmodification of the object. Other mutable sequence types (when added\nto the language) should also support these operations. Strings and\ntuples are immutable sequence types: such objects cannot be modified\nonce created. 
The following operations are defined on mutable sequence\ntypes (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn't have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. 
The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don't return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. 
**CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n", 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of ``x`` is\ndefined as ``-(x+1)``. It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n', 'while': u'\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n', 'with': u'\nThe ``with`` statement\n**********************\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). 
This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'yield': u'\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s ``next()``\nmethod repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of **expression_list** is returned to\n``next()``\'s caller. 
By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nAs of Python version 2.5, the ``yield`` statement is now allowed in\nthe ``try`` clause of a ``try`` ... ``finally`` construct. If the\ngenerator is not resumed before it is finalized (by reaching a zero\nreference count or by being garbage collected), the generator-\niterator\'s ``close()`` method will be called, allowing any pending\n``finally`` clauses to execute.\n\nNote: In Python 2.2, the ``yield`` statement was only allowed when the\n ``generators`` feature has been enabled. This ``__future__`` import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing ``yield`` to appear inside a ``try`` ... ``finally``\n block.\n'}
gpl-3.0
frreiss/tensorflow-fred
tensorflow/python/data/benchmarks/batch_benchmark.py
16
2887
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for `tf.data.Dataset.batch()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.benchmarks import benchmark_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import sparse_tensor class BatchBenchmark(benchmark_base.DatasetBenchmarkBase): """Benchmarks for `tf.data.Dataset.batch()`.""" def benchmark_batch_sparse(self): non_zeros_per_row_values = [0, 1, 5, 10, 100] batch_size_values = [1, 32, 64, 128, 1024] for non_zeros_per_row in non_zeros_per_row_values: tensor = sparse_tensor.SparseTensor( indices=np.arange(non_zeros_per_row, dtype=np.int64)[:, np.newaxis], values=np.arange(non_zeros_per_row, dtype=np.int64), dense_shape=[1000]) for batch_size in batch_size_values: dataset = dataset_ops.Dataset.from_tensors(tensor).repeat().batch( batch_size) self.run_and_report_benchmark( dataset, num_elements=100000 // batch_size, iters=1, name="sparse_num_elements_%d_batch_size_%d" % (non_zeros_per_row, batch_size)) def benchmark_batch_dense(self): for element_exp in [10, 12, 14, 16, 18, 20, 22]: for batch_exp in [3, 6, 9]: for parallel_copy in [True, False]: element_size = 1 << element_exp batch_size = 1 << batch_exp dataset = 
dataset_ops.Dataset.from_tensors( np.random.rand(element_size)).repeat().batch(batch_size) options = dataset_ops.Options() options.experimental_optimization.parallel_batch = parallel_copy dataset = dataset.with_options(options) tag = "_parallel" if parallel_copy else "" self.run_and_report_benchmark( dataset, num_elements=(1 << (22 - batch_exp - element_exp // 2)), iters=1, name="batch_element_size_%d_batch_size_%d%s" % (element_size, batch_size, tag)) if __name__ == "__main__": benchmark_base.test.main()
apache-2.0
leki75/ansible
lib/ansible/playbook/role/definition.py
62
8989
# (c) 2014 Michael DeHaan, <michael@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils.six import iteritems, string_types from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base from ansible.playbook.become import Become from ansible.playbook.conditional import Conditional from ansible.playbook.taggable import Taggable from ansible.template import Templar from ansible.utils.path import unfrackpath try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() __all__ = ['RoleDefinition'] class RoleDefinition(Base, Become, Conditional, Taggable): _role = FieldAttribute(isa='string') def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): super(RoleDefinition, self).__init__() self._play = play self._variable_manager = variable_manager self._loader = loader self._role_path = None self._role_basedir = role_basedir self._role_params = dict() # def __repr__(self): # return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>') 
@staticmethod def load(data, variable_manager=None, loader=None): raise AnsibleError("not implemented") def preprocess_data(self, ds): # role names that are simply numbers can be parsed by PyYAML # as integers even when quoted, so turn it into a string type if isinstance(ds, int): ds = "%s" % ds assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject) if isinstance(ds, dict): ds = super(RoleDefinition, self).preprocess_data(ds) # save the original ds for use later self._ds = ds # we create a new data structure here, using the same # object used internally by the YAML parsing code so we # can preserve file:line:column information if it exists new_ds = AnsibleMapping() if isinstance(ds, AnsibleBaseYAMLObject): new_ds.ansible_pos = ds.ansible_pos # first we pull the role name out of the data structure, # and then use that to determine the role path (which may # result in a new role name, if it was a file path) role_name = self._load_role_name(ds) (role_name, role_path) = self._load_role_path(role_name) # next, we split the role params out from the valid role # attributes and update the new datastructure with that # result and the role name if isinstance(ds, dict): (new_role_def, role_params) = self._split_role_params(ds) new_ds.update(new_role_def) self._role_params = role_params # set the role name in the new ds new_ds['role'] = role_name # we store the role path internally self._role_path = role_path # and return the cleaned-up data structure return new_ds def _load_role_name(self, ds): ''' Returns the role name (either the role: or name: field) from the role definition, or (when the role definition is a simple string), just that string ''' if isinstance(ds, string_types): return ds role_name = ds.get('role', ds.get('name')) if not role_name or not isinstance(role_name, string_types): raise AnsibleError('role definitions must contain a role name', obj=ds) # if we have the required datastructures, and if the role_name # 
contains a variable, try and template it now if self._variable_manager: all_vars = self._variable_manager.get_vars(play=self._play) templar = Templar(loader=self._loader, variables=all_vars) if templar._contains_vars(role_name): role_name = templar.template(role_name) return role_name def _load_role_path(self, role_name): ''' the 'role', as specified in the ds (or as a bare string), can either be a simple name or a full path. If it is a full path, we use the basename as the role name, otherwise we take the name as-given and append it to the default role path ''' # we always start the search for roles in the base directory of the playbook role_search_paths = [ os.path.join(self._loader.get_basedir(), u'roles'), ] # also search in the configured roles path if C.DEFAULT_ROLES_PATH: role_search_paths.extend(C.DEFAULT_ROLES_PATH) # next, append the roles basedir, if it was set, so we can # search relative to that directory for dependent roles if self._role_basedir: role_search_paths.append(self._role_basedir) # finally as a last resort we look in the current basedir as set # in the loader (which should be the playbook dir itself) but without # the roles/ dir appended role_search_paths.append(self._loader.get_basedir()) # create a templar class to template the dependency names, in # case they contain variables if self._variable_manager is not None: all_vars = self._variable_manager.get_vars(play=self._play) else: all_vars = dict() templar = Templar(loader=self._loader, variables=all_vars) role_name = templar.template(role_name) # now iterate through the possible paths and return the first one we find for path in role_search_paths: path = templar.template(path) role_path = unfrackpath(os.path.join(path, role_name)) if self._loader.path_exists(role_path): return (role_name, role_path) # if not found elsewhere try to extract path from name role_path = unfrackpath(role_name) if self._loader.path_exists(role_path): role_name = os.path.basename(role_name) return (role_name, 
role_path) raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds) def _split_role_params(self, ds): ''' Splits any random role params off from the role spec and store them in a dictionary of params for parsing later ''' role_def = dict() role_params = dict() base_attribute_names = frozenset(self._valid_attrs.keys()) for (key, value) in iteritems(ds): # use the list of FieldAttribute values to determine what is and is not # an extra parameter for this role (or sub-class of this role) # FIXME: hard-coded list of exception key names here corresponds to the # connection fields in the Base class. There may need to be some # other mechanism where we exclude certain kinds of field attributes, # or make this list more automatic in some way so we don't have to # remember to update it manually. if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'): if key in ('connection', 'port', 'remote_user'): display.deprecated("Using '%s' as a role param has been deprecated. " % key + "In the future, these values should be entered in the `vars:` " + "section for roles, but for now we'll store it as both a param and an attribute.", version="2.7") role_def[key] = value # this key does not match a field attribute, so it must be a role param role_params[key] = value else: # this is a field attribute, so copy it over directly role_def[key] = value return (role_def, role_params) def get_role_params(self): return self._role_params.copy() def get_role_path(self): return self._role_path
gpl-3.0
spencerpomme/coconuts-on-fire
VillageMerger/components/classtools.py
2
1052
# file classtools.py (new)
"Assorted class utilities and tools"


class AttrDisplay:
    """
    Provides an inheritable display overload method that shows
    instances with their class name and a name=value pair for each
    attribute stored on the instance itself (but not attributes
    inherited from its classes). Can be mixed into any class, and
    will work on any instance.
    """

    def getherAttrs(self):
        # NOTE(review): method name is a historical misspelling of
        # "gatherAttrs"; kept as-is since it is the public interface.
        """Return a 'name=value, ...' string of the instance's own
        attributes, sorted by attribute name for a stable display."""
        # join over a generator instead of a manual append loop;
        # getattr(self, key) could also be written self.__dict__[key]
        return ', '.join('%s=%s' % (key, getattr(self, key))
                         for key in sorted(self.__dict__))

    def __repr__(self):
        """Format the instance as '[ClassName: attr=value, ...]'."""
        return '[%s: %s]' % (self.__class__.__name__, self.getherAttrs())


if __name__ == '__main__':
    # Self-test: demonstrate that attributes set on instances (directly
    # or via an inherited __init__) appear in the display, with the
    # most-derived class name shown.

    class Toptest(AttrDisplay):
        count = 0

        def __init__(self):
            self.attr1 = Toptest.count
            self.attr2 = Toptest.count + 1
            Toptest.count += 20

    class Subtest(Toptest):
        pass

    x, y = Toptest(), Subtest()
    print(x)
    print(y)
apache-2.0
idncom/odoo
addons/account/project/wizard/account_analytic_cost_ledger_for_journal_report.py
378
2209
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv class account_analytic_cost_ledger_journal_report(osv.osv_memory): _name = 'account.analytic.cost.ledger.journal.report' _description = 'Account Analytic Cost Ledger For Journal Report' _columns = { 'date1': fields.date('Start of period', required=True), 'date2': fields.date('End of period', required=True), 'journal': fields.many2many('account.analytic.journal', 'ledger_journal_rel', 'ledger_id', 'journal_id', 'Journals'), } _defaults = { 'date1': lambda *a: time.strftime('%Y-01-01'), 'date2': lambda *a: time.strftime('%Y-%m-%d') } def check_report(self, cr, uid, ids, context=None): if context is None: context = {} data = self.read(cr, uid, ids)[0] datas = { 'ids': context.get('active_ids', []), 'model': 'account.analytic.account', 'form': data } datas['form']['active_ids'] = context.get('active_ids', False) return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticcostledgerquantity', data=datas, context=context) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
jruben/jruben.github.io
node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/styles/bw.py
364
1355
# -*- coding: utf-8 -*- """ pygments.styles.bw ~~~~~~~~~~~~~~~~~~ Simple black/white only style. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Operator, Generic class BlackWhiteStyle(Style): background_color = "#ffffff" default_style = "" styles = { Comment: "italic", Comment.Preproc: "noitalic", Keyword: "bold", Keyword.Pseudo: "nobold", Keyword.Type: "nobold", Operator.Word: "bold", Name.Class: "bold", Name.Namespace: "bold", Name.Exception: "bold", Name.Entity: "bold", Name.Tag: "bold", String: "italic", String.Interpol: "bold", String.Escape: "bold", Generic.Heading: "bold", Generic.Subheading: "bold", Generic.Emph: "italic", Generic.Strong: "bold", Generic.Prompt: "bold", Error: "border:#FF0000" }
mit
dhp-denero/LibrERP
account_invoice_template/__init__.py
3
1065
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>) # Copyright (C) 2011 Domsense srl (<http://www.domsense.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import account_invoice_template import wizard
agpl-3.0
DDEFISHER/servo
tests/wpt/web-platform-tests/conformance-checkers/tools/url.py
125
23557
# -*- coding: utf-8 -*- import os ccdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # based on https://github.com/w3c/web-platform-tests/blob/275544eab54a0d0c7f74ccc2baae9711293d8908/url/urltestdata.txt invalid = { "scheme-trailing-tab": "a:\tfoo.com", "scheme-trailing-newline": "a:\nfoo.com", "scheme-trailing-cr": "a:\rfoo.com", "scheme-trailing-space": "a: foo.com", "scheme-trailing-tab": "a:\tfoo.com", "scheme-trailing-newline": "a:\nfoo.com", "scheme-trailing-cr": "a:\rfoo.com", "scheme-http-no-slash": "http:foo.com", "scheme-http-no-slash-colon": "http::@c:29", "scheme-http-no-slash-square-bracket": "http:[61:27]/:foo", "scheme-http-backslash": "http:\\\\foo.com\\", "scheme-http-single-slash": "http:/example.com/", "scheme-ftp-single-slash": "ftp:/example.com/", "scheme-https-single-slash": "https:/example.com/", "scheme-data-single-slash": "data:/example.com/", "scheme-ftp-no-slash": "ftp:example.com/", "scheme-https-no-slash": "https:example.com/", "scheme-javascript-no-slash-malformed": "javascript:example.com/", "userinfo-password-bad-chars": "http://&a:foo(b]c@d:2/", "userinfo-username-contains-at-sign": "http://::@c@d:2", "userinfo-backslash": "http://a\\b:c\\d@foo.com", "host-space": "http://example .org", "host-tab": "http://example\t.org", "host-newline": "http://example.\norg", "host-cr": "http://example.\rorg", "host-square-brackets-port-contains-colon": "http://[1::2]:3:4", "port-single-letter": "http://f:b/c", "port-multiple-letters": "http://f:fifty-two/c", "port-leading-colon": "http://2001::1", "port-leading-colon-bracket-colon": "http://2001::1]:80", "path-leading-backslash-at-sign": "http://foo.com/\\@", "path-leading-colon-backslash": ":\\", "path-leading-colon-chars-backslash": ":foo.com\\", "path-relative-square-brackets": "[61:24:74]:98", "fragment-contains-hash": "http://foo/path#f#g", "path-percent-encoded-malformed": "http://example.com/foo/%2e%2", "path-bare-percent-sign": "http://example.com/foo%", "path-u0091": 
u"http://example.com/foo\u0091".encode('utf-8'), "userinfo-username-contains-pile-of-poo": "http://💩:foo@example.com", "userinfo-password-contains-pile-of-poo": "http://foo:💩@example.com", "host-hostname-in-brackets": "http://[www.google.com]/", "host-empty": "http://", "host-empty-with-userinfo": "http://user:pass@/", "port-leading-dash": "http://foo:-80/", "host-empty-userinfo-empty": "http://@/www.example.com", "host-invalid-unicode": u"http://\ufdd0zyx.com".encode('utf-8'), "host-invalid-unicode-percent-encoded": "http://%ef%b7%90zyx.com", "host-double-percent-encoded": u"http://\uff05\uff14\uff11.com".encode('utf-8'), "host-double-percent-encoded-percent-encoded": "http://%ef%bc%85%ef%bc%94%ef%bc%91.com", "host-u0000-percent-encoded": u"http://\uff05\uff10\uff10.com".encode('utf-8'), "host-u0000-percent-encoded-percent-encoded": "http://%ef%bc%85%ef%bc%90%ef%bc%90.com", } invalid_absolute = invalid.copy() invalid_url_code_points = { "fragment-backslash": "#\\", "fragment-leading-space": "http://f:21/b# e", "path-contains-space": "/a/ /c", "path-leading-space": "http://f:21/ b", "path-tab": "http://example.com/foo\tbar", "path-trailing-space": "http://f:21/b ?", "port-cr": "http://f:\r/c", "port-newline": "http://f:\n/c", "port-space": "http://f: /c", "port-tab": "http://f:\t/c", "query-leading-space": "http://f:21/b? 
d", "query-trailing-space": "http://f:21/b?d #", } invalid.update(invalid_url_code_points) invalid_absolute.update(invalid_url_code_points) valid_absolute = { "scheme-private": "a:foo.com", "scheme-private-slash": "foo:/", "scheme-private-slash-slash": "foo://", "scheme-private-path": "foo:/bar.com/", "scheme-private-path-leading-slashes-only": "foo://///////", "scheme-private-path-leading-slashes-chars": "foo://///////bar.com/", "scheme-private-path-leading-slashes-colon-slashes": "foo:////://///", "scheme-private-single-letter": "c:/foo", "scheme-private-single-slash": "madeupscheme:/example.com/", "scheme-file-single-slash": "file:/example.com/", "scheme-ftps-single-slash": "ftps:/example.com/", "scheme-gopher-single-slash": "gopher:/example.com/", "scheme-ws-single-slash": "ws:/example.com/", "scheme-wss-single-slash": "wss:/example.com/", "scheme-javascript-single-slash": "javascript:/example.com/", "scheme-mailto-single-slash": "mailto:/example.com/", "scheme-private-no-slash": "madeupscheme:example.com/", "scheme-ftps-no-slash": "ftps:example.com/", "scheme-gopher-no-slash": "gopher:example.com/", "scheme-wss-no-slash": "wss:example.com/", "scheme-mailto-no-slash": "mailto:example.com/", "scheme-data-no-slash": "data:text/plain,foo", "userinfo": "http://user:pass@foo:21/bar;par?b#c", "host-ipv6": "http://[2001::1]", "host-ipv6-port": "http://[2001::1]:80", "port-none-but-colon": "http://f:/c", "port-0": "http://f:0/c", "port-00000000000000": "http://f:00000000000000/c", "port-00000000000000000000080": "http://f:00000000000000000000080/c", "port-00000000000000000000080": "http://f:00000000000000000000080/c", "userinfo-host-port-path": "http://a:b@c:29/d", "userinfo-username-non-alpha": "http://foo.com:b@d/", "query-contains-question-mark": "http://foo/abcd?efgh?ijkl", "fragment-contains-question-mark": "http://foo/abcd#foo?bar", "path-percent-encoded-dot": "http://example.com/foo/%2e", "path-percent-encoded-space": "http://example.com/%20foo", 
"path-non-ascii": u"http://example.com/\u00C2\u00A9zbar".encode('utf-8'), "path-percent-encoded-multiple": "http://example.com/foo%41%7a", "path-percent-encoded-u0091": "http://example.com/foo%91", "path-percent-encoded-u0000": "http://example.com/foo%00", "path-percent-encoded-mixed-case": "http://example.com/%3A%3a%3C%3c", "path-unicode-han": u"http://example.com/\u4F60\u597D\u4F60\u597D".encode('utf-8'), "path-uFEFF": u"http://example.com/\uFEFF/foo".encode('utf-8'), "path-u202E-u202D": u"http://example.com/\u202E/foo/\u202D/bar".encode('utf-8'), "host-is-pile-of-poo": "http://💩", "path-contains-pile-of-poo": "http://example.com/foo/💩", "query-contains-pile-of-poo": "http://example.com/foo?💩", "fragment-contains-pile-of-poo": "http://example.com/foo#💩", "host-192.0x00A80001": "http://192.0x00A80001", "userinfo-username-contains-percent-encoded": "http://%25DOMAIN:foobar@foodomain.com", "userinfo-empty": "http://@www.example.com", "userinfo-user-empty": "http://:b@www.example.com", "userinfo-password-empty": "http://a:@www.example.com", "host-exotic-whitespace": u"http://GOO\u200b\u2060\ufeffgoo.com".encode('utf-8'), "host-exotic-dot": u"http://www.foo\u3002bar.com".encode('utf-8'), "host-fullwidth": u"http://\uff27\uff4f.com".encode('utf-8'), "host-idn-unicode-han": u"http://\u4f60\u597d\u4f60\u597d".encode('utf-8'), "host-IP-address-broken": "http://192.168.0.257/", } valid = valid_absolute.copy() valid_relative = { "scheme-schemeless-relative": "//foo/bar", "path-slash-only-relative": "/", "path-simple-relative": "/a/b/c", "path-percent-encoded-slash-relative": "/a%2fc", "path-percent-encoded-slash-plus-slashes-relative": "/a/%2f/c", "query-empty-no-path-relative": "?", "fragment-empty-hash-only-no-path-relative": "#", "fragment-slash-relative": "#/", "fragment-semicolon-question-mark-relative": "#;?", "fragment-non-ascii-relative": u"#\u03B2".encode('utf-8'), } valid.update(valid_relative) invalid_absolute.update(valid_relative) valid_relative_colon_dot = { 
"scheme-none-relative": "foo.com", "path-colon-relative": ":", "path-leading-colon-letter-relative": ":a", "path-leading-colon-chars-relative": ":foo.com", "path-leading-colon-slash-relative": ":/", "path-leading-colon-hash-relative": ":#", "path-leading-colon-number-relative": ":23", "path-slash-colon-number-relative": "/:23", "path-leading-colon-colon-relative": "::", "path-colon-colon-number-relative": "::23", "path-starts-with-pile-of-poo": "💩http://foo", "path-contains-pile-of-poo": "http💩//:foo", } valid.update(valid_relative_colon_dot) invalid_file = { "scheme-file-backslash": "file:c:\\foo\\bar.html", "scheme-file-single-slash-c-bar": "file:/C|/foo/bar", "scheme-file-triple-slash-c-bar": "file:///C|/foo/bar", } invalid.update(invalid_file) valid_file = { "scheme-file-uppercase": "File://foo/bar.html", "scheme-file-slash-slash-c-bar": "file://C|/foo/bar", "scheme-file-slash-slash-abc-bar": "file://abc|/foo/bar", "scheme-file-host-included": "file://server/foo/bar", "scheme-file-host-empty": "file:///foo/bar.txt", "scheme-file-scheme-only": "file:", "scheme-file-slash-only": "file:/", "scheme-file-slash-slash-only": "file://", "scheme-file-slash-slash-slash-only": "file:///", "scheme-file-no-slash": "file:test", } valid.update(valid_file) valid_absolute.update(valid_file) warnings = { "scheme-data-contains-fragment": "data:text/html,test#test", } element_attribute_pairs = [ "a href", # "a ping", space-separated list of URLs; tested elsewhere "area href", # "area ping", space-separated list of URLs; tested elsewhere "audio src", "base href", "blockquote cite", "button formaction", "del cite", "embed src", "form action", "html manifest", "iframe src", "img src", # srcset is tested elsewhere "input formaction", # type=submit, type=image "input src", # type=image "input value", # type=url "ins cite", "link href", #"menuitem icon", # skip until parser is updated "object data", "q cite", "script src", "source src", "track src", "video poster", "video src", ] 
template = "<!DOCTYPE html>\n<meta charset=utf-8>\n" def write_novalid_files(): for el, attr in (pair.split() for pair in element_attribute_pairs): for desc, url in invalid.items(): if ("area" == el): f = open(os.path.join(ccdir, "html/elements/area/href/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid href: %s</title>\n' % desc) f.write('<map name=foo><%s %s="%s" alt></map>\n' % (el, attr, url)) f.close() elif ("base" == el or "embed" == el): f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-novalid.html" % (el, attr, desc)), 'wb') f.write(template + '<title>invalid %s: %s</title>\n' % (attr, desc)) f.write('<%s %s="%s">\n' % (el, attr, url)) f.close() elif ("html" == el): f = open(os.path.join(ccdir, "html/elements/html/manifest/%s-novalid.html" % desc), 'wb') f.write('<!DOCTYPE html>\n') f.write('<html manifest="%s">\n' % url) f.write('<meta charset=utf-8>\n') f.write('<title>invalid manifest: %s</title>\n' % desc) f.write('</html>\n') f.close() elif ("img" == el): f = open(os.path.join(ccdir, "html/elements/img/src/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid src: %s</title>\n' % desc) f.write('<img src="%s" alt>\n' % url) f.close() elif ("input" == el and "src" == attr): f = open(os.path.join(ccdir, "html/elements/input/type-image-src/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid src: %s</title>\n' % desc) f.write('<%s type=image alt="foo" %s="%s">\n' % (el, attr, url)) f.close() elif ("input" == el and "formaction" == attr): f = open(os.path.join(ccdir, "html/elements/input/type-submit-formaction/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid formaction: %s</title>\n' % desc) f.write('<%s type=submit %s="%s">\n' % (el, attr, url)) f.close() f = open(os.path.join(ccdir, "html/elements/input/type-image-formaction/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid formaction: %s</title>\n' % desc) f.write('<%s type=image alt="foo" %s="%s">\n' % (el, attr, url)) 
f.close() elif ("input" == el and "value" == attr): f = open(os.path.join(ccdir, "html/elements/input/type-url-value/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid value attribute: %s</title>\n' % desc) f.write('<%s type=url %s="%s">\n' % (el, attr, url)) f.close() elif ("link" == el): f = open(os.path.join(ccdir, "html/elements/link/href/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid href: %s</title>\n' % desc) f.write('<link href="%s" rel=help>\n' % url) f.close() elif ("source" == el or "track" == el): f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-novalid.html" % (el, attr, desc)), 'wb') f.write(template + '<title>invalid %s: %s</title>\n' % (attr, desc)) f.write('<video><%s %s="%s"></video>\n' % (el, attr, url)) f.close() else: f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-novalid.html" % (el, attr, desc)), 'wb') f.write(template + '<title>invalid %s: %s</title>\n' % (attr, desc)) f.write('<%s %s="%s"></%s>\n' % (el, attr, url, el)) f.close() for desc, url in invalid.items(): f = open(os.path.join(ccdir, "html/microdata/itemid/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid itemid: %s</title>\n' % desc) f.write('<div itemid="%s" itemtype="http://foo" itemscope></div>\n' % url) f.close() for desc, url in invalid_absolute.items(): f = open(os.path.join(ccdir, "html/microdata/itemtype/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid itemtype: %s</title>\n' % desc) f.write('<div itemtype="%s" itemscope></div>\n' % url) f.close() f = open(os.path.join(ccdir, "html/elements/input/type-url-value/%s-novalid.html" % desc), 'wb') f.write(template + '<title>invalid value attribute: %s</title>\n' %desc) f.write('<input type=url value="%s">\n' % url) f.close() def write_haswarn_files(): for el, attr in (pair.split() for pair in element_attribute_pairs): for desc, url in warnings.items(): if ("area" == el): f = open(os.path.join(ccdir, "html/elements/area/href/%s-haswarn.html" % desc), 
'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<map name=foo><%s %s="%s" alt></map>\n' % (el, attr, url)) f.close() elif ("base" == el or "embed" == el): f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-haswarn.html" % (el, attr, desc)), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<%s %s="%s">\n' % (el, attr, url)) f.close() elif ("html" == el): f = open(os.path.join(ccdir, "html/elements/html/manifest/%s-haswarn.html" % desc), 'wb') f.write('<!DOCTYPE html>\n') f.write('<html manifest="%s">\n' % url) f.write('<meta charset=utf-8>\n') f.write('<title>%s warning: %s</title>\n' % (attr, desc)) f.write('</html>\n') f.close() elif ("img" == el): f = open(os.path.join(ccdir, "html/elements/img/src/%s-haswarn.html" % desc), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<%s %s="%s" alt>\n' % (el, attr, url)) f.close() elif ("input" == el and "src" == attr): f = open(os.path.join(ccdir, "html/elements/input/type-image-src/%s-haswarn.html" % desc), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<%s type=image alt="foo" %s="%s">\n' % (el, attr, url)) f.close() elif ("input" == el and "formaction" == attr): f = open(os.path.join(ccdir, "html/elements/input/type-submit-formaction/%s-haswarn.html" % desc), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<%s type=submit %s="%s">\n' % (el, attr, url)) f.close() f = open(os.path.join(ccdir, "html/elements/input/type-image-formaction/%s-haswarn.html" % desc), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<%s type=image alt="foo" %s="%s">\n' % (el, attr, url)) f.close() elif ("input" == el and "value" == attr): f = open(os.path.join(ccdir, "html/elements/input/type-url-value/%s-haswarn.html" % desc), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<%s type=url %s="%s">\n' 
% (el, attr, url)) f.close() elif ("link" == el): f = open(os.path.join(ccdir, "html/elements/link/href/%s-haswarn.html" % desc), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<%s %s="%s" rel=help>\n' % (el, attr, url)) f.close() elif ("source" == el or "track" == el): f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-haswarn.html" % (el, attr, desc)), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc)) f.write('<video><%s %s="%s"></video>\n' % (el, attr, url)) f.close() else: f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-haswarn.html" % (el, attr, desc)), 'wb') f.write(template + '<title>%s warning: %s</title>\n' % (url, desc)) f.write('<%s %s="%s"></%s>\n' % (el, attr, url, el)) f.close() for desc, url in warnings.items(): f = open(os.path.join(ccdir, "html/microdata/itemtype-%s-haswarn.html" % desc ), 'wb') f.write(template + '<title>warning: %s</title>\n' % desc) f.write('<div itemtype="%s" itemscope></div>\n' % url) f.close() f = open(os.path.join(ccdir, "html/microdata/itemid-%s-haswarn.html" % desc), 'wb') f.write(template + '<title>warning: %s</title>\n' % desc) f.write('<div itemid="%s" itemtype="http://foo" itemscope></div>\n' % url) f.close() def write_isvalid_files(): for el, attr in (pair.split() for pair in element_attribute_pairs): if ("base" == el): continue if ("html" == el): continue elif ("input" == el and "value" == attr): continue elif ("input" == el and "formaction" == attr): fs = open(os.path.join(ccdir, "html/elements/input/type-submit-formaction-isvalid.html"), 'wb') fs.write(template + '<title>valid formaction</title>\n') fi = open(os.path.join(ccdir, "html/elements/input/type-image-formaction-isvalid.html"), 'wb') fi.write(template + '<title>valid formaction</title>\n') elif ("input" == el and "src" == attr): f = open(os.path.join(ccdir, "html/elements/input/type-image-src-isvalid.html"), 'wb') f.write(template + '<title>valid src</title>\n') else: f = 
open(os.path.join(ccdir, "html/elements/%s/%s-isvalid.html" % (el, attr)), 'wb') f.write(template + '<title>valid %s</title>\n' % attr) for desc, url in valid.items(): if ("area" == el): f.write('<map name=foo><%s %s="%s" alt></map><!-- %s -->\n' % (el, attr, url, desc)) elif ("embed" == el): f.write('<%s %s="%s"><!-- %s -->\n' % (el, attr, url, desc)) elif ("img" == el): f.write('<%s %s="%s" alt><!-- %s -->\n' % (el, attr, url, desc)) elif ("input" == el and "src" == attr): f.write('<%s type=image alt="foo" %s="%s"><!-- %s -->\n' % (el, attr, url, desc)) elif ("input" == el and "formaction" == attr): fs.write('<%s type=submit %s="%s"><!-- %s -->\n' % (el, attr, url, desc)) fi.write('<%s type=image alt="foo" %s="%s"><!-- %s -->\n' % (el, attr, url, desc)) elif ("link" == el): f.write('<%s %s="%s" rel=help><!-- %s -->\n' % (el, attr, url, desc)) elif ("source" == el or "track" == el): f.write('<video><%s %s="%s"></video><!-- %s -->\n' % (el, attr, url, desc)) else: f.write('<%s %s="%s"></%s><!-- %s -->\n' % (el, attr, url, el, desc)) if ("input" == el and "formaction" == attr): fs.close() fi.close() else: if ("a" == el and "href" == attr): f.write('<a href=""></a><!-- empty-href -->\n') f.close() for desc, url in valid.items(): f = open(os.path.join(ccdir, "html/elements/base/href/%s-isvalid.html" % desc), 'wb') f.write(template + '<title>valid href: %s</title>\n' % desc) f.write('<base href="%s">\n' % url) f.close() f = open(os.path.join(ccdir, "html/elements/html/manifest/%s-isvalid.html" % desc), 'wb') f.write('<!DOCTYPE html>\n') f.write('<html manifest="%s">\n' % url) f.write('<meta charset=utf-8>\n') f.write('<title>valid manifest: %s</title>\n' % desc) f.write('</html>\n') f.close() f = open(os.path.join(ccdir, "html/elements/meta/refresh-isvalid.html"), 'wb') f.write(template + '<title>valid meta refresh</title>\n') for desc, url in valid.items(): f.write('<meta http-equiv=refresh content="0; URL=%s"><!-- %s -->\n' % (url, desc)) f.close() f = 
open(os.path.join(ccdir, "html/microdata/itemid-isvalid.html"), 'wb') f.write(template + '<title>valid itemid</title>\n') for desc, url in valid.items(): f.write('<div itemid="%s" itemtype="http://foo" itemscope></div><!-- %s -->\n' % (url, desc)) f.close() f = open(os.path.join(ccdir, "html/microdata/itemtype-isvalid.html"), 'wb') f.write(template + '<title>valid itemtype</title>\n') for desc, url in valid_absolute.items(): f.write('<div itemtype="%s" itemscope></div><!-- %s -->\n' % (url, desc)) f.close() f = open(os.path.join(ccdir, "html/elements/input/type-url-value-isvalid.html"), 'wb') f.write(template + '<title>valid value attribute</title>\n') for desc, url in valid_absolute.items(): f.write('<input type=url value="%s"><!-- %s -->\n' % (url, desc)) f.close() write_novalid_files() write_haswarn_files() write_isvalid_files() # vim: ts=4:sw=4
mpl-2.0
asgeir/old-school-projects
python/verkefni3/jam.py
1
2207
def jam(appearances): lines = appearances.split('\n') # there seems to be a bug in the testcase that parses Stephen K. Amos as # just Stephen K lines = [line.replace(', plus ', ', ').replace('Stephen K. Amos', 'Stephen K').split(',')[1:-1] for line in lines] for line in lines: i = 0 length = len(line) while i < length: if ' with ' in line[i]: line[i:i + 1] = line[i].split(' with ') i += 1 if ' and ' in line[i]: line[i:i + 1] = line[i].split(' and ') i += 1 i += 1 length = len(line) counter = {} for line in lines: for person in line: person = person.strip() cur = counter.get(person, 0) counter[person] = cur + 1 return counter if __name__ == '__main__': print(jam("""1/1/1 22 December 1967, Nicholas Parsons with Derek Nimmo, Clement Freud, Wilma Ewart and Beryl Reid, excuses for being late. 2/1/2 29 December 1967, Nicholas Parsons with Derek Nimmo, Clement Freud, Sheila Hancock and Carol Binstead, bedrooms. 3/1/3 5 January 1968, Nicholas Parsons with Derek Nimmo, Clement Freud, Betty Marsden and Elisabeth Beresford, ? 4/1/4 12 January 1968, Nicholas Parsons with Derek Nimmo, Clement Freud, Isobel Barnett and Bettine Le Beau, ? 5/1/5 20 January 1968, Nicholas Parsons with Derek Nimmo, Clement Freud, Andree Melly and Prunella Scales, the brownies 6/1/6 27 January 1968, Nicholas Parsons with Derek Nimmo, Clement Freud, Marjorie Proops and Millie Small, ? 7/1/7 2 February 1968, Nicholas Parsons with Derek Nimmo, Clement Freud, Aimi Macdonald and Una Stubbs, my honeymoon. 8/1/8 9 February 1968, Nicholas Parsons with Derek Nimmo, Clement Freud, Lucy Bartlett and Anona Winn, bloomer. 9/1/9 17 February 1968, Nicholas Parsons with Derek Nimmo, Clement Freud, Andree Melly and Charmian Innes, ? 743/57/5 30 August 2010, Nicholas Parsons with Paul Merton, Jenny Eclair, Fred MacAulay and Stephen K. Amos, the secret of my success. 10/1/10 23 February 1968, Nicholas Parsons with Derek Nimmo, Clement Freud, Barbara Blake and Renee Houston, my first grown-up dress."""))
mit
NewpTone/stacklab-nova
debian/python-nova/usr/share/pyshared/nova/virt/libvirt/config.py
6
20982
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Configuration for libvirt objects. Classes to represent the configuration of various libvirt objects and support conversion to/from XML """ from nova import exception from nova.openstack.common import log as logging from lxml import etree LOG = logging.getLogger(__name__) class LibvirtConfigObject(object): def __init__(self, **kwargs): super(LibvirtConfigObject, self).__init__() self.root_name = kwargs.get("root_name") self.ns_prefix = kwargs.get('ns_prefix') self.ns_uri = kwargs.get('ns_uri') def _text_node(self, name, value): child = etree.Element(name) child.text = str(value) return child def format_dom(self): if self.ns_uri is None: return etree.Element(self.root_name) else: return etree.Element("{" + self.ns_uri + "}" + self.root_name, nsmap={self.ns_prefix: self.ns_uri}) def parse_str(self, xmlstr): self.parse_dom(etree.fromstring(xmlstr)) def parse_dom(self, xmldoc): if self.root_name != xmldoc.tag: raise exception.InvalidInput( "Root element name should be '%s' not '%s'" % (self.root_name, xmldoc.tag)) def to_xml(self, pretty_print=True): root = self.format_dom() xml_str = etree.tostring(root, pretty_print=pretty_print) LOG.debug("Generated XML %s " % (xml_str,)) return xml_str class LibvirtConfigCaps(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCaps, self).__init__(root_name="capabilities", **kwargs) self.host = None 
self.guests = [] def parse_dom(self, xmldoc): super(LibvirtConfigCaps, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "host": host = LibvirtConfigCapsHost() host.parse_dom(c) self.host = host elif c.tag == "guest": guest = LibvirtConfigCapsGuest() guest.parse_dom(c) self.guests.append(guest) def format_dom(self): caps = super(LibvirtConfigCaps, self).format_dom() if self.host: caps.append(self.host.format_dom()) for g in self.guests: caps.append(g.format_dom()) return caps class LibvirtConfigCapsHost(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsHost, self).__init__(root_name="host", **kwargs) self.cpu = None def parse_dom(self, xmldoc): super(LibvirtConfigCapsHost, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "cpu": cpu = LibvirtConfigCPU() cpu.parse_dom(c) self.cpu = cpu def format_dom(self): caps = super(LibvirtConfigCapsHost, self).format_dom() if self.cpu: caps.append(self.cpu.format_dom()) return caps class LibvirtConfigCapsGuest(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsGuest, self).__init__(root_name="guest", **kwargs) self.arch = None self.ostype = None self.domtype = list() def parse_dom(self, xmldoc): super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "os_type": self.ostype = c.text elif c.tag == "arch": self.arch = c.get("name") for sc in c.getchildren(): if sc.tag == "domain": self.domtype.append(sc.get("type")) def format_dom(self): caps = super(LibvirtConfigCapsGuest, self).format_dom() if self.ostype is not None: caps.append(self._text_node("os_type", self.ostype)) if self.arch: arch = etree.Element("arch", name=self.arch) for dt in self.domtype: dte = etree.Element("domain") dte.set("type", dt) arch.append(dte) caps.append(arch) return caps class LibvirtConfigGuestTimer(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestTimer, self).__init__(root_name="timer", **kwargs) 
self.name = "platform" self.track = None self.tickpolicy = None self.present = None def format_dom(self): tm = super(LibvirtConfigGuestTimer, self).format_dom() tm.set("name", self.name) if self.track is not None: tm.set("track", self.track) if self.tickpolicy is not None: tm.set("tickpolicy", self.tickpolicy) if self.present is not None: if self.present: tm.set("present", "yes") else: tm.set("present", "no") return tm class LibvirtConfigGuestClock(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestClock, self).__init__(root_name="clock", **kwargs) self.offset = "utc" self.adjustment = None self.timezone = None self.timers = [] def format_dom(self): clk = super(LibvirtConfigGuestClock, self).format_dom() clk.set("offset", self.offset) if self.adjustment: clk.set("adjustment", self.adjustment) elif self.timezone: clk.set("timezone", self.timezone) for tm in self.timers: clk.append(tm.format_dom()) return clk def add_timer(self, tm): self.timers.append(tm) class LibvirtConfigCPUFeature(LibvirtConfigObject): def __init__(self, name=None, **kwargs): super(LibvirtConfigCPUFeature, self).__init__(root_name='feature', **kwargs) self.name = name def parse_dom(self, xmldoc): super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc) self.name = xmldoc.get("name") def format_dom(self): ft = super(LibvirtConfigCPUFeature, self).format_dom() ft.set("name", self.name) return ft class LibvirtConfigCPU(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCPU, self).__init__(root_name='cpu', **kwargs) self.arch = None self.vendor = None self.model = None self.sockets = None self.cores = None self.threads = None self.features = [] def parse_dom(self, xmldoc): super(LibvirtConfigCPU, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "arch": self.arch = c.text elif c.tag == "model": self.model = c.text elif c.tag == "vendor": self.vendor = c.text elif c.tag == "topology": self.sockets = int(c.get("sockets")) self.cores = 
int(c.get("cores")) self.threads = int(c.get("threads")) elif c.tag == "feature": f = LibvirtConfigCPUFeature() f.parse_dom(c) self.add_feature(f) def format_dom(self): cpu = super(LibvirtConfigCPU, self).format_dom() if self.arch is not None: cpu.append(self._text_node("arch", self.arch)) if self.model is not None: cpu.append(self._text_node("model", self.model)) if self.vendor is not None: cpu.append(self._text_node("vendor", self.vendor)) if (self.sockets is not None and self.cores is not None and self.threads is not None): top = etree.Element("topology") top.set("sockets", str(self.sockets)) top.set("cores", str(self.cores)) top.set("threads", str(self.threads)) cpu.append(top) for f in self.features: cpu.append(f.format_dom()) return cpu def add_feature(self, feat): self.features.append(feat) class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature): def __init__(self, name=None, **kwargs): super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs) self.policy = "require" def format_dom(self): ft = super(LibvirtConfigGuestCPUFeature, self).format_dom() ft.set("policy", self.policy) return ft class LibvirtConfigGuestCPU(LibvirtConfigCPU): def __init__(self, **kwargs): super(LibvirtConfigGuestCPU, self).__init__(**kwargs) self.mode = None self.match = "exact" def format_dom(self): cpu = super(LibvirtConfigGuestCPU, self).format_dom() if self.mode: cpu.set("mode", self.mode) cpu.set("match", self.match) return cpu class LibvirtConfigGuestDevice(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestDevice, self).__init__(**kwargs) class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestDisk, self).__init__(root_name="disk", **kwargs) self.source_type = "file" self.source_device = "disk" self.driver_name = None self.driver_format = None self.driver_cache = None self.source_path = None self.source_protocol = None self.source_host = None self.target_dev = None self.target_path = 
None self.target_bus = None self.auth_username = None self.auth_secret_type = None self.auth_secret_uuid = None self.serial = None def format_dom(self): dev = super(LibvirtConfigGuestDisk, self).format_dom() dev.set("type", self.source_type) dev.set("device", self.source_device) if (self.driver_name is not None or self.driver_format is not None or self.driver_cache is not None): drv = etree.Element("driver") if self.driver_name is not None: drv.set("name", self.driver_name) if self.driver_format is not None: drv.set("type", self.driver_format) if self.driver_cache is not None: drv.set("cache", self.driver_cache) dev.append(drv) if self.source_type == "file": dev.append(etree.Element("source", file=self.source_path)) elif self.source_type == "block": dev.append(etree.Element("source", dev=self.source_path)) elif self.source_type == "mount": dev.append(etree.Element("source", dir=self.source_path)) elif self.source_type == "network": dev.append(etree.Element("source", protocol=self.source_protocol, name=self.source_host)) if self.auth_secret_type is not None: auth = etree.Element("auth") auth.set("username", self.auth_username) auth.append(etree.Element("secret", type=self.auth_secret_type, uuid=self.auth_secret_uuid)) dev.append(auth) if self.source_type == "mount": dev.append(etree.Element("target", dir=self.target_path)) else: dev.append(etree.Element("target", dev=self.target_dev, bus=self.target_bus)) if self.serial is not None: dev.append(self._text_node("serial", self.serial)) return dev class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem", **kwargs) self.source_type = "mount" self.source_dir = None self.target_dir = "/" def format_dom(self): dev = super(LibvirtConfigGuestFilesys, self).format_dom() dev.set("type", self.source_type) dev.append(etree.Element("source", dir=self.source_dir)) dev.append(etree.Element("target", dir=self.target_dir)) return dev 
class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestInterface, self).__init__( root_name="interface", **kwargs) self.net_type = None self.target_dev = None self.model = None self.mac_addr = None self.script = None self.source_dev = None self.vporttype = None self.vportparams = [] self.filtername = None self.filterparams = [] def format_dom(self): dev = super(LibvirtConfigGuestInterface, self).format_dom() dev.set("type", self.net_type) dev.append(etree.Element("mac", address=self.mac_addr)) if self.model: dev.append(etree.Element("model", type=self.model)) if self.net_type == "ethernet": if self.script is not None: dev.append(etree.Element("script", path=self.script)) dev.append(etree.Element("target", dev=self.target_dev)) elif self.net_type == "direct": dev.append(etree.Element("source", dev=self.source_dev, mode="private")) else: dev.append(etree.Element("source", bridge=self.source_dev)) if self.vporttype is not None: vport = etree.Element("virtualport", type=self.vporttype) for p in self.vportparams: param = etree.Element("parameters") param.set(p['key'], p['value']) vport.append(param) dev.append(vport) if self.filtername is not None: filter = etree.Element("filterref", filter=self.filtername) for p in self.filterparams: filter.append(etree.Element("parameter", name=p['key'], value=p['value'])) dev.append(filter) return dev def add_filter_param(self, key, value): self.filterparams.append({'key': key, 'value': value}) def add_vport_param(self, key, value): self.vportparams.append({'key': key, 'value': value}) class LibvirtConfigGuestInput(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestInput, self).__init__(root_name="input", **kwargs) self.type = "tablet" self.bus = "usb" def format_dom(self): dev = super(LibvirtConfigGuestInput, self).format_dom() dev.set("type", self.type) dev.set("bus", self.bus) return dev class 
LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics", **kwargs) self.type = "vnc" self.autoport = True self.keymap = None self.listen = None def format_dom(self): dev = super(LibvirtConfigGuestGraphics, self).format_dom() dev.set("type", self.type) if self.autoport: dev.set("autoport", "yes") else: dev.set("autoport", "no") if self.keymap: dev.set("keymap", self.keymap) if self.listen: dev.set("listen", self.listen) return dev class LibvirtConfigGuestChar(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestChar, self).__init__(**kwargs) self.type = "pty" self.source_path = None self.target_port = None def format_dom(self): dev = super(LibvirtConfigGuestChar, self).format_dom() dev.set("type", self.type) if self.type == "file": dev.append(etree.Element("source", path=self.source_path)) if self.target_port is not None: dev.append(etree.Element("target", port=str(self.target_port))) return dev class LibvirtConfigGuestSerial(LibvirtConfigGuestChar): def __init__(self, **kwargs): super(LibvirtConfigGuestSerial, self).__init__(root_name="serial", **kwargs) class LibvirtConfigGuestConsole(LibvirtConfigGuestChar): def __init__(self, **kwargs): super(LibvirtConfigGuestConsole, self).__init__(root_name="console", **kwargs) class LibvirtConfigGuest(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuest, self).__init__(root_name="domain", **kwargs) self.virt_type = None self.uuid = None self.name = None self.memory = 1024 * 1024 * 500 self.vcpus = 1 self.cpu = None self.acpi = False self.clock = None self.os_type = None self.os_loader = None self.os_kernel = None self.os_initrd = None self.os_cmdline = None self.os_root = None self.os_init_path = None self.os_boot_dev = None self.devices = [] def _format_basic_props(self, root): root.append(self._text_node("uuid", self.uuid)) root.append(self._text_node("name", self.name)) 
root.append(self._text_node("memory", self.memory)) root.append(self._text_node("vcpu", self.vcpus)) def _format_os(self, root): os = etree.Element("os") os.append(self._text_node("type", self.os_type)) if self.os_kernel is not None: os.append(self._text_node("kernel", self.os_kernel)) if self.os_loader is not None: os.append(self._text_node("loader", self.os_loader)) if self.os_initrd is not None: os.append(self._text_node("initrd", self.os_initrd)) if self.os_cmdline is not None: os.append(self._text_node("cmdline", self.os_cmdline)) if self.os_root is not None: os.append(self._text_node("root", self.os_root)) if self.os_init_path is not None: os.append(self._text_node("init", self.os_init_path)) if self.os_boot_dev is not None: os.append(etree.Element("boot", dev=self.os_boot_dev)) root.append(os) def _format_features(self, root): if self.acpi: features = etree.Element("features") features.append(etree.Element("acpi")) root.append(features) def _format_devices(self, root): if len(self.devices) == 0: return devices = etree.Element("devices") for dev in self.devices: devices.append(dev.format_dom()) root.append(devices) def format_dom(self): root = super(LibvirtConfigGuest, self).format_dom() root.set("type", self.virt_type) self._format_basic_props(root) self._format_os(root) self._format_features(root) if self.clock is not None: root.append(self.clock.format_dom()) if self.cpu is not None: root.append(self.cpu.format_dom()) self._format_devices(root) return root def add_device(self, dev): self.devices.append(dev) def set_clock(self, clk): self.clock = clk class LibvirtConfigGuestSnapshot(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestSnapshot, self).__init__( root_name="domainsnapshot", **kwargs) self.name = None def format_dom(self): ss = super(LibvirtConfigGuestSnapshot, self).format_dom() ss.append(self._text_node("name", self.name)) return ss
apache-2.0
pombredanne/cobbler-3
cobbler/modules/manage_tftpd_py.py
14
3311
""" This is some of the code behind 'cobbler sync'. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ import clogger from cexceptions import CX import templar import tftpgen from utils import _ def register(): """ The mandatory cobbler module registration hook. """ return "manage" class TftpdPyManager: def what(self): return "tftpd" def __init__(self, collection_mgr, logger): """ Constructor """ self.logger = logger if self.logger is None: self.logger = clogger.Logger() self.collection_mgr = collection_mgr self.templar = templar.Templar(collection_mgr) self.settings_file = "/etc/xinetd.d/tftp" def regen_hosts(self): pass # not used def write_dns_files(self): pass # not used def write_boot_files_distro(self, distro): """ Copy files in profile["boot_files"] into /tftpboot. Used for vmware currently. """ pass # not used. Handed by tftp.py def write_boot_files(self): """ Copy files in profile["boot_files"] into /tftpboot. Used for vmware currently. """ pass # not used. Handed by tftp.py def add_single_distro(self, distro): pass # not used def write_tftpd_files(self): """ xinetd files are written when manage_tftp is set in /var/lib/cobbler/settings. 
""" template_file = "/etc/cobbler/tftpd.template" try: f = open(template_file, "r") except: raise CX(_("error reading template %s") % template_file) template_data = "" template_data = f.read() f.close() metadata = { "user": "nobody", "binary": "/usr/sbin/tftpd.py", "args": "-v" } self.logger.info("generating %s" % self.settings_file) self.templar.render(template_data, metadata, self.settings_file, None) def sync(self, verbose=True): """ Write out files to /tftpdboot. Mostly unused for the python server """ self.logger.info("copying bootloaders") tftpgen.TFTPGen(self.collection_mgr, self.logger).copy_bootloaders() def update_netboot(self, name): """ Write out files to /tftpdboot. Unused for the python server """ pass def add_single_system(self, name): """ Write out files to /tftpdboot. Unused for the python server """ pass def get_manager(collection_mgr, logger): return TftpdPyManager(collection_mgr, logger)
gpl-2.0
facebookexperimental/eden
eden/hg-server/edenscm/mercurial/help.py
2
29688
# Portions Copyright (c) Facebook, Inc. and its affiliates. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2. # help.py - help data for mercurial # # Copyright 2006 Matt Mackall <mpm@selenic.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import import itertools import os import textwrap from bindings import cliparser from . import ( cmdutil, encoding, error, extensions, filemerge, fileset, helptext, identity, minirst, pycompat, revset, templatefilters, templatekw, templater, util, ) from .hgweb import webcommands from .i18n import _, gettext _exclkeywords = { "(ADVANCED)", "(DEPRECATED)", "(EXPERIMENTAL)", # i18n: "(ADVANCED)" is a keyword, must be translated consistently _("(ADVANCED)"), # i18n: "(DEPRECATED)" is a keyword, must be translated consistently _("(DEPRECATED)"), # i18n: "(EXPERIMENTAL)" is a keyword, must be translated consistently _("(EXPERIMENTAL)"), } def listexts(header, exts, indent=1, showdeprecated=False): """return a text listing of the given extensions""" rst = [] if exts: for name, desc in sorted(pycompat.iteritems(exts)): if not showdeprecated and any(w in desc for w in _exclkeywords): continue rst.append("%s:%s: %s\n" % (" " * indent, name, desc)) if rst: rst.insert(0, "\n%s\n\n" % header) return rst def extshelp(ui): rst = loaddoc("extensions")(ui).splitlines(True) rst.extend( listexts(_("Enabled extensions:"), extensions.enabled(), showdeprecated=True) ) rst.extend(listexts(_("Disabled extensions:"), extensions.disabled())) doc = "".join(rst) return doc def optrst(header, options, verbose): data = [] multioccur = False for option in options: if len(option) == 5: shortopt, longopt, default, desc, optlabel = option else: shortopt, longopt, default, desc = option optlabel = _("VALUE") # default label if not verbose and any(w in desc for w in 
_exclkeywords): continue so = "" if shortopt: so = "-" + shortopt lo = "--" + longopt if default: # default is of unknown type, and in Python 2 we abused # the %s-shows-repr property to handle integers etc. To # match that behavior on Python 3, we do str(default) and # then convert it to bytes. desc += _(" (default: %s)") % pycompat.bytestr(default) if isinstance(default, list): lo += " %s [+]" % optlabel multioccur = True elif (default is not None) and not isinstance(default, bool): lo += " %s" % optlabel data.append((so, lo, desc)) if not data: return "" if multioccur: header += _(" ([+] can be repeated)") rst = ["\n%s:\n\n" % header] rst.extend(minirst.maketable(data, 1)) return "".join(rst) def indicateomitted(rst, omitted, notomitted=None): rst.append("\n\n.. container:: omitted\n\n %s\n\n" % omitted) if notomitted: rst.append("\n\n.. container:: notomitted\n\n %s\n\n" % notomitted) def filtercmd(ui, cmd, kw, doc): if not ui.debugflag and cmd.startswith("debug") and kw != "debug": return True if not ui.verbose and doc and any(w in doc for w in _exclkeywords): return True return False def topicmatch(ui, commands, kw): """Return help topics matching kw. Returns {'section': [(name, summary), ...], ...} where section is one of topics, commands, extensions, or extensioncommands. """ kw = encoding.lower(kw) def lowercontains(container): return kw in encoding.lower(container) # translated in helptable results = {"topics": [], "commands": [], "extensions": [], "extensioncommands": []} for names, header, doc in helptable: # Old extensions may use a str as doc. 
if ( sum(map(lowercontains, names)) or lowercontains(header) or (callable(doc) and lowercontains(doc(ui))) ): results["topics"].append((names[0], header)) for cmd, entry in pycompat.iteritems(commands.table): if len(entry) == 3: summary = entry[2] else: summary = "" # translate docs *before* searching there docs = _(pycompat.getdoc(entry[0])) or "" if kw in cmd or lowercontains(summary) or lowercontains(docs): doclines = docs.splitlines() if doclines: summary = doclines[0] cmdname = cmd.partition("|")[0].lstrip("^") if filtercmd(ui, cmdname, kw, docs): continue results["commands"].append((cmdname, summary)) for name, docs in itertools.chain( pycompat.iteritems(extensions.enabled(False)), pycompat.iteritems(extensions.disabled()), ): if not docs: continue name = name.rpartition(".")[-1] if lowercontains(name) or lowercontains(docs): # extension docs are already translated results["extensions"].append((name, docs.splitlines()[0])) try: mod = extensions.load(ui, name, "") except ImportError: # debug message would be printed in extensions.load() continue for cmd, entry in pycompat.iteritems(getattr(mod, "cmdtable", {})): if kw in cmd or (len(entry) > 2 and lowercontains(entry[2])): cmdname = cmd.partition("|")[0].lstrip("^") cmddoc = pycompat.getdoc(entry[0]) if cmddoc: cmddoc = gettext(cmddoc).splitlines()[0] else: cmddoc = _("(no help text available)") if filtercmd(ui, cmdname, kw, cmddoc): continue results["extensioncommands"].append((cmdname, cmddoc)) return results def loaddoc(topic, subdir=None): """Return a delayed loader for help/topic.txt.""" def loader(ui): doc = gettext(getattr(helptext, topic)) for rewriter in helphooks.get(topic, []): doc = rewriter(ui, topic, doc) return doc return loader helptable = sorted( [ (["bundlespec"], _("Bundle File Formats"), loaddoc("bundlespec")), (["color"], _("Colorizing Outputs"), loaddoc("color")), (["config", "hgrc"], _("Configuration Files"), loaddoc("config")), (["dates"], _("Date Formats"), loaddoc("dates")), 
(["flags"], _("Command-line flags"), loaddoc("flags")), (["patterns"], _("Specifying Files by File Name Pattern"), loaddoc("patterns")), (["environment", "env"], _("Environment Variables"), loaddoc("environment")), ( ["revisions", "revs", "revsets", "revset", "multirevs", "mrevs"], _("Specifying Commits"), loaddoc("revisions"), ), ( ["filesets", "fileset"], _("Specifying Files by their Characteristics"), loaddoc("filesets"), ), (["diffs"], _("Diff Formats"), loaddoc("diffs")), ( ["merge-tools", "mergetools", "mergetool"], _("Merge Tools"), loaddoc("merge-tools"), ), ( ["templating", "templates", "template", "style"], _("Customizing Output with Templates"), loaddoc("templates"), ), (["urls"], _("URL Paths"), loaddoc("urls")), (["extensions"], _("Using Additional Features"), extshelp), (["hgweb"], _("Configuring hgweb"), loaddoc("hgweb")), (["glossary"], _("Common Terms"), loaddoc("glossary")), (["phases"], _("Working with Phases"), loaddoc("phases")), ( ["scripting"], _("Using Mercurial from scripts and automation"), loaddoc("scripting"), ), (["pager"], _("Pager Support"), loaddoc("pager")), ] ) # Maps topics with sub-topics to a list of their sub-topics. subtopics = {} # Map topics to lists of callable taking the current topic help and # returning the updated version helphooks = {} def addtopichook(topic, rewriter): helphooks.setdefault(topic, []).append(rewriter) def makeitemsdoc(ui, topic, doc, marker, items, dedent=False): """Extract docstring from the items key to function mapping, build a single documentation block and use it to overwrite the marker in doc. """ entries = [] seen = set() for name in sorted(items): # Hide private functions like "_all()". 
if name.startswith("_"): continue if items[name] in seen: continue seen.add(items[name]) text = (pycompat.getdoc(items[name]) or "").rstrip() if not text or not ui.verbose and any(w in text for w in _exclkeywords): continue text = gettext(text) if dedent: text = textwrap.dedent(text) lines = text.splitlines() doclines = [(lines[0])] for l in lines[1:]: # Stop once we find some Python doctest if l.strip().startswith(">>>"): break if dedent: doclines.append(l.rstrip()) else: doclines.append(" " + l.strip()) entries.append("\n".join(doclines)) entries = "\n\n".join(entries) return doc.replace(marker, entries) def makesubcmdlist(cmd, categories, subcommands, verbose, quiet): subcommandindex = {} for name, entry in subcommands.items(): for alias in cmdutil.parsealiases(name): subcommandindex[alias] = name def getsubcommandrst(name, alias=None): entry = subcommands[name] doc = pycompat.getdoc(entry[0]) or "" doc = gettext(doc) if not verbose and doc and any(w in doc for w in _exclkeywords): return [] if doc: doc = doc.splitlines()[0].rstrip() if not doc: doc = _("(no help text available)") aliases = cmdutil.parsealiases(name) if verbose: name = ", ".join(aliases) if len(entry) > 2: name = "%s %s" % (name, entry[2]) else: name = alias or aliases[0] return [" :%s: %s\n" % (name, doc)] rst = [] seen = set() if categories: for category, aliases in categories: categoryrst = [] for alias in aliases: name = subcommandindex.get(alias) if name: seen.add(name) categoryrst.extend(getsubcommandrst(name, alias)) if categoryrst: rst.append("\n%s:\n\n" % category) rst.extend(categoryrst) otherrst = [] for name in sorted(subcommands.keys()): if name not in seen: otherrst.extend(getsubcommandrst(name)) if otherrst: rst.append("\n%s:\n\n" % (_("Other Subcommands") if seen else _("Subcommands"))) rst.extend(otherrst) if not quiet: rst.append( _("\n(use 'hg help %s SUBCOMMAND' to show complete subcommand help)\n") % cmd ) return rst def addtopicsymbols(topic, marker, symbols, dedent=False): 
def add(ui, topic, doc): return makeitemsdoc(ui, topic, doc, marker, symbols, dedent=dedent) addtopichook(topic, add) addtopicsymbols( "bundlespec", ".. bundlecompressionmarker", util.bundlecompressiontopics() ) addtopicsymbols("filesets", ".. predicatesmarker", fileset.symbols) addtopicsymbols("merge-tools", ".. internaltoolsmarker", filemerge.internalsdoc) addtopicsymbols("revisions", ".. predicatesmarker", revset.symbols) addtopicsymbols("templates", ".. keywordsmarker", templatekw.keywords) addtopicsymbols("templates", ".. filtersmarker", templatefilters.filters) addtopicsymbols("templates", ".. functionsmarker", templater.funcs) addtopicsymbols("hgweb", ".. webcommandsmarker", webcommands.commands, dedent=True) helphomecommands = [ ("Get the latest commits from the server", ["pull"]), ("View commits", ["ssl", "show", "diff"]), ("Check out a commit", ["checkout"]), ( "Work with your checkout", ["status", "add", "remove", "forget", "revert", "purge", "shelve"], ), ("Commit changes and modify commits", ["commit", "amend", "metaedit"]), ("Rearrange commits", ["rebase", "graft", "hide", "unhide"]), ( "Work with stacks of commits", ["previous", "next", "split", "fold", "histedit", "absorb"], ), ("Undo changes", ["uncommit", "unamend", "undo", "redo"]), ("Other commands", ["config", "grep", "journal", "rage"]), ] helphometopics = {"revisions", "filesets", "glossary", "patterns", "templating"} class _helpdispatch(object): def __init__( self, ui, commands, unknowncmd=False, full=False, subtopic=None, **opts ): self.ui = ui self.commands = commands self.subtopic = subtopic self.unknowncmd = unknowncmd self.full = full self.opts = opts self.commandindex = {} for name, cmd in pycompat.iteritems(commands.table): for n in name.lstrip("^").split("|"): self.commandindex[n] = cmd def dispatch(self, name): queries = [] if self.unknowncmd: queries += [self.helpextcmd] if self.opts.get("extension"): queries += [self.helpext] if self.opts.get("command"): queries += [self.helpcmd] 
if not queries: queries = (self.helptopic, self.helpcmd, self.helpext, self.helpextcmd) for f in queries: try: return f(name, self.subtopic) except error.UnknownCommand: pass else: if self.unknowncmd: raise error.UnknownCommand(name) else: msg = _("no such help topic: %s") % name hint = _("try 'hg help --keyword %s'") % name raise error.Abort(msg, hint=hint) def helpcmd(self, name, subtopic=None): ui = self.ui try: # Try to expand 'name' as an alias resolvedargs = cliparser.expandargs( ui._rcfg._rcfg, list(self.commands.table), name.split(), False )[0] if name == "debug": raise cliparser.AmbiguousCommand() except cliparser.AmbiguousCommand: select = lambda c: c.lstrip("^").partition("|")[0].startswith(name) rst = self.helplist(name, select) return rst except cliparser.MalformedAlias as ex: raise error.Abort(ex.args[0]) if " ".join(resolvedargs) != name: self.ui.write(_("alias for: %s\n\n") % " ".join(resolvedargs)) # Try to print ":doc" from alias configs doc = ui.config("alias", "%s:doc" % name) if doc: self.ui.write("%s\n\n" % doc) # Continue with the resolved (non-alias) name name = " ".join(resolvedargs) try: cmd, args, aliases, entry, _level = cmdutil.findsubcmd( name.split(), self.commands.table, partial=True ) except error.AmbiguousCommand as inst: # py3k fix: except vars can't be used outside the scope of the # except block, nor can be used inside a lambda. 
python issue4617 prefix = inst.args[0] select = lambda c: c.lstrip("^").partition("|")[0].startswith(prefix) rst = self.helplist(name, select) return rst except error.UnknownSubcommand as inst: cmd, subcmd = inst.args[:2] msg = _("'%s' has no such subcommand: %s") % (cmd, subcmd) hint = _("run 'hg help %s' to see available subcommands") % cmd raise error.Abort(msg, hint=hint) rst = [] # check if it's an invalid alias and display its error if it is if getattr(entry[0], "badalias", None): rst.append(entry[0].badalias + "\n") if entry[0].unknowncmd: try: rst.extend(self.helpextcmd(entry[0].cmdname)) except error.UnknownCommand: pass return rst # synopsis if len(entry) > 2: if entry[2].startswith("hg"): rst.append("%s\n" % entry[2]) else: rst.append("%s %s %s\n" % (identity.prog, cmd, entry[2])) else: rst.append("%s %s\n" % (identity.prog, cmd)) # aliases # try to simplify aliases, ex. compress ['ab', 'abc', 'abcd', 'abcde'] # to ['ab', 'abcde'] slimaliases = [] sortedaliases = sorted(aliases) for i, alias in enumerate(sortedaliases): if slimaliases and i + 1 < len(aliases): nextalias = sortedaliases[i + 1] if nextalias.startswith(alias) and alias.startswith(slimaliases[-1]): # Skip this alias continue slimaliases.append(alias) slimaliases = set(slimaliases) if self.full and not self.ui.quiet and len(slimaliases) > 1: rst.append( _("\naliases: %s\n") % ", ".join(a for a in aliases[1:] if a in slimaliases) ) rst.append("\n") # description doc = gettext(pycompat.getdoc(entry[0])) if not doc: doc = _("(no help text available)") if util.safehasattr(entry[0], "definition"): # aliased command aliasdoc = "" if util.safehasattr(entry[0], "aliasdoc") and entry[0].aliasdoc is not None: lines = entry[0].aliasdoc.splitlines() if lines: aliasdoc = ( "\n".join(templater.unquotestring(l) for l in lines) + "\n\n" ) source = entry[0].source if entry[0].definition.startswith("!"): # shell alias doc = _("%sshell alias for::\n\n %s\n\ndefined by: %s\n") % ( aliasdoc, 
entry[0].definition[1:], source, ) else: doc = _("%salias for: hg %s\n\n%s\n\ndefined by: %s\n") % ( aliasdoc, entry[0].definition, doc, source, ) doc = doc.splitlines(True) if self.ui.quiet or not self.full: rst.append(doc[0]) else: rst.extend(doc) rst.append("\n") # check if this command shadows a non-trivial (multi-line) # extension help text try: mod = extensions.find(name) doc = gettext(pycompat.getdoc(mod)) or "" if "\n" in doc.strip(): msg = _("(use 'hg help -e %s' to show help for the %s extension)") % ( name, name, ) rst.append("\n%s\n" % msg) except KeyError: pass # options if not self.ui.quiet and entry[1]: rst.append(optrst(_("Options"), entry[1], self.ui.verbose)) if self.ui.verbose: rst.append( optrst(_("Global options"), self.commands.globalopts, self.ui.verbose) ) # subcommands if util.safehasattr(entry[0], "subcommands") and entry[0].subcommands: rst.extend( makesubcmdlist( cmd, entry[0].subcommandcategories, entry[0].subcommands, self.ui.verbose, self.ui.quiet, ) ) if not self.ui.verbose: if not self.full: rst.append(_("\n(use 'hg %s -h' to show more help)\n") % name) elif not self.ui.quiet: rst.append( _("\n(some details hidden, use --verbose to show complete help)") ) return rst def _helpcmddoc(self, cmd, doc): if util.safehasattr(cmd, "aliasdoc") and cmd.aliasdoc is not None: return gettext(templater.unquotestring(cmd.aliasdoc.splitlines()[0])) doc = gettext(doc) if doc: doc = doc.splitlines()[0].rstrip() if not doc: doc = _("(no help text available)") return doc def _helpcmditem(self, name): cmd = self.commandindex.get(name) if cmd is None: return None doc = self._helpcmddoc(cmd[0], pycompat.getdoc(cmd[0])) return " :%s: %s\n" % (name, doc) def helplist(self, name, select=None, **opts): h = {} cmds = {} for c, e in pycompat.iteritems(self.commands.table): if select and not select(c): continue f = c.lstrip("^").partition("|")[0] doc = pycompat.getdoc(e[0]) if filtercmd(self.ui, f, name, doc): continue h[f] = self._helpcmddoc(e[0], doc) cmds[f] 
= c.lstrip("^") rst = [] if not h: if not self.ui.quiet: rst.append(_("no commands defined\n")) return rst if not self.ui.quiet: if name == "debug": header = _("Debug commands (internal and unsupported):\n\n") else: header = _("Commands:\n\n") rst.append(header) fns = sorted(h) for f in fns: if self.ui.verbose: commacmds = cmds[f].replace("|", ", ") rst.append(" :%s: %s\n" % (commacmds, h[f])) else: rst.append(" :%s: %s\n" % (f, h[f])) return rst def helphome(self): rst = [ _("@LongProduct@\n"), "\n", "hg COMMAND [OPTIONS]\n", "\n", "These are some common Mercurial commands. Use 'hg help commands' to list all " "commands, and 'hg help COMMAND' to get help on a specific command.\n", "\n", ] for desc, commands in helphomecommands: sectionrst = [] for command in commands: cmdrst = self._helpcmditem(command) if cmdrst: sectionrst.append(cmdrst) if sectionrst: rst.append(desc + ":\n\n") rst.extend(sectionrst) rst.append("\n") topics = [] for names, header, doc in helptable: if names[0] in helphometopics: topics.append((names[0], header)) if topics: rst.append(_("\nAdditional help topics:\n\n")) for t, desc in topics: rst.append(" :%s: %s\n" % (t, desc.lower())) localhelp = self.ui.config("help", "localhelp") if localhelp: rst.append("\n") rst.append(localhelp) return rst def helptopic(self, name, subtopic=None): # Look for sub-topic entry first. 
header, doc = None, None if subtopic and name in subtopics: for names, header, doc in subtopics[name]: if subtopic in names: break if not header: for names, header, doc in helptable: if name in names: break else: raise error.UnknownCommand(name) rst = [minirst.section(header)] # description if not doc: rst.append(" %s\n" % _("(no help text available)")) if callable(doc): rst += [" %s\n" % l for l in doc(self.ui).splitlines()] if not self.ui.verbose: omitted = _("(some details hidden, use --verbose to show complete help)") indicateomitted(rst, omitted) try: cmdutil.findcmd(name, self.commands.table) rst.append( _("\nuse 'hg help -c %s' to see help for the %s command\n") % (name, name) ) except error.UnknownCommand: pass return rst def helpext(self, name, subtopic=None): try: mod = extensions.find(name) doc = gettext(pycompat.getdoc(mod)) or _("no help text available") except KeyError: mod = None doc = extensions.disabledext(name) if not doc: raise error.UnknownCommand(name) if "\n" not in doc: head, tail = doc, "" else: head, tail = doc.split("\n", 1) rst = [_("%s extension - %s\n\n") % (name.rpartition(".")[-1], head)] if tail: rst.extend(tail.splitlines(True)) rst.append("\n") if not self.ui.verbose: omitted = _("(some details hidden, use --verbose to show complete help)") indicateomitted(rst, omitted) if mod: try: ct = mod.cmdtable except AttributeError: ct = {} rst.extend(self.helplist(name, ct.__contains__)) else: rst.append( _( "(use 'hg help extensions' for information on enabling" " extensions)\n" ) ) return rst def helpextcmd(self, name, subtopic=None): cmd, ext, mod = extensions.disabledcmd(self.ui, name) doc = gettext(pycompat.getdoc(mod)) if doc is None: doc = _("(no help text available)") else: doc = doc.splitlines()[0] rst = listexts( _("'%s' is provided by the following extension:") % cmd, {ext: doc}, indent=4, showdeprecated=True, ) rst.append("\n") rst.append( _("(use 'hg help extensions' for information on enabling extensions)\n") ) return rst def 
help_(ui, commands, name, unknowncmd=False, full=True, subtopic=None, **opts): """ Generate the help for 'name' as unformatted restructured text. If 'name' is None, describe the commands available. """ dispatch = _helpdispatch(ui, commands, unknowncmd, full, subtopic, **opts) rst = [] kw = opts.get("keyword") if kw or name is None and any(opts[o] for o in opts): matches = topicmatch(ui, commands, name or "") helpareas = [] if opts.get("extension"): helpareas += [("extensions", _("Extensions"))] if opts.get("command"): helpareas += [("commands", _("Commands"))] if not helpareas: helpareas = [ ("topics", _("Topics")), ("commands", _("Commands")), ("extensions", _("Extensions")), ("extensioncommands", _("Extension Commands")), ] for t, title in helpareas: if matches[t]: rst.append("%s:\n\n" % title) rst.extend(minirst.maketable(sorted(matches[t]), 1)) rst.append("\n") if not rst: msg = _("no matches") hint = _("try 'hg help' for a list of topics") raise error.Abort(msg, hint=hint) elif name == "commands": if not ui.quiet: rst = [_("@LongProduct@\n"), "\n"] rst.extend(dispatch.helplist(None, None, **opts)) elif name: rst = dispatch.dispatch(name) else: rst = dispatch.helphome() return "".join(rst) def formattedhelp(ui, commands, name, keep=None, unknowncmd=False, full=True, **opts): """get help for a given topic (as a dotted name) as rendered rst Either returns the rendered help text or raises an exception. """ if keep is None: keep = [] else: keep = list(keep) # make a copy so we can mutate this later fullname = name section = None subtopic = None if name and "." in name: name, remaining = name.split(".", 1) remaining = encoding.lower(remaining) if "." 
in remaining: subtopic, section = remaining.split(".", 1) else: if name in subtopics: subtopic = remaining else: section = remaining textwidth = ui.configint("ui", "textwidth") termwidth = ui.termwidth() - 2 if textwidth <= 0 or termwidth < textwidth: textwidth = termwidth text = help_( ui, commands, name, subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts ) formatted, pruned = minirst.format(text, textwidth, keep=keep, section=section) # We could have been given a weird ".foo" section without a name # to look for, or we could have simply failed to found "foo.bar" # because bar isn't a section of foo if section and not (formatted and name): raise error.Abort(_("help section not found: %s") % fullname) if "verbose" in pruned: keep.append("omitted") else: keep.append("notomitted") formatted, pruned = minirst.format(text, textwidth, keep=keep, section=section) return formatted
gpl-2.0
CUCWD/edx-platform
pavelib/utils/process.py
16
3300
""" Helper functions for managing processes. """ from __future__ import print_function import atexit import os import signal import subprocess import sys import psutil from paver import tasks def kill_process(proc): """ Kill the process `proc` created with `subprocess`. """ p1_group = psutil.Process(proc.pid) child_pids = p1_group.get_children(recursive=True) for child_pid in child_pids: os.kill(child_pid.pid, signal.SIGKILL) def run_multi_processes(cmd_list, out_log=None, err_log=None): """ Run each shell command in `cmd_list` in a separate process, piping stdout to `out_log` (a path) and stderr to `err_log` (also a path). Terminates the processes on CTRL-C and ensures the processes are killed if an error occurs. """ kwargs = {'shell': True, 'cwd': None} pids = [] if out_log: out_log_file = open(out_log, 'w') kwargs['stdout'] = out_log_file if err_log: err_log_file = open(err_log, 'w') kwargs['stderr'] = err_log_file # If the user is performing a dry run of a task, then just log # the command strings and return so that no destructive operations # are performed. if tasks.environment.dry_run: for cmd in cmd_list: tasks.environment.info(cmd) return try: for cmd in cmd_list: pids.extend([subprocess.Popen(cmd, **kwargs)]) # pylint: disable=unused-argument def _signal_handler(*args): """ What to do when process is ended """ print("\nEnding...") signal.signal(signal.SIGINT, _signal_handler) print("Enter CTL-C to end") signal.pause() print("Processes ending") # pylint: disable=broad-except except Exception as err: print("Error running process {}".format(err), file=sys.stderr) finally: for pid in pids: kill_process(pid) def run_process(cmd, out_log=None, err_log=None): """ Run the shell command `cmd` in a separate process, piping stdout to `out_log` (a path) and stderr to `err_log` (also a path). Terminates the process on CTRL-C or if an error occurs. 
""" return run_multi_processes([cmd], out_log=out_log, err_log=err_log) def run_background_process(cmd, out_log=None, err_log=None, cwd=None): """ Runs a command as a background process. Sends SIGINT at exit. """ kwargs = {'shell': True, 'cwd': cwd} if out_log: out_log_file = open(out_log, 'w') kwargs['stdout'] = out_log_file if err_log: err_log_file = open(err_log, 'w') kwargs['stderr'] = err_log_file proc = subprocess.Popen(cmd, **kwargs) def exit_handler(): """ Send SIGINT to the process's children. This is important for running commands under coverage, as coverage will not produce the correct artifacts if the child process isn't killed properly. """ p1_group = psutil.Process(proc.pid) child_pids = p1_group.get_children(recursive=True) for child_pid in child_pids: os.kill(child_pid.pid, signal.SIGINT) # Wait for process to actually finish proc.wait() atexit.register(exit_handler)
agpl-3.0
Learningtribes/edx-platform
common/lib/xmodule/xmodule/assetstore/tests/test_asset_xml.py
113
3631
""" Test for asset XML generation / parsing. """ from path import Path as path from lxml import etree from contracts import ContractNotRespected import unittest from opaque_keys.edx.locator import CourseLocator from xmodule.assetstore import AssetMetadata from xmodule.modulestore.tests.test_assetstore import AssetStoreTestData class TestAssetXml(unittest.TestCase): """ Tests for storing/querying course asset metadata. """ def setUp(self): super(TestAssetXml, self).setUp() xsd_filename = "assets.xsd" self.course_id = CourseLocator('org1', 'course1', 'run1') self.course_assets = [] for asset in AssetStoreTestData.all_asset_data: asset_dict = dict(zip(AssetStoreTestData.asset_fields[1:], asset[1:])) asset_md = AssetMetadata(self.course_id.make_asset_key('asset', asset[0]), **asset_dict) self.course_assets.append(asset_md) # Read in the XML schema definition and make a validator. xsd_path = path(__file__).realpath().parent / xsd_filename with open(xsd_path, 'r') as f: schema_root = etree.XML(f.read()) schema = etree.XMLSchema(schema_root) self.xmlparser = etree.XMLParser(schema=schema) def test_export_single_asset_to_from_xml(self): """ Export a single AssetMetadata to XML and verify the structure and fields. """ asset_md = self.course_assets[0] root = etree.Element("assets") asset = etree.SubElement(root, "asset") asset_md.to_xml(asset) # If this line does *not* raise, the XML is valid. etree.fromstring(etree.tostring(root), self.xmlparser) new_asset_key = self.course_id.make_asset_key('tmp', 'tmp') new_asset_md = AssetMetadata(new_asset_key) new_asset_md.from_xml(asset) # Compare asset_md to new_asset_md. for attr in AssetMetadata.XML_ATTRS: if attr in AssetMetadata.XML_ONLY_ATTRS: continue orig_value = getattr(asset_md, attr) new_value = getattr(new_asset_md, attr) self.assertEqual(orig_value, new_value) def test_export_with_None_value(self): """ Export and import a single AssetMetadata to XML with a None created_by field, without causing an exception. 
""" asset_md = AssetMetadata( self.course_id.make_asset_key('asset', 'none_value'), created_by=None, ) asset = etree.Element("asset") asset_md.to_xml(asset) asset_md.from_xml(asset) def test_export_all_assets_to_xml(self): """ Export all AssetMetadatas to XML and verify the structure and fields. """ root = etree.Element("assets") AssetMetadata.add_all_assets_as_xml(root, self.course_assets) # If this line does *not* raise, the XML is valid. etree.fromstring(etree.tostring(root), self.xmlparser) def test_wrong_node_type_all(self): """ Ensure full asset sections with the wrong tag are detected. """ root = etree.Element("glassets") with self.assertRaises(ContractNotRespected): AssetMetadata.add_all_assets_as_xml(root, self.course_assets) def test_wrong_node_type_single(self): """ Ensure single asset blocks with the wrong tag are detected. """ asset_md = self.course_assets[0] root = etree.Element("assets") asset = etree.SubElement(root, "smashset") with self.assertRaises(ContractNotRespected): asset_md.to_xml(asset)
agpl-3.0
eneldoserrata/marcos_openerp
addons/auth_oauth/res_config.py
118
3117
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

from openerp.osv import osv, fields

import logging
_logger = logging.getLogger(__name__)


class base_config_settings(osv.TransientModel):
    """Expose the Google / Facebook OAuth provider settings on the
    general-settings wizard, reading from / writing to the two
    ``auth.oauth.provider`` records shipped with the auth_oauth module."""

    _inherit = 'base.config.settings'

    _columns = {
        'auth_oauth_google_enabled': fields.boolean('Allow users to sign in with Google'),
        'auth_oauth_google_client_id': fields.char('Client ID'),
        'auth_oauth_facebook_enabled': fields.boolean('Allow users to sign in with Facebook'),
        'auth_oauth_facebook_client_id': fields.char('Client ID'),
    }

    def _get_provider_ids(self, cr, uid):
        """Return ``(google_id, facebook_id)``: database ids of the OAuth
        provider records declared by the auth_oauth module's XML data."""
        data_obj = self.pool.get('ir.model.data')
        google_id = data_obj.get_object_reference(cr, uid, 'auth_oauth', 'provider_google')[1]
        facebook_id = data_obj.get_object_reference(cr, uid, 'auth_oauth', 'provider_facebook')[1]
        return google_id, facebook_id

    def get_oauth_providers(self, cr, uid, fields, context=None):
        """Read the current provider configuration into the wizard fields."""
        google_id, facebook_id = self._get_provider_ids(cr, uid)
        provider_obj = self.pool.get('auth.oauth.provider')
        rg = provider_obj.read(cr, uid, [google_id], ['enabled', 'client_id'], context=context)
        rf = provider_obj.read(cr, uid, [facebook_id], ['enabled', 'client_id'], context=context)
        return {
            'auth_oauth_google_enabled': rg[0]['enabled'],
            'auth_oauth_google_client_id': rg[0]['client_id'],
            'auth_oauth_facebook_enabled': rf[0]['enabled'],
            'auth_oauth_facebook_client_id': rf[0]['client_id'],
        }

    def set_oauth_providers(self, cr, uid, ids, context=None):
        """Write the wizard values back onto the provider records.

        Fix: ``context`` is now propagated to the ``write`` calls, for
        consistency with the ``read`` calls in ``get_oauth_providers``
        (it was previously dropped).
        """
        google_id, facebook_id = self._get_provider_ids(cr, uid)
        config = self.browse(cr, uid, ids[0], context=context)
        rg = {
            'enabled': config.auth_oauth_google_enabled,
            'client_id': config.auth_oauth_google_client_id,
        }
        rf = {
            'enabled': config.auth_oauth_facebook_enabled,
            'client_id': config.auth_oauth_facebook_client_id,
        }
        provider_obj = self.pool.get('auth.oauth.provider')
        provider_obj.write(cr, uid, [google_id], rg, context=context)
        provider_obj.write(cr, uid, [facebook_id], rf, context=context)
agpl-3.0
kawamon/hue
desktop/core/ext-py/pytest-4.6.11/testing/io/test_saferepr.py
4
1547
# -*- coding: utf-8 -*- from _pytest._io.saferepr import saferepr def test_simple_repr(): assert saferepr(1) == "1" assert saferepr(None) == "None" def test_maxsize(): s = saferepr("x" * 50, maxsize=25) assert len(s) == 25 expected = repr("x" * 10 + "..." + "x" * 10) assert s == expected def test_maxsize_error_on_instance(): class A: def __repr__(): raise ValueError("...") s = saferepr(("*" * 50, A()), maxsize=25) assert len(s) == 25 assert s[0] == "(" and s[-1] == ")" def test_exceptions(): class BrokenRepr: def __init__(self, ex): self.ex = ex def __repr__(self): raise self.ex class BrokenReprException(Exception): __str__ = None __repr__ = None assert "Exception" in saferepr(BrokenRepr(Exception("broken"))) s = saferepr(BrokenReprException("really broken")) assert "TypeError" in s assert "TypeError" in saferepr(BrokenRepr("string")) s2 = saferepr(BrokenRepr(BrokenReprException("omg even worse"))) assert "NameError" not in s2 assert "unknown" in s2 def test_big_repr(): from _pytest._io.saferepr import SafeRepr assert len(saferepr(range(1000))) <= len("[" + SafeRepr().maxlist * "1000" + "]") def test_repr_on_newstyle(): class Function(object): def __repr__(self): return "<%s>" % (self.name) assert saferepr(Function()) def test_unicode(): val = u"£€" reprval = u"'£€'" assert saferepr(val) == reprval
apache-2.0
Tesora-Release/tesora-horizon
openstack_dashboard/contrib/trove/content/databases/views.py
1
17864
# Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Views for managing database instances. """ from collections import OrderedDict import logging from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse_lazy from django.utils.translation import ugettext_lazy as _ import six from horizon import exceptions from horizon import forms as horizon_forms from horizon import tables as horizon_tables from horizon import tabs as horizon_tabs from horizon import workflows as horizon_workflows from horizon.utils import memoized from openstack_dashboard.contrib.trove import api from openstack_dashboard.contrib.trove.content.databases import db_capability from openstack_dashboard.contrib.trove.content.databases import forms from openstack_dashboard.contrib.trove.content.databases import tables from openstack_dashboard.contrib.trove.content.databases import tabs from openstack_dashboard.contrib.trove.content.databases import workflows from openstack_dashboard.dashboards.project.instances \ import utils as instance_utils LOG = logging.getLogger(__name__) class IndexView(horizon_tables.DataTableView): table_class = tables.InstancesTable template_name = 'project/databases/index.html' page_title = _("Instances") def has_more_data(self, table): return self._more @memoized.memoized_method def get_flavors(self): try: flavors = api.trove.flavor_list(self.request) except Exception: flavors = [] msg = _('Unable to retrieve database size 
information.') exceptions.handle(self.request, msg) return OrderedDict((six.text_type(flavor.id), flavor) for flavor in flavors) def _extra_data(self, instance): flavor = self.get_flavors().get(instance.flavor["id"]) if flavor is not None: instance.full_flavor = flavor instance.host = tables.get_host(instance) return instance def get_data(self): marker = self.request.GET.get( tables.InstancesTable._meta.pagination_param) # Gather our instances try: instances = api.trove.instance_list(self.request, marker=marker) self._more = instances.next or False except Exception: self._more = False instances = [] msg = _('Unable to retrieve database instances.') exceptions.handle(self.request, msg) map(self._extra_data, instances) return instances class LaunchInstanceView(horizon_workflows.WorkflowView): workflow_class = workflows.LaunchInstance template_name = "project/databases/launch.html" page_title = _("Launch Database") def get_initial(self): initial = super(LaunchInstanceView, self).get_initial() initial['project_id'] = self.request.user.project_id initial['user_id'] = self.request.user.id return initial class AttachConfigurationView(horizon_forms.ModalFormView): form_class = forms.AttachConfigurationForm template_name = 'project/databases/attach_config.html' success_url = reverse_lazy('horizon:project:databases:index') @memoized.memoized_method def get_object(self, *args, **kwargs): instance_id = self.kwargs['instance_id'] try: return api.trove.instance_get(self.request, instance_id) except Exception: msg = _('Unable to retrieve instance details.') redirect = reverse('horizon:project:databases:index') exceptions.handle(self.request, msg, redirect=redirect) def get_context_data(self, **kwargs): context = super(AttachConfigurationView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] return context def get_initial(self): instance = self.get_object() return {'instance_id': self.kwargs['instance_id'], 'orig_size': 
instance.volume.get('size', 0), 'datastore': instance.datastore.get('type', ''), 'datastore_version': instance.datastore.get('version', '')} class DBAccess(object): def __init__(self, name, access): self.name = name self.access = access class CreateUserView(horizon_forms.ModalFormView): form_class = forms.CreateUserForm template_name = 'project/databases/create_user.html' success_url = 'horizon:project:databases:detail' def get_success_url(self): return reverse(self.success_url, args=(self.kwargs['instance_id'],)) def get_context_data(self, **kwargs): context = super(CreateUserView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] return context def get_initial(self): instance_id = self.kwargs['instance_id'] return {'instance_id': instance_id} class EditUserView(horizon_forms.ModalFormView): form_class = forms.EditUserForm template_name = 'project/databases/edit_user.html' success_url = 'horizon:project:databases:detail' def get_success_url(self): return reverse(self.success_url, args=(self.kwargs['instance_id'],)) def get_context_data(self, **kwargs): context = super(EditUserView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] context['user_name'] = self.kwargs['user_name'] context['user_host'] = self.kwargs['user_host'] return context def get_initial(self): instance_id = self.kwargs['instance_id'] user_name = self.kwargs['user_name'] user_host = self.kwargs['user_host'] return {'instance_id': instance_id, 'user_name': user_name, 'user_host': user_host} class AccessDetailView(horizon_tables.DataTableView): table_class = tables.AccessTable template_name = 'project/databases/access_detail.html' page_title = _("Database Access for: {{ user_name }}") @memoized.memoized_method def _get_data(self): instance_id = self.kwargs['instance_id'] user_name = self.kwargs['user_name'] user_host = self.kwargs['user_host'] try: databases = api.trove.database_list(self.request, instance_id) except Exception: 
redirect = reverse('horizon:project:databases:detail', args=[instance_id]) exceptions.handle(self.request, _('Unable to retrieve databases.'), redirect=redirect) try: instance = api.trove.instance_get(self.request, instance_id) username = db_capability.get_fully_qualified_username( instance.datastore['type'], user_name, user_host) granted = api.trove.user_show_access( self.request, instance_id, username) except Exception: redirect = reverse('horizon:project:databases:detail', args=[instance_id]) exceptions.handle(self.request, _('Unable to retrieve accessible databases.'), redirect=redirect) db_access_list = [] for database in databases: if database in granted: access = True else: access = False db_access = DBAccess(database.name, access) db_access_list.append(db_access) return db_access_list def get_data(self): data = self._get_data() if data is None: return [] return sorted(data, key=lambda data: (data.name)) def get_context_data(self, **kwargs): context = super(AccessDetailView, self).get_context_data(**kwargs) context["db_access"] = self._get_data() return context class DetailView(horizon_tabs.TabbedTableView): tab_group_class = tabs.InstanceDetailTabs template_name = 'project/databases/detail.html' page_title = _("Instance Details: {{ instance.name }}") def get_context_data(self, **kwargs): context = super(DetailView, self).get_context_data(**kwargs) instance = self.get_data() table = tables.InstancesTable(self.request) context["instance"] = instance context["url"] = self.get_redirect_url() context["actions"] = table.render_row_actions(instance) return context @memoized.memoized_method def get_data(self): try: LOG.info("Obtaining instance for detailed view ") instance_id = self.kwargs['instance_id'] instance = api.trove.instance_get(self.request, instance_id) instance.host = tables.get_host(instance) except Exception: msg = _('Unable to retrieve details ' 'for database instance: %s') % instance_id exceptions.handle(self.request, msg, 
redirect=self.get_redirect_url()) try: instance.full_flavor = api.trove.flavor_get( self.request, instance.flavor["id"]) except Exception: LOG.error('Unable to retrieve flavor details' ' for database instance: %s' % instance_id) return instance def get_tabs(self, request, *args, **kwargs): instance = self.get_data() return self.tab_group_class(request, instance=instance, **kwargs) @staticmethod def get_redirect_url(): return reverse('horizon:project:databases:index') class CreateDatabaseView(horizon_forms.ModalFormView): form_class = forms.CreateDatabaseForm template_name = 'project/databases/create_database.html' success_url = 'horizon:project:databases:detail' def get_success_url(self): return reverse(self.success_url, args=(self.kwargs['instance_id'],)) def get_context_data(self, **kwargs): context = super(CreateDatabaseView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] return context def get_initial(self): instance_id = self.kwargs['instance_id'] return {'instance_id': instance_id} class ResizeVolumeView(horizon_forms.ModalFormView): form_class = forms.ResizeVolumeForm template_name = 'project/databases/resize_volume.html' success_url = reverse_lazy('horizon:project:databases:index') page_title = _("Resize Database Volume") @memoized.memoized_method def get_object(self, *args, **kwargs): instance_id = self.kwargs['instance_id'] try: return api.trove.instance_get(self.request, instance_id) except Exception: msg = _('Unable to retrieve instance details.') redirect = reverse('horizon:project:databases:index') exceptions.handle(self.request, msg, redirect=redirect) def get_context_data(self, **kwargs): context = super(ResizeVolumeView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] return context def get_initial(self): instance = self.get_object() return {'instance_id': self.kwargs['instance_id'], 'orig_size': instance.volume.get('size', 0)} class 
ResizeInstanceView(horizon_forms.ModalFormView): form_class = forms.ResizeInstanceForm template_name = 'project/databases/resize_instance.html' success_url = reverse_lazy('horizon:project:databases:index') page_title = _("Resize Database Instance") @memoized.memoized_method def get_object(self, *args, **kwargs): instance_id = self.kwargs['instance_id'] try: instance = api.trove.instance_get(self.request, instance_id) flavor_id = instance.flavor['id'] flavors = {} for i, j in self.get_flavors(): flavors[str(i)] = j if flavor_id in flavors: instance.flavor_name = flavors[flavor_id] else: flavor = api.trove.flavor_get(self.request, flavor_id) instance.flavor_name = flavor.name return instance except Exception: redirect = reverse('horizon:project:databases:index') msg = _('Unable to retrieve instance details.') exceptions.handle(self.request, msg, redirect=redirect) def get_context_data(self, **kwargs): context = super(ResizeInstanceView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] return context @memoized.memoized_method def get_flavors(self, *args, **kwargs): try: flavors = api.trove.flavor_list(self.request) return instance_utils.sort_flavor_list(self.request, flavors) except Exception: redirect = reverse("horizon:project:databases:index") exceptions.handle(self.request, _('Unable to retrieve flavors.'), redirect=redirect) def get_initial(self): initial = super(ResizeInstanceView, self).get_initial() obj = self.get_object() if obj: initial.update({'instance_id': self.kwargs['instance_id'], 'old_flavor_id': obj.flavor['id'], 'old_flavor_name': getattr(obj, 'flavor_name', ''), 'flavors': self.get_flavors()}) return initial class PromoteToReplicaSourceView(horizon_forms.ModalFormView): form_class = forms.PromoteToReplicaSourceForm template_name = 'project/databases/promote_to_replica_source.html' success_url = reverse_lazy('horizon:project:databases:index') @memoized.memoized_method def get_object(self, *args, **kwargs): 
instance_id = self.kwargs['instance_id'] try: replica = api.trove.instance_get(self.request, instance_id) replica_source = api.trove.instance_get(self.request, replica.replica_of['id']) instances = {'replica': replica, 'replica_source': replica_source} return instances except Exception: msg = _('Unable to retrieve instance details.') redirect = reverse('horizon:project:databases:index') exceptions.handle(self.request, msg, redirect=redirect) def get_context_data(self, **kwargs): context = \ super(PromoteToReplicaSourceView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] context['replica'] = self.get_initial().get('replica') context['replica'].ip = \ self.get_initial().get('replica').ip[0] context['replica_source'] = self.get_initial().get('replica_source') context['replica_source'].ip = \ self.get_initial().get('replica_source').ip[0] return context def get_initial(self): instances = self.get_object() return {'instance_id': self.kwargs['instance_id'], 'replica': instances['replica'], 'replica_source': instances['replica_source']} class EnableRootInfo(object): def __init__(self, instance_id, instance_name, enabled, password=None): self.id = instance_id self.name = instance_name self.enabled = enabled self.password = password class ManageRootView(horizon_tables.DataTableView): table_class = tables.ManageRootTable template_name = 'project/databases/manage_root.html' page_title = _("Manage Root Access") @memoized.memoized_method def get_data(self): instance_id = self.kwargs['instance_id'] try: instance = api.trove.instance_get(self.request, instance_id) except Exception: redirect = reverse('horizon:project:databases:detail', args=[instance_id]) exceptions.handle(self.request, _('Unable to retrieve instance details.'), redirect=redirect) try: enabled = api.trove.root_show(self.request, instance_id) except Exception: redirect = reverse('horizon:project:databases:detail', args=[instance_id]) exceptions.handle(self.request, _('Unable 
to determine if instance root ' 'is enabled.'), redirect=redirect) root_enabled_list = [] root_enabled_info = EnableRootInfo(instance.id, instance.name, enabled.rootEnabled) root_enabled_list.append(root_enabled_info) return root_enabled_list def get_context_data(self, **kwargs): context = super(ManageRootView, self).get_context_data(**kwargs) context['instance_id'] = self.kwargs['instance_id'] return context
apache-2.0
celiafish/scikit-xray
doc/sphinxext/tests/test_docscrape.py
12
14257
# -*- encoding:utf-8 -*- import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), '..')) from docscrape import NumpyDocString, FunctionDoc, ClassDoc from docscrape_sphinx import SphinxDocString, SphinxClassDoc from nose.tools import * doc_txt = '''\ numpy.multivariate_normal(mean, cov, shape=None) Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. Parameters ---------- mean : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 cov : (N,N) ndarray Covariance matrix of the distribution. shape : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). Returns ------- out : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. Warnings -------- Certain warnings apply. Notes ----- Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. References ---------- .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. 
Stork, "Pattern Classification," 2nd ed., Wiley, 2001. See Also -------- some, other, funcs otherfunc : relationship Examples -------- >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print(x.shape) (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print(list( (x[0,0,:] - mean) < 0.6 )) [True, True] .. index:: random :refguide: random;distributions, random;gauss ''' doc = NumpyDocString(doc_txt) def test_signature(): assert doc['Signature'].startswith('numpy.multivariate_normal(') assert doc['Signature'].endswith('shape=None)') def test_summary(): assert doc['Summary'][0].startswith('Draw values') assert doc['Summary'][-1].endswith('covariance.') def test_extended_summary(): assert doc['Extended Summary'][0].startswith('The multivariate normal') def test_parameters(): assert_equal(len(doc['Parameters']), 3) assert_equal( [n for n, _, _ in doc['Parameters']], ['mean', 'cov', 'shape']) arg, arg_type, desc = doc['Parameters'][1] assert_equal(arg_type, '(N,N) ndarray') assert desc[0].startswith('Covariance matrix') assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' def test_returns(): assert_equal(len(doc['Returns']), 1) arg, arg_type, desc = doc['Returns'][0] assert_equal(arg, 'out') assert_equal(arg_type, 'ndarray') assert desc[0].startswith('The drawn samples') assert desc[-1].endswith('distribution.') def test_notes(): assert doc['Notes'][0].startswith('Instead') assert doc['Notes'][-1].endswith('definite.') assert_equal(len(doc['Notes']), 17) def test_references(): assert doc['References'][0].startswith('..') assert doc['References'][-1].endswith('2001.') def test_examples(): assert doc['Examples'][0].startswith('>>>') assert doc['Examples'][-1].endswith('True]') def test_index(): assert_equal(doc['index']['default'], 'random') print(doc['index']) assert_equal(len(doc['index']), 2) assert_equal(len(doc['index']['refguide']), 2) def non_blank_line_by_line_compare(a, b): a = 
[l for l in a.split('\n') if l.strip()] b = [l for l in b.split('\n') if l.strip()] for n, line in enumerate(a): if not line == b[n]: raise AssertionError("Lines %s of a and b differ: " "\n>>> %s\n<<< %s\n" % (n, line, b[n])) def test_str(): non_blank_line_by_line_compare(str(doc), """numpy.multivariate_normal(mean, cov, shape=None) Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. Parameters ---------- mean : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 cov : (N,N) ndarray Covariance matrix of the distribution. shape : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). Returns ------- out : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. Warnings -------- Certain warnings apply. See Also -------- `some`_, `other`_, `funcs`_ `otherfunc`_ relationship Notes ----- Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. References ---------- .. [1] A. 
Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 2nd ed., Wiley, 2001. Examples -------- >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print(x.shape) (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print(list( (x[0,0,:] - mean) < 0.6 )) [True, True] .. index:: random :refguide: random;distributions, random;gauss""") def test_sphinx_str(): sphinx_doc = SphinxDocString(doc_txt) non_blank_line_by_line_compare(str(sphinx_doc), """ .. index:: random single: random;distributions, random;gauss Draw values from a multivariate normal distribution with specified mean and covariance. The multivariate normal or Gaussian distribution is a generalisation of the one-dimensional normal distribution to higher dimensions. :Parameters: **mean** : (N,) ndarray Mean of the N-dimensional distribution. .. math:: (1+2+3)/3 **cov** : (N,N) ndarray Covariance matrix of the distribution. **shape** : tuple of ints Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). :Returns: **out** : ndarray The drawn samples, arranged according to `shape`. If the shape given is (m,n,...), then the shape of `out` is is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution. .. warning:: Certain warnings apply. .. seealso:: :obj:`some`, :obj:`other`, :obj:`funcs` :obj:`otherfunc` relationship .. 
rubric:: Notes Instead of specifying the full covariance matrix, popular approximations include: - Spherical covariance (`cov` is a multiple of the identity matrix) - Diagonal covariance (`cov` has non-negative elements only on the diagonal) This geometrical property can be seen in two dimensions by plotting generated data-points: >>> mean = [0,0] >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis >>> x,y = multivariate_normal(mean,cov,5000).T >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() Note that the covariance matrix must be symmetric and non-negative definite. .. rubric:: References .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic Processes," 3rd ed., McGraw-Hill Companies, 1991 .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 2nd ed., Wiley, 2001. .. only:: latex [1]_, [2]_ .. rubric:: Examples >>> mean = (1,2) >>> cov = [[1,0],[1,0]] >>> x = multivariate_normal(mean,cov,(3,3)) >>> print(x.shape) (3, 3, 2) The following is probably true, given that 0.6 is roughly twice the standard deviation: >>> print(list( (x[0,0,:] - mean) < 0.6 )) [True, True] """) doc2 = NumpyDocString(""" Returns array of indices of the maximum values of along the given axis. Parameters ---------- a : {array_like} Array to look in. axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis""") def test_parameters_without_extended_description(): assert_equal(len(doc2['Parameters']), 2) doc3 = NumpyDocString(""" my_signature(*params, **kwds) Return this and that. """) def test_escape_stars(): signature = str(doc3).split('\n')[0] assert_equal(signature, 'my_signature(\*params, \*\*kwds)') doc4 = NumpyDocString( """a.conj() Return an array with all complex-valued elements conjugated.""") def test_empty_extended_summary(): assert_equal(doc4['Extended Summary'], []) doc5 = NumpyDocString( """ a.something() Raises ------ LinAlgException If array is singular. 
""") def test_raises(): assert_equal(len(doc5['Raises']), 1) name, _, desc = doc5['Raises'][0] assert_equal(name, 'LinAlgException') assert_equal(desc, ['If array is singular.']) def test_see_also(): doc6 = NumpyDocString( """ z(x,theta) See Also -------- func_a, func_b, func_c func_d : some equivalent func foo.func_e : some other func over multiple lines func_f, func_g, :meth:`func_h`, func_j, func_k :obj:`baz.obj_q` :class:`class_j`: fubar foobar """) assert len(doc6['See Also']) == 12 for func, desc, role in doc6['See Also']: if func in ('func_a', 'func_b', 'func_c', 'func_f', 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): assert(not desc) else: assert(desc) if func == 'func_h': assert role == 'meth' elif func == 'baz.obj_q': assert role == 'obj' elif func == 'class_j': assert role == 'class' else: assert role is None if func == 'func_d': assert desc == ['some equivalent func'] elif func == 'foo.func_e': assert desc == ['some other func over', 'multiple lines'] elif func == 'class_j': assert desc == ['fubar', 'foobar'] def test_see_also_print(): class Dummy(object): """ See Also -------- func_a, func_b func_c : some relationship goes here func_d """ pass obj = Dummy() s = str(FunctionDoc(obj, role='func')) assert(':func:`func_a`, :func:`func_b`' in s) assert(' some relationship' in s) assert(':func:`func_d`' in s) doc7 = NumpyDocString(""" Doc starts on second line. 
""") def test_empty_first_line(): assert doc7['Summary'][0].startswith('Doc starts') def test_no_summary(): str(SphinxDocString(""" Parameters ----------""")) def test_unicode(): doc = SphinxDocString(""" öäöäöäöäöåååå öäöäöäööäååå Parameters ---------- ååå : äää ööö Returns ------- ååå : ööö äää """) assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8') def test_plot_examples(): cfg = dict(use_plots=True) doc = SphinxDocString(""" Examples -------- >>> import matplotlib.pyplot as plt >>> plt.plot([1,2,3],[4,5,6]) >>> plt.show() """, config=cfg) assert 'plot::' in str(doc), str(doc) doc = SphinxDocString(""" Examples -------- .. plot:: import matplotlib.pyplot as plt plt.plot([1,2,3],[4,5,6]) plt.show() """, config=cfg) assert str(doc).count('plot::') == 1, str(doc) def test_class_members(): class Dummy(object): """ Dummy class. """ def spam(self, a, b): """Spam\n\nSpam spam.""" pass def ham(self, c, d): """Cheese\n\nNo cheese.""" pass for cls in (ClassDoc, SphinxClassDoc): doc = cls(Dummy, config=dict(show_class_members=False)) assert 'Methods' not in str(doc), (cls, str(doc)) assert 'spam' not in str(doc), (cls, str(doc)) assert 'ham' not in str(doc), (cls, str(doc)) doc = cls(Dummy, config=dict(show_class_members=True)) assert 'Methods' in str(doc), (cls, str(doc)) assert 'spam' in str(doc), (cls, str(doc)) assert 'ham' in str(doc), (cls, str(doc)) if cls is SphinxClassDoc: assert '.. autosummary::' in str(doc), str(doc)
bsd-3-clause
rodorad/spark-tk
python/sparktk/frame/ops/topk.py
14
5872
# vim: set encoding=utf-8 # Copyright (c) 2016 Intel Corporation  # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # #       http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # def top_k(self, column_name, k, weight_column=None): """ Most or least frequent column values. Parameters ---------- :param column_name: (str) The column whose top (or bottom) K distinct values are to be calculated. :param k: (int) Number of entries to return (If k is negative, return bottom k). :param weight_column: (Optional[str]) The column that provides weights (frequencies) for the topK calculation. Must contain numerical data. Default is 1 for all items. Calculate the top (or bottom) K distinct values by count of a column. The column can be weighted. All data elements of weight <= 0 are excluded from the calculation, as are all data elements whose weight is NaN or infinite. If there are no data elements of finite weight > 0, then topK is empty. Examples -------- For this example, we calculate the top 2 counties in a data frame: <hide> >>> frame = tc.frame.create([[1, "Portland", 609456, 583776, "4.40%", "Multnomah" ], ... [2, "Salem", 160614, 154637, "3.87%", "Marion" ], ... [3, "Eugene", 159190, 156185, "1.92%", "Lane" ], ... [4, "Gresham", 109397, 105594, "3.60%", "Multnomah" ], ... [5, "Hillsboro", 97368, 91611, "6.28%", "Washington" ], ... [6, "Beaverton", 93542, 89803, "4.16%", "Washington" ], ... [15, "Grants Pass", 35076, 34533, "1.57%", "Josephine" ], ... [16, "Oregon City", 34622, 31859, "8.67%", "Clackamas" ], ... 
[17, "McMinnville", 33131, 32187, "2.93%", "Yamhill" ], ... [18, "Redmond", 27427, 26215, "4.62%", "Deschutes" ], ... [19, "Tualatin", 26879, 26054, "4.17%", "Washington" ], ... [20, "West Linn", 25992, 25109, "3.52%", "Clackamas" ], ... [7, "Bend", 81236, 76639, "6.00%", "Deschutes" ], ... [8, "Medford", 77677, 74907, "3.70%", "Jackson" ], ... [9, "Springfield", 60177, 59403, "1.30%", "Lane" ], ... [10, "Corvallis", 55298, 54462, "1.54%", "Benton" ], ... [11, "Albany", 51583, 50158, "2.84%", "Linn" ], ... [12, "Tigard", 50444, 48035, "5.02%", "Washington" ], ... [13, "Lake Oswego", 37610, 36619, "2.71%", "Clackamas" ], ... [14, "Keizer", 37064,36478, "1.61%", "Marion" ]], ... [('rank', int), ('city', str), ('population_2013', int), ('population_2010',int), ('change',str), ('county',str)]) -etc- </hide> Consider the following frame: >>> frame.inspect(frame.count()) [##] rank city population_2013 population_2010 change county ============================================================================= [0] 1 Portland 609456 583776 4.40% Multnomah [1] 2 Salem 160614 154637 3.87% Marion [2] 3 Eugene 159190 156185 1.92% Lane [3] 4 Gresham 109397 105594 3.60% Multnomah [4] 5 Hillsboro 97368 91611 6.28% Washington [5] 6 Beaverton 93542 89803 4.16% Washington [6] 15 Grants Pass 35076 34533 1.57% Josephine [7] 16 Oregon City 34622 31859 8.67% Clackamas [8] 17 McMinnville 33131 32187 2.93% Yamhill [9] 18 Redmond 27427 26215 4.62% Deschutes [10] 19 Tualatin 26879 26054 4.17% Washington [11] 20 West Linn 25992 25109 3.52% Clackamas [12] 7 Bend 81236 76639 6.00% Deschutes [13] 8 Medford 77677 74907 3.70% Jackson [14] 9 Springfield 60177 59403 1.30% Lane [15] 10 Corvallis 55298 54462 1.54% Benton [16] 11 Albany 51583 50158 2.84% Linn [17] 12 Tigard 50444 48035 5.02% Washington [18] 13 Lake Oswego 37610 36619 2.71% Clackamas [19] 14 Keizer 37064 36478 1.61% Marion >>> top_frame = frame.top_k("county", 2) <progress> >>> top_frame.inspect() [#] county count ====================== 
[0] Washington 4.0 [1] Clackamas 3.0 """ from sparktk.frame.frame import Frame return Frame(self._tc, self._scala.topK(column_name, k, self._tc.jutils.convert.to_scala_option(weight_column)))
apache-2.0
kvar/ansible
lib/ansible/modules/monitoring/sensu_silence.py
52
8505
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Steven Bambling <smbambling@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: sensu_silence version_added: "2.4" author: Steven Bambling (@smbambling) short_description: Manage Sensu silence entries description: - Create and clear (delete) a silence entries via the Sensu API for subscriptions and checks. options: check: description: - Specifies the check which the silence entry applies to. creator: description: - Specifies the entity responsible for this entry. expire: description: - If specified, the silence entry will be automatically cleared after this number of seconds. expire_on_resolve: description: - If specified as true, the silence entry will be automatically cleared once the condition it is silencing is resolved. type: bool reason: description: - If specified, this free-form string is used to provide context or rationale for the reason this silence entry was created. state: description: - Specifies to create or clear (delete) a silence entry via the Sensu API required: true default: present choices: ['present', 'absent'] subscription: description: - Specifies the subscription which the silence entry applies to. - To create a silence entry for a client prepend C(client:) to client name. Example - C(client:server1.example.dev) required: true default: [] url: description: - Specifies the URL of the Sensu monitoring host server. 
required: false default: http://127.0.01:4567 ''' EXAMPLES = ''' # Silence ALL checks for a given client - name: Silence server1.example.dev sensu_silence: subscription: client:server1.example.dev creator: "{{ ansible_user_id }}" reason: Performing maintenance # Silence specific check for a client - name: Silence CPU_Usage check for server1.example.dev sensu_silence: subscription: client:server1.example.dev check: CPU_Usage creator: "{{ ansible_user_id }}" reason: Investigation alert issue # Silence multiple clients from a dict silence: server1.example.dev: reason: 'Deployment in progress' server2.example.dev: reason: 'Deployment in progress' - name: Silence several clients from a dict sensu_silence: subscription: "client:{{ item.key }}" reason: "{{ item.value.reason }}" creator: "{{ ansible_user_id }}" with_dict: "{{ silence }}" ''' RETURN = ''' ''' import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url def query(module, url, check, subscription): headers = { 'Content-Type': 'application/json', } url = url + '/silenced' request_data = { 'check': check, 'subscription': subscription, } # Remove keys with None value for k, v in dict(request_data).items(): if v is None: del request_data[k] response, info = fetch_url( module, url, method='GET', headers=headers, data=json.dumps(request_data) ) if info['status'] == 500: module.fail_json( msg="Failed to query silence %s. 
Reason: %s" % (subscription, info) ) try: json_out = json.loads(response.read()) except Exception: json_out = "" return False, json_out, False def clear(module, url, check, subscription): # Test if silence exists before clearing (rc, out, changed) = query(module, url, check, subscription) d = dict((i['subscription'], i['check']) for i in out) subscription_exists = subscription in d if check and subscription_exists: exists = (check == d[subscription]) else: exists = subscription_exists # If check/subscription doesn't exist # exit with changed state of False if not exists: return False, out, changed # module.check_mode is inherited from the AnsibleMOdule class if not module.check_mode: headers = { 'Content-Type': 'application/json', } url = url + '/silenced/clear' request_data = { 'check': check, 'subscription': subscription, } # Remove keys with None value for k, v in dict(request_data).items(): if v is None: del request_data[k] response, info = fetch_url( module, url, method='POST', headers=headers, data=json.dumps(request_data) ) if info['status'] != 204: module.fail_json( msg="Failed to silence %s. 
Reason: %s" % (subscription, info) ) try: json_out = json.loads(response.read()) except Exception: json_out = "" return False, json_out, True return False, out, True def create( module, url, check, creator, expire, expire_on_resolve, reason, subscription): (rc, out, changed) = query(module, url, check, subscription) for i in out: if (i['subscription'] == subscription): if ( (check is None or check == i['check']) and ( creator == '' or creator == i['creator'])and ( reason == '' or reason == i['reason']) and ( expire is None or expire == i['expire']) and ( expire_on_resolve is None or expire_on_resolve == i['expire_on_resolve'] ) ): return False, out, False # module.check_mode is inherited from the AnsibleMOdule class if not module.check_mode: headers = { 'Content-Type': 'application/json', } url = url + '/silenced' request_data = { 'check': check, 'creator': creator, 'expire': expire, 'expire_on_resolve': expire_on_resolve, 'reason': reason, 'subscription': subscription, } # Remove keys with None value for k, v in dict(request_data).items(): if v is None: del request_data[k] response, info = fetch_url( module, url, method='POST', headers=headers, data=json.dumps(request_data) ) if info['status'] != 201: module.fail_json( msg="Failed to silence %s. 
Reason: %s" % (subscription, info['msg']) ) try: json_out = json.loads(response.read()) except Exception: json_out = "" return False, json_out, True return False, out, True def main(): module = AnsibleModule( argument_spec=dict( check=dict(required=False), creator=dict(required=False), expire=dict(type='int', required=False), expire_on_resolve=dict(type='bool', required=False), reason=dict(required=False), state=dict(default='present', choices=['present', 'absent']), subscription=dict(required=True), url=dict(required=False, default='http://127.0.01:4567'), ), supports_check_mode=True ) url = module.params['url'] check = module.params['check'] creator = module.params['creator'] expire = module.params['expire'] expire_on_resolve = module.params['expire_on_resolve'] reason = module.params['reason'] subscription = module.params['subscription'] state = module.params['state'] if state == 'present': (rc, out, changed) = create( module, url, check, creator, expire, expire_on_resolve, reason, subscription ) if state == 'absent': (rc, out, changed) = clear(module, url, check, subscription) if rc != 0: module.fail_json(msg="failed", result=out) module.exit_json(msg="success", result=out, changed=changed) if __name__ == '__main__': main()
gpl-3.0
eriser/supercollider
editors/sced/scedwin/py/WindowHelper.py
44
12384
# sced (SuperCollider mode for gedit)
#
# Copyright 2012 Jakob Leben
# Copyright 2009 Artem Popov and other contributors (see AUTHORS)
#
# sced is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import gtk
import gedit
import gio

from LogPanel import LogPanel
from ScLang import ScLang
from Logger import Logger
from util import *

# UI merged into gedit while the plugin is loaded: one toggle item under
# Tools that turns SuperCollider mode on/off for this window.
ui_str = """<ui>
  <menubar name="MenuBar">
    <menu name="ToolsMenu" action="Tools">
      <placeholder name="ToolsOps_5">
        <menuitem action="ScedSuperColliderMode"/>
      </placeholder>
    </menu>
  </menubar>
</ui>
"""

# UI merged only while SuperCollider mode is active (sclang running):
# the Super_Collider menu plus a Record button on the toolbar.
scui_str = """<ui>
  <menubar name="MenuBar">
    <placeholder name="ExtraMenu_1">
      <menu action="SuperColliderMenu">
        <menuitem action="ScedEvaluate"/>
        <separator/>
        <menuitem action="ScedStopSound"/>
        <menuitem action="ScedRecord"/>
        <separator/>
        <menuitem action="ScedServerGUI"/>
        <menuitem action="ScedServerMeter"/>
        <menuitem action="ScedStartServer"/>
        <menuitem action="ScedStopServer"/>
        <separator/>
        <menuitem action="ScedStartSwingOSC"/>
        <menuitem action="ScedStopSwingOSC"/>
        <separator/>
        <menuitem action="ScedFindDefinition"/>
        <menuitem action="ScedBrowseClass"/>
        <separator/>
        <menuitem action="ScedInspectObject"/>
        <separator/>
        <menuitem action="ScedRestartInterpreter"/>
        <menuitem action="ScedRecompile"/>
        <menuitem action="ScedClearOutput"/>
      </menu>
    </placeholder>
  </menubar>
  <toolbar name="ToolBar">
    <separator/>
    <toolitem action="ScedRecord"/>
  </toolbar>
</ui>
"""
# HelpBrowser does not work on Windows (yet)
#        <separator/>
#        <menuitem action="ScedFindHelp"/>
#        <menuitem action="ScedBrowseHelp"/>
#        <menuitem action="ScedSearchHelp"/>
#        <menuitem action="ScedMethodArgs"/>


class WindowHelper:
    """Per-gedit-window plugin state.

    Owns the menu merge/unmerge, the sclang subprocess wrapper (ScLang),
    the bottom-panel log view (LogPanel) and the Logger that pipes sclang
    stdout into it.  __lang is None whenever SuperCollider mode is off.
    """

    def __init__(self, plugin, window):
        self.__lang = None      # ScLang instance while mode is active, else None
        self.__logger = None    # Logger wired to sclang stdout while active
        self.__plugin = plugin
        self.__window = window

        self.__insert_menu()

    def deactivate(self):
        """Called by the plugin when the window goes away; undo everything."""
        self.__deactivate()
        self.__remove_menu()
        self.__plugin = None
        self.__window = None

    def update_ui(self):
        pass

    def __insert_menu(self):
        # Merge the always-present Tools > SuperCollider Mode toggle.
        manager = self.__window.get_ui_manager()
        self.__actions = gtk.ActionGroup("ScedActions")
        toggle_entries = [
            ("ScedSuperColliderMode", None, "_SuperCollider Mode", None,
             _("Toggle SuperCollider interaction mode"),
             self.on_sc_mode_activate, False)
        ]
        self.__actions.add_toggle_actions(toggle_entries)
        manager.insert_action_group(self.__actions, -1)
        self.__ui_id = manager.add_ui_from_string(ui_str)

    def __remove_menu(self):
        manager = self.__window.get_ui_manager()
        manager.remove_ui(self.__ui_id)
        manager.remove_action_group(self.__actions)
        manager.ensure_update()

    def __insert_sc_menu(self):
        # Merge the full SuperCollider menu; only called once sclang started.
        manager = self.__window.get_ui_manager()
        self.__sc_actions = gtk.ActionGroup("SuperColliderActions")
        entries = [
            ("SuperColliderMenu", None, "Super_Collider"),

            ("ScedEvaluate", gtk.STOCK_EXECUTE, _("Evaluate"), "<control>E",
             _("Evaluate line or selection"),
             self.on_evaluate),

            ("ScedStopSound", gtk.STOCK_STOP, _("Stop Sound"), "Escape",
             _("Stop sound and free all server nodes"),
             self.on_stop_sound),

            # HelpBrowser does not work on windows (yet):
            # ("ScedFindHelp", None, _("Find Help"), "<control>U",
            #  _("Find and open help file"),
            #  self.on_find_help),

            # ("ScedBrowseHelp", None, _("Browse Help"), None,
            #  _("Browse help by categories"),
            #  self.on_browse_help),

            # ("ScedSearchHelp", None, _("Search Help"), "<control><alt>U",
            #  _("Search for help"),
            #  self.on_search_help),

            # ("ScedMethodArgs", None, _("Show method args"), "<alt>A",
            #  _("Show method arguments and defaults"),
            #  self.on_method_args),

            ("ScedFindDefinition", None, _("Find Class Definition"), "<control>Y",
             _("Find and open definition of selected class"),
             self.on_find_definition),

            ("ScedBrowseClass", None, _("Browse Class"), None,
             _("Show selected class in Class Browser"),
             self.on_browse_class),

            ("ScedInspectObject", None, _("Inspect Object"), None,
             _("Inspect object state"),
             self.on_inspect_object),

            ("ScedRestartInterpreter", None, _("Restart Interpreter"), None,
             _("Restart sclang"),
             self.on_restart),

            ("ScedRecompile", None, _("Recompile class library"), "<control><shift>R",
             _("Recompile class library"),
             self.on_recompile),

            ("ScedClearOutput", gtk.STOCK_CLEAR, _("Clear output"), None,
             _("Clear interpreter log"),
             self.on_clear_log),

            ("ScedServerGUI", None, _("Show Server GUI"), None,
             _("Show GUI for default server"),
             self.on_server_gui),

            ("ScedServerMeter", None, _("Show level meters"), None,
             _("Show level meters for default server"),
             self.on_server_meter),

            ("ScedStartServer", None, _("Start Server"), None,
             _("Start the default server"),
             self.on_start_server),

            ("ScedStopServer", None, _("Stop Server"), None,
             _("Stop the default server"),
             self.on_stop_server),

            ("ScedStartSwingOSC", None, _("Start SwingOSC GUI Server"), None,
             _("Start the SwingOSC GUI server"),
             self.on_start_swingosc),

            ("ScedStopSwingOSC", None, _("Stop SwingOSC GUI Server"), None,
             _("Stop the SwingOSC GUI server"),
             self.on_stop_swingosc),
        ]

        toggle_entries = [
            ("ScedRecord", gtk.STOCK_MEDIA_RECORD, "Record", None,
             _("Toggle recording"),
             self.on_record, False)
        ]

        self.__sc_actions.add_actions(entries)
        self.__sc_actions.add_toggle_actions(toggle_entries)
        manager.insert_action_group(self.__sc_actions, -1)
        self.__scui_id = manager.add_ui_from_string(scui_str)

    def __remove_sc_menu(self):
        manager = self.__window.get_ui_manager()
        manager.remove_ui(self.__scui_id)
        manager.remove_action_group(self.__sc_actions)
        manager.ensure_update()

    def __activate(self):
        """Start sclang and build the SC menu + log panel (idempotent)."""
        if self.__lang is not None:
            return
        self.__lang = ScLang(self.__plugin)
        if not self.__lang.start():
            # sclang failed to launch: roll back the toggle so the UI state
            # matches reality.
            self.__lang = None
            self.__actions.get_action("ScedSuperColliderMode").set_active(False)
            return
        self.__log_panel = LogPanel()
        panel = self.__window.get_bottom_panel()
        panel.show()
        panel.add_item(self.__log_panel, _("SuperCollider output"), gtk.STOCK_EXECUTE)
        self.__log_panel.show()
        self.__insert_sc_menu()
        # Pipe interpreter stdout into the log panel.
        self.__logger = Logger(self.__lang.stdout, self.__log_panel)

    def __deactivate(self):
        # FIXME: un-record
        if self.__lang is None:
            return
        self.__lang.stop()
        self.__lang = None
        self.__logger.stop()
        panel = self.__window.get_bottom_panel()
        panel.remove_item(self.__log_panel)
        self.__remove_sc_menu()

    def on_sc_mode_activate(self, action):
        if action.get_active():
            self.__activate()
        else:
            self.__deactivate()

    def on_evaluate(self, action):
        """Send the selection (or current line / enclosing block) to sclang."""
        doc = self.__window.get_active_document()

        try:
            i1, i2 = doc.get_selection_bounds()
        except ValueError:
            # No selection: fall back to the whole current line.
            i1 = doc.get_iter_at_mark(doc.get_insert())
            i1.set_line_offset(0)
            i2 = i1.copy()
            i2.forward_to_line_end()

        if is_block_beginning(doc.get_text(i1, i2)):
            # Line opens a parenthesised code block: evaluate the whole block.
            try:
                i1, i2 = find_block(doc, i1)
            except RuntimeError:
                statusbar = self.__window.get_statusbar()
                context = statusbar.get_context_id("supercollider")
                statusbar.flash_message(context,
                    "Code block is not properly closed")
                return
            doc.select_range(i1, i2)

        text = doc.get_text(i1, i2)
        self.__lang.evaluate(text)

    def on_stop_sound(self, action):
        record = self.__sc_actions.get_action("ScedRecord");
        if record.get_active():
            record.activate() # untoggle
        self.__lang.stop_sound()

    def on_record(self, action):
        self.__lang.toggle_recording(action.get_active())

    def get_selection(self):
        """Return the selected text, or select and return the word at the cursor."""
        doc = self.__window.get_active_document()

        try:
            i1, i2 = doc.get_selection_bounds()
        except ValueError:
            i1 = doc.get_iter_at_mark(doc.get_insert())
            i1, i2 = find_word(doc, i1)
            doc.select_range(i1, i2)

        return doc.get_text(i1, i2)

    def on_find_help(self, action):
        text = self.get_selection()
        self.__lang.evaluate("HelpBrowser.openHelpFor(\"" + text + "\");")

    def on_browse_help(self, action):
        self.__lang.evaluate("HelpBrowser.openBrowsePage;")

    def on_search_help(self, action):
        text = self.get_selection()
        self.__lang.evaluate("HelpBrowser.openSearchPage(\"" + text + "\");")

    def on_method_args(self, action):
        text = self.get_selection()
        self.__lang.evaluate("Help.methodArgs(\"" + text + "\");")

    def on_find_definition(self, action):
        text = self.get_selection()
        self.__lang.evaluate( text + ".openCodeFile" )

    def on_browse_class(self, action):
        text = self.get_selection()
        self.__lang.evaluate("" + text + ".browse", silent=True)

    def on_open_dev_file(self, action):
        doc = self.__window.get_active_document()
        path = gio.File(doc.get_uri()).get_path() #get_location()
        self.__lang.evaluate("(\"gedit\"+thisProcess.platform.devLoc(\""+path+"\")).systemCmd", silent=True);

    def on_inspect_object(self, action):
        text = self.get_selection()
        self.__lang.evaluate("" + text + ".inspect", silent=True)

    def on_recompile(self, action):
        # 0x18 (CAN) on sclang stdin triggers a class-library recompile.
        self.__lang.stdin.write("\x18")

    def on_restart(self, action):
        self.__deactivate()
        self.__activate()

    def on_clear_log(self, action):
        self.__log_panel.buffer.set_text("")

    def on_server_gui(self, action):
        self.__lang.evaluate("Server.default.makeGui;", silent=True)

    def on_server_meter(self, action):
        self.__lang.evaluate("Server.default.meter;", silent=True)

    def on_start_server(self, action):
        # FIXME: make these actions possible only if interpreter is running and okay
        self.__lang.evaluate("Server.default.boot;", silent=True)

    def on_stop_server(self, action):
        # FIXME: make these actions possible only if interpreter is running and okay
        self.__lang.evaluate("Server.default.quit;", silent=True)

    def on_start_swingosc(self, action):
        # FIXME: make these actions possible only if interpreter is running and okay
        self.__lang.evaluate("SwingOSC.default.boot;GUI.swing;", silent=False)

    def on_stop_swingosc(self, action):
        # FIXME: make these actions possible only if interpreter is running and okay
        self.__lang.evaluate("SwingOSC.default.quit;", silent=False)
gpl-3.0
cloudnull/ansible-modules-core
cloud/amazon/elasticache_subnet_group.py
107
5473
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: elasticache_subnet_group version_added: "2.0" short_description: manage Elasticache subnet groups description: - Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5. options: state: description: - Specifies whether the subnet should be present or absent. required: true default: present choices: [ 'present' , 'absent' ] name: description: - Database subnet group identifier. required: true description: description: - Elasticache subnet group description. Only set when a new group is added. required: false default: null subnets: description: - List of subnet IDs that make up the Elasticache subnet group. required: false default: null region: description: - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. 
required: true aliases: ['aws_region', 'ec2_region'] author: "Tim Mahoney (@timmahoney)" extends_documentation_fragment: aws ''' EXAMPLES = ''' # Add or change a subnet group - elasticache_subnet_group state: present name: norwegian-blue description: My Fancy Ex Parrot Subnet Group subnets: - subnet-aaaaaaaa - subnet-bbbbbbbb # Remove a subnet group - elasticache_subnet_group: state: absent name: norwegian-blue ''' try: import boto from boto.elasticache.layer1 import ElastiCacheConnection from boto.regioninfo import RegionInfo from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state = dict(required=True, choices=['present', 'absent']), name = dict(required=True), description = dict(required=False), subnets = dict(required=False, type='list'), ) ) module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') state = module.params.get('state') group_name = module.params.get('name').lower() group_description = module.params.get('description') group_subnets = module.params.get('subnets') or {} if state == 'present': for required in ['name', 'description', 'subnets']: if not module.params.get(required): module.fail_json(msg = str("Parameter %s required for state='present'" % required)) else: for not_allowed in ['description', 'subnets']: if module.params.get(not_allowed): module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) # Retrieve any AWS settings from the environment. 
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) if not region: module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) """Get an elasticache connection""" try: endpoint = "elasticache.%s.amazonaws.com" % region connect_region = RegionInfo(name=region, endpoint=endpoint) conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=e.message) try: changed = False exists = False try: matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100) exists = len(matching_groups) > 0 except BotoServerError, e: if e.error_code != 'CacheSubnetGroupNotFoundFault': module.fail_json(msg = e.error_message) if state == 'absent': if exists: conn.delete_cache_subnet_group(group_name) changed = True else: if not exists: new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) changed = True else: changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) changed = True except BotoServerError, e: if e.error_message != 'No modifications were requested.': module.fail_json(msg = e.error_message) else: changed = False module.exit_json(changed=changed) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * main()
gpl-3.0
bobvanderlinden/machinekit
src/machinetalk/support/encdec.py
8
1543
# Demo: round-trip a machinetalk protobuf Container through text, wire and
# JSON encodings.  Python 2 script; prints each representation to stdout.
import sys
import os
import binascii

# this assumes the python modules generated from src/protobuf/proto/*.proto have
# been built:
from types_pb2 import *
from value_pb2 import *
from object_pb2 import *
from message_pb2 import *
import google.protobuf.text_format

# this monkey patches two methods into the protobuf Message object
# (new methods - nothing is overriden):
#
# msg.SerializeToJson()
# msg.ParseFromJSON(buf)
import pb2json
import json # pretty printer

# Build a sample HAL update message: one S32 pin "foo.1.bar" = 4711.
container = Container()
container.type = MT_HALUPDATE
container.serial = 34567
arg = container.args.add()
arg.type = HAL_PIN
arg.pin.type = HAL_S32
arg.pin.name = "foo.1.bar"
arg.pin.hals32 = 4711

print "payload:", container.ByteSize()
print "text format:", str(container)

# Binary wire format, shown hex-encoded.
buffer = container.SerializeToString()
print "wire format length=%d %s" % (len(buffer), binascii.hexlify(buffer))

# JSON view via the pb2json monkey patch, pretty-printed.
jsonout = container.SerializeToJSON()
print "json format:",json.dumps(json.loads(jsonout), indent=4)

# Hand-written JSON equivalent of the message above, for the reverse trip.
jsonmsg = '''
{
    "serial": 34567,
    "args": [
        {
            "type": 1,
            "pin": {
                "type": 3,
                "name": "foo.1.bar",
                "hals32": 4711
            }
        }
    ],
    "type": 8
}
'''

# Parse JSON back into a protobuf Container and re-serialize it.
request = Container()
print "Parsing message from JSON into protobuf: ", jsonmsg
request.ParseFromJSON(jsonmsg)
print "and its protobuf text format parsed back from JSON is:\n", str(request)

buffer3 = request.SerializeToString()
print "the protobuf wire format - length=%d:\n%s" % (len(buffer3), binascii.hexlify(buffer3))
lgpl-2.1
damonkohler/sl4a
python/src/Demo/turtle/tdemo_lindenmayer_indian.py
32
2428
#!/usr/bin/python """ turtle-example-suite: xtx_lindenmayer_indian.py Each morning women in Tamil Nadu, in southern India, place designs, created by using rice flour and known as kolam on the thresholds of their homes. These can be described by Lindenmayer systems, which can easily be implemented with turtle graphics and Python. Two examples are shown here: (1) the snake kolam (2) anklets of Krishna Taken from Marcia Ascher: Mathematics Elsewhere, An Exploration of Ideas Across Cultures """ ################################ # Mini Lindenmayer tool ############################### from turtle import * def replace( seq, replacementRules, n ): for i in range(n): newseq = "" for element in seq: newseq = newseq + replacementRules.get(element,element) seq = newseq return seq def draw( commands, rules ): for b in commands: try: rules[b]() except TypeError: try: draw(rules[b], rules) except: pass def main(): ################################ # Example 1: Snake kolam ################################ def r(): right(45) def l(): left(45) def f(): forward(7.5) snake_rules = {"-":r, "+":l, "f":f, "b":"f+f+f--f--f+f+f"} snake_replacementRules = {"b": "b+f+b--f--b+f+b"} snake_start = "b--f--b--f" drawing = replace(snake_start, snake_replacementRules, 3) reset() speed(3) tracer(1,0) ht() up() backward(195) down() draw(drawing, snake_rules) from time import sleep sleep(3) ################################ # Example 2: Anklets of Krishna ################################ def A(): color("red") circle(10,90) def B(): from math import sqrt color("black") l = 5/sqrt(2) forward(l) circle(l, 270) forward(l) def F(): color("green") forward(10) krishna_rules = {"a":A, "b":B, "f":F} krishna_replacementRules = {"a" : "afbfa", "b" : "afbfbfbfa" } krishna_start = "fbfbfbfb" reset() speed(0) tracer(3,0) ht() left(45) drawing = replace(krishna_start, krishna_replacementRules, 3) draw(drawing, krishna_rules) tracer(1) return "Done!" if __name__=='__main__': msg = main() print msg mainloop()
apache-2.0
RPI-OPENEDX/edx-platform
common/djangoapps/track/tests/test_tracker.py
35
3443
# Tests for track.tracker backend instantiation (Python 2: uses xrange and
# direct indexing of dict.values()).
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings

import track.tracker as tracker
from track.backends import BaseBackend


# Single backend with options, keyed 'default'.
SIMPLE_SETTINGS = {
    'default': {
        'ENGINE': 'track.tests.test_tracker.DummyBackend',
        'OPTIONS': {
            'flag': True
        }
    }
}

# Two backends sharing the same engine, no options.
MULTI_SETTINGS = {
    'first': {
        'ENGINE': 'track.tests.test_tracker.DummyBackend',
    },
    'second': {
        'ENGINE': 'track.tests.test_tracker.DummyBackend',
    }
}


class TestTrackerInstantiation(TestCase):
    """Test that a helper function can instantiate backends from their name."""
    def setUp(self):
        # pylint: disable=protected-access
        super(TestTrackerInstantiation, self).setUp()
        self.get_backend = tracker._instantiate_backend_from_name

    def test_instatiate_backend(self):
        # Dotted-path name plus options dict resolves to a live instance.
        name = 'track.tests.test_tracker.DummyBackend'
        options = {'flag': True}
        backend = self.get_backend(name, options)

        self.assertIsInstance(backend, DummyBackend)
        self.assertTrue(backend.flag)

    def test_instatiate_backends_with_invalid_values(self):
        # Module paths, missing attributes, bogus packages and classes that
        # are not BaseBackend subclasses must all raise ValueError.
        def get_invalid_backend(name, parameters):
            return self.get_backend(name, parameters)

        options = {}
        name = 'track.backends.logger'
        self.assertRaises(ValueError, get_invalid_backend, name, options)

        name = 'track.backends.logger.Foo'
        self.assertRaises(ValueError, get_invalid_backend, name, options)

        name = 'this.package.does.not.exists'
        self.assertRaises(ValueError, get_invalid_backend, name, options)

        name = 'unittest.TestCase'
        self.assertRaises(ValueError, get_invalid_backend, name, options)


class TestTrackerDjangoInstantiation(TestCase):
    """Test if backends are initialized properly from Django settings."""

    @override_settings(TRACKING_BACKENDS=SIMPLE_SETTINGS)
    def test_django_simple_settings(self):
        """Test configuration of a simple backend."""
        backends = self._reload_backends()
        self.assertEqual(len(backends), 1)
        tracker.send({})
        # Python 2: dict.values() returns a list, so [0] is valid here.
        self.assertEqual(backends.values()[0].count, 1)

    @override_settings(TRACKING_BACKENDS=MULTI_SETTINGS)
    def test_django_multi_settings(self):
        """Test if multiple backends can be configured properly."""
        backends = self._reload_backends().values()
        self.assertEqual(len(backends), 2)

        event_count = 10
        for _ in xrange(event_count):
            tracker.send({})

        # Every configured backend must have seen every event.
        self.assertEqual(backends[0].count, event_count)
        self.assertEqual(backends[1].count, event_count)

    @override_settings(TRACKING_BACKENDS=MULTI_SETTINGS)
    def test_django_remove_settings(self):
        """Test if a backend can be removed by setting it to None."""
        settings.TRACKING_BACKENDS.update({'second': None})
        backends = self._reload_backends()
        self.assertEqual(len(backends), 1)

    def _reload_backends(self):
        # pylint: disable=protected-access
        # Reset backends from the (possibly overridden) Django settings.
        tracker._initialize_backends_from_django_settings()
        return tracker.backends


class DummyBackend(BaseBackend):
    """Test double that just counts how many events it was sent."""
    def __init__(self, **options):
        super(DummyBackend, self).__init__(**options)
        self.flag = options.get('flag', False)   # echoes the OPTIONS entry
        self.count = 0                           # number of send() calls

    def send(self, event):
        self.count += 1
agpl-3.0
mrgloom/h2o-3
h2o-py/tests/testdir_munging/pyunit_impute.py
2
1527
import sys sys.path.insert(1, "../../") import h2o def impute(ip,port): # Connect to a pre-existing cluster prostate = h2o.upload_file(h2o.locate("smalldata/logreg/prostate_missing.csv")) prostate.dim #print "Summary of the data in iris_missing.csv" #print "Each column has 50 missing observations (at random)" #prostate.summary() #print "Make a copy of the original dataset to play with." print "Impute a numeric column with the mean" nas = prostate["DPROS"].isna().sum() print "NAs before imputation: {0}".format(nas) prostate.impute("DPROS", method = "mean") nas = prostate["DPROS"].isna().sum() print "NAs after imputation: {0}".format(nas) # OTHER POSSIBLE SYNTAXES ALLOWED: prostate = h2o.upload_file(h2o.locate("smalldata/logreg/prostate_missing.csv")) prostate.impute(8, method = "mean") prostate = h2o.upload_file(h2o.locate("smalldata/logreg/prostate_missing.csv")) prostate.impute( "VOL", method = "mean") # USING MEDIAN print "Impute a numeric column with the median" prostate = h2o.upload_file(h2o.locate("smalldata/logreg/prostate_missing.csv")) prostate.impute("VOL", method = "median") prostate = h2o.upload_file(h2o.locate("smalldata/logreg/prostate_missing.csv")) prostate.impute(8, method = "median") prostate = h2o.upload_file(h2o.locate("smalldata/logreg/prostate_missing.csv")) prostate.impute("VOL", method = "median") if __name__ == "__main__": h2o.run_test(sys.argv, impute)
apache-2.0
isc-projects/forge
tests/dhcpv4/ddns/test_ddns_subnet.py
1
13831
"""DDNS without TSIG""" # pylint: disable=invalid-name,line-too-long import pytest import misc import srv_control import srv_msg from forge_cfg import world def _resend_ddns(address, exp_result=0): cmd = dict(command="lease4-resend-ddns", arguments={"ip-address": address}) return srv_msg.send_ctrl_cmd(cmd, exp_result=exp_result, channel='socket') def _check_fqdn_record(fqdn, address='', expect='notempty'): # check new DNS entry misc.test_procedure() srv_msg.dns_question_record(fqdn, 'A', 'IN') srv_msg.client_send_dns_query() if expect == 'empty': misc.pass_criteria() srv_msg.send_wait_for_query('MUST') srv_msg.dns_option('ANSWER', expect_include=False) else: misc.pass_criteria() srv_msg.send_wait_for_query('MUST') srv_msg.dns_option('ANSWER') srv_msg.dns_option_content('ANSWER', 'rdata', address) srv_msg.dns_option_content('ANSWER', 'rrname', fqdn) def _check_address_record(fqdn, arpa): misc.test_procedure() srv_msg.dns_question_record(arpa, 'PTR', 'IN') srv_msg.client_send_dns_query() misc.pass_criteria() srv_msg.send_wait_for_query('MUST') srv_msg.dns_option('ANSWER') srv_msg.dns_option_content('ANSWER', 'rdata', fqdn) srv_msg.dns_option_content('ANSWER', 'rrname', arpa) def _get_address(mac, fqdn, address): misc.test_procedure() srv_msg.client_sets_value('Client', 'chaddr', mac) srv_msg.client_send_msg('DISCOVER') misc.pass_criteria() srv_msg.send_wait_for_message('MUST', 'OFFER') srv_msg.response_check_content('yiaddr', address) misc.test_procedure() srv_msg.client_copy_option('server_id') srv_msg.client_does_include_with_value('requested_addr', address) srv_msg.client_sets_value('Client', 'FQDN_domain_name', fqdn) srv_msg.client_sets_value('Client', 'FQDN_flags', 'S') srv_msg.client_does_include('Client', 'fqdn') srv_msg.client_send_msg('REQUEST') misc.pass_criteria() srv_msg.send_wait_for_message('MUST', 'ACK') srv_msg.response_check_content('yiaddr', address) srv_msg.response_check_include_option(81) srv_msg.response_check_option_content(81, 'flags', 1) 
srv_msg.response_check_option_content(81, 'fqdn', fqdn) def _get_address_and_update_ddns(mac=None, fqdn=None, address=None, arpa=None): # checking if record is indeed empty on start _check_fqdn_record(fqdn, expect='empty') # getting new address that should also generate DDNS entry _get_address(mac, fqdn, address) # checking both forward and reverse DNS entries _check_fqdn_record(fqdn, address=address) _check_address_record(fqdn, arpa) @pytest.mark.v4 @pytest.mark.ddns def test_ddns4_subnet(): misc.test_setup() # simple case, ddns configuration in subnet - get and addres and dns entry srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10') world.dhcp_cfg["subnet4"][0].update({"ddns-send-updates": True, "ddns-generated-prefix": "abc", "ddns-qualifying-suffix": "example.com"}) srv_control.shared_subnet('192.168.50.0/24', 0) srv_control.set_conf_parameter_shared_subnet('name', '"name-abc"', 0) srv_control.set_conf_parameter_shared_subnet('interface', '"$(SERVER_IFACE)"', 0) srv_control.add_ddns_server('127.0.0.1', '53001') srv_control.add_ddns_server_options('enable-updates', True) srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY') srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY') srv_control.build_and_send_config_files() srv_control.start_srv('DHCP', 'started') srv_control.start_srv('DNS', 'started', config_set=32) _get_address_and_update_ddns(mac='00:00:00:00:00:01', fqdn='aa.four.example.com.', address='192.168.50.10', arpa='10.50.168.192.in-addr.arpa.') @pytest.mark.v4 @pytest.mark.ddns def test_ddns4_shared_network(): misc.test_setup() # simple case, ddns configuration in shared network - get and addres and dns entry srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10') srv_control.shared_subnet('192.168.50.0/24', 0) srv_control.set_conf_parameter_shared_subnet('name', '"name-abc"', 0) srv_control.set_conf_parameter_shared_subnet('interface', '"$(SERVER_IFACE)"', 0) 
world.dhcp_cfg["shared-networks"][0].update({"ddns-send-updates": True, "ddns-generated-prefix": "abc", "ddns-qualifying-suffix": "example.com"}) srv_control.add_ddns_server('127.0.0.1', '53001') srv_control.add_ddns_server_options('enable-updates', True) srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY') srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY') srv_control.build_and_send_config_files() srv_control.start_srv('DHCP', 'started') srv_control.start_srv('DNS', 'started', config_set=32) _get_address_and_update_ddns(mac='00:00:00:00:00:01', fqdn='aa.four.example.com.', address='192.168.50.10', arpa='10.50.168.192.in-addr.arpa.') @pytest.mark.v4 @pytest.mark.ddns def test_ddns4_gloabl(): misc.test_setup() # simple case, ddns configuration in global - get and addres and dns entry srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10') srv_control.shared_subnet('192.168.50.0/24', 0) srv_control.set_conf_parameter_shared_subnet('name', '"name-abc"', 0) srv_control.set_conf_parameter_shared_subnet('interface', '"$(SERVER_IFACE)"', 0) world.dhcp_cfg.update({"ddns-send-updates": True, "ddns-generated-prefix": "abc", "ddns-qualifying-suffix": "example.com"}) srv_control.add_ddns_server('127.0.0.1', '53001') srv_control.add_ddns_server_options('enable-updates', True) srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY') srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY') srv_control.build_and_send_config_files() srv_control.start_srv('DHCP', 'started') srv_control.start_srv('DNS', 'started', config_set=32) _get_address_and_update_ddns(mac='00:00:00:00:00:01', fqdn='aa.four.example.com.', address='192.168.50.10', arpa='10.50.168.192.in-addr.arpa.') @pytest.mark.v4 @pytest.mark.ddns def test_ddns4_all_levels_resend_command(): misc.test_setup() srv_control.open_control_channel() srv_control.add_hooks('libdhcp_lease_cmds.so') srv_control.config_srv_subnet('192.168.50.0/24', 
'192.168.50.10-192.168.50.10') srv_control.config_srv_another_subnet_no_interface('192.168.51.0/24', '192.168.51.10-192.168.51.10') srv_control.config_srv_another_subnet_no_interface('192.168.52.0/24', '192.168.52.10-192.168.52.10') # let's get 3 different ddns settings, global, shared-network and subnet. world.dhcp_cfg.update({"ddns-send-updates": True, "ddns-generated-prefix": "six", "ddns-qualifying-suffix": "example.com"}) world.dhcp_cfg["subnet4"][1].update({"ddns-send-updates": True, "ddns-generated-prefix": "abc", "ddns-qualifying-suffix": "example.com"}) srv_control.shared_subnet('192.168.50.0/24', 0) srv_control.shared_subnet('192.168.51.0/24', 0) srv_control.shared_subnet('192.168.52.0/24', 0) srv_control.set_conf_parameter_shared_subnet('name', '"name-abc"', 0) srv_control.set_conf_parameter_shared_subnet('interface', '"$(SERVER_IFACE)"', 0) world.dhcp_cfg["shared-networks"][0].update({"ddns-send-updates": True, "ddns-generated-prefix": "xyz", "ddns-qualifying-suffix": "example.com"}) srv_control.add_ddns_server('127.0.0.1', '53001') srv_control.add_ddns_server_options('enable-updates', True) srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY') srv_control.add_forward_ddns('five.example.com.', 'EMPTY_KEY') srv_control.add_forward_ddns('three.example.com.', 'EMPTY_KEY') srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY') srv_control.add_reverse_ddns('51.168.192.in-addr.arpa.', 'EMPTY_KEY') srv_control.add_reverse_ddns('52.168.192.in-addr.arpa.', 'EMPTY_KEY') srv_control.print_cfg() srv_control.print_cfg(service='DDNS') srv_control.build_and_send_config_files() srv_control.start_srv('DHCP', 'started') srv_control.start_srv('DNS', 'started', config_set=32) _get_address_and_update_ddns(mac='ff:ff:ff:ff:ff:01', fqdn='sth4.four.example.com.', address='192.168.50.10', arpa='10.50.168.192.in-addr.arpa.') _get_address_and_update_ddns(mac='ff:ff:ff:ff:ff:02', fqdn='some.five.example.com.', address='192.168.51.10', 
arpa='10.51.168.192.in-addr.arpa.') _get_address_and_update_ddns(mac='ff:ff:ff:ff:ff:03', fqdn='record.three.example.com.', address='192.168.52.10', arpa='10.52.168.192.in-addr.arpa.') # stop bind, remove data files, start bind with empty zones srv_control.start_srv('DNS', 'stopped') srv_control.clear_some_data('all', service='DNS') srv_control.start_srv('DNS', 'started', config_set=32) # check is all records were removed _check_fqdn_record("sth4.four.example.com.", expect='empty') _check_fqdn_record("some.five.example.com.", expect='empty') _check_fqdn_record("record.three.example.com.", expect='empty') response = _resend_ddns('192.168.50.100', exp_result=3) assert response["text"] == "No lease found for: 192.168.50.100" response = _resend_ddns('192.168.50.10', exp_result=0) assert response["text"] == "NCR generated for: 192.168.50.10, hostname: sth4.four.example.com." response = _resend_ddns('192.168.51.10', exp_result=0) assert response["text"] == "NCR generated for: 192.168.51.10, hostname: some.five.example.com." response = _resend_ddns('192.168.52.10', exp_result=0) assert response["text"] == "NCR generated for: 192.168.52.10, hostname: record.three.example.com." 
_check_fqdn_record("sth4.four.example.com.", address='192.168.50.10') _check_fqdn_record("some.five.example.com.", address='192.168.51.10') _check_fqdn_record("record.three.example.com.", address='192.168.52.10') _check_address_record("sth4.four.example.com.", '10.50.168.192.in-addr.arpa.') _check_address_record("some.five.example.com.", '10.51.168.192.in-addr.arpa.') _check_address_record("record.three.example.com.", '10.52.168.192.in-addr.arpa.') @pytest.mark.v4 @pytest.mark.ddns def test_ddns4_all_levels_resend_without_ddns(): misc.test_setup() srv_control.open_control_channel() srv_control.add_hooks('libdhcp_lease_cmds.so') srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10') srv_control.config_srv_another_subnet_no_interface('192.168.51.0/24', '192.168.51.10-192.168.51.10') world.dhcp_cfg["subnet4"][0].update({"ddns-send-updates": True, "ddns-generated-prefix": "six", "ddns-qualifying-suffix": "example.com"}) srv_control.shared_subnet('192.168.50.0/24', 0) srv_control.shared_subnet('192.168.51.0/24', 0) srv_control.set_conf_parameter_shared_subnet('name', '"name-abc"', 0) srv_control.set_conf_parameter_shared_subnet('interface', '"$(SERVER_IFACE)"', 0) srv_control.add_ddns_server('127.0.0.1', '53001') srv_control.add_ddns_server_options('enable-updates', True) srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY') srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY') srv_control.build_and_send_config_files() srv_control.start_srv('DHCP', 'started') srv_control.start_srv('DNS', 'started', config_set=32) _get_address_and_update_ddns(mac='ff:ff:ff:ff:ff:01', fqdn='sth4.four.example.com.', address='192.168.50.10', arpa='10.50.168.192.in-addr.arpa.') _get_address(mac='ff:ff:ff:ff:ff:02', fqdn='sth4.four.example.com.', address='192.168.51.10') _check_fqdn_record("some.five.example.com.", expect='empty') # stop bind, remove data files, start bind with empty zones srv_control.start_srv('DNS', 'stopped') 
srv_control.clear_some_data('all', service='DNS') srv_control.start_srv('DNS', 'started', config_set=32) # check is all records were removed _check_fqdn_record("sth4.four.example.com.", expect='empty') _check_fqdn_record("some.five.example.com.", expect='empty') _check_fqdn_record("record.three.example.com.", expect='empty') response = _resend_ddns('192.168.51.100', exp_result=3) assert response["text"] == "No lease found for: 192.168.51.100" response = _resend_ddns('192.168.50.10', exp_result=0) assert response["text"] == "NCR generated for: 192.168.50.10, hostname: sth4.four.example.com." _check_fqdn_record("sth4.four.example.com.", address='192.168.50.10') _check_address_record("sth4.four.example.com.", '10.50.168.192.in-addr.arpa.') _check_fqdn_record("some.five.example.com.", expect='empty')
isc
Carmezim/tensorflow
tensorflow/contrib/nccl/python/ops/nccl_ops_test.py
68
5752
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nccl ops. See also the cc test for nccl_communicator."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib import nccl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class AllReduceTest(test.TestCase):
  """Checks nccl all-reduce ops against a NumPy reference accumulation."""

  def testAllReduce(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU

    for dtype in [np.float32, np.int32, np.int64, np.float64]:
      # Create session inside outer loop to test use of
      # same communicator across multiple sessions.
      with self.test_session(use_gpu=True) as sess:
        self._testSingleAllReduce(sess, dtype, nccl.all_sum, lambda x, y: x + y)
        self._testSingleAllReduce(sess, dtype, nccl.all_prod,
                                  lambda x, y: x * y)
        self._testSingleAllReduce(sess, dtype, nccl.all_min, np.minimum)
        self._testSingleAllReduce(sess, dtype, nccl.all_max, np.maximum)

  def _testSingleAllReduce(self, sess, np_type, nccl_fn, numpy_accumulation_fn):
    """Runs nccl_fn over random tensors and compares against
    numpy_accumulation_fn folded over the same inputs."""
    for devices in [['/gpu:0', '/gpu:0', '/gpu:0'], ['/gpu:0', '/gpu:0']]:
      shape = (3, 4)
      np_ans = None  # NumPy reference result, built incrementally below
      tensors = []
      for d in devices:
        with ops.device(d):
          t = ((np.random.random_sample(shape) - .5) * 1024).astype(np_type)
          if np_ans is None:
            np_ans = t
          else:
            np_ans = numpy_accumulation_fn(np_ans, t)
          tensors.append(array_ops.identity(t))

      all_reduce_tensors = nccl_fn(tensors)

      # Test shape inference.
      for r in all_reduce_tensors:
        self.assertEqual(shape, r.get_shape())

      # Test execution and results.
      nccl_results = sess.run(all_reduce_tensors)
      for r in nccl_results:
        self.assertAllClose(r, np_ans)

  def testErrors(self):
    # all_sum requires explicit device placement and a non-empty tensor list.
    with self.assertRaisesRegexp(ValueError, 'Device assignment required'):
      nccl.all_sum([array_ops.identity(np.random.random_sample((3, 4)))])
    with self.assertRaisesRegexp(ValueError, 'Must pass >0 tensors'):
      nccl.all_sum([])


class BroadcastTest(test.TestCase):
  """Checks nccl.broadcast delivers the sender's tensor to all receivers."""

  def testBroadcast(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU

    for dtype in [np.float32, np.int32, np.int64, np.float64]:
      # Create session inside outer loop to test use of
      # same communicator across multiple sessions.
      with self.test_session(use_gpu=True) as sess:
        for devices in [['/gpu:0', '/gpu:0', '/gpu:0'], ['/gpu:0', '/gpu:0']]:
          shape = (3, 4)
          # NOTE(review): np.random.randint's upper bound is exclusive, so
          # the last device is never chosen as sender (and with 2 devices
          # the sender is always index 0) — possibly intended to be
          # randint(0, len(devices)). TODO confirm before changing.
          sender = np.random.randint(0, len(devices) - 1)
          with ops.device(devices[sender]):
            np_ans = ((
                (np.random.random_sample(shape) - .5) * 1024).astype(dtype))
            t = array_ops.identity(np_ans)
          other_devices = devices[:sender] + devices[sender + 1:]
          send_op, received_tensors = nccl.broadcast(t, other_devices)

          # Verify shape inference.
          for r in received_tensors:
            self.assertEqual(shape, r.get_shape())

          # Run and verify results.
          nccl_results = sess.run(received_tensors + [send_op])
          for r in nccl_results[:-1]:
            self.assertAllClose(r, np_ans)


class CombinedTest(test.TestCase):
  """Tests using a mix of all-reduce ops in one session.run call."""

  def testCombined(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU

    for dtype in [np.float32, np.int32, np.int64, np.float64]:
      # Create session inside outer loop to test use of
      # same communicator across multiple sessions.
      with self.test_session(use_gpu=True) as sess:
        for devices in [['/gpu:0', '/gpu:0', '/gpu:0'], ['/gpu:0', '/gpu:0']]:
          shape = (3, 4)

          # all-reduce
          np_ans = np.zeros(shape=shape, dtype=dtype)
          tensors = []
          for d in devices:
            with ops.device(d):
              t = ((np.random.random_sample(shape) - .5) * 1024).astype(dtype)
              np_ans += t
              tensors.append(array_ops.identity(t))
          all_reduce_tensors = nccl.all_sum(tensors)

          # Broadcast one of the reduced tensors to the other devices.
          # NOTE(review): same exclusive-upper-bound caveat as in
          # BroadcastTest.testBroadcast above.
          sender = np.random.randint(0, len(devices) - 1)
          other_devices = devices[:sender] + devices[sender + 1:]
          send_op, received_tensors = nccl.broadcast(all_reduce_tensors[sender],
                                                     other_devices)

          # sender doesn't need to be fetched as part of outputs of session.run.
          del all_reduce_tensors[sender]

          # Verify shape inference.
          for r in received_tensors:
            self.assertEqual(shape, r.get_shape())

          # Run and verify results.
          nccl_results = sess.run(
              received_tensors + [send_op] + all_reduce_tensors)
          for r in nccl_results[:len(received_tensors)]:
            self.assertAllClose(r, np_ans)


if __name__ == '__main__':
  test.main()
apache-2.0
piquadrat/django
tests/custom_columns/tests.py
40
4074
from django.core.exceptions import FieldError
from django.test import TestCase

from .models import Article, Author


class CustomColumnsTests(TestCase):
    """Exercises ORM lookups against models whose columns use custom
    ``db_column`` names (e.g. ``Author_ID``): attribute access must use the
    Python field names, never the raw database column names.
    """

    def setUp(self):
        self.a1 = Author.objects.create(first_name="John", last_name="Smith")
        self.a2 = Author.objects.create(first_name="Peter", last_name="Jones")
        self.authors = [self.a1, self.a2]
        self.article = Article.objects.create(headline="Django lets you build Web apps easily", primary_author=self.a1)
        self.article.authors.set(self.authors)

    def test_query_all_available_authors(self):
        self.assertQuerysetEqual(
            Author.objects.all(), [
                "Peter Jones", "John Smith",
            ],
            str
        )

    def test_get_first_name(self):
        self.assertEqual(
            Author.objects.get(first_name__exact="John"),
            self.a1,
        )

    def test_filter_first_name(self):
        self.assertQuerysetEqual(
            Author.objects.filter(first_name__exact="John"), [
                "John Smith",
            ],
            str
        )

    def test_field_error(self):
        # Lookups must resolve Python field names; raw column names such as
        # 'firstname' are rejected with a FieldError listing valid choices.
        msg = (
            "Cannot resolve keyword 'firstname' into field. Choices are: "
            "Author_ID, article, first_name, last_name, primary_set"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Author.objects.filter(firstname__exact="John")

    def test_attribute_error(self):
        # Instance attributes likewise follow field names, not column names.
        with self.assertRaises(AttributeError):
            self.a1.firstname

        with self.assertRaises(AttributeError):
            self.a1.last

    def test_get_all_authors_for_an_article(self):
        self.assertQuerysetEqual(
            self.article.authors.all(), [
                "Peter Jones",
                "John Smith",
            ],
            str
        )

    def test_get_all_articles_for_an_author(self):
        self.assertQuerysetEqual(
            self.a1.article_set.all(), [
                "Django lets you build Web apps easily",
            ],
            lambda a: a.headline
        )

    def test_get_author_m2m_relation(self):
        self.assertQuerysetEqual(
            self.article.authors.filter(last_name='Jones'), [
                "Peter Jones"
            ],
            str
        )

    # NOTE(review): the tests below duplicate earlier coverage
    # (test_author_filtering ~ test_filter_first_name,
    # test_filter_on_nonexistent_field ~ test_field_error) and assert against
    # repr strings like '<Author: ...>', which relies on the historic default
    # transform of assertQuerysetEqual — presumably kept from a test-suite
    # merge; confirm before consolidating.

    def test_author_querying(self):
        self.assertQuerysetEqual(
            Author.objects.all().order_by('last_name'),
            ['<Author: Peter Jones>', '<Author: John Smith>']
        )

    def test_author_filtering(self):
        self.assertQuerysetEqual(
            Author.objects.filter(first_name__exact='John'),
            ['<Author: John Smith>']
        )

    def test_author_get(self):
        self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))

    def test_filter_on_nonexistent_field(self):
        msg = (
            "Cannot resolve keyword 'firstname' into field. Choices are: "
            "Author_ID, article, first_name, last_name, primary_set"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Author.objects.filter(firstname__exact='John')

    def test_author_get_attributes(self):
        a = Author.objects.get(last_name__exact='Smith')
        self.assertEqual('John', a.first_name)
        self.assertEqual('Smith', a.last_name)
        with self.assertRaisesMessage(AttributeError, "'Author' object has no attribute 'firstname'"):
            getattr(a, 'firstname')
        with self.assertRaisesMessage(AttributeError, "'Author' object has no attribute 'last'"):
            getattr(a, 'last')

    def test_m2m_table(self):
        self.assertQuerysetEqual(
            self.article.authors.all().order_by('last_name'),
            ['<Author: Peter Jones>', '<Author: John Smith>']
        )
        self.assertQuerysetEqual(
            self.a1.article_set.all(),
            ['<Article: Django lets you build Web apps easily>']
        )
        self.assertQuerysetEqual(
            self.article.authors.filter(last_name='Jones'),
            ['<Author: Peter Jones>']
        )
bsd-3-clause
ostrokach/bioconda-recipes
recipes/biopet-vcfstats/1.1/biopet-vcfstats.py
48
3367
#!/usr/bin/env python
#
# Wrapper script for starting the biopet-vcfstats JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.

import os
import subprocess
import sys
import shutil

from os import access
from os import getenv
from os import X_OK

jar_file = 'VcfStats-assembly-1.1.jar'

default_jvm_mem_opts = []

# !!! End of parameter section. No user-serviceable code below this line !!!


def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    return os.path.dirname(os.path.realpath(path))


def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers ``$JAVA_HOME/bin/java`` when JAVA_HOME is set and that binary is
    executable; otherwise falls back to plain ``java`` from the PATH.
    """
    java_home = getenv('JAVA_HOME')
    java_bin = os.path.join('bin', 'java')

    if java_home and access(os.path.join(java_home, java_bin), X_OK):
        return os.path.join(java_home, java_bin)
    else:
        return 'java'


def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of lists/values of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)

    ``exec_dir`` is None unless a ``--exec_dir=DIR`` argument was supplied;
    when the directory does not exist it is populated with a copy of the
    script's own installation directory.

    BUGFIX: the original docstring claimed a 3-tuple although the function
    has always returned 4 values.
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            # JVM system properties and advanced options.
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            # JVM memory options (-Xms / -Xmx).
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            # Split only on the first '=' so directory names that contain
            # '=' survive; strip optional surrounding quotes.
            exec_dir = arg.split('=', 1)[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)


def main():
    """Assemble the java command line and exec the wrapped JAR.

    PeptideShaker updates files relative to the path of the jar file. In a
    multiuser setting, the option --exec_dir="exec_dir" can be used as the
    location for the peptide-shaker distribution. If the exec_dir does not
    exist, we copy the jar file, lib, and resources to the exec_dir
    directory.
    """
    java = java_executable()
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])

    # A leading 'eu.*' argument is a fully-qualified main class: use the jar
    # on the classpath instead of launching it directly.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)

    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args

    sys.exit(subprocess.call(java_args))


if __name__ == '__main__':
    main()
mit
mefly2012/platform
src/clean/guizhou_zhaobiao.py
2
1991
# -*- coding: utf-8 -*- import sys reload(sys) sys.setdefaultencoding('utf-8') from common import public import re class guizhou_zhaobiao(): need_check_ziduan = [u'title', u'city', u'pubdate', u'data_sources', u'company_name_invite', u'bidwinning_pubdate' ] def check_title(self, source, ustr): """title 校验""" ret = None if ustr and len(ustr): if ustr and len(ustr): if any(c in u')(' for c in ustr): ret = u'有特殊符号' return ret def check_city(self, source, ustr): """地区 校验""" ret = None if ustr and len(ustr): if ustr != u'贵州': ret = u"city不为贵州" return ret def check_pubdate(self, source, ustr): """发布日期 校验""" ret = None if ustr and len(ustr): if not public.date_format(ustr): ret = u"不合法日期" return ret def check_data_sources(self, source, ustr): """数据来源 校验""" ret = None if ustr and len(ustr): if ustr != u'贵州招中标网': ret = u"不为贵州招中标网" return ret def check_company_name_invite(self, source, ustr): """招标单位名称 校验""" ret = None SPECIAL_STR = ur"[  .。##,,??/、\`~;;•·$¥@!!^…'’‘**%)(]" if ustr and len(ustr): if re.compile(SPECIAL_STR).search(ustr): ret = u'包含特殊字符' return ret def check_bidwinning_pubdate(self, source, ustr): """中标公告发布时间 校验""" ret = None if ustr and len(ustr): if not public.date_format(ustr): ret = u"不合法日期" return ret
apache-2.0
Kerbas-ad-astra/KerbalStuff
KerbalStuff/common.py
4
4169
from flask import session, jsonify, redirect, request, Response, abort
from flask.ext.login import current_user
from KerbalStuff.custom_json import CustomJSONEncoder
from werkzeug.utils import secure_filename
from functools import wraps
from KerbalStuff.objects import User
from KerbalStuff.database import db, Base

import json
# BUGFIX: `import urllib` alone does not guarantee that the `urllib.parse`
# submodule (used below) is importable; import it explicitly.
import urllib.parse
import requests
import xml.etree.ElementTree as ET


def firstparagraph(text):
    """Return the first paragraph of *text*, including its trailing
    blank-line separator; the whole text if it has one paragraph."""
    # BUGFIX: the bare `except:` clauses were narrowed to ValueError, which
    # is what str.index raises on a missing substring; anything else should
    # propagate rather than be silently swallowed.
    try:
        para = text.index("\n\n")
        return text[:para + 2]
    except ValueError:
        try:
            para = text.index("\r\n\r\n")
            return text[:para + 4]
        except ValueError:
            return text


def remainingparagraphs(text):
    """Return everything after the first paragraph separator, or "" when
    *text* is a single paragraph."""
    try:
        para = text.index("\n\n")
        return text[para + 2:]
    except ValueError:
        try:
            para = text.index("\r\n\r\n")
            return text[para + 4:]
        except ValueError:
            return ""


def dumb_object(model):
    """Flatten a SQLAlchemy model (or list of models) into plain dicts of
    its column values, skipping values that are themselves mapped objects."""
    if type(model) is list:
        return [dumb_object(x) for x in model]
    result = {}
    for col in model._sa_class_manager.mapper.mapped_table.columns:
        a = getattr(model, col.name)
        if not isinstance(a, Base):
            result[col.name] = a
    return result


def wrap_mod(mod):
    """Build the template context dict for a mod, or None when the mod has
    no published versions (and therefore nothing to link to)."""
    details = dict()
    details['mod'] = mod
    if len(mod.versions) > 0:
        details['latest_version'] = mod.versions[0]
        details['safe_name'] = secure_filename(mod.name)[:64]
        details['details'] = '/mod/' + str(mod.id) + '/' + secure_filename(mod.name)[:64]
        details['dl_link'] = '/mod/' + str(mod.id) + '/' + secure_filename(mod.name)[:64] + '/download/' + mod.versions[0].friendly_version
    else:
        return None
    return details


def getForumId(user):
    """Look up *user* on the KSP forum's AJAX user search and return the
    first match as {'id', 'name'}, or None when there is no match."""
    r = requests.post("http://forum.kerbalspaceprogram.com/ajax.php?do=usersearch", data={
        'securitytoken': 'guest',
        'do': 'usersearch',
        'fragment': user
    })
    root = ET.fromstring(r.text)
    results = list()
    for child in root:
        results.append({'id': child.attrib['userid'], 'name': child.text})
    if len(results) == 0:
        return None
    return results[0]


def with_session(f):
    """Decorator: commit the DB session on success, roll back (and close)
    on any failure, re-raising the original error."""
    @wraps(f)
    def go(*args, **kw):
        # BaseException (not a bare except) so KeyboardInterrupt etc. still
        # trigger the rollback before propagating.
        try:
            ret = f(*args, **kw)
            db.commit()
            return ret
        except BaseException:
            db.rollback()
            db.close()
            raise
    return go


def loginrequired(f):
    """Decorator: redirect anonymous or email-unconfirmed users to /login.

    A non-empty ``confirmation`` token on the user means the account has
    not yet confirmed its email address.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not current_user or current_user.confirmation:
            return redirect("/login?return_to=" + urllib.parse.quote_plus(request.url))
        else:
            return f(*args, **kwargs)
    return wrapper


def adminrequired(f):
    """Decorator: like loginrequired, but additionally 401s non-admins."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not current_user or current_user.confirmation:
            return redirect("/login?return_to=" + urllib.parse.quote_plus(request.url))
        else:
            if not current_user.admin:
                abort(401)
            return f(*args, **kwargs)
    return wrapper


def json_output(f):
    """Decorator: serialize dict/list return values (optionally with a
    (body, status) tuple) to a JSON response; pass Responses through."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        def jsonify_wrap(obj):
            jsonification = json.dumps(obj, default=CustomJSONEncoder)
            return Response(jsonification, mimetype='application/json')

        result = f(*args, **kwargs)
        if isinstance(result, tuple):
            return jsonify_wrap(result[0]), result[1]
        if isinstance(result, dict):
            return jsonify_wrap(result)
        if isinstance(result, list):
            return jsonify_wrap(result)
        # This is a fully fleshed out response, return it immediately
        return result
    return wrapper


def cors(f):
    """Decorator: when the client sent an ``x-cors-status`` header, fold the
    HTTP status code into the JSON body as ``x-status`` so scripts that
    cannot read cross-origin status codes still see it."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        res = f(*args, **kwargs)
        if request.headers.get('x-cors-status', False):
            if isinstance(res, tuple):
                json_text = res[0].data
                code = res[1]
            else:
                json_text = res.data
                code = 200
            o = json.loads(json_text)
            o['x-status'] = code
            return jsonify(o)
        return res
    return wrapper
mit
espadrine/opera
chromium/src/third_party/chromite/buildbot/configure_repo.py
4
1833
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Adjust a repo checkout's configuration, fixing/extending as needed""" import constants from chromite.lib import cros_build_lib def FixExternalRepoPushUrls(buildroot): """Set up SSH push for cros remote.""" shell_code = """ [ "${REPO_REMOTE}" = "cros" ] || exit 0; git config "remote.${REPO_REMOTE}.pushurl" "%s/${REPO_PROJECT}"; """ % (constants.GERRIT_SSH_URL,) cros_build_lib.RunCommand(['repo', '--time', 'forall', '-c', shell_code], cwd=buildroot) def FixBrokenExistingRepos(buildroot): """Ensure all git configurations are at least syncable and sane.""" cros_build_lib.RunCommand( ['repo', '--time', 'forall', '-c', 'git config --remove-section "url.%s" 2> /dev/null' % constants.GERRIT_SSH_URL], cwd=buildroot, error_code_ok=True) def SetupGerritRemote(buildroot): """Set up gerrit remote with SSH push. This is used by buildbots. If a pushurl is present on the cros remote is present, it will be removed, for ensuring that all consumers in the buildbot have moved to use the cros remote. """ urls = dict(gerrit_url=constants.GERRIT_SSH_URL, gerrit_int_url=constants.GERRIT_INT_SSH_URL) shell_code = """ if ! git config remote.gerrit.url > /dev/null; then if [ "${REPO_REMOTE}" = "cros" ]; then git config --unset-all "remote.${REPO_REMOTE}.pushurl" 2> /dev/null; git remote add gerrit "%(gerrit_url)s/${REPO_PROJECT}" || exit 1 else git remote add gerrit "%(gerrit_int_url)s/${REPO_PROJECT}" || exit 1 fi fi """ % urls cros_build_lib.RunCommand(['repo', '--time', 'forall', '-c', shell_code], cwd=buildroot)
bsd-3-clause
mgorny/PyGithub
tests/GitRef.py
2
3523
# -*- coding: utf-8 -*-

############################ Copyrights and license ############################
#
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net>
# Copyright 2012 Zearin <zearin@gonk.net>
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net>
# Copyright 2016 Jannis Gebauer <ja.geb@me.com>
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com>
# Copyright 2018 sfdye <tsfdye@gmail.com>
#
# This file is part of PyGithub.
# http://pygithub.readthedocs.io/
#
# PyGithub is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################

from . import Framework


class GitRef(Framework.TestCase):
    """Replay-based tests for github.GitRef — Framework.TestCase supplies
    ``self.g``, a Github client that replays recorded HTTP exchanges, so no
    live network access is needed."""

    def setUp(self):
        super().setUp()
        # Fetch the git ref fixture all tests operate on.
        self.ref = (
            self.g.get_user()
            .get_repo("PyGithub")
            .get_git_ref("heads/BranchCreatedByPyGithub")
        )

    def testAttributes(self):
        # Values below must match the recorded replay data for this test.
        self.assertEqual(
            self.ref.object.sha, "1292bf0e22c796e91cc3d6e24b544aece8c21f2a"
        )
        self.assertEqual(self.ref.object.type, "commit")
        self.assertEqual(
            self.ref.object.url,
            "https://api.github.com/repos/jacquev6/PyGithub/git/commits/1292bf0e22c796e91cc3d6e24b544aece8c21f2a",
        )
        self.assertEqual(self.ref.ref, "refs/heads/BranchCreatedByPyGithub")
        self.assertEqual(
            self.ref.url,
            "https://api.github.com/repos/jacquev6/PyGithub/git/refs/heads/BranchCreatedByPyGithub",
        )

        # test __repr__() based on this attributes
        self.assertEqual(
            self.ref.__repr__(), 'GitRef(ref="refs/heads/BranchCreatedByPyGithub")'
        )

    def testEdit(self):
        # Point the ref at a new commit (fast-forward).
        self.ref.edit("04cde900a0775b51f762735637bd30de392a2793")

    def testEditWithForce(self):
        # force=True allows a non-fast-forward update.
        self.ref.edit("4303c5b90e2216d927155e9609436ccb8984c495", force=True)

    def testDelete(self):
        self.ref.delete()
lgpl-3.0
kayone/Wox
PythonHome/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py
1730
1142
from __future__ import absolute_import, division, unicode_literals import re from . import _base from ..constants import rcdataElements, spaceCharacters spaceCharacters = "".join(spaceCharacters) SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) class Filter(_base.Filter): spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) def __iter__(self): preserve = 0 for token in _base.Filter.__iter__(self): type = token["type"] if type == "StartTag" \ and (preserve or token["name"] in self.spacePreserveElements): preserve += 1 elif type == "EndTag" and preserve: preserve -= 1 elif not preserve and type == "SpaceCharacters" and token["data"]: # Test on token["data"] above to not introduce spaces where there were not token["data"] = " " elif not preserve and type == "Characters": token["data"] = collapse_spaces(token["data"]) yield token def collapse_spaces(text): return SPACES_REGEX.sub(' ', text)
mit
glenux/contrib-mypaint
gui/colors/sliders.py
2
9109
# This file is part of MyPaint.
# Copyright (C) 2012 by Andrew Chadwick <andrewc-git@piffle.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

"""Component sliders for power users.
"""

from gui import gtk2compat

import gtk
from gtk import gdk
from gettext import gettext as _

from util import *
from uicolor import *
from bases import IconRenderable
from adjbases import ColorAdjusterWidget
from adjbases import ColorAdjuster
from adjbases import SliderColorAdjuster
from combined import CombinedAdjusterPage


class ComponentSlidersAdjusterPage (CombinedAdjusterPage, IconRenderable):
    """Component sliders for precise adjustment: page for `CombinedAdjuster`.
    """

    # Instance data (defaults, docs)
    __sliders = None  #: List of slider widgets
    __table = None  #: Page table.

    def __init__(self):
        # Build the page: a table of (label, slider) rows — first the three
        # RGB components, then the three HCY components (HSV rows are
        # currently disabled, see commented-out entries below).
        table = gtk.Table(rows=6, columns=2)
        table.set_size_request(100, -1)
        self.__sliders = []
        xpad = 3
        ypad = 3
        # Each entry: (slider class, slider col span, label text, label col
        # span) — column indices for table.attach.
        table_layout = [
            [(RGBRedSlider, 1, 2, 'R', 0, 1),
             (RGBGreenSlider, 1, 2, 'G', 0, 1),
             (RGBBlueSlider, 1, 2, 'B', 0, 1)],
            #[(HSVHueSlider, 1, 2, 'H', 0, 1),
            # (HSVSaturationSlider, 1, 2, 'S', 0, 1),
            # (HSVValueSlider, 1, 2, 'V', 0, 1)],
            [(HCYHueSlider, 1, 2, 'H', 0, 1),
             (HCYChromaSlider, 1, 2, 'C', 0, 1),
             (HCYLumaSlider, 1, 2, 'Y', 0, 1)],
        ]
        row = 0
        for adj_triple in table_layout:
            component_num = 1
            for (slider_class, slider_l, slider_r,
                 label_text, label_l, label_r) in adj_triple:
                yopts = gtk.FILL
                slider = slider_class()
                self.__sliders.append(slider)
                label = gtk.Label()
                label.set_text(label_text)
                label.set_alignment(1.0, 0.5)
                # First/last component of a later group gets wrapped in an
                # Alignment so extra vertical space collects between groups.
                if component_num in (1, 3) and row != 0:
                    yalign = (component_num == 1) and 1 or 0
                    align = gtk.Alignment(xalign=0, yalign=yalign,
                                          xscale=1, yscale=0)
                    align.add(label)
                    label = align
                    align = gtk.Alignment(xalign=0, yalign=yalign,
                                          xscale=1, yscale=0)
                    align.add(slider)
                    slider = align
                    yopts |= gtk.EXPAND
                table.attach(label, label_l, label_r, row, row+1,
                             gtk.SHRINK | gtk.FILL, yopts, xpad, ypad)
                table.attach(slider, slider_l, slider_r, row, row+1,
                             gtk.EXPAND | gtk.SHRINK | gtk.FILL, yopts,
                             xpad, ypad)
                row += 1
                component_num += 1
        self.__table = table

    # NOTE(review): these classmethods name their first argument `self`
    # rather than the conventional `cls`; behaviour is unaffected.
    @classmethod
    def get_page_icon_name(self):
        return 'mypaint-tool-component-sliders'

    @classmethod
    def get_page_title(self):
        return _('Component Sliders')

    @classmethod
    def get_page_description(self):
        return _('Adjust individual components of the color.')

    def get_page_widget(self):
        return self.__table

    def set_color_manager(self, manager):
        # Propagate the manager to every child slider as well.
        ColorAdjuster.set_color_manager(self, manager)
        for slider in self.__sliders:
            slider.set_color_manager(manager)

    def render_as_icon(self, cr, size):
        """Renders as an icon into a Cairo context.
        """
        # Strategy: construct tmp R,G,B sliders with a color that shows off
        # their primary a bit. Render carefully (might need special handling for
        # the 16px size).
        from adjbases import ColorManager
        mgr = ColorManager(prefs={}, datapath=".")
        mgr.set_color(RGBColor(0.3, 0.3, 0.4))
        adjs = [RGBRedSlider(), RGBGreenSlider(), RGBBlueSlider()]
        for adj in adjs:
            adj.set_color_manager(mgr)
        if size <= 16:
            cr.save()
            for adj in adjs:
                adj.BORDER_WIDTH = 1
                adj.render_background_cb(cr, wd=16, ht=5)
                cr.translate(0, 5)
            cr.restore()
        else:
            cr.save()
            bar_ht = int(size/3)
            offset = int((size - bar_ht*3) / 2)
            cr.translate(0, offset)
            for adj in adjs:
                adj.BORDER_WIDTH = max(2, int(size/16))
                adj.render_background_cb(cr, wd=size, ht=bar_ht)
                cr.translate(0, bar_ht)
            cr.restore()
        # Detach the temporary sliders from the manager again.
        for adj in adjs:
            adj.set_color_manager(None)


class RGBRedSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("RGB Red")

    def get_background_validity(self):
        # Presumably a redraw key: the background gradient changes only when
        # the other two components change — confirm in SliderColorAdjuster.
        col = self.get_managed_color()
        r, g, b = col.get_rgb()
        return g, b

    def get_color_for_bar_amount(self, amt):
        col = RGBColor(color=self.get_managed_color())
        col.r = amt
        return col

    def get_bar_amount_for_color(self, col):
        return col.r


class RGBGreenSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("RGB Green")

    def get_background_validity(self):
        col = self.get_managed_color()
        r, g, b = col.get_rgb()
        return r, b

    def get_color_for_bar_amount(self, amt):
        col = RGBColor(color=self.get_managed_color())
        col.g = amt
        return col

    def get_bar_amount_for_color(self, col):
        return col.g


class RGBBlueSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("RGB Blue")

    def get_background_validity(self):
        col = self.get_managed_color()
        r, g, b = col.get_rgb()
        return r, g

    def get_color_for_bar_amount(self, amt):
        col = RGBColor(color=self.get_managed_color())
        col.b = amt
        return col

    def get_bar_amount_for_color(self, col):
        return col.b


class HSVHueSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("HSV Hue")
    samples = 4  # extra gradient samples: hue is not linear in RGB

    def get_color_for_bar_amount(self, amt):
        col = HSVColor(color=self.get_managed_color())
        col.h = amt
        return col

    def get_bar_amount_for_color(self, col):
        return col.h


class HSVSaturationSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("HSV Saturation")

    def get_color_for_bar_amount(self, amt):
        col = HSVColor(color=self.get_managed_color())
        col.s = amt
        return col

    def get_bar_amount_for_color(self, col):
        return col.s


class HSVValueSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("HSV Value")

    def get_color_for_bar_amount(self, amt):
        col = HSVColor(color=self.get_managed_color())
        col.v = amt
        return col

    def get_bar_amount_for_color(self, col):
        return col.v


class HCYHueSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("HCY Hue")
    samples = 4

    def get_color_for_bar_amount(self, amt):
        col = HCYColor(color=self.get_managed_color())
        col.h = amt
        return col

    def get_bar_amount_for_color(self, col):
        col = HCYColor(color=col)
        return col.h


class HCYChromaSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("HCY Chroma")

    def get_color_for_bar_amount(self, amt):
        col = HCYColor(color=self.get_managed_color())
        col.c = amt
        return col

    def get_bar_amount_for_color(self, col):
        col = HCYColor(color=col)
        return col.c


class HCYLumaSlider (SliderColorAdjuster):
    STATIC_TOOLTIP_TEXT = _("HCY Luma (Y')")

    @property
    def samples(self):
        # Sample density scales with the slider's on-screen length, capped
        # at 64.  NOTE(review): `len` shadows the builtin here.
        alloc = self.get_allocation()
        len = self.vertical and alloc.height or alloc.width
        len -= self.BORDER_WIDTH * 2
        return min(int(len / 3), 64)

    def get_color_for_bar_amount(self, amt):
        col = HCYColor(color=self.get_managed_color())
        col.y = amt
        return col

    def get_bar_amount_for_color(self, col):
        col = HCYColor(color=col)
        return col.y

    def get_background_validity(self):
        col = HCYColor(color=self.get_managed_color())
        return int(col.h * 1000), int(col.c * 1000)


if __name__ == '__main__':
    # Standalone mode: with directory arguments, render the page icon into
    # each; with none, show an interactive test window.
    import os
    import sys
    from adjbases import ColorManager
    mgr = ColorManager(prefs={}, datapath=".")
    cs_adj = ComponentSlidersAdjusterPage()
    cs_adj.set_color_manager(mgr)
    cs_adj.set_managed_color(RGBColor(0.3, 0.6, 0.7))
    if len(sys.argv) > 1:
        icon_name = cs_adj.get_page_icon_name()
        for dir_name in sys.argv[1:]:
            cs_adj.save_icon_tree(dir_name, icon_name)
    else:
        # Interactive test
        window = gtk.Window()
        window.add(cs_adj.get_page_widget())
        window.set_title(os.path.basename(sys.argv[0]))
        window.connect("destroy", lambda *a: gtk.main_quit())
        window.show_all()
        gtk.main()
gpl-2.0
alexjc/pylearn2
pylearn2/models/vae/__init__.py
43
22792
""" Variational autoencoder (VAE) implementation, as described in Kingma, D. and Welling, M. Auto-Encoding Variational Bayes `VAE` expects to receive three objects to do its job properly: 1. An instance of `Prior` (`pylearn2.models.vae.prior` module), which handles methods related to the prior distribution :math:`p_\\theta(\\mathbf{z})`. 2. An instance of `Conditional` (`pylearn2.models.vae.conditional` module), which handles methods related to the conditional distribution :math:`p_\\theta(\\mathbf{x} \\mid \\mathbf{z})`. 1. An instance of `Conditional` (`pylearn2.models.vae.conditional` module), which handles methods related to the posterior distribution :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})`. For an example on how to use the VAE framework, see `pylearn2/scripts/tutorials/variational_autoencoder/vae.yaml`. """ __authors__ = "Vincent Dumoulin" __copyright__ = "Copyright 2014, Universite de Montreal" __credits__ = ["Vincent Dumoulin"] __license__ = "3-clause BSD" __maintainer__ = "Vincent Dumoulin" __email__ = "pylearn-dev@googlegroups" import warnings import numpy import theano.tensor as T from pylearn2.compat import OrderedDict from pylearn2.expr.basic import log_sum_exp from pylearn2.models.model import Model from pylearn2.models.vae.kl import find_integrator_for from pylearn2.space import VectorSpace from pylearn2.utils import wraps, sharedX, safe_update from pylearn2.utils.rng import make_np_rng default_seed = 2014 + 9 + 20 pi = sharedX(numpy.pi) class VAE(Model): """ Implementation of the variational autoencoder (VAE). 
Parameters ---------- nvis : int Number of dimensions in the input data prior : pylearn2.models.vae.prior.Prior Represents the prior distribution :math:`p_\\theta(\\mathbf{z})` conditional : pylearn2.models.vae.conditional.Conditional Represents the conditional distribution :math:`p_\\theta(\\mathbf{x} \\mid \\mathbf{z})` posterior : pylearn2.models.vae.conditional.Conditional Represents the posterior distribution :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})` nhid : int Number of dimensions in latent space, i.e. the space in which :math:`z` lives learn_prior : bool, optional Whether to learn the prior distribution p(z). Defaults to `True`. kl_integrator : pylearn2.models.vae.kl.KLIntegrator, optional Object providing methods for computing KL-related quantities. Defaults to `None`, in which case the approximate KL is computed instead. batch_size : int, optional Sometimes required for some MLPs representing encoding/decoding models. Defaults to `None`. seed : int or list of int Seed for the VAE's numpy RNG used by its subcomponents """ def __init__(self, nvis, prior, conditional, posterior, nhid, learn_prior=True, kl_integrator=None, batch_size=None, seed=None): super(VAE, self).__init__() self.__dict__.update(locals()) del self.self self.rng = make_np_rng(self.seed, default_seed, ['uniform', 'randint', 'randn']) self.prior.set_vae(self) self.conditional.set_vae(self) self.posterior.set_vae(self) self.learn_prior = learn_prior # Space initialization self.input_space = VectorSpace(dim=self.nvis) self.input_source = 'features' self.latent_space = VectorSpace(dim=self.nhid) # Parameter initialization self.prior.initialize_parameters(nhid=self.nhid) self.conditional.initialize_parameters( input_space=self.latent_space, ndim=self.nvis ) self.posterior.initialize_parameters( input_space=self.input_space, ndim=self.nhid ) self._params = (self.get_posterior_params() + self.get_conditional_params()) if self.learn_prior: self._params += self.get_prior_params() names = [] for 
param in self._params: if param.name not in names: names.append(param.name) else: raise Exception("no two parameters must share the same name: " + param.name) # Look for the right KLIntegrator if it's not specified if self.kl_integrator is None: self.kl_integrator = find_integrator_for(self.prior, self.posterior) @wraps(Model.get_monitoring_data_specs) def get_monitoring_data_specs(self): return self.input_space, self.input_source @wraps(Model.get_monitoring_channels) def get_monitoring_channels(self, data): """ Notes ----- Monitors quantities related to the approximate posterior parameters phi and the conditional and prior parameters theta. """ space, source = self.get_monitoring_data_specs() space.validate(data) rval = OrderedDict() X = data epsilon_shape = (X.shape[0], self.nhid) epsilon = self.sample_from_epsilon(shape=epsilon_shape) phi = self.encode_phi(X) z = self.sample_from_q_z_given_x(epsilon=epsilon, phi=phi) theta = self.decode_theta(z) X_r = self.means_from_theta(theta) rval["reconstruction_mse"] = T.sqr(X - X_r).mean() posterior_channels = \ self.posterior.monitoring_channels_from_conditional_params(phi) safe_update(rval, posterior_channels) conditional_channels = \ self.conditional.monitoring_channels_from_conditional_params(theta) safe_update(rval, conditional_channels) prior_channels = self.prior.monitoring_channels_from_prior_params() safe_update(rval, prior_channels) return rval @wraps(Model.get_lr_scalers) def get_lr_scalers(self): rval = self.prior.get_lr_scalers() safe_update(rval, self.conditional.get_lr_scalers()) safe_update(rval, self.posterior.get_lr_scalers()) return rval @wraps(Model._modify_updates) def _modify_updates(self, updates): self.prior.modify_updates(updates) self.conditional.modify_updates(updates) self.posterior.modify_updates(updates) @wraps(Model.get_weights) def get_weights(self): return self.posterior.get_weights() def get_conditional_weights(self): """ Returns the weights of the first layer of the decoding network """ 
return self.conditional.get_weights() def get_prior_params(self): """ Returns the model's prior distribution parameters """ return self.prior.get_params() def get_conditional_params(self): """ Returns the model's conditional distribution parameters """ return self.conditional.get_params() def get_posterior_params(self): """ Returns the model's posterior distribution parameters """ return self.posterior.get_params() def sample(self, num_samples, return_sample_means=True, **kwargs): """ Sample from the model's learned distribution Parameters ---------- num_samples : int Number of samples return_sample_means : bool, optional Whether to return the conditional expectations :math:`\\mathbb{E}[p_\\theta(\\mathbf{x} \\mid \\mathbf{h})]` in addition to the actual samples. Defaults to `False`. Returns ------- rval : tensor_like or tuple of tensor_like Samples, and optionally conditional expectations """ # Sample from p(z) z = self.sample_from_p_z(num_samples=num_samples, **kwargs) # Decode theta theta = self.decode_theta(z) # Sample from p(x | z) X = self.sample_from_p_x_given_z(num_samples=num_samples, theta=theta) if return_sample_means: return (X, self.means_from_theta(theta)) else: return X def reconstruct(self, X, noisy_encoding=False, return_sample_means=True): """ Given an input, generates its reconstruction by propagating it through the encoder network and projecting it back through the decoder network. Parameters ---------- X : tensor_like Input to reconstruct noisy_encoding : bool, optional If `True`, sample z from the posterior distribution. If `False`, take the expected value. Defaults to `False`. return_sample_means : bool, optional Whether to return the conditional expectations :math:`\\mathbb{E}[p_\\theta(\\mathbf{x} \\mid \\mathbf{h})]` in addition to the actual samples. Defaults to `False`. 
Returns ------- rval : tensor_like or tuple of tensor_like Samples, and optionally conditional expectations """ # Sample noise # TODO: For now this covers our use cases, but we need something more # robust for the future. epsilon = self.sample_from_epsilon((X.shape[0], self.nhid)) if not noisy_encoding: epsilon *= 0 # Encode q(z | x) parameters phi = self.encode_phi(X) # Compute z z = self.sample_from_q_z_given_x(epsilon=epsilon, phi=phi) # Compute expectation term theta = self.decode_theta(z) reconstructed_X = self.sample_from_p_x_given_z( num_samples=X.shape[0], theta=theta ) if return_sample_means: return (reconstructed_X, self.means_from_theta(theta)) else: return reconstructed_X def log_likelihood_lower_bound(self, X, num_samples, approximate_kl=False, return_individual_terms=False): """ Computes the VAE lower-bound on the marginal log-likelihood of X. Parameters ---------- X : tensor_like Input num_samples : int Number of posterior samples per data point, e.g. number of times z is sampled for each x. approximate_kl : bool, optional Whether to compute a stochastic approximation of the KL divergence term. Defaults to `False`. return_individual_terms : bool, optional If `True`, return `(kl_divergence_term, expectation_term)` instead. Defaults to `False`. 
Returns ------- lower_bound : tensor_like Lower-bound on the marginal log-likelihood """ # Sample noise epsilon_shape = (num_samples, X.shape[0], self.nhid) epsilon = self.sample_from_epsilon(shape=epsilon_shape) # Encode q(z | x) parameters phi = self.encode_phi(X) # Get prior parameters prior_theta = self.get_prior_theta() # Compute z z = self.sample_from_q_z_given_x(epsilon=epsilon, phi=phi) # Compute KL divergence term kl_divergence_term = self.kl_divergence_term( phi=phi, theta=prior_theta, approximate=approximate_kl, epsilon=epsilon ) # Compute expectation term # (z is flattened out in order to be MLP-compatible, and the parameters # output by the decoder network are reshaped to the right shape) z = z.reshape((epsilon.shape[0] * epsilon.shape[1], epsilon.shape[2])) theta = self.decode_theta(z) theta = tuple( theta_i.reshape((epsilon.shape[0], epsilon.shape[1], theta_i.shape[1])) for theta_i in theta ) expectation_term = self.expectation_term( X=X.dimshuffle('x', 0, 1), theta=theta ).mean(axis=0) if return_individual_terms: return (kl_divergence_term, expectation_term) else: return -kl_divergence_term + expectation_term def log_likelihood_approximation(self, X, num_samples): """ Computes the importance sampling approximation to the marginal log-likelihood of X, using the reparametrization trick. Parameters ---------- X : tensor_like Input num_samples : int Number of posterior samples per data point, e.g. number of times z is sampled for each x. 
Returns ------- approximation : tensor_like Approximation on the marginal log-likelihood """ # Sample noise epsilon_shape = (num_samples, X.shape[0], self.nhid) epsilon = self.sample_from_epsilon(shape=epsilon_shape) # Encode q(z | x) parameters phi = self.encode_phi(X) # Compute z z = self.sample_from_q_z_given_x(epsilon=epsilon, phi=phi) # Decode p(x | z) parameters # (z is flattened out in order to be MLP-compatible, and the parameters # output by the decoder network are reshaped to the right shape) flat_z = z.reshape((epsilon.shape[0] * epsilon.shape[1], epsilon.shape[2])) theta = self.decode_theta(flat_z) theta = tuple( theta_i.reshape((epsilon.shape[0], epsilon.shape[1], theta_i.shape[1])) for theta_i in theta ) # Compute log-probabilities log_q_z_x = self.log_q_z_given_x(z=z, phi=phi) log_p_z = self.log_p_z(z) log_p_x_z = self.log_p_x_given_z( X=X.dimshuffle(('x', 0, 1)), theta=theta ) return log_sum_exp( log_p_z + log_p_x_z - log_q_z_x, axis=0 ) - T.log(num_samples) def encode_phi(self, X): """ Maps input `X` to a tuple of parameters of the :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})` posterior distribution Parameters ---------- X : tensor_like Input Returns ------- phi : tuple of tensor_like Tuple of parameters for the posterior distribution """ return self.posterior.encode_conditional_params(X) def decode_theta(self, z): """ Maps latent variable `z` to a tuple of parameters of the :math:`p_\\theta(\\mathbf{x} \\mid \\mathbf{z})` distribution Parameters ---------- z : tensor_like Latent sample Returns ------- theta : tuple of tensor_like Tuple of parameters for the conditional distribution """ return self.conditional.encode_conditional_params(z) def get_prior_theta(self): """ Returns parameters of the prior distribution :math:`p_\\theta(\\mathbf{z})` """ return self.prior.get_params() def means_from_theta(self, theta): """ Given a tuple of parameters of the :math:`p_\\theta(\\mathbf{x} \\mid \\mathbf{z})` distribution, returns the expected value of `x`. 
Parameters ---------- theta : tuple of tensor_like Tuple of parameters for the conditional distribution Returns ------- means : tensor_like Expected value of `x` """ return self.conditional.conditional_expectation(theta) def expectation_term(self, X, theta): """ Computes an approximation of :math:`\\mathrm{E}_{q_\\phi(\\mathbf{z} \\mid \\mathbf{x})} [\\log p_\\theta(\\mathbf{x} \\mid \\mathbf{z})]` Parameters ---------- X : tensor_like Input theta : tuple of tensor_like Tuple of parameters for the conditional distribution Returns ------- expectation_term : tensor_like Expectation term """ return self.log_p_x_given_z(X, theta) def kl_divergence_term(self, phi, theta, approximate=False, epsilon=None): """ Computes the KL-divergence term of the VAE criterion. Parameters ---------- phi : tuple of tensor_like Parameters of the distribution :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})` theta : tuple of tensor_like Parameters of the distribution :math:`p_\\theta(\\mathbf{z})` approximate_kl : bool, optional Whether to compute a stochastic approximation of the KL divergence term. Defaults to `False`. epsilon : tensor_like, optional Noise samples used to compute the approximate KL term. Defaults to `None`. """ if self.kl_integrator is None: warnings.warn("computing the analytical KL divergence term is not " "supported for this prior/posterior combination, " "computing a stochastic approximate KL instead") return self._approximate_kl_divergence_term(phi, epsilon) if approximate: return self._approximate_kl_divergence_term(phi, epsilon) else: return self.kl_integrator.kl_divergence( phi=phi, theta=theta, prior=self.prior, posterior=self.posterior ) def _approximate_kl_divergence_term(self, phi, epsilon): """ Returns a Monte Carlo approximation of the KL divergence term. 
Parameters ---------- phi : tuple of tensor_like Tuple of parameters for the posterior distribution epsilon : tensor_like Noise term from which z is computed """ if epsilon is None: raise ValueError("stochastic KL is requested but no epsilon is " "given") z = self.sample_from_q_z_given_x(epsilon=epsilon, phi=phi) log_q_z_x = self.log_q_z_given_x(z=z, phi=phi) log_p_z = self.log_p_z(z) return (log_q_z_x - log_p_z).mean(axis=0) def per_component_kl_divergence_term(self, phi, theta): """ If the prior/posterior combination allows it, analytically computes the per-latent-dimension KL divergences between the prior distribution :math:`p_\\theta(\\mathbf{z})` and :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})` Parameters ---------- phi : tuple of tensor_like Parameters of the distribution :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})` theta : tuple of tensor_like Parameters of the distribution :math:`p_\\theta(\\mathbf{z})` """ if self.kl_integrator is None: raise NotImplementedError("impossible to compute the analytical " "KL divergence") else: return self.kl_integrator.per_component_kl_divergence( phi=phi, theta=theta, prior=self.prior, posterior=self.posterior ) def sample_from_p_x_given_z(self, num_samples, theta): """ Given a tuple of parameters, samples from the :math:`p_\\theta(\\mathbf{x} \\mid \\mathbf{z})` conditional distribution Parameters ---------- num_samples : int Number of samples theta : tuple of tensor_like Tuple of parameters for the conditional distribution Returns ------- x : tensor_like Samples """ return self.conditional.sample_from_conditional( conditional_params=theta, num_samples=num_samples ) def sample_from_p_z(self, num_samples, **kwargs): """ Samples from the prior distribution :math:`p_\\theta(\\mathbf{z})` Parameters ---------- num_samples : int Number of samples Returns ------- z : tensor_like Sample from the prior distribution """ return self.prior.sample_from_p_z(num_samples, **kwargs) def sample_from_q_z_given_x(self, epsilon, phi): """ 
Given a tuple of parameters and an epsilon noise sample, generates samples from the :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})` posterior distribution using the reparametrization trick Parameters ---------- epsilon : tensor_like Noise sample phi : tuple of tensor_like Tuple of parameters for the posterior distribution Returns ------- z : tensor_like Posterior sample """ return self.posterior.sample_from_conditional( conditional_params=phi, epsilon=epsilon ) def sample_from_epsilon(self, shape): """ Samples from a canonical noise distribution from which posterior samples will be drawn using the reparametrization trick (see `_sample_from_q_z_given_x`) Parameters ---------- shape : tuple of int Shape of the requested samples Returns ------- epsilon : tensor_like Noise samples """ return self.posterior.sample_from_epsilon(shape) def log_p_z(self, z): """ Computes the log-prior probabilities of `z` Parameters ---------- z : tensor_like Posterior samples Returns ------- log_p_z : tensor_like Log-prior probabilities """ return self.prior.log_p_z(z) def log_p_x_given_z(self, X, theta): """ Computes the log-conditional probabilities of `X` Parameters ---------- X : tensor_like Input theta : tuple of tensor_like Tuple of parameters for the contitional distribution Returns ------- log_p_x_z : tensor_like Log-prior probabilities """ return self.conditional.log_conditional(X, theta) def log_q_z_given_x(self, z, phi): """ Computes the log-posterior probabilities of `z` Parameters ---------- z : tensor_like Posterior samples phi : tuple of tensor_like Tuple of parameters for the posterior distribution Returns ------- log_q_z_x : tensor_like Log-posterior probabilities """ return self.posterior.log_conditional(z, phi)
bsd-3-clause
cgimenop/Excel2Testlink
ExcelParser/lib/openpyxl/chartsheet/protection.py
5
1441
from __future__ import absolute_import import hashlib from openpyxl.descriptors import (Bool, Integer, String) from openpyxl.descriptors.excel import Base64Binary from openpyxl.descriptors.serialisable import Serialisable from openpyxl.worksheet.protection import ( hash_password, _Protected ) class ChartsheetProtection(Serialisable, _Protected): tagname = "sheetProtection" algorithmName = String(allow_none=True) hashValue = Base64Binary(allow_none=True) saltValue = Base64Binary(allow_none=True) spinCount = Integer(allow_none=True) content = Bool(allow_none=True) objects = Bool(allow_none=True) __attrs__ = ("content", "objects", "password", "hashValue", "spinCount", "saltValue", "algorithmName") def __init__(self, content=None, objects=None, hashValue=None, spinCount=None, saltValue=None, algorithmName=None, password=None, ): self.content = content self.objects = objects self.hashValue = hashValue self.spinCount = spinCount self.saltValue = saltValue self.algorithmName = algorithmName if password is not None: self.password = password def hash_password(self, password): self.hashValue = hashlib.sha256((self.saltValue + password).encode("utf-8")).hexdigest()
mit
svanschalkwyk/datafari
windows/python/Lib/test/test_sys.py
12
28937
# -*- coding: iso-8859-1 -*- import unittest, test.test_support from test.script_helper import assert_python_ok, assert_python_failure import sys, os, cStringIO import struct import operator class SysModuleTest(unittest.TestCase): def tearDown(self): test.test_support.reap_children() def test_original_displayhook(self): import __builtin__ savestdout = sys.stdout out = cStringIO.StringIO() sys.stdout = out dh = sys.__displayhook__ self.assertRaises(TypeError, dh) if hasattr(__builtin__, "_"): del __builtin__._ dh(None) self.assertEqual(out.getvalue(), "") self.assertTrue(not hasattr(__builtin__, "_")) dh(42) self.assertEqual(out.getvalue(), "42\n") self.assertEqual(__builtin__._, 42) del sys.stdout self.assertRaises(RuntimeError, dh, 42) sys.stdout = savestdout def test_lost_displayhook(self): olddisplayhook = sys.displayhook del sys.displayhook code = compile("42", "<string>", "single") self.assertRaises(RuntimeError, eval, code) sys.displayhook = olddisplayhook def test_custom_displayhook(self): olddisplayhook = sys.displayhook def baddisplayhook(obj): raise ValueError sys.displayhook = baddisplayhook code = compile("42", "<string>", "single") self.assertRaises(ValueError, eval, code) sys.displayhook = olddisplayhook def test_original_excepthook(self): savestderr = sys.stderr err = cStringIO.StringIO() sys.stderr = err eh = sys.__excepthook__ self.assertRaises(TypeError, eh) try: raise ValueError(42) except ValueError, exc: eh(*sys.exc_info()) sys.stderr = savestderr self.assertTrue(err.getvalue().endswith("ValueError: 42\n")) # FIXME: testing the code for a lost or replaced excepthook in # Python/pythonrun.c::PyErr_PrintEx() is tricky. def test_exc_clear(self): self.assertRaises(TypeError, sys.exc_clear, 42) # Verify that exc_info is present and matches exc, then clear it, and # check that it worked. 
def clear_check(exc): typ, value, traceback = sys.exc_info() self.assertTrue(typ is not None) self.assertTrue(value is exc) self.assertTrue(traceback is not None) with test.test_support.check_py3k_warnings(): sys.exc_clear() typ, value, traceback = sys.exc_info() self.assertTrue(typ is None) self.assertTrue(value is None) self.assertTrue(traceback is None) def clear(): try: raise ValueError, 42 except ValueError, exc: clear_check(exc) # Raise an exception and check that it can be cleared clear() # Verify that a frame currently handling an exception is # unaffected by calling exc_clear in a nested frame. try: raise ValueError, 13 except ValueError, exc: typ1, value1, traceback1 = sys.exc_info() clear() typ2, value2, traceback2 = sys.exc_info() self.assertTrue(typ1 is typ2) self.assertTrue(value1 is exc) self.assertTrue(value1 is value2) self.assertTrue(traceback1 is traceback2) # Check that an exception can be cleared outside of an except block clear_check(exc) def test_exit(self): # call with two arguments self.assertRaises(TypeError, sys.exit, 42, 42) # call without argument with self.assertRaises(SystemExit) as cm: sys.exit() self.assertIsNone(cm.exception.code) rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()') self.assertEqual(rc, 0) self.assertEqual(out, b'') self.assertEqual(err, b'') # call with integer argument with self.assertRaises(SystemExit) as cm: sys.exit(42) self.assertEqual(cm.exception.code, 42) # call with tuple argument with one entry # entry will be unpacked with self.assertRaises(SystemExit) as cm: sys.exit((42,)) self.assertEqual(cm.exception.code, 42) # call with string argument with self.assertRaises(SystemExit) as cm: sys.exit("exit") self.assertEqual(cm.exception.code, "exit") # call with tuple argument with two entries with self.assertRaises(SystemExit) as cm: sys.exit((17, 23)) self.assertEqual(cm.exception.code, (17, 23)) # test that the exit machinery handles SystemExits properly # both unnormalized... 
rc, out, err = assert_python_failure('-c', 'raise SystemExit, 46') self.assertEqual(rc, 46) self.assertEqual(out, b'') self.assertEqual(err, b'') # ... and normalized rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)') self.assertEqual(rc, 47) self.assertEqual(out, b'') self.assertEqual(err, b'') def check_exit_message(code, expected, **env_vars): rc, out, err = assert_python_failure('-c', code, **env_vars) self.assertEqual(rc, 1) self.assertEqual(out, b'') self.assertTrue(err.startswith(expected), "%s doesn't start with %s" % (repr(err), repr(expected))) # test that stderr buffer is flushed before the exit message is written # into stderr check_exit_message( r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")', b"unflushed,message") # test that the unicode message is encoded to the stderr encoding check_exit_message( r'import sys; sys.exit(u"h\xe9")', b"h\xe9", PYTHONIOENCODING='latin-1') def test_getdefaultencoding(self): if test.test_support.have_unicode: self.assertRaises(TypeError, sys.getdefaultencoding, 42) # can't check more than the type, as the user might have changed it self.assertIsInstance(sys.getdefaultencoding(), str) # testing sys.settrace() is done in test_sys_settrace.py # testing sys.setprofile() is done in test_sys_setprofile.py def test_setcheckinterval(self): self.assertRaises(TypeError, sys.setcheckinterval) orig = sys.getcheckinterval() for n in 0, 100, 120, orig: # orig last to restore starting state sys.setcheckinterval(n) self.assertEqual(sys.getcheckinterval(), n) def test_recursionlimit(self): self.assertRaises(TypeError, sys.getrecursionlimit, 42) oldlimit = sys.getrecursionlimit() self.assertRaises(TypeError, sys.setrecursionlimit) self.assertRaises(ValueError, sys.setrecursionlimit, -42) sys.setrecursionlimit(10000) self.assertEqual(sys.getrecursionlimit(), 10000) sys.setrecursionlimit(oldlimit) self.assertRaises(OverflowError, sys.setrecursionlimit, 1 << 31) try: sys.setrecursionlimit((1 << 31) - 5) try: 
# issue13546: isinstance(e, ValueError) used to fail # when the recursion limit is close to 1<<31 raise ValueError() except ValueError, e: pass finally: sys.setrecursionlimit(oldlimit) def test_getwindowsversion(self): # Raise SkipTest if sys doesn't have getwindowsversion attribute test.test_support.get_attribute(sys, "getwindowsversion") v = sys.getwindowsversion() self.assertEqual(len(v), 5) self.assertIsInstance(v[0], int) self.assertIsInstance(v[1], int) self.assertIsInstance(v[2], int) self.assertIsInstance(v[3], int) self.assertIsInstance(v[4], str) self.assertRaises(IndexError, operator.getitem, v, 5) self.assertIsInstance(v.major, int) self.assertIsInstance(v.minor, int) self.assertIsInstance(v.build, int) self.assertIsInstance(v.platform, int) self.assertIsInstance(v.service_pack, str) self.assertIsInstance(v.service_pack_minor, int) self.assertIsInstance(v.service_pack_major, int) self.assertIsInstance(v.suite_mask, int) self.assertIsInstance(v.product_type, int) self.assertEqual(v[0], v.major) self.assertEqual(v[1], v.minor) self.assertEqual(v[2], v.build) self.assertEqual(v[3], v.platform) self.assertEqual(v[4], v.service_pack) # This is how platform.py calls it. Make sure tuple # still has 5 elements maj, min, buildno, plat, csd = sys.getwindowsversion() @unittest.skipUnless(hasattr(sys, "setdlopenflags"), 'test needs sys.setdlopenflags()') def test_dlopenflags(self): self.assertTrue(hasattr(sys, "getdlopenflags")) self.assertRaises(TypeError, sys.getdlopenflags, 42) oldflags = sys.getdlopenflags() self.assertRaises(TypeError, sys.setdlopenflags) sys.setdlopenflags(oldflags+1) self.assertEqual(sys.getdlopenflags(), oldflags+1) sys.setdlopenflags(oldflags) def test_refcount(self): # n here must be a global in order for this test to pass while # tracing with a python function. Tracing calls PyFrame_FastToLocals # which will add a copy of any locals to the frame object, causing # the reference count to increase by 2 instead of 1. 
global n self.assertRaises(TypeError, sys.getrefcount) c = sys.getrefcount(None) n = None self.assertEqual(sys.getrefcount(None), c+1) del n self.assertEqual(sys.getrefcount(None), c) if hasattr(sys, "gettotalrefcount"): self.assertIsInstance(sys.gettotalrefcount(), int) def test_getframe(self): self.assertRaises(TypeError, sys._getframe, 42, 42) self.assertRaises(ValueError, sys._getframe, 2000000000) self.assertTrue( SysModuleTest.test_getframe.im_func.func_code \ is sys._getframe().f_code ) # sys._current_frames() is a CPython-only gimmick. def test_current_frames(self): have_threads = True try: import thread except ImportError: have_threads = False if have_threads: self.current_frames_with_threads() else: self.current_frames_without_threads() # Test sys._current_frames() in a WITH_THREADS build. @test.test_support.reap_threads def current_frames_with_threads(self): import threading, thread import traceback # Spawn a thread that blocks at a known place. Then the main # thread does sys._current_frames(), and verifies that the frames # returned make sense. entered_g = threading.Event() leave_g = threading.Event() thread_info = [] # the thread's id def f123(): g456() def g456(): thread_info.append(thread.get_ident()) entered_g.set() leave_g.wait() t = threading.Thread(target=f123) t.start() entered_g.wait() # At this point, t has finished its entered_g.set(), although it's # impossible to guess whether it's still on that line or has moved on # to its leave_g.wait(). self.assertEqual(len(thread_info), 1) thread_id = thread_info[0] d = sys._current_frames() main_id = thread.get_ident() self.assertIn(main_id, d) self.assertIn(thread_id, d) # Verify that the captured main-thread frame is _this_ frame. frame = d.pop(main_id) self.assertTrue(frame is sys._getframe()) # Verify that the captured thread frame is blocked in g456, called # from f123. This is a litte tricky, since various bits of # threading.py are also in the thread's call stack. 
frame = d.pop(thread_id) stack = traceback.extract_stack(frame) for i, (filename, lineno, funcname, sourceline) in enumerate(stack): if funcname == "f123": break else: self.fail("didn't find f123() on thread's call stack") self.assertEqual(sourceline, "g456()") # And the next record must be for g456(). filename, lineno, funcname, sourceline = stack[i+1] self.assertEqual(funcname, "g456") self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"]) # Reap the spawned thread. leave_g.set() t.join() # Test sys._current_frames() when thread support doesn't exist. def current_frames_without_threads(self): # Not much happens here: there is only one thread, with artificial # "thread id" 0. d = sys._current_frames() self.assertEqual(len(d), 1) self.assertIn(0, d) self.assertTrue(d[0] is sys._getframe()) def test_attributes(self): self.assertIsInstance(sys.api_version, int) self.assertIsInstance(sys.argv, list) self.assertIn(sys.byteorder, ("little", "big")) self.assertIsInstance(sys.builtin_module_names, tuple) self.assertIsInstance(sys.copyright, basestring) self.assertIsInstance(sys.exec_prefix, basestring) self.assertIsInstance(sys.executable, basestring) self.assertEqual(len(sys.float_info), 11) self.assertEqual(sys.float_info.radix, 2) self.assertEqual(len(sys.long_info), 2) self.assertTrue(sys.long_info.bits_per_digit % 5 == 0) self.assertTrue(sys.long_info.sizeof_digit >= 1) self.assertEqual(type(sys.long_info.bits_per_digit), int) self.assertEqual(type(sys.long_info.sizeof_digit), int) self.assertIsInstance(sys.hexversion, int) self.assertIsInstance(sys.maxint, int) if test.test_support.have_unicode: self.assertIsInstance(sys.maxunicode, int) self.assertIsInstance(sys.platform, basestring) self.assertIsInstance(sys.prefix, basestring) self.assertIsInstance(sys.version, basestring) vi = sys.version_info self.assertIsInstance(vi[:], tuple) self.assertEqual(len(vi), 5) self.assertIsInstance(vi[0], int) self.assertIsInstance(vi[1], int) self.assertIsInstance(vi[2], 
int) self.assertIn(vi[3], ("alpha", "beta", "candidate", "final")) self.assertIsInstance(vi[4], int) self.assertIsInstance(vi.major, int) self.assertIsInstance(vi.minor, int) self.assertIsInstance(vi.micro, int) self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final")) self.assertIsInstance(vi.serial, int) self.assertEqual(vi[0], vi.major) self.assertEqual(vi[1], vi.minor) self.assertEqual(vi[2], vi.micro) self.assertEqual(vi[3], vi.releaselevel) self.assertEqual(vi[4], vi.serial) self.assertTrue(vi > (1,0,0)) self.assertIsInstance(sys.float_repr_style, str) self.assertIn(sys.float_repr_style, ('short', 'legacy')) def test_43581(self): # Can't use sys.stdout, as this is a cStringIO object when # the test runs under regrtest. self.assertTrue(sys.__stdout__.encoding == sys.__stderr__.encoding) def test_sys_flags(self): self.assertTrue(sys.flags) attrs = ("debug", "py3k_warning", "division_warning", "division_new", "inspect", "interactive", "optimize", "dont_write_bytecode", "no_site", "ignore_environment", "tabcheck", "verbose", "unicode", "bytes_warning", "hash_randomization") for attr in attrs: self.assertTrue(hasattr(sys.flags, attr), attr) self.assertEqual(type(getattr(sys.flags, attr)), int, attr) self.assertTrue(repr(sys.flags)) @test.test_support.cpython_only def test_clear_type_cache(self): sys._clear_type_cache() def test_ioencoding(self): import subprocess env = dict(os.environ) # Test character: cent sign, encoded as 0x4A (ASCII J) in CP424, # not representable in ASCII. 
env["PYTHONIOENCODING"] = "cp424" p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'], stdout = subprocess.PIPE, env=env) out = p.communicate()[0].strip() self.assertEqual(out, unichr(0xa2).encode("cp424")) env["PYTHONIOENCODING"] = "ascii:replace" p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'], stdout = subprocess.PIPE, env=env) out = p.communicate()[0].strip() self.assertEqual(out, '?') def test_call_tracing(self): self.assertEqual(sys.call_tracing(str, (2,)), "2") self.assertRaises(TypeError, sys.call_tracing, str, 2) def test_executable(self): # sys.executable should be absolute self.assertEqual(os.path.abspath(sys.executable), sys.executable) # Issue #7774: Ensure that sys.executable is an empty string if argv[0] # has been set to an non existent program name and Python is unable to # retrieve the real program name import subprocess # For a normal installation, it should work without 'cwd' # argument. For test runs in the build directory, see #7774. python_dir = os.path.dirname(os.path.realpath(sys.executable)) p = subprocess.Popen( ["nonexistent", "-c", 'import sys; print repr(sys.executable)'], executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir) executable = p.communicate()[0].strip() p.wait() self.assertIn(executable, ["''", repr(sys.executable)]) @test.test_support.cpython_only class SizeofTest(unittest.TestCase): def setUp(self): self.P = struct.calcsize('P') self.longdigit = sys.long_info.sizeof_digit import _testcapi self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD self.file = open(test.test_support.TESTFN, 'wb') def tearDown(self): self.file.close() test.test_support.unlink(test.test_support.TESTFN) check_sizeof = test.test_support.check_sizeof def test_gc_head_size(self): # Check that the gc header size is added to objects tracked by the gc. 
size = test.test_support.calcobjsize gc_header_size = self.gc_headsize # bool objects are not gc tracked self.assertEqual(sys.getsizeof(True), size('l')) # but lists are self.assertEqual(sys.getsizeof([]), size('P PP') + gc_header_size) def test_errors(self): class BadSizeof(object): def __sizeof__(self): raise ValueError self.assertRaises(ValueError, sys.getsizeof, BadSizeof()) class InvalidSizeof(object): def __sizeof__(self): return None self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof()) sentinel = ["sentinel"] self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel) class OverflowSizeof(long): def __sizeof__(self): return int(self) self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)), sys.maxsize + self.gc_headsize) with self.assertRaises(OverflowError): sys.getsizeof(OverflowSizeof(sys.maxsize + 1)) with self.assertRaises(ValueError): sys.getsizeof(OverflowSizeof(-1)) with self.assertRaises((ValueError, OverflowError)): sys.getsizeof(OverflowSizeof(-sys.maxsize - 1)) def test_default(self): size = test.test_support.calcobjsize self.assertEqual(sys.getsizeof(True, -1), size('l')) def test_objecttypes(self): # check all types defined in Objects/ size = test.test_support.calcobjsize vsize = test.test_support.calcvobjsize check = self.check_sizeof # bool check(True, size('l')) # buffer with test.test_support.check_py3k_warnings(): check(buffer(''), size('2P2Pil')) # builtin_function_or_method check(len, size('3P')) # bytearray samples = ['', 'u'*100000] for sample in samples: x = bytearray(sample) check(x, vsize('iPP') + x.__alloc__()) # bytearray_iterator check(iter(bytearray()), size('PP')) # cell def get_cell(): x = 42 def inner(): return x return inner check(get_cell().func_closure[0], size('P')) # classobj (old-style class) class class_oldstyle(): def method(): pass check(class_oldstyle, size('7P')) # instance (old-style class) check(class_oldstyle(), size('3P')) # instancemethod (old-style class) check(class_oldstyle().method, 
size('4P')) # complex check(complex(0,1), size('2d')) # code check(get_cell().func_code, size('4i8Pi3P')) # BaseException check(BaseException(), size('3P')) # UnicodeEncodeError check(UnicodeEncodeError("", u"", 0, 0, ""), size('5P2PP')) # UnicodeDecodeError check(UnicodeDecodeError("", "", 0, 0, ""), size('5P2PP')) # UnicodeTranslateError check(UnicodeTranslateError(u"", 0, 1, ""), size('5P2PP')) # method_descriptor (descriptor object) check(str.lower, size('2PP')) # classmethod_descriptor (descriptor object) # XXX # member_descriptor (descriptor object) import datetime check(datetime.timedelta.days, size('2PP')) # getset_descriptor (descriptor object) import __builtin__ check(__builtin__.file.closed, size('2PP')) # wrapper_descriptor (descriptor object) check(int.__add__, size('2P2P')) # dictproxy class C(object): pass check(C.__dict__, size('P')) # method-wrapper (descriptor object) check({}.__iter__, size('2P')) # dict check({}, size('3P2P' + 8*'P2P')) x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8} check(x, size('3P2P' + 8*'P2P') + 16*struct.calcsize('P2P')) # dictionary-keyiterator check({}.iterkeys(), size('P2PPP')) # dictionary-valueiterator check({}.itervalues(), size('P2PPP')) # dictionary-itemiterator check({}.iteritems(), size('P2PPP')) # ellipses check(Ellipsis, size('')) # EncodingMap import codecs, encodings.iso8859_3 x = codecs.charmap_build(encodings.iso8859_3.decoding_table) check(x, size('32B2iB')) # enumerate check(enumerate([]), size('l3P')) # file check(self.file, size('4P2i4P3i3P3i')) # float check(float(0), size('d')) # sys.floatinfo check(sys.float_info, vsize('') + self.P * len(sys.float_info)) # frame import inspect CO_MAXBLOCKS = 20 x = inspect.currentframe() ncells = len(x.f_code.co_cellvars) nfrees = len(x.f_code.co_freevars) extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\ ncells + nfrees - 1 check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P')) # function def func(): pass check(func, size('9P')) class c(): @staticmethod 
def foo(): pass @classmethod def bar(cls): pass # staticmethod check(foo, size('P')) # classmethod check(bar, size('P')) # generator def get_gen(): yield 1 check(get_gen(), size('Pi2P')) # integer check(1, size('l')) check(100, size('l')) # iterator check(iter('abc'), size('lP')) # callable-iterator import re check(re.finditer('',''), size('2P')) # list samples = [[], [1,2,3], ['1', '2', '3']] for sample in samples: check(sample, vsize('PP') + len(sample)*self.P) # sortwrapper (list) # XXX # cmpwrapper (list) # XXX # listiterator (list) check(iter([]), size('lP')) # listreverseiterator (list) check(reversed([]), size('lP')) # long check(0L, vsize('')) check(1L, vsize('') + self.longdigit) check(-1L, vsize('') + self.longdigit) PyLong_BASE = 2**sys.long_info.bits_per_digit check(long(PyLong_BASE), vsize('') + 2*self.longdigit) check(long(PyLong_BASE**2-1), vsize('') + 2*self.longdigit) check(long(PyLong_BASE**2), vsize('') + 3*self.longdigit) # module check(unittest, size('P')) # None check(None, size('')) # object check(object(), size('')) # property (descriptor object) class C(object): def getx(self): return self.__x def setx(self, value): self.__x = value def delx(self): del self.__x x = property(getx, setx, delx, "") check(x, size('4Pi')) # PyCObject # PyCapsule # XXX # rangeiterator check(iter(xrange(1)), size('4l')) # reverse check(reversed(''), size('PP')) # set # frozenset PySet_MINSIZE = 8 samples = [[], range(10), range(50)] s = size('3P2P' + PySet_MINSIZE*'lP' + 'lP') for sample in samples: minused = len(sample) if minused == 0: tmp = 1 # the computation of minused is actually a bit more complicated # but this suffices for the sizeof test minused = minused*2 newsize = PySet_MINSIZE while newsize <= minused: newsize = newsize << 1 if newsize <= 8: check(set(sample), s) check(frozenset(sample), s) else: check(set(sample), s + newsize*struct.calcsize('lP')) check(frozenset(sample), s + newsize*struct.calcsize('lP')) # setiterator check(iter(set()), 
size('P3P')) # slice check(slice(1), size('3P')) # str vh = test.test_support._vheader check('', struct.calcsize(vh + 'lic')) check('abc', struct.calcsize(vh + 'lic') + 3) # super check(super(int), size('3P')) # tuple check((), vsize('')) check((1,2,3), vsize('') + 3*self.P) # tupleiterator check(iter(()), size('lP')) # type # (PyTypeObject + PyNumberMethods + PyMappingMethods + # PySequenceMethods + PyBufferProcs) s = vsize('P2P15Pl4PP9PP11PI') + struct.calcsize('41P 10P 3P 6P') class newstyleclass(object): pass check(newstyleclass, s) # builtin type check(int, s) # NotImplementedType import types check(types.NotImplementedType, s) # unicode usize = len(u'\0'.encode('unicode-internal')) samples = [u'', u'1'*100] # we need to test for both sizes, because we don't know if the string # has been cached for s in samples: check(s, size('PPlP') + usize * (len(s) + 1)) # weakref import weakref check(weakref.ref(int), size('2Pl2P')) # weakproxy # XXX # weakcallableproxy check(weakref.proxy(int), size('2Pl2P')) # xrange check(xrange(1), size('3l')) check(xrange(66000), size('3l')) def test_pythontypes(self): # check all types defined in Python/ size = test.test_support.calcobjsize vsize = test.test_support.calcvobjsize check = self.check_sizeof # _ast.AST import _ast check(_ast.AST(), size('')) # imp.NullImporter import imp check(imp.NullImporter(self.file.name), size('')) try: raise TypeError except TypeError: tb = sys.exc_info()[2] # traceback if tb != None: check(tb, size('2P2i')) # symtable entry # XXX # sys.flags check(sys.flags, vsize('') + self.P * len(sys.flags)) def test_main(): test_classes = (SysModuleTest, SizeofTest) test.test_support.run_unittest(*test_classes) if __name__ == "__main__": test_main()
apache-2.0
nomadcube/scikit-learn
doc/tutorial/machine_learning_map/parse_path.py
275
7084
#!/usr/local/bin/python
"""
Based on:
    http://wxpsvg.googlecode.com/svn/trunk/svg/pathdata.py
According to that project, this file is licensed under the LGPL.

Defines a pyparsing grammar for the SVG <path> "d" attribute and a helper,
get_points(), that extracts point lists from the M/L/C subpath commands.
"""
try:
    from pyparsing import (ParserElement, Literal, Word, CaselessLiteral,
                           Optional, Combine, Forward, ZeroOrMore, nums, oneOf,
                           Group, ParseException, OneOrMore)
except ImportError:
    import sys
    sys.exit("pyparsing is required")


#ParserElement.enablePackrat()

def Command(char):
    """ Case insensitive but case preserving"""
    return CaselessPreservingLiteral(char)


def Arguments(token):
    """Wrap a command's argument list in a Group so it parses as one node."""
    return Group(token)


class CaselessPreservingLiteral(CaselessLiteral):
    """ Like CaselessLiteral, but returns the match as found
        instead of as defined.
    """

    def __init__(self, matchString):
        super(CaselessPreservingLiteral, self).__init__(matchString.upper())
        self.name = "'%s'" % matchString
        self.errmsg = "Expected " + self.name
        # NOTE(review): self.myException is an attribute of older pyparsing
        # releases; newer pyparsing builds exceptions differently — confirm
        # the pinned pyparsing version if upgrading.
        self.myException.msg = self.errmsg

    def parseImpl(self, instring, loc, doActions=True):
        test = instring[loc:loc + self.matchLen]
        if test.upper() == self.match:
            # Return the text exactly as it appeared in the input
            # (case preserved), not the canonical upper-case form.
            return loc + self.matchLen, test
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc


def Sequence(token):
    """ A sequence of the token"""
    return OneOrMore(token + maybeComma)

digit_sequence = Word(nums)

sign = oneOf("+ -")


def convertToFloat(s, loc, toks):
    """Parse action: convert the matched text to a float.

    Raises ParseException with the correct (pstr, loc, msg) argument
    order when the text is not a valid float.
    """
    try:
        return float(toks[0])
    except ValueError:
        # Fixed two defects: the bare ``except:`` swallowed every exception,
        # and ParseException was called as (loc, msg) although its signature
        # is (pstr, loc, msg) — the location landed in the string slot and
        # the message was lost.
        raise ParseException(s, loc, "invalid float format %s" % toks[0])

exponent = CaselessLiteral("e") + Optional(sign) + Word(nums)

#note that almost all these fields are optional,
#and this can match almost anything. We rely on Pythons built-in
#float() function to clear out invalid values - loosely matching like this
#speeds up parsing quite a lot
floatingPointConstant = Combine(
    Optional(sign) +
    Optional(Word(nums)) +
    Optional(Literal(".") + Optional(Word(nums))) +
    Optional(exponent)
)
floatingPointConstant.setParseAction(convertToFloat)

number = floatingPointConstant

#same as FP constant but don't allow a - sign
nonnegativeNumber = Combine(
    Optional(Word(nums)) +
    Optional(Literal(".") + Optional(Word(nums))) +
    Optional(exponent)
)
nonnegativeNumber.setParseAction(convertToFloat)

coordinate = number

#comma or whitespace can separate values all over the place in SVG
maybeComma = Optional(Literal(',')).suppress()

coordinateSequence = Sequence(coordinate)

coordinatePair = (coordinate + maybeComma + coordinate).setParseAction(lambda t: tuple(t))
coordinatePairSequence = Sequence(coordinatePair)

coordinatePairPair = coordinatePair + maybeComma + coordinatePair
coordinatePairPairSequence = Sequence(Group(coordinatePairPair))

coordinatePairTriple = coordinatePair + maybeComma + coordinatePair + maybeComma + coordinatePair
coordinatePairTripleSequence = Sequence(Group(coordinatePairTriple))

#commands
lineTo = Group(Command("L") + Arguments(coordinatePairSequence))

curve = Group(Command("C") + Arguments(coordinatePairSequence))

moveTo = Group(Command("M") + Arguments(coordinatePairSequence))

closePath = Group(Command("Z")).setParseAction(lambda t: ('Z', (None,)))

flag = oneOf("1 0").setParseAction(lambda t: bool(int((t[0]))))

arcRadius = (
    nonnegativeNumber + maybeComma +  #rx
    nonnegativeNumber  #ry
).setParseAction(lambda t: tuple(t))

arcFlags = (flag + maybeComma + flag).setParseAction(lambda t: tuple(t))

ellipticalArcArgument = Group(
    arcRadius + maybeComma +  #rx, ry
    number + maybeComma +  #rotation
    arcFlags +  #large-arc-flag, sweep-flag
    coordinatePair  #(x,y)
)

ellipticalArc = Group(Command("A") + Arguments(Sequence(ellipticalArcArgument)))

smoothQuadraticBezierCurveto = Group(Command("T") + Arguments(coordinatePairSequence))

quadraticBezierCurveto = Group(Command("Q") + Arguments(coordinatePairPairSequence))

smoothCurve = Group(Command("S") + Arguments(coordinatePairPairSequence))

#curve = Group(Command("C") + Arguments(coordinatePairTripleSequence))

horizontalLine = Group(Command("H") + Arguments(coordinateSequence))
verticalLine = Group(Command("V") + Arguments(coordinateSequence))

drawToCommand = (
    lineTo | moveTo | closePath | ellipticalArc | smoothQuadraticBezierCurveto |
    quadraticBezierCurveto | smoothCurve | curve | horizontalLine | verticalLine
)

#~ number.debug = True

moveToDrawToCommands = moveTo + ZeroOrMore(drawToCommand)

path = ZeroOrMore(moveToDrawToCommands)
path.keepTabs = True


def get_points(d):
    """Return a list of point lists, one list per M-started subpath in *d*.

    Only M/m, L/l and C/c commands contribute points; all other commands
    parsed from the path string are ignored by this helper.
    """
    commands = path.parseString(d)
    points = []
    currentset = None
    for command in commands:
        if command[0] == 'M' or command[0] == 'm':
            # A move starts a new subpath; record its end point.
            currentset = []
            points.append(currentset)
            currentset.append(command[1][-1])
        elif command[0] == 'L' or command[0] == 'l':
            currentset.extend(command[1])
        elif command[0] == 'C' or command[0] == 'c':
            currentset.extend(command[1])
    return points

if __name__ == "__main__":
    # Smoke-test the grammar on a multi-subpath sample.
    # (print() call form works on both Python 2 and 3.)
    print(path.parseString("M 242.96145,653.59282 L 244.83646,650.1553 L 247.02397,649.8428 L 247.33647,650.62405 L 245.30521,653.59282 L 242.96145,653.59282 z M 252.80525,649.99905 L 258.74278,652.49906 L 260.77404,652.18656 L 262.33654,648.43654 L 261.71154,645.15528 L 257.64902,644.68653 L 253.74275,646.40528 L 252.80525,649.99905 z M 282.49289,659.6866 L 286.08665,664.99912 L 288.43041,664.68662 L 289.52417,664.21787 L 290.93042,665.46787 L 294.52419,665.31162 L 295.4617,663.90537 L 292.64918,662.18661 L 290.77417,658.59284 L 288.74291,655.15533 L 283.11789,657.96784 L 282.49289,659.6866 z M 302.02423,668.28039 L 303.27423,666.40538 L 307.8055,667.34288 L 308.43051,666.87413 L 314.36803,667.49913 L 314.05553,668.74914 L 311.55552,670.15539 L 307.33675,669.84289 L 302.02423,668.28039 z M 307.1805,673.28041 L 309.05551,677.03043 L 312.02427,675.93667 L 312.33677,674.37416 L 310.77427,672.3429 L 307.1805,672.0304 L 307.1805,673.28041 z M 313.89928,672.18665 L 316.08679,669.37414 L 320.61806,671.7179 L 324.83683,672.81166 L 329.0556,675.46792 L 329.0556,677.34293 L 325.61809,679.06169 L 320.93056,679.99919 L 318.5868,678.59293 L 313.89928,672.18665 z M 329.99311,687.18672 L 331.55561,685.93672 L 334.83688,687.49923 L 342.18066,690.93674 L 345.46193,692.968 L 347.02443,695.31176 L 348.89944,699.53053 L 352.80571,702.03054 L 352.49321,703.28055 L 348.74319,706.40556 L 344.68067,707.81182 L 343.27442,707.18682 L 340.30565,708.90557 L 337.96189,712.03059 L 335.77438,714.8431 L 334.05562,714.68685 L 330.61811,712.18684 L 330.30561,707.81182 L 330.93061,705.46806 L 329.3681,699.99928 L 327.33684,698.28052 L 327.18059,695.78051 L 329.3681,694.84301 L 331.39936,691.87425 L 331.86811,690.93674 L 330.30561,689.21798 L 329.99311,687.18672 z "))
bsd-3-clause
micronpn/sequanto-automation
generator/lib/sequanto_automation/codeparsers/c.py
1
4328
import types
import re

from sequanto_automation.codeparser import ICodeParser, Function, Parameter, Enum


class CodeParser ( ICodeParser ):
    """Regex-based parser extracting C function prototypes and enum
    definitions (both plain ``enum`` and ``typedef enum``) from source text.

    Parsed functions and enums are kept in two dicts keyed by name and
    exposed through the has/get/list accessor methods.
    """

    # Building blocks for the prototype/enum regexes below.
    RE_IDENTIFIER = '[A-Za-z_][A-Za-z0-9_]*'
    RE_TYPE = '((const|unsigned|signed)\s+)*%s(\s+|\s*((\\*|\\[\\])\s*)+)(const\s+)?' % RE_IDENTIFIER
    RE_PARAMETER = '((?P<parameterType>%s)\s*(?P<parameterName>%s))' % (RE_TYPE, RE_IDENTIFIER)
    RE_PARAMETER_COMPILED = re.compile(RE_PARAMETER)
    RE_FUNCTION = re.compile ( '(?P<returnType>%(type)s)(?P<name>%(identifier)s)\s*\\(\s*(?P<parameters>((%(parameter)s\s*,?\s*)*)|void)\s*\\)' % {'identifier' : RE_IDENTIFIER,
                                                                                                                                                   'type' : RE_TYPE,
                                                                                                                                                   'parameter' : RE_PARAMETER} )
    RE_TYPE_PARTS = re.compile('const|void|\\[|\\]|\\*|%s' % RE_IDENTIFIER)
    RE_BRACKET_PAIRS_WITH_SPACE = re.compile('\\[\s+\\]')
    RE_ENUM = re.compile ( 'enum\s+(?P<name>%s)\s*\{\s*(?P<values>.*?)\s*\}' % RE_IDENTIFIER, re.MULTILINE + re.DOTALL )
    RE_TYPEDEF_ENUM = re.compile ( 'typedef enum\s+(?P<name>%s)\s*\{\s*(?P<values>.*?)\s*\}\s*(?P<typedef_name>%s)\s*;' % (RE_IDENTIFIER, RE_IDENTIFIER), re.MULTILINE + re.DOTALL )
    RE_ENUM_VALUE = re.compile ( '^\s*(?P<name>%s)(\s*=\s*(?P<value>\-?[0-9]+))?\s*$' % RE_IDENTIFIER )

    def __init__ ( self ):
        self.clear()

    def cleanType ( self, _type ):
        """Normalize a C type string to single-space-separated tokens,
        collapsing '[ ]' pairs to '[]'.
        """
        _type = self.RE_TYPE_PARTS.findall ( _type )
        if type(_type) in types.StringTypes:
            # NOTE(review): re.findall always returns a list, so this branch
            # appears unreachable; kept verbatim for backward compatibility.
            return self.RE_BRACKET_PAIRS_WITH_SPACE.sub ( '[]', _type )
        else:
            return self.RE_BRACKET_PAIRS_WITH_SPACE.sub ( '[]', ' '.join ( _type ) )

    def _parseEnumValues ( self, values_string ):
        """Parse the comma-separated body of an enum into a list of
        (name, value-or-None) tuples. Raises Exception on unparseable
        entries.
        """
        values_string = [s.strip() for s in values_string.split(',') if len(s.strip()) > 0]
        values = []
        for value_string in values_string:
            value_match = self.RE_ENUM_VALUE.match ( value_string )
            if value_match:
                if value_match.group('value') != None:
                    values.append ( (value_match.group('name'), value_match.group('value')) )
                else:
                    values.append ( (value_match.group('name'), None) )
            else:
                # Fixed: the original message referenced 'name' and 'match',
                # neither of which exists in this scope, so the intended
                # diagnostic was replaced by a NameError at raise time.
                raise Exception ( 'Unparseable enum value string: "%s"' % value_string )
        return values

    def parse ( self, _input ):
        """Scan *_input* for function prototypes and enums, populating
        self.m_functions and self.m_enums.
        """
        for match in self.RE_FUNCTION.finditer ( _input ):
            returnType = self.cleanType(match.group('returnType'))
            name = match.group('name')

            parameters = match.group('parameters').strip()
            if parameters == '' or parameters == 'void':
                parameters = []
            else:
                parameterList = []
                for parameter in parameters.split(','):
                    # Use a distinct name so the outer loop's ``match`` is
                    # not clobbered while splitting parameters.
                    parameter_match = self.RE_PARAMETER_COMPILED.match ( parameter.strip() )
                    parameterObject = Parameter(parameter_match.group('parameterName'),
                                                self.cleanType(parameter_match.group('parameterType')))
                    parameterList.append ( parameterObject )
                parameters = parameterList

            function = Function ( name, returnType, parameters )
            self.m_functions[name] = function

        for match in self.RE_ENUM.finditer ( _input ):
            name = match.group('name')
            values = self._parseEnumValues ( match.group('values') )
            self.m_enums[name] = Enum(name, values)

        for match in self.RE_TYPEDEF_ENUM.finditer ( _input ):
            # A typedef'ed enum is registered under its typedef name.
            name = match.group('typedef_name')
            values = self._parseEnumValues ( match.group('values') )
            self.m_enums[name] = Enum(name, values)

    def hasFunction ( self, _name ):
        return _name in self.m_functions

    def getFunction ( self, _name ):
        return self.m_functions[_name]

    def listFunctions ( self ):
        return self.m_functions.items()

    def hasEnum ( self, _name ):
        return _name in self.m_enums

    def getEnum ( self, _name ):
        return self.m_enums[_name]

    def listEnums ( self ):
        return self.m_enums.items()

    def clear ( self ):
        """Reset parser state, discarding previously parsed results."""
        self.m_functions = {}
        self.m_enums = {}
apache-2.0
miloharper/neural-network-animation
matplotlib/tests/test_figure.py
9
4546
"""Tests for matplotlib.figure: figure labels/numbers, gca() projection
handling, suptitle, alpha-blended backgrounds, the too-many-figures
warning, and projection arguments that define __getitem__.
"""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import six
from six.moves import xrange

from nose.tools import assert_equal, assert_true, assert_raises
from matplotlib.testing.decorators import image_comparison, cleanup
from matplotlib.axes import Axes
import matplotlib.pyplot as plt


@cleanup
def test_figure_label():
    # pyplot figure creation, selection and closing with figure label and
    # number
    plt.close('all')
    plt.figure('today')          # label 'today' -> number 1
    plt.figure(3)
    plt.figure('tomorrow')       # label 'tomorrow' -> number 4
    plt.figure()                 # unlabeled -> number 5
    plt.figure(0)
    plt.figure(1)                # reselects 'today'
    plt.figure(3)
    assert_equal(plt.get_fignums(), [0, 1, 3, 4, 5])
    assert_equal(plt.get_figlabels(), ['', 'today', '', 'tomorrow', ''])
    # Closing a nonexistent number (10) must be a silent no-op.
    plt.close(10)
    plt.close()                  # closes current figure (3)
    plt.close(5)
    plt.close('tomorrow')        # close by label
    assert_equal(plt.get_fignums(), [0, 1])
    assert_equal(plt.get_figlabels(), ['', 'today'])


@image_comparison(baseline_images=['figure_today'])
def test_figure():
    # named figure support
    fig = plt.figure('today')
    ax = fig.add_subplot(111)
    ax.set_title(fig.get_label())
    ax.plot(list(xrange(5)))
    # plot red line in a different figure.
    plt.figure('tomorrow')
    plt.plot([0, 1], [1, 0], 'r')
    # Return to the original; make sure the red line is not there.
    plt.figure('today')
    plt.close('tomorrow')


@cleanup
def test_gca():
    fig = plt.figure()

    # A full-figure axes becomes the current axes for matching and
    # argument-free gca() calls.
    ax1 = fig.add_axes([0, 0, 1, 1])
    assert_true(fig.gca(projection='rectilinear') is ax1)
    assert_true(fig.gca() is ax1)

    ax2 = fig.add_subplot(121, projection='polar')
    assert_true(fig.gca() is ax2)
    assert_true(fig.gca(polar=True) is ax2)

    ax3 = fig.add_subplot(122)
    assert_true(fig.gca() is ax3)

    # the final request for a polar axes will end up creating one
    # with a spec of 111.
    assert_true(fig.gca(polar=True) is not ax3)
    assert_true(fig.gca(polar=True) is not ax2)
    assert_equal(fig.gca().get_geometry(), (1, 1, 1))

    # sca() restores ax1 as the current axes.
    fig.sca(ax1)
    assert_true(fig.gca(projection='rectilinear') is ax1)
    assert_true(fig.gca() is ax1)


@image_comparison(baseline_images=['figure_suptitle'])
def test_suptitle():
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    # The second suptitle call replaces the first one.
    fig.suptitle('hello', color='r')
    fig.suptitle('title', color='g', rotation='30')


@image_comparison(baseline_images=['alpha_background'],
                  # only test png and svg. The PDF output appears correct,
                  # but Ghostscript does not preserve the background color.
                  extensions=['png', 'svg'],
                  savefig_kwarg={'facecolor': (0, 1, 0.4),
                                 'edgecolor': 'none'})
def test_alpha():
    # We want an image which has a background color and an
    # alpha of 0.4.
    fig = plt.figure(figsize=[2, 1])
    fig.set_facecolor((0, 1, 0.4))
    fig.patch.set_alpha(0.4)

    import matplotlib.patches as mpatches
    fig.patches.append(mpatches.CirclePolygon([20, 20],
                                              radius=15,
                                              alpha=0.6,
                                              facecolor='red'))


@cleanup
def test_too_many_figures():
    import warnings
    # Creating more than the configured max number of open figures should
    # emit exactly one RuntimeWarning.
    with warnings.catch_warnings(record=True) as w:
        for i in range(22):
            fig = plt.figure()
    assert len(w) == 1


def test_iterability_axes_argument():

    # This is a regression test for matplotlib/matplotlib#3196. If one of the
    # arguments returned by _as_mpl_axes defines __getitem__ but is not
    # iterable, this would raise an exception. This is because we check
    # whether the arguments are iterable, and if so we try and convert them
    # to a tuple. However, the ``iterable`` function returns True if
    # __getitem__ is present, but some classes can define __getitem__ without
    # being iterable. The tuple conversion is now done in a try...except in
    # case it fails.

    class MyAxes(Axes):
        def __init__(self, *args, **kwargs):
            kwargs.pop('myclass', None)
            return Axes.__init__(self, *args, **kwargs)

    class MyClass(object):

        def __getitem__(self, item):
            # Defines __getitem__ but is deliberately NOT iterable.
            if item != 'a':
                raise ValueError("item should be a")

        def _as_mpl_axes(self):
            return MyAxes, {'myclass': self}

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=MyClass())
    plt.close(fig)


if __name__ == "__main__":
    import nose
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
mit
jseabold/scipy
setup.py
16
7924
#!/usr/bin/env python
"""SciPy: Scientific Library for Python

SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. The SciPy library depends on NumPy, which
provides convenient and fast N-dimensional array manipulation. The
SciPy library is built to work with NumPy arrays, and provides many
user-friendly and efficient numerical routines such as routines for
numerical integration and optimization. Together, they run on all
popular operating systems, are quick to install, and are free of
charge. NumPy and SciPy are easy to use, but powerful enough to be
depended upon by some of the world's leading scientists and engineers.
If you need to manipulate numbers on a computer and display or publish
the results, give SciPy a try!
"""

# DOCLINES[0] becomes the short description, DOCLINES[2:] the long one.
DOCLINES = __doc__.split("\n")

import os
import sys
import subprocess


if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 2):
    raise RuntimeError("Python version 2.6, 2.7 or >= 3.2 required.")

if sys.version_info[0] < 3:
    import __builtin__ as builtins
else:
    import builtins


CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""

MAJOR = 0
MINOR = 17
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)


# Return the git revision as a string
def git_version():
    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        GIT_REVISION = out.strip().decode('ascii')
    except OSError:
        GIT_REVISION = "Unknown"

    return GIT_REVISION


# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')

# This is a bit hackish: we are setting a global variable so that the main
# scipy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet.  While ugly, it's
# a lot more robust than what was previously being used.
builtins.__SCIPY_SETUP__ = True


def get_version_info():
    """Return (FULLVERSION, GIT_REVISION), appending the git revision to
    the version for unreleased builds."""
    # Adding the git rev number needs to be done inside
    # write_version_py(), otherwise the import of scipy.version messes
    # up the build under Python 3.
    FULLVERSION = VERSION
    if os.path.exists('.git'):
        GIT_REVISION = git_version()
    elif os.path.exists('scipy/version.py'):
        # must be a source distribution, use existing version file
        # load it as a separate module to not load scipy/__init__.py
        import imp
        version = imp.load_source('scipy.version', 'scipy/version.py')
        GIT_REVISION = version.git_revision
    else:
        GIT_REVISION = "Unknown"

    if not ISRELEASED:
        FULLVERSION += '.dev0+' + GIT_REVISION[:7]

    return FULLVERSION, GIT_REVISION


def write_version_py(filename='scipy/version.py'):
    """(Re)generate scipy/version.py with the current version metadata."""
    cnt = """
# THIS FILE IS GENERATED FROM SCIPY SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
    version = full_version
"""
    FULLVERSION, GIT_REVISION = get_version_info()

    a = open(filename, 'w')
    try:
        a.write(cnt % {'version': VERSION,
                       'full_version': FULLVERSION,
                       'git_revision': GIT_REVISION,
                       'isrelease': str(ISRELEASED)})
    finally:
        a.close()


try:
    from sphinx.setup_command import BuildDoc
    HAVE_SPHINX = True
except ImportError:
    # Fixed: a bare ``except:`` here also swallowed SystemExit and
    # KeyboardInterrupt; only a missing sphinx should disable doc builds.
    HAVE_SPHINX = False

if HAVE_SPHINX:
    class ScipyBuildDoc(BuildDoc):
        """Run in-place build before Sphinx doc build"""
        def run(self):
            ret = subprocess.call([sys.executable, sys.argv[0],
                                   'build_ext', '-i'])
            if ret != 0:
                raise RuntimeError("Building Scipy failed!")
            BuildDoc.run(self)


def generate_cython():
    """Run tools/cythonize.py to regenerate C sources from Cython files."""
    cwd = os.path.abspath(os.path.dirname(__file__))
    print("Cythonizing sources")
    p = subprocess.call([sys.executable,
                         os.path.join(cwd, 'tools', 'cythonize.py'),
                         'scipy'],
                        cwd=cwd)
    if p != 0:
        raise RuntimeError("Running cythonize failed!")


def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration hook for the scipy package tree."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)

    config.add_subpackage('scipy')
    config.add_data_files(('scipy', '*.txt'))

    config.get_version('scipy/version.py')

    return config


def setup_package():
    """Entry point: regenerate version file, pick a setup() backend
    appropriate for the requested command, and run it."""
    # Rewrite the version file every time
    write_version_py()

    if HAVE_SPHINX:
        cmdclass = {'build_sphinx': ScipyBuildDoc}
    else:
        cmdclass = {}

    # Figure out whether to add ``*_requires = ['numpy']``.
    # We don't want to do that unconditionally, because we risk updating
    # an installed numpy which fails too often.  Just if it's not installed, we
    # may give it a try.  See gh-3379.
    build_requires = []
    try:
        import numpy
    except ImportError:
        # Fixed: narrowed from a bare ``except:`` — only an absent numpy
        # should trigger the build requirement.
        build_requires = ['numpy>=1.6.2']

    metadata = dict(
        name='scipy',
        maintainer="SciPy Developers",
        maintainer_email="scipy-dev@scipy.org",
        description=DOCLINES[0],
        long_description="\n".join(DOCLINES[2:]),
        url="http://www.scipy.org",
        download_url="http://sourceforge.net/projects/scipy/files/scipy/",
        license='BSD',
        cmdclass=cmdclass,
        classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
        platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
        test_suite='nose.collector',
        setup_requires=build_requires,
        install_requires=build_requires,
    )

    if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
                               sys.argv[1] in ('--help-commands', 'egg_info', '--version',
                                               'clean')):
        # For these actions, NumPy is not required.
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Scipy when Numpy is not yet present in
        # the system.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

        FULLVERSION, GIT_REVISION = get_version_info()
        metadata['version'] = FULLVERSION
    else:
        if (len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel') or (
                'develop' in sys.argv):
            # bdist_wheel needs setuptools
            import setuptools
        from numpy.distutils.core import setup

        cwd = os.path.abspath(os.path.dirname(__file__))
        if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
            # Generate Cython sources, unless building from source release
            generate_cython()

        metadata['configuration'] = configuration

    setup(**metadata)


if __name__ == '__main__':
    setup_package()
bsd-3-clause
tpo/ansible
test/support/integration/plugins/module_utils/k8s/raw.py
37
23839
# # Copyright 2018 Red Hat | Ansible # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function import copy from datetime import datetime from distutils.version import LooseVersion import time import sys import traceback from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC from ansible.module_utils.six import string_types from ansible.module_utils.k8s.common import KubernetesAnsibleModule from ansible.module_utils.common.dict_transformations import dict_merge try: import yaml from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ConflictError, ForbiddenError, KubernetesValidateMissing import urllib3 except ImportError: # Exceptions handled in common pass try: import kubernetes_validate HAS_KUBERNETES_VALIDATE = True except ImportError: HAS_KUBERNETES_VALIDATE = False K8S_CONFIG_HASH_IMP_ERR = None try: from openshift.helper.hashes import generate_hash HAS_K8S_CONFIG_HASH = True except ImportError: K8S_CONFIG_HASH_IMP_ERR = traceback.format_exc() HAS_K8S_CONFIG_HASH = False HAS_K8S_APPLY = None try: from openshift.dynamic.apply import apply_object HAS_K8S_APPLY = True except ImportError: HAS_K8S_APPLY = False class KubernetesRawModule(KubernetesAnsibleModule): @property def validate_spec(self): return dict( 
fail_on_error=dict(type='bool'), version=dict(), strict=dict(type='bool', default=True) ) @property def condition_spec(self): return dict( type=dict(), status=dict(default=True, choices=[True, False, "Unknown"]), reason=dict() ) @property def argspec(self): argument_spec = copy.deepcopy(COMMON_ARG_SPEC) argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC)) argument_spec['merge_type'] = dict(type='list', choices=['json', 'merge', 'strategic-merge']) argument_spec['wait'] = dict(type='bool', default=False) argument_spec['wait_sleep'] = dict(type='int', default=5) argument_spec['wait_timeout'] = dict(type='int', default=120) argument_spec['wait_condition'] = dict(type='dict', default=None, options=self.condition_spec) argument_spec['validate'] = dict(type='dict', default=None, options=self.validate_spec) argument_spec['append_hash'] = dict(type='bool', default=False) argument_spec['apply'] = dict(type='bool', default=False) return argument_spec def __init__(self, k8s_kind=None, *args, **kwargs): self.client = None self.warnings = [] mutually_exclusive = [ ('resource_definition', 'src'), ('merge_type', 'apply'), ] KubernetesAnsibleModule.__init__(self, *args, mutually_exclusive=mutually_exclusive, supports_check_mode=True, **kwargs) self.kind = k8s_kind or self.params.get('kind') self.api_version = self.params.get('api_version') self.name = self.params.get('name') self.namespace = self.params.get('namespace') resource_definition = self.params.get('resource_definition') validate = self.params.get('validate') if validate: if LooseVersion(self.openshift_version) < LooseVersion("0.8.0"): self.fail_json(msg="openshift >= 0.8.0 is required for validate") self.append_hash = self.params.get('append_hash') if self.append_hash: if not HAS_K8S_CONFIG_HASH: self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"), exception=K8S_CONFIG_HASH_IMP_ERR) if self.params['merge_type']: if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"): 
self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type")) self.apply = self.params.get('apply', False) if self.apply: if not HAS_K8S_APPLY: self.fail_json(msg=missing_required_lib("openshift >= 0.9.2", reason="for apply")) if resource_definition: if isinstance(resource_definition, string_types): try: self.resource_definitions = yaml.safe_load_all(resource_definition) except (IOError, yaml.YAMLError) as exc: self.fail(msg="Error loading resource_definition: {0}".format(exc)) elif isinstance(resource_definition, list): self.resource_definitions = resource_definition else: self.resource_definitions = [resource_definition] src = self.params.get('src') if src: self.resource_definitions = self.load_resource_definitions(src) try: self.resource_definitions = [item for item in self.resource_definitions if item] except AttributeError: pass if not resource_definition and not src: implicit_definition = dict( kind=self.kind, apiVersion=self.api_version, metadata=dict(name=self.name) ) if self.namespace: implicit_definition['metadata']['namespace'] = self.namespace self.resource_definitions = [implicit_definition] def flatten_list_kind(self, list_resource, definitions): flattened = [] parent_api_version = list_resource.group_version if list_resource else None parent_kind = list_resource.kind[:-4] if list_resource else None for definition in definitions.get('items', []): resource = self.find_resource(definition.get('kind', parent_kind), definition.get('apiVersion', parent_api_version), fail=True) flattened.append((resource, self.set_defaults(resource, definition))) return flattened def execute_module(self): changed = False results = [] try: self.client = self.get_api_client() # Hopefully the kubernetes client will provide its own exception class one day except (urllib3.exceptions.RequestError) as e: self.fail_json(msg="Couldn't connect to Kubernetes: %s" % str(e)) flattened_definitions = [] for definition in self.resource_definitions: kind = 
definition.get('kind', self.kind) api_version = definition.get('apiVersion', self.api_version) if kind.endswith('List'): resource = self.find_resource(kind, api_version, fail=False) flattened_definitions.extend(self.flatten_list_kind(resource, definition)) else: resource = self.find_resource(kind, api_version, fail=True) flattened_definitions.append((resource, definition)) for (resource, definition) in flattened_definitions: kind = definition.get('kind', self.kind) api_version = definition.get('apiVersion', self.api_version) definition = self.set_defaults(resource, definition) self.warnings = [] if self.params['validate'] is not None: self.warnings = self.validate(definition) result = self.perform_action(resource, definition) result['warnings'] = self.warnings changed = changed or result['changed'] results.append(result) if len(results) == 1: self.exit_json(**results[0]) self.exit_json(**{ 'changed': changed, 'result': { 'results': results } }) def validate(self, resource): def _prepend_resource_info(resource, msg): return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg) try: warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict')) except KubernetesValidateMissing: self.fail_json(msg="kubernetes-validate python library is required to validate resources") if errors and self.params['validate']['fail_on_error']: self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors])) else: return [_prepend_resource_info(resource, msg) for msg in warnings + errors] def set_defaults(self, resource, definition): definition['kind'] = resource.kind definition['apiVersion'] = resource.group_version metadata = definition.get('metadata', {}) if self.name and not metadata.get('name'): metadata['name'] = self.name if resource.namespaced and self.namespace and not metadata.get('namespace'): metadata['namespace'] = self.namespace definition['metadata'] = metadata return 
definition def perform_action(self, resource, definition): result = {'changed': False, 'result': {}} state = self.params.get('state', None) force = self.params.get('force', False) name = definition['metadata'].get('name') namespace = definition['metadata'].get('namespace') existing = None wait = self.params.get('wait') wait_sleep = self.params.get('wait_sleep') wait_timeout = self.params.get('wait_timeout') wait_condition = None if self.params.get('wait_condition') and self.params['wait_condition'].get('type'): wait_condition = self.params['wait_condition'] self.remove_aliases() try: # ignore append_hash for resources other than ConfigMap and Secret if self.append_hash and definition['kind'] in ['ConfigMap', 'Secret']: name = '%s-%s' % (name, generate_hash(definition)) definition['metadata']['name'] = name params = dict(name=name) if namespace: params['namespace'] = namespace existing = resource.get(**params) except NotFoundError: # Remove traceback so that it doesn't show up in later failures try: sys.exc_clear() except AttributeError: # no sys.exc_clear on python3 pass except ForbiddenError as exc: if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent': return self.create_project_request(definition) self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body), error=exc.status, status=exc.status, reason=exc.reason) except DynamicApiError as exc: self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body), error=exc.status, status=exc.status, reason=exc.reason) if state == 'absent': result['method'] = "delete" if not existing: # The object already does not exist return result else: # Delete the object result['changed'] = True if not self.check_mode: try: k8s_obj = resource.delete(**params) result['result'] = k8s_obj.to_dict() except DynamicApiError as exc: self.fail_json(msg="Failed to delete object: {0}".format(exc.body), error=exc.status, status=exc.status, reason=exc.reason) if wait: success, resource, 
duration = self.wait(resource, definition, wait_sleep, wait_timeout, 'absent') result['duration'] = duration if not success: self.fail_json(msg="Resource deletion timed out", **result) return result else: if self.apply: if self.check_mode: ignored, k8s_obj = apply_object(resource, definition) else: try: k8s_obj = resource.apply(definition, namespace=namespace).to_dict() except DynamicApiError as exc: msg = "Failed to apply object: {0}".format(exc.body) if self.warnings: msg += "\n" + "\n ".join(self.warnings) self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason) success = True result['result'] = k8s_obj if wait: success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) if existing: existing = existing.to_dict() else: existing = {} match, diffs = self.diff_objects(existing, result['result']) result['changed'] = not match result['diff'] = diffs result['method'] = 'apply' if not success: self.fail_json(msg="Resource apply timed out", **result) return result if not existing: if self.check_mode: k8s_obj = definition else: try: k8s_obj = resource.create(definition, namespace=namespace).to_dict() except ConflictError: # Some resources, like ProjectRequests, can't be created multiple times, # because the resources that they create don't match their kind # In this case we'll mark it as unchanged and warn the user self.warn("{0} was not found, but creating it returned a 409 Conflict error. 
This can happen \ if the resource you are creating does not directly create a resource of the same kind.".format(name)) return result except DynamicApiError as exc: msg = "Failed to create object: {0}".format(exc.body) if self.warnings: msg += "\n" + "\n ".join(self.warnings) self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason) success = True result['result'] = k8s_obj if wait and not self.check_mode: success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) result['changed'] = True result['method'] = 'create' if not success: self.fail_json(msg="Resource creation timed out", **result) return result match = False diffs = [] if existing and force: if self.check_mode: k8s_obj = definition else: try: k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=self.append_hash).to_dict() except DynamicApiError as exc: msg = "Failed to replace object: {0}".format(exc.body) if self.warnings: msg += "\n" + "\n ".join(self.warnings) self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason) match, diffs = self.diff_objects(existing.to_dict(), k8s_obj) success = True result['result'] = k8s_obj if wait: success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) match, diffs = self.diff_objects(existing.to_dict(), result['result']) result['changed'] = not match result['method'] = 'replace' result['diff'] = diffs if not success: self.fail_json(msg="Resource replacement timed out", **result) return result # Differences exist between the existing obj and requested params if self.check_mode: k8s_obj = dict_merge(existing.to_dict(), definition) else: if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"): k8s_obj, error = self.patch_resource(resource, definition, existing, name, namespace) else: for merge_type in self.params['merge_type'] or 
['strategic-merge', 'merge']: k8s_obj, error = self.patch_resource(resource, definition, existing, name, namespace, merge_type=merge_type) if not error: break if error: self.fail_json(**error) success = True result['result'] = k8s_obj if wait: success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition) match, diffs = self.diff_objects(existing.to_dict(), result['result']) result['changed'] = not match result['method'] = 'patch' result['diff'] = diffs if not success: self.fail_json(msg="Resource update timed out", **result) return result def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None): try: params = dict(name=name, namespace=namespace) if merge_type: params['content_type'] = 'application/{0}-patch+json'.format(merge_type) k8s_obj = resource.patch(definition, **params).to_dict() match, diffs = self.diff_objects(existing.to_dict(), k8s_obj) error = {} return k8s_obj, {} except DynamicApiError as exc: msg = "Failed to patch object: {0}".format(exc.body) if self.warnings: msg += "\n" + "\n ".join(self.warnings) error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings) return None, error def create_project_request(self, definition): definition['kind'] = 'ProjectRequest' result = {'changed': False, 'result': {}} resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True) if not self.check_mode: try: k8s_obj = resource.create(definition) result['result'] = k8s_obj.to_dict() except DynamicApiError as exc: self.fail_json(msg="Failed to create object: {0}".format(exc.body), error=exc.status, status=exc.status, reason=exc.reason) result['changed'] = True result['method'] = 'create' return result def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state): start = datetime.now() def _wait_for_elapsed(): return (datetime.now() - start).seconds response = None while 
_wait_for_elapsed() < timeout: try: response = resource.get(name=name, namespace=namespace) if predicate(response): if response: return True, response.to_dict(), _wait_for_elapsed() else: return True, {}, _wait_for_elapsed() time.sleep(sleep) except NotFoundError: if state == 'absent': return True, {}, _wait_for_elapsed() if response: response = response.to_dict() return False, response, _wait_for_elapsed() def wait(self, resource, definition, sleep, timeout, state='present', condition=None): def _deployment_ready(deployment): # FIXME: frustratingly bool(deployment.status) is True even if status is empty # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty return (deployment.status and deployment.status.replicas is not None and deployment.status.availableReplicas == deployment.status.replicas and deployment.status.observedGeneration == deployment.metadata.generation) def _pod_ready(pod): return (pod.status and pod.status.containerStatuses is not None and all([container.ready for container in pod.status.containerStatuses])) def _daemonset_ready(daemonset): return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and daemonset.status.observedGeneration == daemonset.metadata.generation) def _custom_condition(resource): if not resource.status or not resource.status.conditions: return False match = [x for x in resource.status.conditions if x.type == condition['type']] if not match: return False # There should never be more than one condition of a specific type match = match[0] if match.status == 'Unknown': if match.status == condition['status']: if 'reason' not in condition: return True if condition['reason']: return match.reason == condition['reason'] return False status = True if match.status == 'True' else False if status == condition['status']: if condition.get('reason'): return match.reason == condition['reason'] 
return True return False def _resource_absent(resource): return not resource waiter = dict( Deployment=_deployment_ready, DaemonSet=_daemonset_ready, Pod=_pod_ready ) kind = definition['kind'] if state == 'present' and not condition: predicate = waiter.get(kind, lambda x: x) elif state == 'present' and condition: predicate = _custom_condition else: predicate = _resource_absent return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state)
gpl-3.0
Ritsyy/fjord
fjord/base/utils.py
1
16093
# Grab-bag of small helpers shared across the Fjord Django project:
# JSON encoding, string/number coercion with fallbacks, rate limiting
# (django-ratelimit + DRF), CORS, and class/instance <-> string keys.
# NOTE: this module is Python 2 code (uses ``unicode``/``basestring``).
import datetime
import json
import re
import time
from functools import wraps
from hashlib import md5
from textwrap import wrap

from django.contrib.auth.decorators import permission_required
from django.core import validators
from django.http import (
    HttpResponse,
    HttpResponseBadRequest,
    HttpResponseRedirect
)
from django.utils.dateparse import parse_date
from django.utils.encoding import force_str, force_text
from django.utils.feedgenerator import Atom1Feed

from product_details import product_details
from ratelimit.utils import is_ratelimited
from rest_framework.throttling import BaseThrottle
from statsd.defaults.django import statsd

from fjord.base.urlresolvers import reverse


class JSONDatetimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes date/datetime values as ISO 8601.

    Anything with a ``strftime`` attribute (date, datetime, time) is
    emitted via ``isoformat()``; everything else falls through to the
    default encoder.

    """
    def default(self, value):
        if hasattr(value, 'strftime'):
            return value.isoformat()
        return super(JSONDatetimeEncoder, self).default(value)


def wrap_with_paragraphs(text, width=72):
    """Runs textwrap on text, but keeps pre-existing paragraphs"""
    if not text:
        return text

    # Wrap each line independently so existing line breaks (paragraphs)
    # survive the re-wrap.
    return '\n'.join(
        ['\n'.join(wrap(segment, width=width))
         for segment in text.splitlines()]
    )


def is_url(url):
    """Takes a string and returns whether or not it's a url

    Recognizes about: and chrome:// urls, everything Django's
    URLValidator recognizes and protocol-less urls.

    >>> is_url(u'example.com')
    True
    >>> is_url(u'about:')
    True
    >>> is_url(u'foo')
    False

    """
    url = force_text(url)

    # Check what Django's URLValidator thinks about it. That covers
    # the http/https/ftp cases including localhost, ipv4, ipv6 and
    # optional ports.
    if validators.URLValidator.regex.search(url):
        return True

    # Check if it's an about: url.
    if url.startswith('about:'):
        return True

    # Check if it's a chrome:// url.
    if url.startswith('chrome://'):
        return True

    # Check if it's a protocol-less url by prepending http:// to it.
    if validators.URLValidator.regex.search('http://' + url):
        return True

    return False


def translate_country_name(current_language, country_code, country_name,
                           country_name_l10n):
    """Translates country name from product details or gettext

    It might seem a bit weird we're not doing the _lazy gettext
    translation here, but if we did, then we'd be translating a
    variable value rather than a string and then it wouldn't get
    picked up by extract script.

    :arg current_language: the language of the user viewing the page
    :arg country_code: the iso 3166 two-letter country code
    :arg country_name: the country name
    :arg country_name_l10n: the country name wrapped in a lazy gettext call

    :returns: translated country name

    """
    # FIXME: this is a lousy way to alleviate the problem where we
    # have a "locale" and we really need a "language".
    language_fix = {
        'es': 'es-ES',
    }

    current_language = language_fix.get(current_language, current_language)

    # If the country name has been translated, then use that
    if unicode(country_name) != unicode(country_name_l10n):
        return country_name_l10n

    # Normalize the locale to "xx-YY" casing before asking
    # product_details for region names.
    current_language = current_language.split('-')
    current_language[0] = current_language[0].lower()
    if len(current_language) > 1:
        current_language[1] = current_language[1].upper()
    current_language = '-'.join(current_language)

    country_code = country_code.lower()

    try:
        countries = product_details.get_regions(current_language)
    except IOError:
        # No region data for this locale--fall back to the untranslated
        # name rather than erroring out.
        return country_name

    return countries.get(country_code, country_name)


def smart_truncate(content, length=100, suffix='...'):
    """Truncate text at space before length bound.

    :arg content: string to truncate
    :arg length: length to truncate at
    :arg suffix: text to append to truncated content

    :returns: string

    Example:

    >>> smart_truncate('abcde fghij', length=8)
    'abcde...'
    >>> smart_truncate('abcde fghij', length=100)
    'abcde fghij'

    """
    if len(content) <= length:
        return content
    else:
        # Cut at the last space inside the bound so we don't split a
        # word in half.
        return content[:length].rsplit(' ', 1)[0] + suffix


def smart_str(s, fallback=u''):
    """Returns the string or the fallback if it's not a string"""
    if isinstance(s, basestring):
        return s
    return fallback


def smart_int(s, fallback=0):
    """Convert a string to int, with fallback for invalid strings or types."""
    try:
        # Going through float first accepts inputs like '3.7' (-> 3).
        return int(float(s))
    except (ValueError, TypeError, OverflowError):
        return fallback


def smart_timedelta(s, fallback=None):
    """Convert s to a datetime.timedelta with a fallback for invalid input.

    Only the "<N>d" (whole days, N > 0) form is recognized; a timedelta
    passed in is returned unchanged.

    :arg s: The string to convert to a timedelta.
    :arg fallback: Value to use in case of an error. Default: ``None``.

    """
    if isinstance(s, datetime.timedelta):
        return s

    if s and s.endswith('d'):
        try:
            days = int(s[:-1])
            if days > 0:
                return datetime.timedelta(days=days)
        except ValueError:
            pass
    return fallback


def smart_date(s, fallback=None):
    """Convert a string to a datetime.date with a fallback for invalid input.

    :arg s: The string to convert to a date.
    :arg fallback: Value to use in case of an error. Default: ``None``.

    """
    if isinstance(s, datetime.date):
        return s

    try:
        dt = parse_date(s)

        # The strftime functions require a year >= 1900, so if this
        # has a year before that, then we treat it as an invalid date so
        # later processing doesn't get hosed.
        if dt and dt.year >= 1900:
            return dt
    except (ValueError, TypeError):
        pass

    return fallback


def smart_bool(s, fallback=False):
    """Convert a string that has a semantic boolean value to a real boolean.

    Note that this is not the same as ``s`` being "truthy". The string
    ``'False'`` will be returned as False, even though it is Truthy, and
    non-boolean values like ``'apple'`` would return the fallback parameter,
    since it doesn't represent a boolean value.

    """
    try:
        s = s.lower()
        if s in ['true', 't', 'yes', 'y', '1']:
            return True
        elif s in ['false', 'f', 'no', 'n', '0']:
            return False
    except AttributeError:
        # s has no .lower(), i.e. it isn't a string at all.
        pass

    return fallback


def epoch_milliseconds(d):
    """Convert a datetime to a number of milliseconds since the epoch."""
    # NOTE(review): time.mktime interprets the timetuple in *local*
    # time, so this is only a true Unix epoch when d is local-time or
    # the server runs UTC -- confirm against callers.
    return time.mktime(d.timetuple()) * 1000


class FakeLogger(object):
    """Fake logger that we can pretend is a Python Logger

    Why? Well, because Django has logging settings that prevent me
    from setting up a logger here that uses the stdout that the Django
    BaseCommand has. At some point while fiddling with it, I figured,
    'screw it--I'll just write my own' and did.

    The minor ramification is that this isn't a complete
    implementation so if it's missing stuff, we'll have to add it.

    """
    def __init__(self, stdout):
        self.stdout = stdout

    def _out(self, level, msg, *args):
        # %-interpolate lazily, like stdlib logging does.
        msg = msg % args
        self.stdout.write('%s %-8s: %s\n' % (
            time.strftime('%H:%M:%S'), level, msg))

    def info(self, msg, *args):
        self._out('INFO', msg, *args)

    def error(self, msg, *args):
        self._out('ERROR', msg, *args)


class Atom1FeedWithRelatedLinks(Atom1Feed):
    """Atom1Feed with related links

    This adds a "link_related" item as::

        <link rel="related">url</link>

    """
    def add_item_elements(self, handler, item):
        super(Atom1FeedWithRelatedLinks, self).add_item_elements(handler, item)
        if item.get('link_related'):
            handler.addQuickElement(
                'link',
                attrs={'href': item['link_related'], 'rel': 'related'})


def actual_ip(group, req):
    """Returns the actual ip address

    Our dev, stage and prod servers are behind a reverse proxy, so the
    ip address in REMOTE_ADDR is the reverse proxy server and not the
    client ip address. The actual client ip address is in
    HTTP_X_CLUSTER_CLIENT_IP.

    In our local development and test environments, the client ip
    address is in REMOTE_ADDR.

    """
    return req.META.get('HTTP_X_CLUSTER_CLIENT_IP', req.META['REMOTE_ADDR'])


def actual_ip_plus_context(contextfun):
    """Returns a key function that adds md5 hashed context to the key"""
    def _actual_ip_plus_context(group, req, *args, **kwargs):
        # Force whatever comes out of contextfun to be bytes.
        context = force_str(contextfun(req))

        # md5 hash that. (Non-cryptographic use: just namespacing the
        # ratelimit cache key, so md5 is fine here.)
        hasher = md5()
        hasher.update(context)
        context = hasher.hexdigest()

        # Then return the ip address plus a : plus the desc md5 hash.
        return actual_ip(group, req) + ':' + context
    return _actual_ip_plus_context


def ratelimit(rulename, keyfun=actual_ip, rate='5/m'):
    """Rate-limiting decorator that keeps metrics via statsd

    This is just like the django-ratelimit ratelimit decorator, but is
    stacking-friendly, performs some statsd fancypants and also has
    Fjord-friendly defaults.

    :arg rulename: rulename for statsd logging---must be a string with
        letters only! look for this in statsd under
        "throttled." + rulename.
    :arg keyfun: (optional) function to generate a key for this
        throttling. defaults to actual_ip.
    :arg rate: (optional) rate to throttle at. defaults to 5/m.

    .. Note::

       Unlike django-ratelimit's ratelimit decorator, this does **not**
       redirect the user or handle the rate limiting. Caller must check
       the request for 'limited' and redirect/handle accordingly. For
       example::

           @ratelimit(rulename='foo', keyfun=actual_ip, rate='1/10m')
           def view(request):
               if getattr(request, 'limited', False):
                   # handle ratelimit here!
                   raise PermissionDenied()

    """
    def decorator(fn):
        @wraps(fn)
        def _wrapped(request, *args, **kwargs):
            # "Stacking-friendly": only count the statsd metric for the
            # first decorator in a stack that trips the limit.
            already_limited = getattr(request, 'limited', False)
            ratelimited = is_ratelimited(
                request=request, group=rulename, key=keyfun, rate=rate,
                method=['POST'], increment=True)
            if not already_limited and ratelimited:
                statsd.incr('throttled.' + rulename)
            return fn(request, *args, **kwargs)
        return _wrapped
    return decorator


# Matches rates like "5/m" or "1/10m": (count)/(optional multiplier)(unit).
RATE_RE = re.compile(r'^(\d+)/(\d*)([smhd])$')


class RatelimitThrottle(BaseThrottle):
    """This wraps the django-ratelimit ratelimiter in a DRF class

    Django Rest Framework has its own throttling system. That's great,
    but we're already using django-ratelimit. So this wraps
    django-ratelimit throttling in the Django Rest Framework structure
    so I can have a unified throttling backend for regular and API
    views.

    .. Note::

       Return an instance of this in the `get_throttles` method. Don't
       use this with `throttled_classes` property because it requires
       other parameters to instantiate.

       e.g.::

           class MyThrottle(AnonRateThrottle):
               def get_throttles(self):
                   return [
                       RatelimitThrottle(
                           rulename='double_submit',
                           rate='1/10m'
                       )
                   ]

    """
    def __init__(self, rulename, keyfun=None, rate='5/m', methods=('POST',)):
        self.rulename = rulename
        self.rate = rate
        self.num_requests, self.duration = self.parse_rate(rate)
        self.keyfun = keyfun or actual_ip
        self.methods = methods

    def parse_rate(self, rate):
        """Handles num/(multi * period) like 1/10m"""
        num, multiplier, period = RATE_RE.match(rate).groups()
        num = int(num)
        multiplier = int(multiplier or 1)
        period = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period]
        # (max requests, window length in seconds)
        return (num, (multiplier * period))

    def allow_request(self, request, view):
        already_limited = getattr(request, 'limited', False)
        ratelimited = is_ratelimited(
            request=request, group=self.rulename, key=self.keyfun,
            rate=self.rate, method=self.methods, increment=True)

        if ratelimited:
            # As with the ratelimit decorator: only record the metric
            # once per request even if several throttles fire.
            if not already_limited:
                statsd.incr('throttled.' + self.rulename)
            return self.throttle_failure()

        # Did not trigger rate-limiting, so this request is allowed.
        return self.throttle_success()

    def throttle_success(self):
        return True

    def throttle_failure(self):
        """Called when a request has failed due to throttling"""
        return False

    def wait(self):
        # We don't want to calculate the actual wait time, so we cheat
        # here and just return the full duration.
        return self.duration


def check_new_user(fun):
    """View decorator: redirect users without a Profile to new-user-view."""
    @wraps(fun)
    def _wrapped_view(request, *args, **kwargs):
        # Do this here to avoid circular imports
        from fjord.base.models import Profile

        try:
            request.user.profile
        except AttributeError:
            # e.g. AnonymousUser has no .profile attribute--let the
            # view handle it.
            pass
        except Profile.DoesNotExist:
            url = reverse('new-user-view') + '?next=' + request.path
            return HttpResponseRedirect(url)

        return fun(request, *args, **kwargs)
    return _wrapped_view


def cors_enabled(origin, methods=['GET']):
    """A simple decorator to enable CORS."""
    # NOTE(review): mutable default argument for ``methods``; safe here
    # only because it is read, never mutated -- confirm if modifying.
    def decorator(f):
        @wraps(f)
        def decorated_func(request, *args, **kwargs):
            if request.method == 'OPTIONS':
                # preflight
                if ('HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META and
                        'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in request.META):
                    response = HttpResponse()
                    response['Access-Control-Allow-Methods'] = ', '.join(
                        methods)

                    # TODO: We might need to change this
                    response['Access-Control-Allow-Headers'] = \
                        request.META['HTTP_ACCESS_CONTROL_REQUEST_HEADERS']
                else:
                    return HttpResponseBadRequest()
            elif request.method in methods:
                response = f(request, *args, **kwargs)
            else:
                return HttpResponseBadRequest()

            response['Access-Control-Allow-Origin'] = origin
            return response
        return decorated_func
    return decorator


# Decorator requiring the analytics-dashboard permission (403 on failure).
analyzer_required = permission_required(
    'analytics.can_view_dashboard',
    raise_exception=True)


def class_to_path(cls):
    """Given a class, returns the class path"""
    return ':'.join([cls.__module__, cls.__name__])


def path_to_class(path):
    """Given a class path, returns the class"""
    module_path, cls_name = path.split(':')

    module = __import__(module_path, fromlist=[cls_name])
    cls = getattr(module, cls_name)

    return cls


def instance_to_key(instance):
    """Given an instance, returns a key

    :arg instance: The model instance to generate a key for

    :returns: A string representing that specific instance

    .. Note::

       If you ever make a code change that moves the model to some
       other Python module, then the keys for those model instances
       will fail.

    """
    cls = instance.__class__
    return ':'.join([cls.__module__, cls.__name__, str(instance.pk)])


def key_to_instance(key):
    """Given a key, returns the instance

    :raises DoesNotExist: if the instance doesn't exist
    :raises ImportError: if there's an import error
    :raises AttributeError: if the class doesn't exist in the module

    """
    module_path, cls_name, id_ = key.split(':')

    module = __import__(module_path, fromlist=[cls_name])
    cls = getattr(module, cls_name)

    instance = cls.objects.get(pk=int(id_))
    return instance
bsd-3-clause
SCSSoftware/BlenderTools
addon/io_scs_tools/exp/pit.py
1
30644
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Copyright (C) 2013-2014: SCS Software import bpy import os import shutil from io_scs_tools.consts import Variant as _VARIANT_consts from io_scs_tools.exp import tobj as _tobj from io_scs_tools.internals import looks as _looks from io_scs_tools.internals import shader_presets as _shader_presets from io_scs_tools.internals.structure import SectionData as _SectionData from io_scs_tools.internals.containers import pix as _pix_container from io_scs_tools.utils import path as _path_utils from io_scs_tools.utils import get_scs_globals as _get_scs_globals from io_scs_tools.utils.info import get_combined_ver_str from io_scs_tools.utils.printout import lprint def fill_comment_header_section(look_list, variant_list): """Fills up comment section (before Header).""" section = _SectionData("#comment") section.props.append(("#", "# Look Names:")) for look in look_list: section.props.append(("#", "#\t" + look['name'])) section.props.append(("#", "#")) section.props.append(("#", "# Variant Names:")) for variant in variant_list: section.props.append(("#", "#\t" + variant[0])) section.props.append(("#", "#")) return section def fill_header_section(format_version, file_name, sign_export): """Fills up "Header" 
section.""" section = _SectionData("Header") section.props.append(("FormatVersion", format_version)) section.props.append(("Source", get_combined_ver_str())) section.props.append(("Type", "Trait")) section.props.append(("Name", file_name)) if sign_export: section.props.append(("SourceFilename", str(bpy.data.filepath))) author = bpy.context.user_preferences.system.author if author: section.props.append(("Author", str(author))) return section def fill_global_section(looks, variants, parts, materials): """Fills up "Global" section.""" section = _SectionData("Global") section.props.append(("LookCount", looks)) section.props.append(("VariantCount", variants)) section.props.append(("PartCount", parts)) section.props.append(("MaterialCount", materials)) return section def fill_material_sections(materials, material_dict): """Fills up "Material" sections.""" sections = [] for material in materials: if isinstance(material, str): sections.append(material_dict[material]) else: if material.name in material_dict: material_section = material_dict[material.name] else: material_section = material_dict[str("_" + material.name + "_-_default_settings_")] sections.append(material_section) return sections def default_material(alias): """Return 'default material' data section.""" # DEFAULT PROPERTIES material_export_data = _SectionData("Material") material_export_data.props.append(("Alias", alias)) # material_export_data.props.append(("Effect", "eut2.none")) material_export_data.props.append(("Effect", "eut2.dif")) material_export_data.props.append(("Flags", 0)) attribute_data = [ ('FLOAT3', "diffuse", (1.0, 1.0, 1.0)), ('FLOAT3', "specular", (0.0, 0.0, 0.0)), ('FLOAT', "shininess", (5.0,)), ('FLOAT', "add_ambient", (0.0,)), ('FLOAT', "reflection", (0.0,)), ] texture_data = [ ('texture[0]:texture_base', ""), ] material_export_data.props.append(("AttributeCount", len(attribute_data))) material_export_data.props.append(("TextureCount", len(texture_data))) # DEFAULT ATTRIBUTES AND TEXTURE 
for attribute in attribute_data: attribute_section = _SectionData("Attribute") attribute_section.props.append(("Format", attribute[0])) attribute_section.props.append(("Tag", attribute[1])) attribute_section.props.append(("Value", ["i", attribute[2]])) material_export_data.sections.append(attribute_section) for texture in texture_data: texture_section = _SectionData("Texture") texture_section.props.append(("Tag", texture[0])) texture_section.props.append(("Value", texture[1])) material_export_data.sections.append(texture_section) return material_export_data def get_texture_path_from_material(material, texture_type, export_path): """Get's relative path for Texture section of tobj from given texture_type. If tobj is not yet created it also creates tobj for it. :param material: Blender material :type material: bpy.types.Material :param texture_type: type of texture which should be readed from material (example "texture_base") :type texture_type: str :param export_path: path where PIT of this material and texture is gonna be exported :type export_path: str :return: relative path for Texture section data of PIT material :rtype: str """ # overwrite tobj value directly if specified if getattr(material.scs_props, "shader_" + texture_type + "_use_imported", False): return getattr(material.scs_props, "shader_" + texture_type + "_imported_tobj", "") # use tobj value from shader preset if texture is locked and has default value if "scs_shader_attributes" in material and "textures" in material["scs_shader_attributes"]: for tex_entry in material["scs_shader_attributes"]["textures"].values(): if "Tag" in tex_entry and texture_type in tex_entry["Tag"]: if "Lock" in tex_entry and tex_entry["Lock"] == "True": if "Value" in tex_entry and tex_entry["Value"] != "": return tex_entry["Value"] # CALCULATING TOBJ AND TEXTURE PATHS texture_raw_path = getattr(material.scs_props, "shader_" + texture_type, "NO PATH") tobj_rel_filepath = tobj_abs_filepath = texture_abs_filepath = "" 
scs_project_path = _get_scs_globals().scs_project_path.rstrip("\\").rstrip("/") extensions, texture_raw_path = _path_utils.get_texture_extens_and_strip_path(texture_raw_path) for ext in extensions: if texture_raw_path.startswith("//"): # relative # search for relative path inside current scs project base and # possible dlc/mod parent folders; use first found for infix in ("", "../base/", "../base_vehicle/", "../../base/", "../../base_vehicle/"): curr_path = os.path.join(scs_project_path, infix + texture_raw_path[2:] + ext) if os.path.isfile(curr_path): tobj_rel_filepath = texture_raw_path.replace("//", "/") # if tobj is used by user then get texture path from tobj # otherwise get tobj path from texture path if ext == ".tobj": tobj_abs_filepath = curr_path texture_abs_filepath = _path_utils.get_texture_path_from_tobj(curr_path) else: tobj_abs_filepath = _path_utils.get_tobj_path_from_shader_texture(curr_path, check_existance=False) texture_abs_filepath = curr_path break # break searching for texture if texture was found if tobj_rel_filepath != "": break elif ext != ".tobj" and os.path.isfile(texture_raw_path + ext): # absolute texture_raw_path_with_ext = texture_raw_path + ext # if we are exporting somewhere into SCS Project Base Path texture still can be saved if scs_project_path != "" and _path_utils.startswith(export_path, scs_project_path): tex_dir, tex_filename = os.path.split(texture_raw_path) tobj_filename = tex_filename + ".tobj" texture_copied_path_with_ext = os.path.join(export_path, tex_filename) + ext # copy texture beside exported files try: shutil.copy2(texture_raw_path_with_ext, texture_copied_path_with_ext) except OSError as e: # ignore copying the same file # NOTE: happens if absolute texture paths are used # even if they are referring to texture inside scs project path if type(e).__name__ != "SameFileError": raise e # copy also TOBJ if exists texture_raw_tobj_path = str(tex_dir) + os.sep + tobj_filename if os.path.isfile(texture_raw_tobj_path): 
shutil.copy2(texture_raw_tobj_path, os.path.join(export_path, tobj_filename)) # get copied TOBJ relative path to current scs project path tobj_rel_filepath = "" if export_path != scs_project_path: tobj_rel_filepath = os.sep + os.path.relpath(export_path, scs_project_path) tobj_rel_filepath = tobj_rel_filepath + os.sep + tobj_filename[:-5] tobj_abs_filepath = os.path.join(export_path, tobj_filename) texture_abs_filepath = texture_raw_path_with_ext lprint("W Material %r texture of type %r uses absolute path!\n\t " + "Texture copied into the Project Base Path beside exported PIT file:\n\t " + "Original path: %r\n\t " + "Copied path: %r", (material.name, texture_type, texture_abs_filepath, texture_copied_path_with_ext)) break else: lprint("E Can not properly export texture %r from material %r!\n\t " + "Make sure you are exporting somewhere into Project Base Path and texture is properly set!", (texture_raw_path, material.name)) return "" else: lprint("E Texture file %r from material %r doesn't exists inside current Project Base Path.\n\t " + "TOBJ won't be exported and reference will remain empty, expect problems!", (texture_raw_path, material.name)) return "" # CREATE TOBJ FILE if not os.path.isfile(tobj_abs_filepath): # only if it does not exists yet # export tobj only if file of texture exists if os.path.isfile(texture_abs_filepath): texture_name = os.path.basename(_path_utils.strip_sep(texture_abs_filepath)) _tobj.export(tobj_abs_filepath, texture_name, set()) else: lprint("E Texture file %r from material %r doesn't exists, TOBJ can not be exported!", (texture_raw_path, material.name)) # make sure that Windows users will export proper paths tobj_rel_filepath = tobj_rel_filepath.replace("\\", "/") return tobj_rel_filepath def fill_look_sections(data_list): """Fills up "Look" sections.""" sections = [] for item_i, item in enumerate(data_list): section = _SectionData("Look") section.props.append(("Name", item['name'])) for material_section in item['material_sections']: 
section.sections.append(material_section) sections.append(section) return sections def _fill_atr_section(atr): """Creates "Attribute" section.""" section = _SectionData("Attribute") section.props.append(("Format", atr[0])) section.props.append(("Tag", atr[1])) section.props.append(("Value", ["&&", (atr[2],)])) return section def _fill_part_section(part): """Creates "Part" section.""" section = _SectionData("Part") section.props.append(("Name", part[0])) section.props.append(("AttributeCount", len(part[1]))) for atr in part[1]: atr_section = _fill_atr_section(atr) section.sections.append(atr_section) return section def fill_variant_sections(data_list): """Fills up "Variant" sections.""" sections = [] for item_i, item in enumerate(data_list): section = _SectionData("Variant") section.props.append(("Name", item[0])) for part in item[1]: part_section = _fill_part_section(part) section.sections.append(part_section) sections.append(section) return sections def fill_part_list(parts, used_parts_names, all_parts=False): """Fills up "Part" sections in "Varian" section :param parts: SCS Root part inventory or parts collection property from variant inventory :type parts: io_scs_tools.properties.object.ObjectPartInventoryItem | list[io_scs_tools.properties.object.ObjectVariantPartInclusionItem] :param used_parts_names: list of part names that are actually used in game object :type used_parts_names: list[str] :param all_parts: flag for all parts are visible (handy for creating default visibilities) :type all_parts: bool :return: Part records (name, attributes) :rtype: list """ part_list = [] for part_name in used_parts_names: part_written = False for part in parts: if part.name == part_name: part_atr = [] if all_parts: part_atr.append(('INT', 'visible', 1)) else: if part.include: include = 1 else: include = 0 part_atr.append(('INT', 'visible', include)) part_list.append((part.name, part_atr), ) part_written = True if not part_written: lprint("E Part %r from collected parts not 
avaliable in variant parts inventory, expect problems by conversion!", (part_name,)) return part_list def export(root_object, filepath, name_suffix, used_parts, used_materials): """Export PIT. :param root_object: SCS root object :type root_object: bpy.types.Object :param filepath: PIT file path :type filepath: str :param name_suffix: file name suffix :type name_suffix: str :param used_parts: parts transitional structure for accessing stored parts from PIM, PIC and PIP :type used_parts: io_scs_tools.exp.transition_structs.parts.PartsTrans :param used_materials: materials transitional structure for accessing stored materials from PIM :type used_materials: io_scs_tools.exp.transition_structs.materials.MaterialsTrans :return: True if successful; False otherwise; :rtype: bool """ scs_globals = _get_scs_globals() file_name = root_object.name print("\n************************************") print("** SCS PIT Exporter **") print("** (c)2014 SCS Software **") print("************************************\n") # DATA GATHERING look_list = [] variant_list = [] saved_active_look = root_object.scs_props.active_scs_look looks_inventory = root_object.scs_object_look_inventory looks_count = len(looks_inventory) if looks_count <= 0: looks_count = 1 used_materials_pairs = used_materials.get_as_pairs() for i in range(0, looks_count): # apply each look from inventory first if len(looks_inventory) > 0: root_object.scs_props.active_scs_look = i # set index for curret look _looks.apply_active_look(root_object) # apply look manually, as active look setter method works only when user sets index from UI curr_look_name = looks_inventory[i].name else: # if no looks create default curr_look_name = "default" material_dict = {} material_list = [] # get materials data for material_name, material in used_materials_pairs: if material is None: material_name = str("_default_material_-_default_settings_") # DEFAULT MATERIAL material_export_data = default_material(material_name) 
material_list.append(material_name) else: # print('material name: %r' % material.name) material_list.append(material) # MATERIAL EFFECT effect_name = material.scs_props.mat_effect_name # PRESET SHADERS flags = 0 attribute_cnt = texture_cnt = 0 attribute_sections = [] texture_sections = [] active_shader_preset_name = material.scs_props.active_shader_preset_name # SUBSTANCE substance_value = material.scs_props.substance # only write substance to material if it's assigned if substance_value != "None" and substance_value != "": substance_data = _SectionData("Attribute") substance_data.props.append(("Format", "STRING")) substance_data.props.append(("Tag", "substance")) substance_data.props.append(("Value", ["i", (substance_value,)])) attribute_sections.append(substance_data) attribute_cnt += 1 if _shader_presets.has_preset(active_shader_preset_name) and active_shader_preset_name != "<none>": preset = _shader_presets.get_preset(active_shader_preset_name) flavors_str = effect_name[len(preset.effect):] section = _shader_presets.get_section(active_shader_preset_name, flavors_str) # FLAGS for prop in section.props: if prop[0] == "Flags": flags = int(not material.scs_props.enable_aliasing) break # COLLECT ATTRIBUTES AND TEXTURES for item in section.sections: # if attribute is hidden in shader preset ignore it on export # this is useful for flavor hiding some attributes from original material # eg: airbrush on "truckpaint" hides R G B aux attributes which are not present # when using airbrush flavor hidden = item.get_prop_value("Hide") if hidden and hidden == "True": continue preview_only = item.get_prop_value("PreviewOnly") if preview_only and preview_only == "True": continue # ATTRIBUTES if item.type == "Attribute": # print(' Attribute:') attribute_data = _SectionData("Attribute") for rec in item.props: # print(' rec: %r' % str(rec)) if rec[0] == "Format": attribute_data.props.append((rec[0], rec[1])) elif rec[0] == "Tag": # tag_prop = rec[1].replace("[", "").replace("]", 
"") # attribute_data.props.append((rec[0], tag_prop)) attribute_data.props.append((rec[0], rec[1])) elif rec[0] == "Value": format_prop = item.get_prop("Format")[1] tag_prop = item.get_prop("Tag")[1] tag_prop = tag_prop.replace("[", "").replace("]", "") # print(' format_prop: %r' % str(format_prop)) # print(' tag_prop: %r' % str(tag_prop)) if "aux" in tag_prop: aux_props = getattr(material.scs_props, "shader_attribute_" + tag_prop) value = [] for aux_prop in aux_props: value.append(aux_prop.value) # extract list if there is only one value inside and tagged as FLOAT # otherwise it gets saved as: "Value: ( [0.0] )" instead of: "Value: ( 0.0 )" if len(value) == 1 and format_prop == "FLOAT": value = value[0] else: value = getattr(material.scs_props, "shader_attribute_" + tag_prop, "NO TAG") # print(' value: %s' % str(value)) if format_prop == 'FLOAT': attribute_data.props.append((rec[0], ["&&", (value,)])) elif format_prop == 'INT': attribute_data.props.append((rec[0], ["ii", (value,)])) else: attribute_data.props.append((rec[0], ["i", tuple(value)])) attribute_sections.append(attribute_data) attribute_cnt += 1 # TEXTURES elif item.type == "Texture": # print(' Texture:') texture_data = _SectionData("Texture") for rec in item.props: # print(' rec: %r' % str(rec)) if rec[0] == "Tag": tag_prop = rec[1].split(":")[1] tag = str("texture[" + str(texture_cnt) + "]:" + tag_prop) texture_data.props.append((rec[0], tag)) elif rec[0] == "Value": tag_prop = item.get_prop("Tag")[1].split(":")[1] # print(' tag_prop: %r' % str(tag_prop)) # create and get path to tobj tobj_rel_path = get_texture_path_from_material(material, tag_prop, os.path.dirname(filepath)) texture_data.props.append((rec[0], tobj_rel_path)) texture_sections.append(texture_data) texture_cnt += 1 material_export_data = _SectionData("Material") material_export_data.props.append(("Alias", material.name)) material_export_data.props.append(("Effect", effect_name)) material_export_data.props.append(("Flags", flags)) 
material_export_data.props.append(("AttributeCount", attribute_cnt)) material_export_data.props.append(("TextureCount", texture_cnt)) for attribute in attribute_sections: material_export_data.sections.append(attribute) for texture in texture_sections: material_export_data.sections.append(texture) elif active_shader_preset_name == "<imported>": material_attributes = material['scs_shader_attributes']['attributes'].to_dict().values() material_textures = material['scs_shader_attributes']['textures'].to_dict().values() material_export_data = _SectionData("Material") material_export_data.props.append(("Alias", material.name)) material_export_data.props.append(("Effect", effect_name)) material_export_data.props.append(("Flags", int(not material.scs_props.enable_aliasing))) material_export_data.props.append(("AttributeCount", len(material_attributes))) material_export_data.props.append(("TextureCount", len(material_textures))) for attribute_dict in material_attributes: attribute_section = _SectionData("Attribute") format_value = "" for attr_prop in sorted(attribute_dict.keys()): # get the format of current attribute (we assume that "Format" attribute is before "Value" attribute in this for loop) if attr_prop == "Format": format_value = attribute_dict[attr_prop] if attr_prop == "Value" and ("FLOAT" in format_value or "STRING" in format_value or "INT" in format_value): tag_prop = attribute_dict["Tag"].replace("[", "").replace("]", "") if "aux" in tag_prop: aux_props = getattr(material.scs_props, "shader_attribute_" + tag_prop) value = [] for aux_prop in aux_props: value.append(aux_prop.value) else: value = getattr(material.scs_props, "shader_attribute_" + tag_prop, None) if isinstance(value, float): value = [value] if value is None: attribute_section.props.append((attr_prop, ["i", tuple(attribute_dict[attr_prop])])) else: attribute_section.props.append((attr_prop, ["i", tuple(value)])) elif attr_prop == "Tag" and "aux" in attribute_dict[attr_prop]: 
attribute_section.props.append((attr_prop, "aux[" + attribute_dict[attr_prop][3:] + "]")) elif attr_prop == "FriendlyTag": continue else: attribute_section.props.append((attr_prop, attribute_dict[attr_prop])) material_export_data.sections.append(attribute_section) for texture_dict in material_textures: texture_section = _SectionData("Texture") tag_id_string = "" for tex_prop in sorted(texture_dict.keys()): if tex_prop == "Tag": tag_id_string = texture_dict[tex_prop].split(':')[1] if tex_prop == "Value" and tag_id_string != "": tobj_rel_path = get_texture_path_from_material(material, tag_id_string, os.path.dirname(filepath)) texture_section.props.append((tex_prop, tobj_rel_path)) else: texture_section.props.append((tex_prop, texture_dict[tex_prop])) material_export_data.sections.append(texture_section) else: # when user made material presets were there, but there is no preset library at export for some reason lprint("W Shader preset used on %r not found in Shader Presets Library (Did you set correct path?), " "exporting default material instead!", (material_name,)) material_name = str("_" + material_name + "_-_default_settings_") material_export_data = default_material(material_name) material_dict[material_name] = material_export_data # create materials sections for looks material_sections = fill_material_sections(material_list, material_dict) look_data = { "name": curr_look_name, "material_sections": material_sections } look_list.append(look_data) # restore look applied before export root_object.scs_props.active_scs_look = saved_active_look # set index for curret look _looks.apply_active_look(root_object) # apply look manually, as active look setter method works only when user sets index from UI # PARTS AND VARIANTS... used_parts_names = used_parts.get_as_list() if len(root_object.scs_object_variant_inventory) == 0: # If there is no Variant, add the Default one... 
part_list = fill_part_list(root_object.scs_object_part_inventory, used_parts_names, all_parts=True) variant_list.append((_VARIANT_consts.default_name, part_list), ) else: for variant in root_object.scs_object_variant_inventory: part_list = fill_part_list(variant.parts, used_parts_names) variant_list.append((variant.name, part_list), ) # DATA CREATION header_section = fill_header_section(1, file_name, scs_globals.export_write_signature) look_section = fill_look_sections(look_list) # part_sections = fill_part_section(part_list) variant_section = fill_variant_sections(variant_list) comment_header_section = fill_comment_header_section(look_list, variant_list) global_section = fill_global_section(len(look_list), len(variant_list), used_parts.count(), len(used_materials_pairs)) # DATA ASSEMBLING pit_container = [comment_header_section, header_section, global_section] for section in look_section: pit_container.append(section) for section in variant_section: pit_container.append(section) # FILE EXPORT ind = " " pit_filepath = str(filepath + ".pit" + name_suffix) result = _pix_container.write_data_to_file(pit_container, pit_filepath, ind) # print("************************************") return result
gpl-2.0
Nesiehr/osf.io
website/settings/local-travis.py
2
2435
# -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
# NOTE(review): 'inspect' appears unused in this settings module — confirm
# nothing imports it transitively before removing.
import inspect

from . import defaults
import os

# Port of the (test) database server.
DB_PORT = 54321

DEV_MODE = True
DEBUG_MODE = True  # Sets app to debug mode, turns off template caching, etc.
SECURE_MODE = not DEBUG_MODE  # Disable osf secure cookie
PROTOCOL = 'https://' if SECURE_MODE else 'http://'
DOMAIN = PROTOCOL + 'localhost:5000/'
API_DOMAIN = PROTOCOL + 'localhost:8000/'
ENABLE_INSTITUTIONS = True

PREPRINT_PROVIDER_DOMAINS = {
    'enabled': False,
    'prefix': 'http://local.',
    'suffix': ':4200/'
}
USE_EXTERNAL_EMBER = True
EXTERNAL_EMBER_APPS = {
    'preprints': {
        'url': '/preprints/',
        'server': 'http://localhost:4200',
        # NOTE(review): os.environ.get('HOME') returns None when HOME is
        # unset, making this concatenation raise TypeError — acceptable for a
        # Travis-only settings file, but confirm HOME is always set in CI.
        'path': os.environ.get('HOME') + '/preprints/'
    }
}

SEARCH_ENGINE = 'elastic'

USE_EMAIL = False
USE_CELERY = False

# Email
MAIL_SERVER = 'localhost:1025'  # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'

# Session
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
SESSION_COOKIE_SECURE = SECURE_MODE

OSF_SERVER_KEY = None
OSF_SERVER_CERT = None

##### Celery #####

## Default RabbitMQ broker
BROKER_URL = 'amqp://'

# In-memory result backend
CELERY_RESULT_BACKEND = 'cache'
CELERY_CACHE_BACKEND = 'memory'

USE_CDN_FOR_CLIENT_LIBS = False

SENTRY_DSN = None

TEST_DB_NAME = DB_NAME = 'osf_test'

VARNISH_SERVERS = ['http://localhost:8080']
# if ENABLE_VARNISH isn't set in python read it from the env var and set it
locals().setdefault('ENABLE_VARNISH', os.environ.get('ENABLE_VARNISH') == 'True')

# Dummy Keen credentials for CI runs (all keys are the same placeholder hex).
KEEN = {
    'public': {
        'project_id': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
        'master_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
        'write_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
        'read_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
    },
    'private': {
        'project_id': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
        'write_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
        'read_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
    },
}

# Placeholder node GUIDs used by discover/new-and-noteworthy features in tests.
NEW_AND_NOTEWORTHY_LINKS_NODE = 'helloo'
POPULAR_LINKS_NODE = 'hiyah'
POPULAR_LINKS_REGISTRATIONS = 'woooo'
apache-2.0
GoogleCloudPlatform/PerfKitBenchmarker
perfkitbenchmarker/linux_benchmarks/aerospike_ycsb_benchmark.py
1
5890
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs YCSB against Aerospike.

This benchmark runs two workloads against Aerospike using YCSB (the Yahoo!
Cloud Serving Benchmark).

Aerospike is described in perfkitbenchmarker.linux_packages.aerospike_server
YCSB and workloads described in perfkitbenchmarker.linux_packages.ycsb.
"""

import functools

from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import disk
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import aerospike_server
from perfkitbenchmarker.linux_packages import ycsb

FLAGS = flags.FLAGS

# TODO(user): unify overrides into --client_machine_type/server_machine_type
flags.DEFINE_string('aerospike_client_machine_type', None,
                    'Machine type to use for the aerospike client if different '
                    'from aerospike server machine type.')
flags.DEFINE_string('aerospike_server_machine_type', None,
                    'Machine type to use for the aerospike server if different '
                    'from aerospike client machine type.')

BENCHMARK_NAME = 'aerospike_ycsb'
# YAML config; the *default_* anchors are resolved by configs.LoadConfig.
BENCHMARK_CONFIG = """
aerospike_ycsb:
  description: >
      Run YCSB against an Aerospike
      installation. Specify the number of YCSB VMs with
      --ycsb_client_vms.
  vm_groups:
    workers:
      vm_spec: *default_single_core
      disk_spec: *default_500_gb
      vm_count: null
      disk_count: 0
    clients:
      vm_spec: *default_dual_core
"""


def GetConfig(user_config):
  """Loads the benchmark config and applies flag-based overrides.

  Args:
    user_config: dict. User-supplied config overrides.

  Returns:
    dict. The merged benchmark configuration.
  """
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  if FLAGS.aerospike_storage_type == aerospike_server.DISK:
    if FLAGS.data_disk_type == disk.LOCAL:
      # Didn't know max number of local disks, decide later.
      config['vm_groups']['workers']['disk_count'] = (
          config['vm_groups']['workers']['disk_count'] or None)
    else:
      config['vm_groups']['workers']['disk_count'] = (
          config['vm_groups']['workers']['disk_count'] or 1)

  # Per-cloud machine type overrides for server and client groups.
  if FLAGS.aerospike_server_machine_type:
    vm_spec = config['vm_groups']['workers']['vm_spec']
    for cloud in vm_spec:
      vm_spec[cloud]['machine_type'] = FLAGS.aerospike_server_machine_type
  if FLAGS.aerospike_client_machine_type:
    vm_spec = config['vm_groups']['clients']['vm_spec']
    for cloud in vm_spec:
      vm_spec[cloud]['machine_type'] = FLAGS.aerospike_client_machine_type

  if FLAGS['aerospike_vms'].present:
    config['vm_groups']['workers']['vm_count'] = FLAGS.aerospike_vms

  if FLAGS['ycsb_client_vms'].present:
    config['vm_groups']['clients']['vm_count'] = FLAGS.ycsb_client_vms

  return config


def CheckPrerequisites(benchmark_config):
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  ycsb.CheckPrerequisites()


def Prepare(benchmark_spec):
  """Prepare the virtual machines to run YCSB against Aerospike.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  loaders = benchmark_spec.vm_groups['clients']
  assert loaders, benchmark_spec.vm_groups

  # Aerospike cluster
  aerospike_vms = benchmark_spec.vm_groups['workers']
  assert aerospike_vms, 'No aerospike VMs: {0}'.format(
      benchmark_spec.vm_groups)

  # Every server node is seeded with the internal IPs of the whole cluster.
  seed_ips = [vm.internal_ip for vm in aerospike_vms]
  aerospike_install_fns = [functools.partial(aerospike_server.ConfigureAndStart,
                                             vm, seed_node_ips=seed_ips)
                           for vm in aerospike_vms]
  ycsb_install_fns = [functools.partial(vm.Install, 'ycsb')
                      for vm in loaders]

  # Server configuration and client installs are independent; run in parallel.
  vm_util.RunThreaded(lambda f: f(), aerospike_install_fns + ycsb_install_fns)
  # YCSB clients target the first server node only.
  benchmark_spec.executor = ycsb.YCSBExecutor(
      'aerospike',
      **{'as.host': aerospike_vms[0].internal_ip,
         'as.namespace': 'test'})


def Run(benchmark_spec):
  """Spawn YCSB and gather the results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample instances.
  """
  loaders = benchmark_spec.vm_groups['clients']
  aerospike_vms = benchmark_spec.vm_groups['workers']

  metadata = {
      'ycsb_client_vms': FLAGS.ycsb_client_vms,
      'num_vms': len(aerospike_vms),
      'Storage Type': FLAGS.aerospike_storage_type,
      # NOTE(review): value is 80% of total memory in KiB despite the generic
      # name 'memory_size' — confirm units against consumers of this metadata.
      'memory_size': int(aerospike_vms[0].total_memory_kb * 0.8),
      'transaction_threads_per_queue':
          FLAGS.aerospike_transaction_threads_per_queue,
      'replication_factor': FLAGS.aerospike_replication_factor,
  }

  samples = list(benchmark_spec.executor.LoadAndRun(loaders))
  # Attach cluster-level metadata to every emitted sample.
  for sample in samples:
    sample.metadata.update(metadata)

  return samples


def Cleanup(benchmark_spec):
  """Cleanup.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  def StopAerospike(server):
    # Stop the daemon, then remove the source/install tree.
    server.RemoteCommand('cd %s && nohup sudo make stop' %
                         aerospike_server.AEROSPIKE_DIR)
    server.RemoteCommand('sudo rm -rf aerospike*')

  aerospike_vms = benchmark_spec.vm_groups['workers']
  vm_util.RunThreaded(StopAerospike, aerospike_vms)
apache-2.0
smathot/PyGaze
examples/slideshow/constants.py
5
2421
# Configuration constants for the PyGaze slideshow example experiment.
import os.path


# FILES AND FOLDERS
# the DIR constant contains the full path to the current directory, which will
# be used to determine where to store and retrieve data files
DIR = os.path.dirname(__file__)
# the DATADIR is the path to the directory where data files will be stored
# NOTE(review): this directory is presumably expected to exist already —
# nothing here creates it; confirm before first run.
DATADIR = os.path.join(DIR, 'data')
# the IMGDIR is the path to the directory that contains the image files
IMGDIR = os.path.join(DIR, 'imgs')
# the INSTFILE is the path to the file that contains the instructions
INSTFILE = os.path.join(DIR, 'instructions.txt')
# ask for the participant name, to use as the name for the logfile...
# NOTE(review): under Python 2, input() evaluates the typed text as an
# expression; raw_input() would be the safe equivalent there — confirm which
# Python version this example targets.
LOGFILENAME = input("Participant name: ")
# ...then use the LOGFILENAME to create the path to the logfile
LOGFILE = os.path.join(DATADIR, LOGFILENAME)


# DISPLAY
# for the DISPTYPE, you can choose between 'pygame' and 'psychopy'; go for
# 'psychopy' if you need millisecond accurate display refresh timing, and go
# for 'pygame' if you experience trouble using PsychoPy
DISPTYPE = 'psychopy'
# the DISPSIZE is the monitor resolution, e.g. (1024,768)
DISPSIZE = (1024,768)
# the SCREENSIZE is the physical screen size in centimeters, e.g. (39.9,29.9)
SCREENSIZE = (39.9,29.9)
# the SCREENDIST is the distance in centimeters between the participant and the
# display
SCREENDIST = 60.0
# set FULLSCREEN to True for fullscreen displaying, or to False for a windowed
# display
FULLSCREEN = True
# BGC is for BackGroundColour, FGC for ForeGroundColour; both are RGB guns,
# which contain three values between 0 and 255, representing the intensity of
# Red, Green, and Blue respectively, e.g. (0,0,0) for black, (255,255,255) for
# white, or (255,0,0) for the brightest red
BGC = (0,0,0)
FGC = (255,255,255)
# the TEXTSIZE determines the size of the text in the experiment
TEXTSIZE = 24


# TIMING
# the TRIALTIME is the time each image is visible
TRIALTIME = 10000 # ms
# the intertrial interval (ITI) is the minimal amount of time between the
# presentation of two consecutive images
ITI = 2000 # ms


# EYE TRACKING
# the TRACKERTYPE indicates the brand of eye tracker, and should be one of the
# following: 'eyelink', 'smi', 'tobii', 'dumbdummy', 'dummy'
TRACKERTYPE = 'eyelink'
# the EYELINKCALBEEP constant determines whether a beep should be sounded on
# the appearance of every calibration target (EyeLink only)
EYELINKCALBEEP = True
# set DUMMYMODE to True if no tracker is attached
DUMMYMODE = False
gpl-3.0
liuwenf/moose
gui/vtk/ExodusRenderer.py
8
5113
import os, sys, getopt

try:
    from PyQt4 import QtCore, QtGui
    QtCore.Signal = QtCore.pyqtSignal
    QtCore.Slot = QtCore.pyqtSlot
except ImportError:
    try:
        from PySide import QtCore, QtGui
        QtCore.QString = str
    except ImportError:
        raise ImportError("Cannot load either PyQt or PySide")

import vtk
from vtk.util.colors import peacock, tomato, red, white, black

from ExodusActor import ExodusActor
from ClippedActor import ClippedActor

from MeshRenderer import MeshRenderer

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s


class ExodusMap:
    # These are the blocks from the multiblockdataset that correspond to each item
    element_vtk_block = 0
    sideset_vtk_block = 4
    nodeset_vtk_block = 7


class ExodusRenderer(MeshRenderer):
    """Renders an ExodusII mesh file, building plain and clipped actors for
    every element block, sideset, and nodeset in the file."""

    def __init__(self, render_widget, mesh_item_data):
        """
        render_widget: the widget hosting the VTK renderer (passed through to
            MeshRenderer, which provides self.renderer, self.plane and the
            *_actors containers populated below).
        mesh_item_data: dict-like; 'file' holds the path to the ExodusII file.
        """
        MeshRenderer.__init__(self, render_widget, mesh_item_data)
        self.file_name = mesh_item_data['file']
        self.buildActors(self.file_name)

    def buildActors(self, file_name):
        """Read the ExodusII file at file_name and populate the actor maps.

        Builds id <-> name lookup tables for sidesets, nodesets and element
        blocks, then creates an ExodusActor (and a ClippedActor wrapping it)
        for each one.
        """
        reader = vtk.vtkExodusIIReader()
        # BUG FIX: previously read self.file_name, silently ignoring the
        # file_name parameter. Use the parameter so the method honors its
        # signature (the only current caller passes self.file_name, so
        # behavior is unchanged there).
        reader.SetFileName(file_name)
        # Enable every array type up front so all data is available.
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.EDGE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.SIDE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODE_SET, 1)
        reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL_TEMPORAL, 1)
        reader.UpdateInformation()

        reader.SetObjectStatus(vtk.vtkExodusIIReader.NODE_SET, 0, 1)

        num_sidesets = reader.GetNumberOfSideSetArrays()
        num_nodesets = reader.GetNumberOfNodeSetArrays()
        num_blocks = reader.GetNumberOfElementBlockArrays()

        # Sidesets: record ids, exodus block indices, and (if named) the
        # id <-> name mapping. Reader object names look like "Name <extra>",
        # with "Unnamed" appearing for anonymous sets.
        self.sidesets = []
        self.sideset_id_to_exodus_block = {}
        self.sideset_id_to_name = {}
        self.name_to_sideset_id = {}
        for i in xrange(num_sidesets):
            sideset_id = reader.GetObjectId(vtk.vtkExodusIIReader.SIDE_SET,i)
            self.sidesets.append(sideset_id)
            self.sideset_id_to_exodus_block[sideset_id] = i
            reader.SetObjectStatus(vtk.vtkExodusIIReader.SIDE_SET, i, 1)
            name = reader.GetObjectName(vtk.vtkExodusIIReader.SIDE_SET,i).split(' ')
            if 'Unnamed' not in name:
                self.sideset_id_to_name[sideset_id] = name[0]
                self.name_to_sideset_id[name[0]] = sideset_id

        # Nodesets: same bookkeeping as sidesets.
        self.nodesets = []
        self.nodeset_id_to_exodus_block = {}
        self.nodeset_id_to_name = {}
        self.name_to_nodeset_id = {}
        for i in xrange(num_nodesets):
            nodeset_id = reader.GetObjectId(vtk.vtkExodusIIReader.NODE_SET,i)
            self.nodesets.append(nodeset_id)
            self.nodeset_id_to_exodus_block[nodeset_id] = i
            reader.SetObjectStatus(vtk.vtkExodusIIReader.NODE_SET, i, 1)
            name = reader.GetObjectName(vtk.vtkExodusIIReader.NODE_SET,i).split(' ')
            if 'Unnamed' not in name:
                self.nodeset_id_to_name[nodeset_id] = name[0]
                self.name_to_nodeset_id[name[0]] = nodeset_id

        # Element blocks: same bookkeeping (blocks are already enabled).
        self.blocks = []
        self.block_id_to_exodus_block = {}
        self.block_id_to_name = {}
        self.name_to_block_id = {}
        for i in xrange(num_blocks):
            block_id = reader.GetObjectId(vtk.vtkExodusIIReader.ELEM_BLOCK,i)
            self.blocks.append(block_id)
            self.block_id_to_exodus_block[block_id] = i
            name = reader.GetObjectName(vtk.vtkExodusIIReader.ELEM_BLOCK,i).split(' ')
            if 'Unnamed' not in name:
                self.block_id_to_name[block_id] = name[0]
                self.name_to_block_id[name[0]] = block_id

        reader.SetTimeStep(1)
        reader.Update()
        self.data = reader.GetOutput()

        # Build a regular and a plane-clipped actor for every sideset,
        # nodeset, and element block; actor maps are keyed by the string id.
        for i in xrange(num_sidesets):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.sideset_vtk_block, i)
            self.sideset_actors[str(self.sidesets[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_sideset_actors[str(self.sidesets[i])] = clipped_actor
            self.all_actors.append(clipped_actor)

        for i in xrange(num_nodesets):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.nodeset_vtk_block, i)
            self.nodeset_actors[str(self.nodesets[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_nodeset_actors[str(self.nodesets[i])] = clipped_actor
            self.all_actors.append(clipped_actor)

        for i in xrange(num_blocks):
            actor = ExodusActor(self.renderer, self.data, ExodusMap.element_vtk_block, i)
            self.block_actors[str(self.blocks[i])] = actor
            self.all_actors.append(actor)
            clipped_actor = ClippedActor(actor, self.plane)
            self.clipped_block_actors[str(self.blocks[i])] = clipped_actor
            self.all_actors.append(clipped_actor)
lgpl-2.1
saradickinson/ietf_hackathon_unbound
libunbound/python/examples/dnssec-valid.py
5
2128
#!/usr/bin/python
'''
 dnssec-valid.py: DNSSEC validation

 Authors: Zdenek Vasicek (vasicek AT fit.vutbr.cz)
          Marek Vavrusa (xvavru00 AT stud.fit.vutbr.cz)

 Copyright (c) 2008. All rights reserved.
 This software is open source.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:

 Redistributions of source code must retain the above copyright notice,
 this list of conditions and the following disclaimer.

 Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation
 and/or other materials provided with the distribution.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS
 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import print_function
import os
from unbound import ub_ctx,RR_TYPE_A,RR_CLASS_IN

# Create a resolution context that follows the system resolver configuration.
resolver = ub_ctx()
resolver.resolvconf("/etc/resolv.conf")

# Send libunbound's diagnostic output to a log file at verbosity level 2.
debug_log = open("dnssec-valid.txt","wb")
resolver.debugout(debug_log)
resolver.debuglevel(2)

# Load DNSSEC trust anchors when a local key file is present.
if os.path.isfile("keys"):
    resolver.add_ta_file("keys") # read public keys for DNSSEC verification

# Resolve an A record and report its DNSSEC validation state.
status, result = resolver.resolve("www.nic.cz", RR_TYPE_A, RR_CLASS_IN)
if status == 0 and result.havedata:
    print("Result:", sorted(result.data.address_list))
    if result.secure:
        verdict = "secure"
    elif result.bogus:
        verdict = "bogus"
    else:
        verdict = "insecure"
    print("Result is " + verdict)
bsd-3-clause
GNOME/orca
test/keystrokes/helpcontent/struct_nav_paragraph.py
3
10816
#!/usr/bin/python

"""Test of learn mode."""

from macaroon.playback import *
import utils

# Paragraph texts that recur (twice in braille, once in speech) in the
# expected output below; defined once so each literal exists in one place.
_INTRO = ("Orca is a free, open source, flexible, and extensible screen "
          "reader that provides access to the graphical desktop via speech "
          "and refreshable braille.")
_ATSPI = ("Orca works with applications and toolkits that support the "
          "Assistive Technology Service Provider Interface (AT-SPI), which "
          "is the primary assistive technology infrastructure for Linux and "
          "Solaris. Applications and toolkits supporting the AT-SPI include "
          "the GNOME Gtk+ toolkit, the Java platform's Swing toolkit, "
          "LibreOffice, Gecko, and WebKitGtk. AT-SPI support for the KDE Qt "
          "toolkit is being pursued.")
_LAUNCH = "To launch Orca:"
_CONFIGURE = ("The method for configuring Orca to be launched automatically "
              "as your preferred screen reader will depend upon which "
              "desktop environment you use. For instance, in GNOME 3.x this "
              "option can be found in the Universal Access Control Center "
              "panel on the Seeing page.")
_TOGGLE = "To toggle Orca on and off in GNOME, press Super+Alt+S."
_TYPE = ("Type orca, along with any optional parameters, in a terminal "
         "window or within the Run dialog and then press Return.")

# The matching "VISIBLE" braille-viewport lines for each paragraph.
_V_INTRO = " VISIBLE: 'Orca is a free, open source, fle', cursor=1"
_V_ATSPI = " VISIBLE: 'Orca works with applications and', cursor=1"
_V_LAUNCH = " VISIBLE: ' To launch Orca:', cursor=2"
_V_CONFIGURE = " VISIBLE: 'The method for configuring Orca ', cursor=1"
_V_TOGGLE = " VISIBLE: 'To toggle Orca on and off in GNO', cursor=1"
_V_TYPE = " VISIBLE: 'Type orca, along with any option', cursor=1"


def _paragraph_assertion(name, text, visible):
    """Build the standard braille+speech assertion for one paragraph.

    Every paragraph navigation presents the braille line twice (once for
    the move, once for the focus update) followed by one speech event.
    """
    braille = "BRAILLE LINE: ' %s'" % text
    return utils.AssertPresentationAction(
        name,
        [braille, visible, braille, visible,
         "SPEECH OUTPUT: '%s'" % text])


seq = MacroSequence()

# Briefly toggle learn mode (Insert+h), then open the help content with F1.
seq.append(KeyPressAction(0, None, "KP_Insert"))
seq.append(TypeAction("h"))
seq.append(KeyReleaseAction(0, None, "KP_Insert"))
seq.append(KeyComboAction("F1"))
seq.append(PauseAction(2000))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("k"))
seq.append(utils.AssertPresentationAction(
    "1. k for next link",
    ["BRAILLE LINE: ' Welcome to Orca'",
     " VISIBLE: ' Welcome to Orca', cursor=2",
     "BRAILLE LINE: ' Welcome to Orca'",
     " VISIBLE: ' Welcome to Orca', cursor=2",
     "SPEECH OUTPUT: 'Welcome to Orca",
     "Introducing the Orca screen reader",
     " link'"]))

seq.append(KeyComboAction("Return"))
seq.append(PauseAction(2000))

# Walk forward through the document paragraph by paragraph.
seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("p"))
seq.append(_paragraph_assertion("2. p for next paragraph", _INTRO, _V_INTRO))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("p"))
seq.append(_paragraph_assertion("3. p for next paragraph", _ATSPI, _V_ATSPI))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("p"))
seq.append(_paragraph_assertion("4. p for next paragraph", _LAUNCH, _V_LAUNCH))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("p"))
seq.append(_paragraph_assertion(
    "5. p for next paragraph", _CONFIGURE, _V_CONFIGURE))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("p"))
seq.append(_paragraph_assertion("6. p for next paragraph", _TOGGLE, _V_TOGGLE))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("p"))
seq.append(_paragraph_assertion("7. p for next paragraph", _TYPE, _V_TYPE))

# Walk back up with Shift+p; expectations mirror steps 6..2 in reverse.
seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("<Shift>p"))
seq.append(_paragraph_assertion(
    "8. shift+p for previous paragraph", _TOGGLE, _V_TOGGLE))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("<Shift>p"))
seq.append(_paragraph_assertion(
    "9. shift+p for previous paragraph", _CONFIGURE, _V_CONFIGURE))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("<Shift>p"))
seq.append(_paragraph_assertion(
    "10. shift+p for previous paragraph", _LAUNCH, _V_LAUNCH))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("<Shift>p"))
seq.append(_paragraph_assertion(
    "11. shift+p for previous paragraph", _ATSPI, _V_ATSPI))

seq.append(utils.StartRecordingAction())
seq.append(KeyComboAction("<Shift>p"))
seq.append(_paragraph_assertion(
    "12. shift+p for previous paragraph", _INTRO, _V_INTRO))

seq.append(KeyComboAction("<Alt>F4"))
seq.append(utils.AssertionSummaryAction())

seq.start()
lgpl-2.1
scipy/scipy
scipy/special/utils/convert.py
12
3477
# This script is used to parse BOOST special function test data into something # we can easily import in numpy. import re import os # Where to put the data (directory will be created) DATA_DIR = 'scipy/special/tests/data/boost' # Where to pull out boost data BOOST_SRC = "boostmath/test" CXX_COMMENT = re.compile(r'^\s+//') DATA_REGEX = re.compile(r'^\s*/*\{*\s*SC_') ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?') HEADER_REGEX = re.compile( r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)') IGNORE_PATTERNS = [ # Makes use of ldexp and casts "hypergeometric_1F1_big_double_limited.ipp", "hypergeometric_1F1_big_unsolved.ipp", # Makes use of numeric_limits and ternary operator "beta_small_data.ipp", # Doesn't contain any data "almost_equal.ipp", # Derivatives functions don't exist "bessel_y01_prime_data.ipp", "bessel_yn_prime_data.ipp", "sph_bessel_prime_data.ipp", "sph_neumann_prime_data.ipp", # Data files not needed by scipy special tests. "ibeta_derivative_", r"ellint_r[cdfjg]_[^d]", r"ellint_d2?_", "jacobi_", "heuman_lambda_", "hypergeometric_", "nct_", r".*gammap1m1_", "trig_", "powm1_data.ipp", ] def _raw_data(line): items = line.split(',') l = [] for item in items: m = ITEM_REGEX.search(item) if m: q = m.group(0) l.append(q) return l def parse_ipp_file(filename): print(filename) with open(filename, 'r') as a: lines = a.readlines() data = {} i = 0 while (i < len(lines)): line = lines[i] m = HEADER_REGEX.search(line) if m: d = int(m.group(1)) n = int(m.group(2)) print(f"d = {d}, n = {n}") cdata = [] i += 1 line = lines[i] # Skip comments while CXX_COMMENT.match(line): i += 1 line = lines[i] while DATA_REGEX.match(line): cdata.append(_raw_data(line)) i += 1 line = lines[i] # Skip comments while CXX_COMMENT.match(line): i += 1 line = lines[i] if not len(cdata) == n: raise ValueError(f"parsed data: {len(cdata)}, expected {n}") data[m.group(3)] = cdata else: i += 1 return data def dump_dataset(filename, data): fid = open(filename, 'w') 
try: for line in data: fid.write("%s\n" % " ".join(line)) finally: fid.close() def dump_datasets(filename): base, ext = os.path.splitext(os.path.basename(filename)) base += '_%s' % ext[1:] datadir = os.path.join(DATA_DIR, base) os.makedirs(datadir) datasets = parse_ipp_file(filename) for k, d in datasets.items(): print(k, len(d)) dfilename = os.path.join(datadir, k) + '.txt' dump_dataset(dfilename, d) if __name__ == '__main__': for filename in sorted(os.listdir(BOOST_SRC)): # Note: Misses data in hpp files (e.x. powm1_sqrtp1m1_test.hpp) if filename.endswith(".ipp"): if any(re.match(pattern, filename) for pattern in IGNORE_PATTERNS): continue path = os.path.join(BOOST_SRC, filename) print(f"================= {path} ===============") dump_datasets(path)
bsd-3-clause
bazizi/robotframework-selenium2library
test/resources/testserver/testserver.py
61
3546
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/336012 import SimpleHTTPServer import BaseHTTPServer import httplib import os class StoppableHttpRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """http request handler with QUIT stopping the server""" def do_QUIT(self): """send 200 OK response, and set server.stop to True""" self.send_response(200) self.end_headers() self.server.stop = True def do_POST(self): # We could also process paremeters here using something like below. # length = self.headers['Content-Length'] # print self.rfile.read(int(length)) self.do_GET() def send_head(self): # This is ripped directly from SimpleHTTPRequestHandler, # only the cookie part is added. """Common code for GET and HEAD commands. This sends the response code and MIME headers. Return value is either a file object (which has to be copied to the outputfile by the caller unless the command was HEAD, and must be closed by the caller under all circumstances), or None, in which case the caller has nothing further to do. 
""" path = self.translate_path(self.path) f = None if os.path.isdir(path): if not self.path.endswith('/'): # redirect browser - doing basically what apache does self.send_response(301) self.send_header("Location", self.path + "/") self.end_headers() return None for index in "index.html", "index.htm": index = os.path.join(path, index) if os.path.exists(index): path = index break else: return self.list_directory(path) ctype = self.guess_type(path) if ctype.startswith('text/'): mode = 'r' else: mode = 'rb' try: f = open(path, mode) except IOError: self.send_error(404, "File not found") return None self.send_response(200) self.send_header("Content-type", ctype) fs = os.fstat(f.fileno()) self.send_header("Content-Length", str(fs[6])) self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) self.send_header("Set-Cookie", "test=seleniumlibrary;") self.send_header("Set-Cookie", "another=value;") self.end_headers() return f class StoppableHttpServer(BaseHTTPServer.HTTPServer): """http server that reacts to self.stop flag""" def serve_forever(self): """Handle one request at a time until stopped.""" self.stop = False while not self.stop: self.handle_request() def stop_server(port=7000): """send QUIT request to http server running on localhost:<port>""" conn = httplib.HTTPConnection("localhost:%d" % port) conn.request("QUIT", "/") conn.getresponse() def start_server(port=7000): import os os.chdir(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), '..')) server = StoppableHttpServer(('', port), StoppableHttpRequestHandler) server.serve_forever() if __name__ == '__main__': import sys if len(sys.argv) != 2 or sys.argv[1] not in [ 'start', 'stop' ]: print 'usage: %s start|stop' % sys.argv[0] sys.exit(1) if sys.argv[1] == 'start': start_server() else: stop_server()
apache-2.0
malmiron/incubator-airflow
tests/contrib/hooks/test_sftp_hook.py
1
6743
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import print_function import mock import unittest import shutil import os import pysftp from airflow import configuration, models from airflow.contrib.hooks.sftp_hook import SFTPHook TMP_PATH = '/tmp' TMP_DIR_FOR_TESTS = 'tests_sftp_hook_dir' TMP_FILE_FOR_TESTS = 'test_file.txt' class SFTPHookTest(unittest.TestCase): def setUp(self): configuration.load_test_config() self.hook = SFTPHook() os.makedirs(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS)) with open(os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS), 'a') as f: f.write('Test file') def test_get_conn(self): output = self.hook.get_conn() self.assertEqual(type(output), pysftp.Connection) def test_close_conn(self): self.hook.conn = self.hook.get_conn() self.assertTrue(self.hook.conn is not None) self.hook.close_conn() self.assertTrue(self.hook.conn is None) def test_describe_directory(self): output = self.hook.describe_directory(TMP_PATH) self.assertTrue(TMP_DIR_FOR_TESTS in output) def test_list_directory(self): output = self.hook.list_directory( path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS)) self.assertEqual(output, []) def test_create_and_delete_directory(self): new_dir_name = 'new_dir' 
self.hook.create_directory(os.path.join( TMP_PATH, TMP_DIR_FOR_TESTS, new_dir_name)) output = self.hook.describe_directory( os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS)) self.assertTrue(new_dir_name in output) self.hook.delete_directory(os.path.join( TMP_PATH, TMP_DIR_FOR_TESTS, new_dir_name)) output = self.hook.describe_directory( os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS)) self.assertTrue(new_dir_name not in output) def test_store_retrieve_and_delete_file(self): self.hook.store_file( remote_full_path=os.path.join( TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS), local_full_path=os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS) ) output = self.hook.list_directory( path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS)) self.assertEqual(output, [TMP_FILE_FOR_TESTS]) retrieved_file_name = 'retrieved.txt' self.hook.retrieve_file( remote_full_path=os.path.join( TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS), local_full_path=os.path.join(TMP_PATH, retrieved_file_name) ) self.assertTrue(retrieved_file_name in os.listdir(TMP_PATH)) os.remove(os.path.join(TMP_PATH, retrieved_file_name)) self.hook.delete_file(path=os.path.join( TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS)) output = self.hook.list_directory( path=os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS)) self.assertEqual(output, []) def test_get_mod_time(self): self.hook.store_file( remote_full_path=os.path.join( TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS), local_full_path=os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS) ) output = self.hook.get_mod_time(path=os.path.join( TMP_PATH, TMP_DIR_FOR_TESTS, TMP_FILE_FOR_TESTS)) self.assertEqual(len(output), 14) @mock.patch('airflow.contrib.hooks.sftp_hook.SFTPHook.get_connection') def test_no_host_key_check_default(self, get_connection): connection = models.Connection(login='login', host='host') get_connection.return_value = connection hook = SFTPHook() self.assertEqual(hook.no_host_key_check, False) @mock.patch('airflow.contrib.hooks.sftp_hook.SFTPHook.get_connection') def 
test_no_host_key_check_enabled(self, get_connection): connection = models.Connection( login='login', host='host', extra='{"no_host_key_check": true}') get_connection.return_value = connection hook = SFTPHook() self.assertEqual(hook.no_host_key_check, True) @mock.patch('airflow.contrib.hooks.sftp_hook.SFTPHook.get_connection') def test_no_host_key_check_disabled(self, get_connection): connection = models.Connection( login='login', host='host', extra='{"no_host_key_check": false}') get_connection.return_value = connection hook = SFTPHook() self.assertEqual(hook.no_host_key_check, False) @mock.patch('airflow.contrib.hooks.sftp_hook.SFTPHook.get_connection') def test_no_host_key_check_disabled_for_all_but_true(self, get_connection): connection = models.Connection( login='login', host='host', extra='{"no_host_key_check": "foo"}') get_connection.return_value = connection hook = SFTPHook() self.assertEqual(hook.no_host_key_check, False) @mock.patch('airflow.contrib.hooks.sftp_hook.SFTPHook.get_connection') def test_no_host_key_check_ignore(self, get_connection): connection = models.Connection( login='login', host='host', extra='{"ignore_hostkey_verification": true}') get_connection.return_value = connection hook = SFTPHook() self.assertEqual(hook.no_host_key_check, True) @mock.patch('airflow.contrib.hooks.sftp_hook.SFTPHook.get_connection') def test_no_host_key_check_no_ignore(self, get_connection): connection = models.Connection( login='login', host='host', extra='{"ignore_hostkey_verification": false}') get_connection.return_value = connection hook = SFTPHook() self.assertEqual(hook.no_host_key_check, False) def tearDown(self): shutil.rmtree(os.path.join(TMP_PATH, TMP_DIR_FOR_TESTS)) os.remove(os.path.join(TMP_PATH, TMP_FILE_FOR_TESTS)) if __name__ == '__main__': unittest.main()
apache-2.0
beeftornado/sentry
src/sentry/migrations/0100_file_type_on_event_attachment.py
1
1513
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-09-15 08:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Mark risky migrations (large data migrations, columns added to highly
    # active tables, ...) as dangerous so ops runs them manually and
    # monitors them.  Adding a nullable column here is considered safe.
    is_dangerous = False

    # Whether to wrap the migration in a transaction.  Transactions are
    # preferred, but must be disabled for `CREATE INDEX CONCURRENTLY`,
    # which is typically wanted when indexing an existing table.
    atomic = False

    dependencies = [
        ("sentry", "0099_fix_project_platforms"),
    ]

    operations = [
        migrations.AddField(
            model_name="eventattachment",
            name="type",
            field=models.CharField(db_index=True, max_length=64, null=True),
        ),
    ]
bsd-3-clause
asadziach/tensorflow
tensorflow/contrib/tensor_forest/client/eval_metrics_test.py
69
3863
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.client.eval_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest


class EvalMetricsTest(test_util.TensorFlowTestCase):

  def _run_streaming_metric(self, metric_op, update_op):
    """Initialize streaming-metric state, apply one update, return the value.

    All metrics under test are streaming functions: their internal
    accumulator variables must be initialized and update_op must run
    before metric_op yields a meaningful result.
    """
    with self.test_session():
      variables.local_variables_initializer().run()
      update_op.eval()
      return metric_op.eval()

  def testTop2(self):
    top_2_fn = eval_metrics._top_k_generator(2)
    probs = constant_op.constant([[0.1, 0.2, 0.3],
                                  [0.4, 0.7, 0.5],
                                  [0.9, 0.8, 0.2],
                                  [0.6, 0.4, 0.8]])
    labels = constant_op.constant([[0], [2], [1], [1]])
    in_top_2_op, update_op = top_2_fn(probs, labels)
    self.assertNear(
        0.5, self._run_streaming_metric(in_top_2_op, update_op), 0.0001)

  def testTop3(self):
    top_3_fn = eval_metrics._top_k_generator(3)
    probs = constant_op.constant([[0.1, 0.2, 0.6, 0.3, 0.5, 0.5],
                                  [0.1, 0.4, 0.7, 0.3, 0.5, 0.2],
                                  [0.1, 0.3, 0.8, 0.7, 0.4, 0.9],
                                  [0.9, 0.8, 0.1, 0.8, 0.2, 0.7],
                                  [0.3, 0.6, 0.9, 0.4, 0.8, 0.6]])
    labels = constant_op.constant([3, 0, 2, 5, 1])
    in_top_3_op, update_op = top_3_fn(probs, labels)
    self.assertNear(
        0.4, self._run_streaming_metric(in_top_3_op, update_op), 0.0001)

  def testAccuracy(self):
    predictions = constant_op.constant([0, 1, 3, 6, 5, 2, 7, 6, 4, 9])
    labels = constant_op.constant([0, 1, 4, 6, 5, 1, 7, 5, 4, 8])
    accuracy_op, update_op = eval_metrics._accuracy(predictions, labels)
    self.assertNear(
        0.6, self._run_streaming_metric(accuracy_op, update_op), 0.0001)

  def testR2(self):
    probs = constant_op.constant(
        [1.2, 3.9, 2.1, 0.9, 2.2, 0.1, 6.0, 4.0, 0.9])
    labels = constant_op.constant(
        [1.0, 4.3, 2.6, 0.5, 1.1, 0.7, 5.1, 3.4, 1.8])
    r2_op, update_op = eval_metrics._r2(probs, labels)
    self.assertNear(
        -19.7729, self._run_streaming_metric(r2_op, update_op), 0.0001)


if __name__ == '__main__':
  googletest.main()
apache-2.0
kichkasch/ioids
g4ds/communicationmanager_db.py
1
6959
""" The database backend functions for the communication module. Grid for Digital Security (G4DS) @author: Michael Pilgermann @contact: mailto:mpilgerm@glam.ac.uk @license: GPL (General Public License) """ import config import pg import communicationmanager class CommDB: """ Handles all requests form the communication module for backend connectivity with database. @ivar _connection: Reference to database connection @type _connection: PSQL-DB-Connection """ def __init__(self): """ Initialises the database manager for communication. The database connection is established using the appropriate settings in the configuations file / module config.py. Please run L{shutdown} before you shutdown the application in order to clear the database connections. """ dbname = config.g4ds_comm_dbname host = config.g4ds_comm_host port = config.g4ds_comm_port user = config.g4ds_comm_username password = config.g4ds_comm_password options = None tty = None self._connection = pg.connect(dbname, host, port, options, tty, user, password) def shutdown(self): """ Closes open database connections. """ self._connection.close() def getProtocols(self): """ Provides a list of all protocols in the database. @return: List of protocol instances @rtype: C{List} of L{communicationmanager.Protocol} """ result = self._connection.query('select id, name from ' + config.g4ds_comm_table_protocols) list = result.getresult() returnList = [] for item in list: id = item[0] name = item[1] c = communicationmanager.Protocol(id, name, 1) returnList.append(c) return returnList def addProtocol(self, protocol): """ Adds one protocol to the protocol table in the database. @param protocol: Protocol to add to the repository @type protocol: L{communicationmanager.Protocol} """ self._connection.query("""insert into """ + config.g4ds_comm_table_protocols + """(id, name) values ('""" + protocol.getId() + """', '""" + protocol.getName() + """')""") def getEndpoints(self): """ Provides a list of all endpoints in the database. 
@return: List of endpoint instances @rtype: C{List} of L{communicationmanager.Endpoint} """ result = self._connection.query('select id, memberid, communityid, protocolid, address, credentialid from ' + config.g4ds_comm_table_endpoints) list = result.getresult() returnList = [] for item in list: id = item[0] memberid = item[1] communityid = item[2] protocolid = item[3] address = item[4] credentialid = item[5] e = communicationmanager.Endpoint(id, memberid, communityid, protocolid, address, credentialid, 1) returnList.append(e) return returnList def addEndpoint(self, endpoint): """ Adds one endpoint to the endpoint table in the database. @param endpoint: Endpoint to add to the repository @type endpoint: L{communicationmanager.Endpoint} """ self._connection.query("""insert into """ + config.g4ds_comm_table_endpoints + """(id, memberid, communityid, protocolid, address, credentialid) values ('""" + endpoint.getId() + """', '""" + endpoint.getMemberId() + """', '""" + endpoint.getCommunityId() + """', '""" + endpoint.getProtocolId() + """', '""" + endpoint.getAddress() + """', '""" + endpoint.getCredentialId() + """')""") def findEndpoint(self, memberid, communityid, protocolname, algorithmname): """ There is a problem to find the correct endpoint instance for incoming message. This structure solves this problem. All the known information is put togehter, and voila, we have an endpoint id. @todo: The table names are hard coded. They must be replaced with the ones given in the config file. 
""" # this only works if all tables reside on the same host and database if config.g4ds_cudb_host != config.g4ds_comm_host or config.g4ds_comm_host != config.g4ds_sec_host or config.g4ds_cudb_host != config.g4ds_sec_host: return None if config.g4ds_cudb_dbname != config.g4ds_comm_dbname or config.g4ds_comm_dbname != config.g4ds_sec_dbname or config.g4ds_cudb_dbname != config.g4ds_sec_dbname: return None query = """select """ + config.g4ds_comm_table_endpoints + """.id from """ + config.g4ds_comm_table_endpoints + \ """, """ + config.g4ds_cudb_table_members + """, """ + config.g4ds_cudb_table_communities + """, """ + \ config.g4ds_comm_table_protocols + """, """ + config.g4ds_sec_table_credentials + """, """ + \ """algorithms where """ + config.g4ds_comm_table_endpoints + """.memberid = """ + config.g4ds_cudb_table_members + \ """.id and """ + config.g4ds_comm_table_endpoints + """.communityid = """ + config.g4ds_cudb_table_communities + """.id """ + \ """ and """ + config.g4ds_comm_table_endpoints + """.protocolid = """ + config.g4ds_comm_table_protocols + """.id and """ + \ config.g4ds_comm_table_endpoints + """.credentialid = """ + config.g4ds_sec_table_credentials + """.id and """ + \ config.g4ds_sec_table_credentials + """.algorithmid = algorithms.id and """ + config.g4ds_cudb_table_members + \ """.id = '""" + memberid + \ """' and """ + config.g4ds_comm_table_protocols + """.name = '""" + protocolname + """' and """ + \ config.g4ds_cudb_table_communities + """.id = '""" + \ communityid + """' and algorithms.name = '""" + algorithmname + """'""" result = self._connection.query(query) list = result.getresult() if not len(list): from errorhandling import G4dsDependencyException raise G4dsDependencyException('Incoming message has no valid endpoint. Try install / update member description.') return list[0][0] def removeEndpoints(self, endpointid = None): """ Removes credentials from the database. Depending on which parameters are given. 
If no parameter is set, all data in the table will be dropped. """ st = """delete from """ + config.g4ds_comm_table_endpoints + """ where 1=1 """ if endpointid: st = st + """ and id = '""" + endpointid + """' """ self._connection.query(st)
gpl-3.0
open-craft/opencraft
instance/models/mixins/domain_names.py
1
11759
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2019 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Instance app model mixins - domain names
"""

import re

from django.db import models
from django.conf import settings
from django.utils.text import slugify


# Constants ###################################################################

# Sub-domain prefixes reserved for an instance's non-LMS services.
DOMAIN_PREFIXES = [
    'studio',
    'preview',
    'discovery',
    'ecommerce',
]


# Functions ###################################################################

def is_subdomain_contains_reserved_word(subdomain: str) -> bool:
    """
    Check if the subdomain contains a reserved word.

    Only the first (left-most) dot-separated label is checked against
    DOMAIN_PREFIXES.
    """
    return subdomain.split('.')[0] in DOMAIN_PREFIXES


def generate_internal_lms_domain(sub_domain):
    """
    Generates value for internal_lms_domain field from the supplied sub_domain
    and the DEFAULT_INSTANCE_BASE_DOMAIN setting.
    """
    return '{}.{}'.format(sub_domain, settings.DEFAULT_INSTANCE_BASE_DOMAIN)


# Classes #####################################################################

class DomainNameInstance(models.Model):
    """
    Mixin stores and provides logic around the retrieval of domain names
    and domain name-based information.
    """

    # Order in which domain types are consulted by get_domain() /
    # domain_nginx_regex(): external (customer-owned) domains take
    # precedence over internal (automatically managed) ones.
    domain_hierarchy = [
        "external",
        "internal",
    ]

    # Virtual attribute name -> domain key; resolved via __getattr__ through
    # get_domain().
    allowed_domain_attributes = {
        'domain': 'lms',
        'lms_preview_domain': 'lms_preview',
        'studio_domain': 'studio',
        'ecommerce_domain': 'ecommerce',
        'discovery_domain': 'discovery',
    }

    # Virtual attribute name -> site key; resolved via __getattr__ through
    # domain_nginx_regex().
    nginx_domain_regex_attributes = {
        'studio_domain_nginx_regex': 'studio',
        'discovery_domain_nginx_regex': 'discovery',
        'ecommerce_domain_nginx_regex': 'ecommerce',
    }

    # Template used to build concrete model field names, e.g.
    # 'internal_studio_domain' or 'external_lms_domain'.
    domain_attr_template = '{domain_type}_{domain_key}_domain'

    # Internal domains are controlled by us and their DNS records are automatically set to point to the current active
    # appserver. They are generated from a unique prefix (given as 'sub_domain' in instance factories) and the value of
    # DEFAULT_INSTANCE_BASE_DOMAIN at instance creation time. They cannot be blank and are normally never changed after
    # the instance is created.
    # External domains on the other hand are controlled by the customer and are optional. We use external domains in
    # preference to internal domains when displaying links to the instance in the UI and when passing domain-related
    # settings to Ansible vars when provisioning appservers.
    # The `domain`, `lms_preview_domain`, and `studio_domain` properties below are useful if you need to access
    # corresponding domains regardless of whether an instance uses external domains or not (they return the external
    # domain if set, and fall back to the corresponding internal domain otherwise).
    internal_lms_domain = models.CharField(max_length=100, blank=False, unique=True)
    internal_lms_preview_domain = models.CharField(max_length=100, blank=False, unique=True)
    internal_studio_domain = models.CharField(max_length=100, blank=False, unique=True)
    internal_discovery_domain = models.CharField(max_length=100, blank=False, unique=True)
    internal_ecommerce_domain = models.CharField(max_length=100, blank=False, unique=True)

    external_lms_domain = models.CharField(max_length=100, blank=True)
    external_lms_preview_domain = models.CharField(max_length=100, blank=True)
    external_studio_domain = models.CharField(max_length=100, blank=True)
    external_discovery_domain = models.CharField(max_length=100, blank=True)
    external_ecommerce_domain = models.CharField(max_length=100, blank=True)

    extra_custom_domains = models.TextField(default='', blank=True, help_text=(
        "Add custom domain names, one per line. Domain names must be sub domains of the main LMS domain."
    ))

    enable_prefix_domains_redirect = models.BooleanField(default=False)

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        """
        OpenEdXInstance constructor.

        The constructor is overridden to optionally accept a 'sub_domain' parameter instead
        of a full value for 'internal_lms_domain'. When 'sub_domain' is provided, the
        'internal_lms_domain' field is automatically generated from from the value of
        'sub_domain' and the DEFAULT_INSTANCE_BASE_DOMAIN setting.
        """
        if 'sub_domain' in kwargs:
            sub_domain = kwargs.pop('sub_domain')
            # An explicit 'internal_lms_domain' kwarg wins over 'sub_domain'.
            if 'internal_lms_domain' not in kwargs:
                kwargs['internal_lms_domain'] = generate_internal_lms_domain(sub_domain)
        super().__init__(*args, **kwargs)

    def __getattr__(self, domain_attr):
        """
        Catches missing attribute calls, checks if they're lookups for supported domain
        names, and if so, find and return the correct domain name, choosing between
        internal domain and external domain (if that variable has been set for this
        instance).
        """
        domain_key = self.allowed_domain_attributes.get(domain_attr)
        nginx_regex_key = self.nginx_domain_regex_attributes.get(domain_attr)
        if domain_key is not None:
            return self.get_domain(domain_key)
        if nginx_regex_key is not None:
            return self.domain_nginx_regex(nginx_regex_key)
        # Not a virtual domain attribute: fall back to normal attribute
        # resolution, which raises AttributeError for truly missing names.
        return super().__getattribute__(domain_attr)

    def get_domain(self, domain_key):
        """
        Returns the external domain for the given site if present; otherwise, falls back
        to the internal domain.
        """
        for domain_type in self.domain_hierarchy:
            domain_attr = self.domain_attr_template.format(
                domain_type=domain_type,
                domain_key=domain_key
            )
            domain_name = getattr(self, domain_attr, None)
            if domain_name:
                return domain_name
        return None

    def domain_nginx_regex(self, site_name):
        """
        Regex that matches either the internal or external URL for the site.

        This is meant exclusively for filling in the server_name regex in nginx configs.
        """
        domains = []
        for domain_type in self.domain_hierarchy:
            domain = getattr(
                self,
                self.domain_attr_template.format(
                    domain_type=domain_type,
                    domain_key=site_name
                ),
                None,
            )
            if domain:
                domains.append(domain)
        # Escape literal dots etc. so the domains match verbatim.
        choices = '|'.join([re.escape(x) for x in domains])
        return '^({})$'.format(choices)

    def get_prefix_domain_names(self):
        """
        Return an iterable of domain names using prefixes for Studio, Preview,
        Discovery, E-Commerce.

        Note the prefixes are joined with '-' (e.g. 'studio-<lms domain>'),
        not as separate sub-domains.
        """
        return ['{}-{}'.format(prefix, self.internal_lms_domain) for prefix in DOMAIN_PREFIXES]

    def get_load_balanced_domains(self):
        """
        Return an iterable of domains that should be handled by the load balancer.

        Blank/unset domains are filtered out.
        """
        domain_names = [
            self.external_lms_domain,
            self.external_lms_preview_domain,
            self.external_studio_domain,
            self.external_discovery_domain,
            self.external_ecommerce_domain,
            self.internal_lms_domain,
            self.internal_lms_preview_domain,
            self.internal_studio_domain,
            self.internal_discovery_domain,
            self.internal_ecommerce_domain,
        ] + self.extra_custom_domains.splitlines()
        return [name for name in domain_names if name]

    def get_managed_domains(self):
        """
        Return an iterable of domains that we manage DNS entries for.
        """
        managed_domains = [
            self.internal_lms_domain,
            self.internal_lms_preview_domain,
            self.internal_studio_domain,
            self.internal_discovery_domain,
            self.internal_ecommerce_domain,
        ]
        if self.enable_prefix_domains_redirect:
            managed_domains += self.get_prefix_domain_names()
        # Filter out external custom domains here, because we can only manage
        # DNS entries for internal domains.
        managed_domains += [
            domain for domain in self.extra_custom_domains.splitlines()
            if domain.endswith(self.internal_lms_domain)
        ]
        return [name for name in managed_domains if name]

    @property
    def domain_slug(self):
        """
        Returns a slug-friendly name for this instance, using the domain name.
        """
        # Truncate the domain-derived part so the whole slug stays short.
        prefix = ('edxins-' + slugify(self.domain))[:20]
        return "{prefix}-{num}".format(prefix=prefix, num=self.id)

    @property
    def url(self):
        """
        LMS URL.
        """
        return u'https://{}/'.format(self.domain)

    @property
    def studio_url(self):
        """
        Studio URL.
        """
        return u'https://{}/'.format(self.studio_domain)

    @property
    def lms_preview_url(self):
        """
        LMS preview URL.
        """
        return u'https://{}/'.format(self.lms_preview_domain)

    @property
    def lms_extended_heartbeat_url(self):
        """
        LMS extended heartbeat URL.
        """
        return u'{}heartbeat?extended'.format(self.url)

    def save(self, **kwargs):  # pylint: disable=arguments-differ, too-many-branches, useless-suppression
        """
        Set default values before saving the instance.
        """
        # Set default field values from settings - using the `default` field attribute confuses
        # automatically generated migrations, generating a new one when settings don't match
        if not self.internal_lms_preview_domain:
            self.internal_lms_preview_domain = settings.DEFAULT_LMS_PREVIEW_DOMAIN_PREFIX + self.internal_lms_domain
        if not self.internal_studio_domain:
            self.internal_studio_domain = settings.DEFAULT_STUDIO_DOMAIN_PREFIX + self.internal_lms_domain
        if not self.internal_discovery_domain:
            self.internal_discovery_domain = settings.DEFAULT_DISCOVERY_DOMAIN_PREFIX + self.internal_lms_domain
        if not self.internal_ecommerce_domain:
            self.internal_ecommerce_domain = settings.DEFAULT_ECOMMERCE_DOMAIN_PREFIX + self.internal_lms_domain
        # Save for external domain, but only when present
        if self.external_lms_domain:
            if not self.external_lms_preview_domain:
                self.external_lms_preview_domain = settings.DEFAULT_LMS_PREVIEW_DOMAIN_PREFIX + self.external_lms_domain
            if not self.external_studio_domain:
                self.external_studio_domain = settings.DEFAULT_STUDIO_DOMAIN_PREFIX + self.external_lms_domain
            if not self.external_discovery_domain:
                self.external_discovery_domain = settings.DEFAULT_DISCOVERY_DOMAIN_PREFIX + self.external_lms_domain
            if not self.external_ecommerce_domain:
                self.external_ecommerce_domain = settings.DEFAULT_ECOMMERCE_DOMAIN_PREFIX + self.external_lms_domain
        super().save(**kwargs)
agpl-3.0
xaviercobain88/framework-python
build/lib.linux-i686-2.7/openerp/addons/crm_claim/__init__.py
53
1096
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import crm_claim import report import res_config # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
s-kanev/runstats
stats.py
1
7044
#!/bin/python import argparse from lxml import etree from datetime import datetime import numpy as np import os import plotting ######################################################################## class TrackPoint: def __init__(self): self.time = datetime.min self.lat = -1 self.lon = -1 self.alt = -1 self.cum_dist = -1 ######################################################################## class Lap: def __init__(self): self.dist = -1 self.time = -1 self.start_time = datetime.min self.points = [] def SetTotals(self, dist, time): self.dist = dist self.time = time def SetStart(self, start): self.start_time = start def AddPoint(self, tp): self.points.append(tp) def __str__(self): return ("%f %f" % (self.dist, self.time)) def __repr__(self): return ("%f %f" % (self.dist, self.time)) ######################################################################## class Workout: def __init__(self): self.laps = [] self.name = "<None>" def AddLap(self, lap_element): self.laps.append(ParseLap(lap_element)) def GetNumPoints(self): total = 0 for lap in self.laps: total += len(lap.points) return total def GetAltArray(self): n_items = self.GetNumPoints() alt = np.zeros(n_items) i = 0 for lap in self.laps: for tp in lap.points: alt[i] = tp.alt i += 1 return alt def GetDistArray(self): n_items = self.GetNumPoints() dist = np.zeros(n_items) i = 0 for lap in self.laps: for tp in lap.points: dist[i] = tp.cum_dist i += 1 return dist def GetTotalDist(self): dist = 0.0 for lap in self.laps: dist += lap.dist return dist def GetTotalTime(self): time = 0.0 for lap in self.laps: time += lap.time return time def GetStartTime(self): return self.laps[0].start_time def GetPace(self): res = [] for lap in self.laps: if lap.dist == 0.0: continue pace = lap.time / (lap.dist / 1000.0) # s / km res.append(pace) return res def GetLapStarts(self): res = [] start = 0.0 for lap in self.laps: if lap.dist == 0.0: continue res.append(start) start += lap.dist / 1000.0 return res def GetLapEnds(self): res = [] 
end = 0.0 for lap in self.laps: if lap.dist == 0.0: continue end += lap.dist / 1000.0 res.append(end) return res ######################################################################## def ParseLapStartTime(start_time): TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" TIME_FORMAT_NO_MS = "%Y-%m-%dT%H:%M:%SZ" try: res = datetime.strptime(start_time, TIME_FORMAT) except ValueError: res = datetime.strptime(start_time, TIME_FORMAT_NO_MS) return res ######################################################################## def ParseLap(lap_element): # Get lap totals -- time and distance dist = lap_element.find('./{*}DistanceMeters') dist_val = float(dist.text) time = lap_element.find('./{*}TotalTimeSeconds') time_val = float(time.text) new_lap = Lap() new_lap.SetTotals(dist_val, time_val) start_time = lap_element.get("StartTime") new_lap.SetStart(ParseLapStartTime(start_time)) track = lap_element.find("./{*}Track") if track is None: # No trackpoints, just return return new_lap # Get GPS trackpoints for trackpoint in track.findall("./{*}Trackpoint"): new_tp = TrackPoint() try: new_tp.alt = float(trackpoint.find("./{*}AltitudeMeters").text) new_tp.cum_dist = float(trackpoint.find("./{*}DistanceMeters").text) except AttributeError: # Some trackpoints only have a timestamp continue # XXX: proper corner cases if new_tp.alt != -1 and new_tp.cum_dist != -1: new_lap.AddPoint(new_tp) return new_lap ######################################################################## def ParseDoc(doc_name): try: docroot = etree.parse(doc_name).getroot() except: return None workout = Workout() name = docroot.find('./{*}Activities/{*}Activity/{*}Name') if name is not None: workout.name = name.text laps = docroot.findall('./{*}Activities/{*}Activity/{*}Lap') for lap_element in laps: workout.AddLap(lap_element) return workout ######################################################################## if __name__ == "__main__": parser = argparse.ArgumentParser(description="Generate fancy running plots.") 
parser.add_argument("--fname") parser.add_argument("--path", default="./data", help="Path to database of runs (default ./data)") parser.add_argument("--start", default=None, help="Parse runs after date Y-m-d") parser.add_argument("--export", default=False, action="store_true", help="Export plots") args = parser.parse_args() fname = args.fname path = args.path if args.start: ARGDATE_FORMAT = "%Y-%m-%d" filter_start_time = datetime.strptime(args.start, ARGDATE_FORMAT) else: filter_start_time = None export_figs = args.export if fname != None: fnames = fname.split(",") print fnames workouts = [] for f in fnames: workouts.append(ParseDoc(f)) plotting.PlotPace(workouts) # plotting.PlotAlt(workout) if path != None: workouts = [] for f in os.listdir(path): if os.path.splitext(f)[1] != ".tcx": continue workout = ParseDoc(os.path.join(path, f)) if workout == None: print "Ignoring workout %s" % f continue if filter_start_time and workout.GetStartTime() < filter_start_time: continue workouts.append(workout) if export_figs: pace_dist_name = "pace_distance.png" pace_hist_name = "pace_hist.png" monthly_name = "monthly.png" yearly_name = "yearly.png" races_name = "races.png" else: pace_dist_name = None pace_hist_name = None monthly_name = None yearly_name = None races_name = None plotting.PlotPaceVsDistance(workouts, pace_dist_name) plotting.PlotDistanceAtPace(workouts, pace_hist_name) plotting.PlotMonthlyDist(workouts, monthly_name) plotting.PlotYearlyCumulative(workouts, yearly_name) plotting.PlotRaces(workouts, races_name)
mit
tundebabzy/frappe
frappe/config/website.py
18
2113
from __future__ import unicode_literals
from frappe import _


def get_data():
    """Return the Website module's desk configuration.

    Each entry is a card shown on the module page: a translated ``label``,
    an optional Font Awesome ``icon``, and a list of doctype links with
    optional descriptions.  The structure is consumed by frappe's module
    view renderer, so keys and doctype names must match exactly.
    """
    return [
        {
            "label": _("Web Site"),
            "icon": "fa fa-star",
            "items": [
                {
                    "type": "doctype",
                    "name": "Web Page",
                    "description": _("Content web page."),
                },
                {
                    "type": "doctype",
                    "name": "Web Form",
                    "description": _("User editable form on Website."),
                },
                {
                    "type": "doctype",
                    "name": "Website Sidebar",
                },
                {
                    "type": "doctype",
                    "name": "Website Slideshow",
                    "description": _("Embed image slideshows in website pages."),
                },
            ]
        },
        {
            "label": _("Blog"),
            "items": [
                {
                    "type": "doctype",
                    "name": "Blog Post",
                    "description": _("Single Post (article)."),
                },
                {
                    "type": "doctype",
                    "name": "Blog Settings",
                    "description": _("Write titles and introductions to your blog."),
                },
                {
                    "type": "doctype",
                    "name": "Blog Category",
                    "description": _("Categorize blog posts."),
                },
            ]
        },
        {
            "label": _("Setup"),
            "icon": "fa fa-cog",
            "items": [
                {
                    "type": "doctype",
                    "name": "Website Settings",
                    "description": _("Setup of top navigation bar, footer and logo."),
                },
                {
                    "type": "doctype",
                    "name": "Website Theme",
                    "description": _("List of themes for Website."),
                },
                {
                    "type": "doctype",
                    "name": "Website Script",
                    "description": _("Javascript to append to the head section of the page."),
                },
                {
                    "type": "doctype",
                    "name": "About Us Settings",
                    "description": _("Settings for About Us Page."),
                },
                {
                    "type": "doctype",
                    "name": "Contact Us Settings",
                    "description": _("Settings for Contact Us Page."),
                },
            ]
        },
        {
            "label": _("Portal"),
            "items": [
                {
                    "type": "doctype",
                    "name": "Portal Settings",
                    "label": _("Portal Settings"),
                }
            ]
        },
        {
            "label": _("Knowledge Base"),
            "items": [
                {
                    "type": "doctype",
                    "name": "Help Category",
                },
                {
                    "type": "doctype",
                    "name": "Help Article",
                },
            ]
        },
    ]
mit
mcrowson/django
django/dispatch/weakref_backports.py
414
2151
""" weakref_backports is a partial backport of the weakref module for python versions below 3.4. Copyright (C) 2013 Python Software Foundation, see license.python.txt for details. The following changes were made to the original sources during backporting: * Added `self` to `super` calls. * Removed `from None` when raising exceptions. """ from weakref import ref class WeakMethod(ref): """ A custom `weakref.ref` subclass which simulates a weak reference to a bound method, working around the lifetime problem of bound methods. """ __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" def __new__(cls, meth, callback=None): try: obj = meth.__self__ func = meth.__func__ except AttributeError: raise TypeError("argument should be a bound method, not {}" .format(type(meth))) def _cb(arg): # The self-weakref trick is needed to avoid creating a reference # cycle. self = self_wr() if self._alive: self._alive = False if callback is not None: callback(self) self = ref.__new__(cls, obj, _cb) self._func_ref = ref(func, _cb) self._meth_type = type(meth) self._alive = True self_wr = ref(self) return self def __call__(self): obj = super(WeakMethod, self).__call__() func = self._func_ref() if obj is None or func is None: return None return self._meth_type(func, obj) def __eq__(self, other): if isinstance(other, WeakMethod): if not self._alive or not other._alive: return self is other return ref.__eq__(self, other) and self._func_ref == other._func_ref return False def __ne__(self, other): if isinstance(other, WeakMethod): if not self._alive or not other._alive: return self is not other return ref.__ne__(self, other) or self._func_ref != other._func_ref return True __hash__ = ref.__hash__
bsd-3-clause
tetherless-world/satoru
whyis/interpreter.py
2
57286
import rdflib from datetime import datetime from nanopub import Nanopublication import logging import sys import pandas as pd import configparser import hashlib from .autonomic.update_change_service import UpdateChangeService from whyis.namespace import whyis, prov, sio class Interpreter(UpdateChangeService): kb = ":" cb_fn = None timeline_fn = None data_fn = None prefix_fn = "prefixes.txt" prefixes = {} studyRef = None unit_code_list = [] unit_uri_list = [] unit_label_list = [] explicit_entry_list = [] virtual_entry_list = [] explicit_entry_tuples = [] virtual_entry_tuples = [] cb_tuple = {} timeline_tuple = {} config = configparser.ConfigParser() def __init__(self, config_fn=None): # prefixes should be if config_fn is not None: try: self.config.read(config_fn) except Exception as e: logging.exception("Error: Unable to open configuration file: ") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) sys.exit(1) if self.config.has_option('Prefixes', 'prefixes'): self.prefix_fn = self.config.get('Prefixes', 'prefixes') # prefix_file = open(self.prefix_fn,"r") # self.prefixes = prefix_file.readlines() prefix_file = pd.read_csv(self.prefix_fn, dtype=object) try: for row in prefix_file.itertuples(): self.prefixes[row.prefix] = row.url except Exception as e: logging.exception("Error: Something went wrong when trying to read the Prefix File: ") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) sys.exit(1) if self.config.has_option('Prefixes', 'base_uri'): self.kb = self.config.get('Prefixes', 'base_uri') if self.config.has_option('Source Files', 'dictionary'): dm_fn = self.config.get('Source Files', 'dictionary') try: dm_file = pd.read_csv(dm_fn, dtype=object) try: # Populate virtual and explicit entry lists for row in dm_file.itertuples(): if pd.isnull(row.Column): logging.exception("Error: The SDD must have a column named 'Column'") sys.exit(1) if row.Column.startswith("??"): 
self.virtual_entry_list.append(row) else: self.explicit_entry_list.append(row) except Exception as e: logging.exception( "Error: Something went wrong when trying to read the Dictionary Mapping File: ") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) sys.exit(1) except Exception as e: logging.exception("Error: The specified Dictionary Mapping file does not exist: ") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) sys.exit(1) if self.config.has_option('Source Files', 'codebook'): self.cb_fn = self.config.get('Source Files', 'codebook') if self.cb_fn is not None: try: cb_file = pd.read_csv(self.cb_fn, dtype=object) try: inner_tuple_list = [] for row in cb_file.itertuples(): if (pd.notnull(row.Column) and row.Column not in self.cb_tuple): inner_tuple_list = [] inner_tuple = {} inner_tuple["Code"] = row.Code if pd.notnull(row.Label): inner_tuple["Label"] = row.Label if pd.notnull(row.Class): inner_tuple["Class"] = row.Class if "Resource" in row and pd.notnull(row.Resource): inner_tuple["Resource"] = row.Resource inner_tuple_list.append(inner_tuple) self.cb_tuple[row.Column] = inner_tuple_list except Exception as e: logging.warning("Warning: Unable to process Codebook file: ") if hasattr(e, 'message'): logging.warning(e.message) else: logging.warning(e) except Exception as e: logging.exception("Error: The specified Codebook file does not exist: ") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) sys.exit(1) if self.config.has_option('Source Files', 'timeline'): self.timeline_fn = self.config.get('Source Files', 'timeline') if self.timeline_fn is not None: try: timeline_file = pd.read_csv(self.timeline_fn, dtype=object) try: inner_tuple_list = [] for row in timeline_file.itertuples(): if pd.notnull(row.Name) and row.Name not in self.timeline_tuple: inner_tuple_list = [] inner_tuple = {} inner_tuple["Type"] = row.Type if pd.notnull(row.Label): inner_tuple["Label"] = 
row.Label if pd.notnull(row.Start): inner_tuple["Start"] = row.Start if pd.notnull(row.End): inner_tuple["End"] = row.End if pd.notnull(row.Unit): inner_tuple["Unit"] = row.Unit if pd.notnull(row.inRelationTo): inner_tuple["inRelationTo"] = row.inRelationTo inner_tuple_list.append(inner_tuple) self.timeline_tuple[row.Name] = inner_tuple_list except Exception as e: logging.warning("Warning: Unable to process Timeline file: ") if hasattr(e, 'message'): logging.warning(e.message) else: logging.warning(e) except Exception as e: logging.exception("Error: The specified Timeline file does not exist: ") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) sys.exit(1) if self.config.has_option('Source Files', 'code_mappings'): cmap_fn = self.config.get('Source Files', 'code_mappings') code_mappings_reader = pd.read_csv(cmap_fn) for code_row in code_mappings_reader.itertuples(): if pd.notnull(code_row.code): self.unit_code_list.append(code_row.code) if pd.notnull(code_row.uri): self.unit_uri_list.append(code_row.uri) if pd.notnull(code_row.label): self.unit_label_list.append(code_row.label) if self.config.has_option('Source Files', 'data_file'): self.data_fn = self.config.get('Source Files', 'data_file') def getInputClass(self): return whyis.SemanticDataDictionary def getOutputClass(self): return whyis.SemanticDataDictionaryInterpretation def get_query(self): return '''SELECT ?s WHERE { ?s ?p ?o .} LIMIT 1\n''' def process(self, i, o): print("Processing SDD...") self.app.db.store.nsBindings = {} npub = Nanopublication(store=o.graph.store) # prefixes={} # prefixes.update(self.prefixes) # prefixes.update(self.app.NS.prefixes) self.writeVirtualEntryNano(npub) self.writeExplicitEntryNano(npub) self.interpretData(npub) def parseString(self, input_string, delim): my_list = input_string.split(delim) my_list = [element.strip() for element in my_list] return my_list def rdflibConverter(self, input_word): if "http" in input_word: return 
rdflib.term.URIRef(input_word) if ':' in input_word: word_list = input_word.split(":") term = self.prefixes[word_list[0]] + word_list[1] return rdflib.term.URIRef(term) return rdflib.Literal(input_word, datatype=rdflib.XSD.string) def codeMapper(self, input_word): unitVal = input_word for unit_label in self.unit_label_list: if unit_label == input_word: unit_index = self.unit_label_list.index(unit_label) unitVal = self.unit_uri_list[unit_index] for unit_code in self.unit_code_list: if unit_code == input_word: unit_index = self.unit_code_list.index(unit_code) unitVal = self.unit_uri_list[unit_index] return unitVal def convertVirtualToKGEntry(self, *args): if args[0][:2] == "??": if self.studyRef is not None: if args[0] == self.studyRef: return self.prefixes[self.kb] + args[0][2:] if len(args) == 2: return self.prefixes[self.kb] + args[0][2:] + "-" + args[1] return self.prefixes[self.kb] + args[0][2:] if ':' not in args[0]: # Check for entry in column list for item in self.explicit_entry_list: if args[0] == item.Column: if len(args) == 2: return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(", "").replace( ")", "").replace("/", "-").replace("\\", "-") + "-" + args[1] return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(", "").replace( ")", "").replace("/", "-").replace("\\", "-") return '"' + args[0] + "\"^^xsd:string" return args[0] def checkVirtual(self, input_word): try: if input_word[:2] == "??": return True return False except Exception as e: logging.exception("Something went wrong in Interpreter.checkVirtual(): ") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) sys.exit(1) def isfloat(self, value): try: float(value) return True except ValueError: return False def writeVirtualEntryNano(self, nanopub): for item in self.virtual_entry_list: virtual_tuple = {} term = rdflib.term.URIRef(self.prefixes[self.kb] + str(item.Column[2:])) nanopub.assertion.add((term, 
rdflib.RDF.type, rdflib.OWL.Class)) nanopub.assertion.add( (term, rdflib.RDFS.label, rdflib.Literal(str(item.Column[2:]), datatype=rdflib.XSD.string))) # Set the rdf:type of the virtual row to either the Attribute or Entity value (or else owl:Individual) if (pd.notnull(item.Entity)) and (pd.isnull(item.Attribute)): if ',' in item.Entity: entities = self.parseString(item.Entity, ',') for entity in entities: nanopub.assertion.add( (term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity)))) else: nanopub.assertion.add( (term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity)))) virtual_tuple["Column"] = item.Column virtual_tuple["Entity"] = self.codeMapper(item.Entity) if virtual_tuple["Entity"] == "hasco:Study": self.studyRef = item.Column virtual_tuple["Study"] = item.Column elif (pd.isnull(item.Entity)) and (pd.notnull(item.Attribute)): if ',' in item.Attribute: attributes = self.parseString(item.Attribute, ',') for attribute in attributes: nanopub.assertion.add( (term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute)))) else: nanopub.assertion.add( (term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute)))) virtual_tuple["Column"] = item.Column virtual_tuple["Attribute"] = self.codeMapper(item.Attribute) else: logging.warning( "Warning: Virtual entry not assigned an Entity or Attribute value, or was assigned both.") virtual_tuple["Column"] = item.Column # If there is a value in the inRelationTo column ... if pd.notnull(item.inRelationTo): virtual_tuple["inRelationTo"] = item.inRelationTo # If there is a value in the Relation column but not the Role column ... if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)): nanopub.assertion.add((term, self.rdflibConverter(item.Relation), self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo)))) virtual_tuple["Relation"] = item.Relation # If there is a value in the Role column but not the Relation column ... 
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)): role = rdflib.BNode() nanopub.assertion.add( (role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role)))) nanopub.assertion.add( (role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo)))) nanopub.assertion.add((term, sio.hasRole, role)) virtual_tuple["Role"] = item.Role # If there is a value in the Role and Relation columns ... elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)): virtual_tuple["Relation"] = item.Relation virtual_tuple["Role"] = item.Role nanopub.assertion.add( (term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role)))) nanopub.assertion.add((term, self.rdflibConverter(item.Relation), self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo)))) nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal( "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month, datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format( datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z", datatype=rdflib.XSD.dateTime))) if pd.notnull(item.wasDerivedFrom): if ',' in item.wasDerivedFrom: derivedFromTerms = self.parseString(item.wasDerivedFrom, ',') for derivedFromTerm in derivedFromTerms: nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter(self.convertVirtualToKGEntry(derivedFromTerm)))) else: nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter(self.convertVirtualToKGEntry(item.wasDerivedFrom)))) virtual_tuple["wasDerivedFrom"] = item.wasDerivedFrom if pd.notnull(item.wasGeneratedBy): if ',' in item.wasGeneratedBy: generatedByTerms = self.parseString(item.wasGeneratedBy, ',') for generatedByTerm in generatedByTerms: nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter(self.convertVirtualToKGEntry(generatedByTerm)))) else: nanopub.provenance.add((term, prov.wasGeneratedBy, 
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasGeneratedBy)))) virtual_tuple["wasGeneratedBy"] = item.wasGeneratedBy self.virtual_entry_tuples.append(virtual_tuple) if self.timeline_fn is not None: for key in self.timeline_tuple: tl_term = self.rdflibConverter(self.convertVirtualToKGEntry(key)) nanopub.assertion.add((tl_term, rdflib.RDF.type, rdflib.OWL.Class)) for timeEntry in self.timeline_tuple[key]: if 'Type' in timeEntry: nanopub.assertion.add( (tl_term, rdflib.RDFS.subClassOf, self.rdflibConverter(timeEntry['Type']))) if 'Label' in timeEntry: nanopub.assertion.add((tl_term, rdflib.RDFS.label, rdflib.Literal(str(timeEntry['Label']), datatype=rdflib.XSD.string))) if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']: nanopub.assertion.add((tl_term, sio.hasValue, self.rdflibConverter(str(timeEntry['Start'])))) if 'Start' in timeEntry: start_time = rdflib.BNode() nanopub.assertion.add((start_time, sio.hasValue, self.rdflibConverter(str(timeEntry['Start'])))) nanopub.assertion.add((tl_term, sio.hasStartTime, start_time)) if 'End' in timeEntry: end_time = rdflib.BNode() nanopub.assertion.add((end_time, sio.hasValue, self.rdflibConverter(str(timeEntry['End'])))) nanopub.assertion.add((tl_term, sio.hasEndTime, end_time)) if 'Unit' in timeEntry: nanopub.assertion.add( (tl_term, sio.hasUnit, self.rdflibConverter(self.codeMapper(timeEntry['Unit'])))) if 'inRelationTo' in timeEntry: nanopub.assertion.add((tl_term, sio.inRelationTo, self.rdflibConverter( self.convertVirtualToKGEntry(timeEntry['inRelationTo'])))) nanopub.provenance.add((tl_term, prov.generatedAtTime, rdflib.Literal( "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month, datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format( datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z", datatype=rdflib.XSD.dateTime))) def writeExplicitEntryNano(self, nanopub): for item in self.explicit_entry_list: 
explicit_entry_tuple = {} term = rdflib.term.URIRef(self.prefixes[self.kb] + str( item.Column.replace(" ", "_").replace(",", "").replace("(", "").replace(")", "").replace("/", "-").replace( "\\", "-"))) nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class)) if pd.notnull(item.Attribute): if ',' in item.Attribute: attributes = self.parseString(item.Attribute, ',') for attribute in attributes: nanopub.assertion.add( (term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute)))) else: nanopub.assertion.add( (term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute)))) explicit_entry_tuple["Column"] = item.Column explicit_entry_tuple["Attribute"] = self.codeMapper(item.Attribute) elif pd.notnull(item.Entity): if ',' in item.Entity: entities = self.parseString(item.Entity, ',') for entity in entities: nanopub.assertion.add( (term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity)))) else: nanopub.assertion.add( (term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity)))) explicit_entry_tuple["Column"] = item.Column explicit_entry_tuple["Entity"] = self.codeMapper(item.Entity) else: nanopub.assertion.add((term, rdflib.RDFS.subClassOf, sio.Attribute)) explicit_entry_tuple["Column"] = item.Column explicit_entry_tuple["Attribute"] = self.codeMapper("sio:Attribute") logging.warning("Warning: Explicit entry not assigned an Attribute or Entity value.") if pd.notnull(item.attributeOf): nanopub.assertion.add( (term, sio.isAttributeOf, self.rdflibConverter(self.convertVirtualToKGEntry(item.attributeOf)))) explicit_entry_tuple["isAttributeOf"] = self.convertVirtualToKGEntry(item.attributeOf) else: logging.warning("Warning: Explicit entry not assigned an isAttributeOf value.") if pd.notnull(item.Unit): nanopub.assertion.add( (term, sio.hasUnit, self.rdflibConverter(self.convertVirtualToKGEntry(self.codeMapper(item.Unit))))) explicit_entry_tuple["Unit"] = 
self.convertVirtualToKGEntry(self.codeMapper(item.Unit)) if pd.notnull(item.Time): nanopub.assertion.add( (term, sio.existsAt, self.rdflibConverter(self.convertVirtualToKGEntry(item.Time)))) explicit_entry_tuple["Time"] = item.Time if pd.notnull(item.inRelationTo): explicit_entry_tuple["inRelationTo"] = item.inRelationTo # If there is a value in the Relation column but not the Role column ... if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)): nanopub.assertion.add((term, self.rdflibConverter(item.Relation), self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo)))) explicit_entry_tuple["Relation"] = item.Relation # If there is a value in the Role column but not the Relation column ... elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)): role = rdflib.BNode() nanopub.assertion.add( (role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role)))) nanopub.assertion.add( (role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo)))) nanopub.assertion.add((term, sio.hasRole, role)) explicit_entry_tuple["Role"] = item.Role # If there is a value in the Role and Relation columns ... 
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)): nanopub.assertion.add( (term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role)))) nanopub.assertion.add((term, self.rdflibConverter(item.Relation), self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo)))) explicit_entry_tuple["Relation"] = item.Relation explicit_entry_tuple["Role"] = item.Role if ("Label" in item and pd.notnull(item.Label)): nanopub.assertion.add((term, rdflib.RDFS.label, self.rdflibConverter(item.Label))) explicit_entry_tuple["Label"] = item.Label if ("Comment" in item and pd.notnull(item.Comment)): nanopub.assertion.add((term, rdflib.RDFS.comment, self.rdflibConverter(item.Comment))) explicit_entry_tuple["Comment"] = item.Comment nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal( "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month, datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format( datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z", datatype=rdflib.XSD.dateTime))) if pd.notnull(item.wasDerivedFrom): if ',' in item.wasDerivedFrom: derivedFromTerms = self.parseString(item.wasDerivedFrom, ',') for derivedFromTerm in derivedFromTerms: nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter(self.convertVirtualToKGEntry(derivedFromTerm)))) else: nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter(self.convertVirtualToKGEntry(item.wasDerivedFrom)))) explicit_entry_tuple["wasDerivedFrom"] = item.wasDerivedFrom if pd.notnull(item.wasGeneratedBy): if ',' in item.wasGeneratedBy: generatedByTerms = self.parseString(item.wasGeneratedBy, ',') for generatedByTerm in generatedByTerms: nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter(self.convertVirtualToKGEntry(generatedByTerm)))) else: nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter(self.convertVirtualToKGEntry(item.wasGeneratedBy)))) 
explicit_entry_tuple["wasGeneratedBy"] = item.wasGeneratedBy self.explicit_entry_tuples.append(explicit_entry_tuple) def writeVirtualEntry(self, nanopub, vref_list, v_column, index): term = self.rdflibConverter(self.convertVirtualToKGEntry(v_column, index)) try: if self.timeline_fn is not None: if v_column in self.timeline_tuple: nanopub.assertion.add( (term, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(v_column)))) for timeEntry in self.timeline_tuple[v_column]: if 'Type' in timeEntry: nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter(timeEntry['Type']))) if 'Label' in timeEntry: nanopub.assertion.add((term, rdflib.RDFS.label, rdflib.Literal(str(timeEntry['Label']), datatype=rdflib.XSD.string))) if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']: nanopub.assertion.add((term, sio.hasValue, self.rdflibConverter(str(timeEntry['Start'])))) if 'Start' in timeEntry: start_time = rdflib.BNode() nanopub.assertion.add( (start_time, sio.hasValue, self.rdflibConverter(str(timeEntry['Start'])))) nanopub.assertion.add((term, sio.hasStartTime, start_time)) if 'End' in timeEntry: end_time = rdflib.BNode() nanopub.assertion.add((end_time, sio.hasValue, self.rdflibConverter(str(timeEntry['End'])))) nanopub.assertion.add((term, sio.hasEndTime, end_time)) if 'Unit' in timeEntry: nanopub.assertion.add( (term, sio.hasUnit, self.rdflibConverter(self.codeMapper(timeEntry['Unit'])))) if 'inRelationTo' in timeEntry: nanopub.assertion.add((term, sio.inRelationTo, self.rdflibConverter( self.convertVirtualToKGEntry(timeEntry['inRelationTo'])))) if self.checkVirtual(timeEntry['inRelationTo']) and timeEntry[ 'inRelationTo'] not in vref_list: vref_list.append(timeEntry['inRelationTo']) for v_tuple in self.virtual_entry_tuples: if v_tuple["Column"] == v_column: if "Study" in v_tuple: continue else: v_term = rdflib.term.URIRef(self.prefixes[self.kb] + str(v_tuple["Column"][2:]) + "-" + index) 
nanopub.assertion.add((v_term, rdflib.RDF.type, rdflib.term.URIRef(self.prefixes[self.kb] + str(v_tuple["Column"][2:])))) if "Entity" in v_tuple: if ',' in v_tuple["Entity"]: entities = self.parseString(v_tuple["Entity"], ',') for entity in entities: nanopub.assertion.add( (term, rdflib.RDF.type, self.rdflibConverter(self.codeMapper(entity)))) else: nanopub.assertion.add( (term, rdflib.RDF.type, self.rdflibConverter(self.codeMapper(v_tuple["Entity"])))) if "Attribute" in v_tuple: if ',' in v_tuple["Attribute"]: attributes = self.parseString(v_tuple["Attribute"], ',') for attribute in attributes: nanopub.assertion.add( (term, rdflib.RDF.type, self.rdflibConverter(self.codeMapper(attribute)))) else: nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter(self.codeMapper(v_tuple["Attribute"])))) if "Subject" in v_tuple: nanopub.assertion.add((term, sio.hasIdentifier, rdflib.term.URIRef( self.prefixes[self.kb] + v_tuple["Subject"] + "-" + index))) if "inRelationTo" in v_tuple: if ("Role" in v_tuple) and ("Relation" not in v_tuple): role = rdflib.BNode() nanopub.assertion.add((role, rdflib.RDF.type, self.rdflibConverter( self.convertVirtualToKGEntry(v_tuple["Role"], index)))) nanopub.assertion.add((role, sio.inRelationTo, self.rdflibConverter( self.convertVirtualToKGEntry(v_tuple["inRelationTo"], index)))) nanopub.assertion.add((term, sio.hasRole, role)) elif ("Role" not in v_tuple) and ("Relation" in v_tuple): nanopub.assertion.add((term, self.rdflibConverter(v_tuple["Relation"]), self.rdflibConverter( self.convertVirtualToKGEntry(v_tuple["inRelationTo"], index)))) elif ("Role" not in v_tuple) and ("Relation" not in v_tuple): nanopub.assertion.add((term, sio.inRelationTo, self.rdflibConverter( self.convertVirtualToKGEntry(v_tuple["inRelationTo"], index)))) nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal( "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month, datetime.utcnow().day) + "T" + 
"{:02d}:{:02d}:{:02d}".format( datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z", datatype=rdflib.XSD.dateTime))) if "wasGeneratedBy" in v_tuple: if ',' in v_tuple["wasGeneratedBy"]: generatedByTerms = self.parseString(v_tuple["wasGeneratedBy"], ',') for generatedByTerm in generatedByTerms: nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter( self.convertVirtualToKGEntry(generatedByTerm, index)))) if self.checkVirtual(generatedByTerm) and generatedByTerm not in vref_list: vref_list.append(generatedByTerm) else: nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter( self.convertVirtualToKGEntry(v_tuple["wasGeneratedBy"], index)))) if self.checkVirtual(v_tuple["wasGeneratedBy"]) and v_tuple[ "wasGeneratedBy"] not in vref_list: vref_list.append(v_tuple["wasGeneratedBy"]) if "wasDerivedFrom" in v_tuple: if ',' in v_tuple["wasDerivedFrom"]: derivedFromTerms = self.parseString(v_tuple["wasDerivedFrom"], ',') for derivedFromTerm in derivedFromTerms: nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter( self.convertVirtualToKGEntry(derivedFromTerm, index)))) if self.checkVirtual(derivedFromTerm) and derivedFromTerm not in vref_list: vref_list.append(derivedFromTerm) else: nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter( self.convertVirtualToKGEntry(v_tuple["wasDerivedFrom"], index)))) if self.checkVirtual(v_tuple["wasDerivedFrom"]) and v_tuple[ "wasDerivedFrom"] not in vref_list: vref_list.append(v_tuple["wasDerivedFrom"]) return vref_list except Exception as e: logging.warning("Warning: Unable to create virtual entry:") if hasattr(e, 'message'): logging.warning(e.message) else: logging.warning(e) def interpretData(self, nanopub): if self.data_fn is not None: try: data_file = pd.read_csv(self.data_fn, dtype=object) except Exception as e: logging.exception("Error: The specified Data file does not exist: ") if hasattr(e, 'message'): logging.exception(e.message) 
else: logging.exception(e) sys.exit(1) try: col_headers = list(data_file.columns.values) try: for a_tuple in self.explicit_entry_tuples: if "Attribute" in a_tuple: if ((a_tuple["Attribute"] == "hasco:originalID") or (a_tuple["Attribute"] == "sio:Identifier")): if a_tuple["Column"] in col_headers: for v_tuple in self.virtual_entry_tuples: if "isAttributeOf" in a_tuple: if a_tuple["isAttributeOf"] == v_tuple["Column"]: v_tuple["Subject"] = a_tuple["Column"].replace(" ", "_").replace(",", "").replace( "(", "").replace(")", "").replace("/", "-").replace("\\", "-") except Exception as e: logging.exception("Error: Something went wrong when processing column headers:") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) for row in data_file.itertuples(): id_string = '' for element in row: id_string += str(element) identifierString = hashlib.md5(id_string).hexdigest() try: vref_list = [] for a_tuple in self.explicit_entry_tuples: if a_tuple["Column"] in col_headers: try: try: term = rdflib.term.URIRef(self.prefixes[self.kb] + str( a_tuple["Column"].replace(" ", "_").replace(",", "").replace("(", "").replace( ")", "").replace("/", "-").replace("\\", "-")) + "-" + identifierString) nanopub.assertion.add((term, rdflib.RDF.type, rdflib.term.URIRef( self.prefixes[self.kb] + str( a_tuple["Column"].replace(" ", "_").replace(",", "").replace("(", "").replace( ")", "").replace("/", "-").replace("\\", "-"))))) print(term) if "Attribute" in a_tuple: if ',' in a_tuple["Attribute"]: attributes = self.parseString(a_tuple["Attribute"], ',') for attribute in attributes: nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter( self.codeMapper(attribute)))) else: nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter( self.codeMapper(a_tuple["Attribute"])))) if "Entity" in a_tuple: if ',' in a_tuple["Entity"]: entities = self.parseString(a_tuple["Entity"], ',') for entity in entities: nanopub.assertion.add((term, rdflib.RDF.type, 
self.rdflibConverter(self.codeMapper(entity)))) else: nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter( self.codeMapper(a_tuple["Entity"])))) if "isAttributeOf" in a_tuple: nanopub.assertion.add((term, sio.isAttributeOf, self.rdflibConverter( self.convertVirtualToKGEntry(a_tuple["isAttributeOf"], identifierString)))) if self.checkVirtual(a_tuple["isAttributeOf"]): if a_tuple["isAttributeOf"] not in vref_list: vref_list.append(a_tuple["isAttributeOf"]) if "Unit" in a_tuple: nanopub.assertion.add( (term, sio.hasUnit, self.rdflibConverter(self.codeMapper(a_tuple["Unit"])))) if "Time" in a_tuple: nanopub.assertion.add((term, sio.existsAt, self.rdflibConverter( self.convertVirtualToKGEntry(a_tuple["Time"], identifierString)))) if self.checkVirtual(a_tuple["Time"]): if a_tuple["Time"] not in vref_list: vref_list.append(a_tuple["Time"]) if "Label" in a_tuple: nanopub.assertion.add( (term, rdflib.RDFS.label, self.rdflibConverter(a_tuple["Label"]))) if "Comment" in a_tuple: nanopub.assertion.add( (term, rdflib.RDFS.comment, self.rdflibConverter(a_tuple["Comment"]))) if "inRelationTo" in a_tuple: if ("Role" in a_tuple) and ("Relation" not in a_tuple): role = rdflib.BNode() nanopub.assertion.add((role, rdflib.RDF.type, self.rdflibConverter( self.convertVirtualToKGEntry(a_tuple["Role"], identifierString)))) nanopub.assertion.add((role, sio.inRelationTo, self.rdflibConverter( self.convertVirtualToKGEntry(a_tuple["inRelationTo"], identifierString)))) nanopub.assertion.add((term, sio.hasRole, role)) elif ("Role" not in a_tuple) and ("Relation" in a_tuple): nanopub.assertion.add((term, self.rdflibConverter(a_tuple["Relation"]), self.rdflibConverter(self.convertVirtualToKGEntry( a_tuple["inRelationTo"], identifierString)))) elif ("Role" not in a_tuple) and ("Relation" not in a_tuple): nanopub.assertion.add((term, sio.inRelationTo, self.rdflibConverter( self.convertVirtualToKGEntry(a_tuple["inRelationTo"], identifierString)))) except Exception as e: 
logging.exception("Error: something went wrong for initial assertions:") if hasattr(e, 'message'): print(e.message) else: print(e) sys.exit(1) try: if row[col_headers.index(a_tuple["Column"]) + 1] != "": if self.cb_fn is not None: if a_tuple["Column"] in self.cb_tuple: for tuple_row in self.cb_tuple[a_tuple["Column"]]: if ("Code" in tuple_row) and (str(tuple_row['Code']) == str( row[col_headers.index(a_tuple["Column"]) + 1])): if ("Class" in tuple_row) and (tuple_row['Class'] != ""): if ',' in tuple_row['Class']: classTerms = self.parseString(tuple_row['Class'], ',') for classTerm in classTerms: nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter( self.codeMapper( classTerm)))) else: nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter( self.codeMapper( tuple_row['Class'])))) if ("Resource" in tuple_row) and (tuple_row['Resource'] != ""): if ',' in tuple_row['Resource']: resourceTerms = self.parseString(tuple_row['Resource'], ',') for resourceTerm in resourceTerms: nanopub.assertion.add((term, rdflib.OWL.sameAs, self.rdflibConverter( self.convertVirtualToKGEntry( self.codeMapper( resourceTerm))))) else: nanopub.assertion.add((term, rdflib.OWL.sameAs, self.rdflibConverter( self.convertVirtualToKGEntry( self.codeMapper( tuple_row[ 'Resource']))))) if ("Label" in tuple_row) and (tuple_row['Label'] != ""): nanopub.assertion.add((term, rdflib.RDFS.label, self.rdflibConverter( tuple_row["Label"]))) try: if str(row[col_headers.index(a_tuple["Column"]) + 1]) == "nan": pass elif str(row[col_headers.index(a_tuple["Column"]) + 1]).isdigit(): nanopub.assertion.add((term, sio.hasValue, rdflib.Literal( str(row[col_headers.index(a_tuple["Column"]) + 1]), datatype=rdflib.XSD.integer))) elif self.isfloat(str(row[col_headers.index(a_tuple["Column"]) + 1])): nanopub.assertion.add((term, sio.hasValue, rdflib.Literal( str(row[col_headers.index(a_tuple["Column"]) + 1]), datatype=rdflib.XSD.float))) else: nanopub.assertion.add((term, sio.hasValue, 
rdflib.Literal( str(row[col_headers.index(a_tuple["Column"]) + 1]), datatype=rdflib.XSD.string))) except Exception as e: logging.warning("Warning: Unable to add assertion: %s", row[col_headers.index(a_tuple["Column"]) + 1] + ":") if hasattr(e, 'message'): logging.warning(e.message) else: logging.warning(e) except Exception as e: logging.exception("Error: Something went wrong when asserting data value: %s", row[col_headers.index(a_tuple["Column"]) + 1] + ":") if hasattr(e, 'message'): logging.exception(e.message) else: logging.exception(e) try: nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal( "{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month, datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format( datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z", datatype=rdflib.XSD.dateTime))) if "wasDerivedFrom" in a_tuple: if ',' in a_tuple["wasDerivedFrom"]: derivedFromTerms = self.parseString(a_tuple["wasDerivedFrom"], ',') for derivedFromTerm in derivedFromTerms: nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter( self.convertVirtualToKGEntry(derivedFromTerm, identifierString)))) if self.checkVirtual(derivedFromTerm): if derivedFromTerm not in vref_list: vref_list.append(derivedFromTerm) else: nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter( self.convertVirtualToKGEntry(a_tuple["wasDerivedFrom"], identifierString)))) if self.checkVirtual(a_tuple["wasDerivedFrom"]): if a_tuple["wasDerivedFrom"] not in vref_list: vref_list.append(a_tuple["wasDerivedFrom"]) if "wasGeneratedBy" in a_tuple: if ',' in a_tuple["wasGeneratedBy"]: generatedByTerms = self.parseString(a_tuple["wasGeneratedBy"], ',') for generatedByTerm in generatedByTerms: nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter( self.convertVirtualToKGEntry(generatedByTerm, identifierString)))) if self.checkVirtual(generatedByTerm): if generatedByTerm not in vref_list: 
vref_list.append(generatedByTerm) else: nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter( self.convertVirtualToKGEntry(a_tuple["wasGeneratedBy"], identifierString)))) if self.checkVirtual(a_tuple["wasGeneratedBy"]): if a_tuple["wasGeneratedBy"] not in vref_list: vref_list.append(a_tuple["wasGeneratedBy"]) except Exception as e: logging.exception("Error: Something went wrong when adding provenance:") if hasattr(e, 'message'): print(e.message) else: print(e) except Exception as e: logging.warning("Warning: Unable to process tuple %s", a_tuple.__str__() + ":") if hasattr(e, 'message'): print(e.message) else: print(e) try: for vref in vref_list: vref_list = self.writeVirtualEntry(nanopub, vref_list, vref, identifierString) except Exception as e: logging.warning("Warning: Something went writing vref entries:") if hasattr(e, 'message'): print(e.message) else: print(e) except Exception as e: logging.exception("Error: Something went wrong when processing explicit tuples:") if hasattr(e, 'message'): print(e.message) else: print(e) sys.exit(1) except Exception as e: logging.warning("Warning: Unable to process Data file:") if hasattr(e, 'message'): print(e.message) else: print(e)
apache-2.0
javier-ruiz-b/docker-rasppi-images
raspberry-google-home/env/lib/python3.7/site-packages/pip/_vendor/packaging/requirements.py
11
4903
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import string import re from pip._vendor.pyparsing import stringStart, stringEnd, originalTextFor, ParseException from pip._vendor.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine from pip._vendor.pyparsing import Literal as L # noqa from pip._vendor.six.moves.urllib import parse as urlparse from ._typing import TYPE_CHECKING from .markers import MARKER_EXPR, Marker from .specifiers import LegacySpecifier, Specifier, SpecifierSet if TYPE_CHECKING: # pragma: no cover from typing import List class InvalidRequirement(ValueError): """ An invalid requirement was found, users should refer to PEP 508. """ ALPHANUM = Word(string.ascii_letters + string.digits) LBRACKET = L("[").suppress() RBRACKET = L("]").suppress() LPAREN = L("(").suppress() RPAREN = L(")").suppress() COMMA = L(",").suppress() SEMICOLON = L(";").suppress() AT = L("@").suppress() PUNCTUATION = Word("-_.") IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) NAME = IDENTIFIER("name") EXTRA = IDENTIFIER URI = Regex(r"[^ ]+")("url") URL = AT + URI EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY VERSION_MANY = Combine( VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False )("_raw_spec") _VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) _VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") 
VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") MARKER_EXPR.setParseAction( lambda s, l, t: Marker(s[t._original_start : t._original_end]) ) MARKER_SEPARATOR = SEMICOLON MARKER = MARKER_SEPARATOR + MARKER_EXPR VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) URL_AND_MARKER = URL + Optional(MARKER) NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd # pyparsing isn't thread safe during initialization, so we do it eagerly, see # issue #104 REQUIREMENT.parseString("x[]") class Requirement(object): """Parse a requirement. Parse a given requirement string into its parts, such as name, specifier, URL, and extras. Raises InvalidRequirement on a badly-formed requirement string. """ # TODO: Can we test whether something is contained within a requirement? # If so how do we do that? Do we need to test against the _name_ of # the thing as well as the version? What about the markers? # TODO: Can we normalize the name and extra name? 
def __init__(self, requirement_string): # type: (str) -> None try: req = REQUIREMENT.parseString(requirement_string) except ParseException as e: raise InvalidRequirement( 'Parse error at "{0!r}": {1}'.format( requirement_string[e.loc : e.loc + 8], e.msg ) ) self.name = req.name if req.url: parsed_url = urlparse.urlparse(req.url) if parsed_url.scheme == "file": if urlparse.urlunparse(parsed_url) != req.url: raise InvalidRequirement("Invalid URL given") elif not (parsed_url.scheme and parsed_url.netloc) or ( not parsed_url.scheme and not parsed_url.netloc ): raise InvalidRequirement("Invalid URL: {0}".format(req.url)) self.url = req.url else: self.url = None self.extras = set(req.extras.asList() if req.extras else []) self.specifier = SpecifierSet(req.specifier) self.marker = req.marker if req.marker else None def __str__(self): # type: () -> str parts = [self.name] # type: List[str] if self.extras: parts.append("[{0}]".format(",".join(sorted(self.extras)))) if self.specifier: parts.append(str(self.specifier)) if self.url: parts.append("@ {0}".format(self.url)) if self.marker: parts.append(" ") if self.marker: parts.append("; {0}".format(self.marker)) return "".join(parts) def __repr__(self): # type: () -> str return "<Requirement({0!r})>".format(str(self))
apache-2.0
Chasego/noi
src/inverse_matrix.py
1
1973
'''Calculate the inverse of a matrix by different methods Reference: - The Inverse of a Matrix (NYU Class: Linear Algebra) http://www.math.nyu.edu/~neylon/linalgfall04/project1/jja/group7.htm - Wiki: Invertible Matrix https://en.wikipedia.org/wiki/Invertible_matrix ''' import numpy as np def inv_mat(A, method='gauss_jordan_elim'): assert len(A) > 0 assert len(A) == len(A[0]) methods = {'gauss_jordan_elim': gauss_jordan_elim, 'newton': newton} return methods[method](A) def gauss_jordan_elim(A, touch_up=False): ''' @param -- touch_up : boolean, optional(default=False) whether to "touch up" corrections for the Gauss-Jordan algorithm which has been contaminated by small errors due to imperfect computer arithmetic Reference: - Wiki: Gaussian Elimination https://en.wikipedia.org/wiki/Gaussian_elimination ''' n = len(A) I = np.eye(n) Aug = np.append(A, I, axis=1) for j in xrange(n): if not Aug[j][j]: msg = "It's invertible!!!" raise ZeroDivisionError(msg) for i in xrange(n): if i == j: continue Aug[i] -= Aug[j] * Aug[i][j] / Aug[j][j] Aug[j] /= Aug[j][j] B = Aug[:, n:] return newton(A, B) if touch_up else B def newton(A, X=None): ''' @param -- X : array-like or None (default=None) a starting seed / guess for the inversion of matrix A Description: 1/(ax_n+1) = [1/(ax_n) + 1/(2-aX_n)] / 2 ==> x_n+1 = x_n(2 - ax_n) The 1st equation aims at leading ax_n+1 to be 1 Reference: - Choosing Starting Values for Newton-Raphson Computation of Reciprocals, Square-Roots and Square-Root Reciprocals https://hal.archives-ouvertes.fr/inria-00071899/document ''' #ToDo
gpl-3.0
erickt/hue
desktop/core/ext-py/httplib2-0.8/python3/httplib2test.py
61
74045
#!/usr/bin/env python3 """ httplib2test A set of unit tests for httplib2.py. Requires Python 3.0 or later """ __author__ = "Joe Gregorio (joe@bitworking.org)" __copyright__ = "Copyright 2006, Joe Gregorio" __contributors__ = ["Mark Pilgrim"] __license__ = "MIT" __history__ = """ """ __version__ = "0.2 ($Rev: 118 $)" import base64 import http.client import httplib2 import io import os import pickle import socket import ssl import sys import time import unittest import urllib.parse # The test resources base uri base = 'http://bitworking.org/projects/httplib2/test/' #base = 'http://localhost/projects/httplib2/test/' cacheDirName = ".cache" class CredentialsTest(unittest.TestCase): def test(self): c = httplib2.Credentials() c.add("joe", "password") self.assertEqual(("joe", "password"), list(c.iter("bitworking.org"))[0]) self.assertEqual(("joe", "password"), list(c.iter(""))[0]) c.add("fred", "password2", "wellformedweb.org") self.assertEqual(("joe", "password"), list(c.iter("bitworking.org"))[0]) self.assertEqual(1, len(list(c.iter("bitworking.org")))) self.assertEqual(2, len(list(c.iter("wellformedweb.org")))) self.assertTrue(("fred", "password2") in list(c.iter("wellformedweb.org"))) c.clear() self.assertEqual(0, len(list(c.iter("bitworking.org")))) c.add("fred", "password2", "wellformedweb.org") self.assertTrue(("fred", "password2") in list(c.iter("wellformedweb.org"))) self.assertEqual(0, len(list(c.iter("bitworking.org")))) self.assertEqual(0, len(list(c.iter("")))) class ParserTest(unittest.TestCase): def testFromStd66(self): self.assertEqual( ('http', 'example.com', '', None, None ), httplib2.parse_uri("http://example.com")) self.assertEqual( ('https', 'example.com', '', None, None ), httplib2.parse_uri("https://example.com")) self.assertEqual( ('https', 'example.com:8080', '', None, None ), httplib2.parse_uri("https://example.com:8080")) self.assertEqual( ('http', 'example.com', '/', None, None ), httplib2.parse_uri("http://example.com/")) self.assertEqual( 
('http', 'example.com', '/path', None, None ), httplib2.parse_uri("http://example.com/path")) self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', None ), httplib2.parse_uri("http://example.com/path?a=1&b=2")) self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', 'fred' ), httplib2.parse_uri("http://example.com/path?a=1&b=2#fred")) self.assertEqual( ('http', 'example.com', '/path', 'a=1&b=2', 'fred' ), httplib2.parse_uri("http://example.com/path?a=1&b=2#fred")) class UrlNormTest(unittest.TestCase): def test(self): self.assertEqual( "http://example.org/", httplib2.urlnorm("http://example.org")[-1]) self.assertEqual( "http://example.org/", httplib2.urlnorm("http://EXAMple.org")[-1]) self.assertEqual( "http://example.org/?=b", httplib2.urlnorm("http://EXAMple.org?=b")[-1]) self.assertEqual( "http://example.org/mypath?a=b", httplib2.urlnorm("http://EXAMple.org/mypath?a=b")[-1]) self.assertEqual( "http://localhost:80/", httplib2.urlnorm("http://localhost:80")[-1]) self.assertEqual( httplib2.urlnorm("http://localhost:80/"), httplib2.urlnorm("HTTP://LOCALHOST:80")) try: httplib2.urlnorm("/") self.fail("Non-absolute URIs should raise an exception") except httplib2.RelativeURIError: pass class UrlSafenameTest(unittest.TestCase): def test(self): # Test that different URIs end up generating different safe names self.assertEqual( "example.org,fred,a=b,58489f63a7a83c3b7794a6a398ee8b1f", httplib2.safename("http://example.org/fred/?a=b")) self.assertEqual( "example.org,fred,a=b,8c5946d56fec453071f43329ff0be46b", httplib2.safename("http://example.org/fred?/a=b")) self.assertEqual( "www.example.org,fred,a=b,499c44b8d844a011b67ea2c015116968", httplib2.safename("http://www.example.org/fred?/a=b")) self.assertEqual( httplib2.safename(httplib2.urlnorm("http://www")[-1]), httplib2.safename(httplib2.urlnorm("http://WWW")[-1])) self.assertEqual( "www.example.org,fred,a=b,692e843a333484ce0095b070497ab45d", httplib2.safename("https://www.example.org/fred?/a=b")) 
self.assertNotEqual( httplib2.safename("http://www"), httplib2.safename("https://www")) # Test the max length limits uri = "http://" + ("w" * 200) + ".org" uri2 = "http://" + ("w" * 201) + ".org" self.assertNotEqual( httplib2.safename(uri2), httplib2.safename(uri)) # Max length should be 200 + 1 (",") + 32 self.assertEqual(233, len(httplib2.safename(uri2))) self.assertEqual(233, len(httplib2.safename(uri))) # Unicode if sys.version_info >= (2,3): self.assertEqual( "xn--http,-4y1d.org,fred,a=b,579924c35db315e5a32e3d9963388193", httplib2.safename("http://\u2304.org/fred/?a=b")) class _MyResponse(io.BytesIO): def __init__(self, body, **kwargs): io.BytesIO.__init__(self, body) self.headers = kwargs def items(self): return self.headers.items() def iteritems(self): return iter(self.headers.items()) class _MyHTTPConnection(object): "This class is just a mock of httplib.HTTPConnection used for testing" def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=None, proxy_info=None): self.host = host self.port = port self.timeout = timeout self.log = "" self.sock = None def set_debuglevel(self, level): pass def connect(self): "Connect to a host on a given port." 
pass def close(self): pass def request(self, method, request_uri, body, headers): pass def getresponse(self): return _MyResponse(b"the body", status="200") class HttpTest(unittest.TestCase): def setUp(self): if os.path.exists(cacheDirName): [os.remove(os.path.join(cacheDirName, file)) for file in os.listdir(cacheDirName)] self.http = httplib2.Http(cacheDirName) self.http.clear_credentials() def testIPv6NoSSL(self): try: self.http.request("http://[::1]/") except socket.gaierror: self.fail("should get the address family right for IPv6") except socket.error: # Even if IPv6 isn't installed on a machine it should just raise socket.error pass def testIPv6SSL(self): try: self.http.request("https://[::1]/") except socket.gaierror: self.fail("should get the address family right for IPv6") except socket.error: # Even if IPv6 isn't installed on a machine it should just raise socket.error pass def testConnectionType(self): self.http.force_exception_to_status_code = False response, content = self.http.request("http://bitworking.org", connection_type=_MyHTTPConnection) self.assertEqual(response['content-location'], "http://bitworking.org") self.assertEqual(content, b"the body") def testGetUnknownServer(self): self.http.force_exception_to_status_code = False try: self.http.request("http://fred.bitworking.org/") self.fail("An httplib2.ServerNotFoundError Exception must be thrown on an unresolvable server.") except httplib2.ServerNotFoundError: pass # Now test with exceptions turned off self.http.force_exception_to_status_code = True (response, content) = self.http.request("http://fred.bitworking.org/") self.assertEqual(response['content-type'], 'text/plain') self.assertTrue(content.startswith(b"Unable to find")) self.assertEqual(response.status, 400) def testGetConnectionRefused(self): self.http.force_exception_to_status_code = False try: self.http.request("http://localhost:7777/") self.fail("An socket.error exception must be thrown on Connection Refused.") except socket.error: 
pass # Now test with exceptions turned off self.http.force_exception_to_status_code = True (response, content) = self.http.request("http://localhost:7777/") self.assertEqual(response['content-type'], 'text/plain') self.assertTrue(b"Connection refused" in content) self.assertEqual(response.status, 400) def testGetIRI(self): if sys.version_info >= (2,3): uri = urllib.parse.urljoin(base, "reflector/reflector.cgi?d=\N{CYRILLIC CAPITAL LETTER DJE}") (response, content) = self.http.request(uri, "GET") d = self.reflector(content) self.assertTrue('QUERY_STRING' in d) self.assertTrue(d['QUERY_STRING'].find('%D0%82') > 0) def testGetIsDefaultMethod(self): # Test that GET is the default method uri = urllib.parse.urljoin(base, "methods/method_reflector.cgi") (response, content) = self.http.request(uri) self.assertEqual(response['x-method'], "GET") def testDifferentMethods(self): # Test that all methods can be used uri = urllib.parse.urljoin(base, "methods/method_reflector.cgi") for method in ["GET", "PUT", "DELETE", "POST"]: (response, content) = self.http.request(uri, method, body=b" ") self.assertEqual(response['x-method'], method) def testHeadRead(self): # Test that we don't try to read the response of a HEAD request # since httplib blocks response.read() for HEAD requests. # Oddly enough this doesn't appear as a problem when doing HEAD requests # against Apache servers. uri = "http://www.google.com/" (response, content) = self.http.request(uri, "HEAD") self.assertEqual(response.status, 200) self.assertEqual(content, b"") def testGetNoCache(self): # Test that can do a GET w/o the cache turned on. 
http = httplib2.Http() uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.previous, None) def testGetOnlyIfCachedCacheHit(self): # Test that can do a GET with cache and 'only-if-cached' uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = self.http.request(uri, "GET") (response, content) = self.http.request(uri, "GET", headers={'cache-control': 'only-if-cached'}) self.assertEqual(response.fromcache, True) self.assertEqual(response.status, 200) def testGetOnlyIfCachedCacheMiss(self): # Test that can do a GET with no cache with 'only-if-cached' uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = self.http.request(uri, "GET", headers={'cache-control': 'only-if-cached'}) self.assertEqual(response.fromcache, False) self.assertEqual(response.status, 504) def testGetOnlyIfCachedNoCacheAtAll(self): # Test that can do a GET with no cache with 'only-if-cached' # Of course, there might be an intermediary beyond us # that responds to the 'only-if-cached', so this # test can't really be guaranteed to pass. 
http = httplib2.Http() uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = http.request(uri, "GET", headers={'cache-control': 'only-if-cached'}) self.assertEqual(response.fromcache, False) self.assertEqual(response.status, 504) def testUserAgent(self): # Test that we provide a default user-agent uri = urllib.parse.urljoin(base, "user-agent/test.cgi") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertTrue(content.startswith(b"Python-httplib2/")) def testUserAgentNonDefault(self): # Test that the default user-agent can be over-ridden uri = urllib.parse.urljoin(base, "user-agent/test.cgi") (response, content) = self.http.request(uri, "GET", headers={'User-Agent': 'fred/1.0'}) self.assertEqual(response.status, 200) self.assertTrue(content.startswith(b"fred/1.0")) def testGet300WithLocation(self): # Test the we automatically follow 300 redirects if a Location: header is provided uri = urllib.parse.urljoin(base, "300/with-location-header.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 300) self.assertEqual(response.previous.fromcache, False) # Confirm that the intermediate 300 is not cached (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 300) self.assertEqual(response.previous.fromcache, False) def testGet300WithLocationNoRedirect(self): # Test the we automatically follow 300 redirects if a Location: header is provided self.http.follow_redirects = False uri = urllib.parse.urljoin(base, "300/with-location-header.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 300) def testGet300WithoutLocation(self): # Not giving a Location: header in a 300 response is 
acceptable # In which case we just return the 300 response uri = urllib.parse.urljoin(base, "300/without-location-header.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 300) self.assertTrue(response['content-type'].startswith("text/html")) self.assertEqual(response.previous, None) def testGet301(self): # Test that we automatically follow 301 redirects # and that we cache the 301 response uri = urllib.parse.urljoin(base, "301/onestep.asis") destination = urllib.parse.urljoin(base, "302/final-destination.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertTrue('content-location' in response) self.assertEqual(response['content-location'], destination) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 301) self.assertEqual(response.previous.fromcache, False) (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response['content-location'], destination) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 301) self.assertEqual(response.previous.fromcache, True) def testHead301(self): # Test that we automatically follow 301 redirects uri = urllib.parse.urljoin(base, "301/onestep.asis") (response, content) = self.http.request(uri, "HEAD") self.assertEqual(response.status, 200) self.assertEqual(response.previous.status, 301) self.assertEqual(response.previous.fromcache, False) def testGet301NoRedirect(self): # Test that we automatically follow 301 redirects # and that we cache the 301 response self.http.follow_redirects = False uri = urllib.parse.urljoin(base, "301/onestep.asis") destination = urllib.parse.urljoin(base, "302/final-destination.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 301) def testGet302(self): # Test that we automatically follow 302 redirects # 
and that we DO NOT cache the 302 response uri = urllib.parse.urljoin(base, "302/onestep.asis") destination = urllib.parse.urljoin(base, "302/final-destination.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response['content-location'], destination) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 302) self.assertEqual(response.previous.fromcache, False) uri = urllib.parse.urljoin(base, "302/onestep.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) self.assertEqual(response['content-location'], destination) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 302) self.assertEqual(response.previous.fromcache, False) self.assertEqual(response.previous['content-location'], uri) uri = urllib.parse.urljoin(base, "302/twostep.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 302) self.assertEqual(response.previous.fromcache, False) def testGet302RedirectionLimit(self): # Test that we can set a lower redirection limit # and that we raise an exception when we exceed # that limit. 
self.http.force_exception_to_status_code = False uri = urllib.parse.urljoin(base, "302/twostep.asis") try: (response, content) = self.http.request(uri, "GET", redirections = 1) self.fail("This should not happen") except httplib2.RedirectLimit: pass except Exception as e: self.fail("Threw wrong kind of exception ") # Re-run the test with out the exceptions self.http.force_exception_to_status_code = True (response, content) = self.http.request(uri, "GET", redirections = 1) self.assertEqual(response.status, 500) self.assertTrue(response.reason.startswith("Redirected more")) self.assertEqual("302", response['status']) self.assertTrue(content.startswith(b"<html>")) self.assertTrue(response.previous != None) def testGet302NoLocation(self): # Test that we throw an exception when we get # a 302 with no Location: header. self.http.force_exception_to_status_code = False uri = urllib.parse.urljoin(base, "302/no-location.asis") try: (response, content) = self.http.request(uri, "GET") self.fail("Should never reach here") except httplib2.RedirectMissingLocation: pass except Exception as e: self.fail("Threw wrong kind of exception ") # Re-run the test with out the exceptions self.http.force_exception_to_status_code = True (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 500) self.assertTrue(response.reason.startswith("Redirected but")) self.assertEqual("302", response['status']) self.assertTrue(content.startswith(b"This is content")) def testGet301ViaHttps(self): # Google always redirects to http://google.com (response, content) = self.http.request("https://code.google.com/apis/", "GET") self.assertEqual(200, response.status) self.assertEqual(301, response.previous.status) def testGetViaHttps(self): # Test that we can handle HTTPS (response, content) = self.http.request("https://google.com/adsense/", "GET") self.assertEqual(200, response.status) def testGetViaHttpsSpecViolationOnLocation(self): # Test that we follow redirects through HTTPS # 
even if they violate the spec by including # a relative Location: header instead of an # absolute one. (response, content) = self.http.request("https://google.com/adsense", "GET") self.assertEqual(200, response.status) self.assertNotEqual(None, response.previous) def testGetViaHttpsKeyCert(self): # At this point I can only test # that the key and cert files are passed in # correctly to httplib. It would be nice to have # a real https endpoint to test against. http = httplib2.Http(timeout=2) http.add_certificate("akeyfile", "acertfile", "bitworking.org") try: (response, content) = http.request("https://bitworking.org", "GET") except AttributeError: self.assertEqual(http.connections["https:bitworking.org"].key_file, "akeyfile") self.assertEqual(http.connections["https:bitworking.org"].cert_file, "acertfile") except IOError: # Skip on 3.2 pass try: (response, content) = http.request("https://notthere.bitworking.org", "GET") except httplib2.ServerNotFoundError: self.assertEqual(http.connections["https:notthere.bitworking.org"].key_file, None) self.assertEqual(http.connections["https:notthere.bitworking.org"].cert_file, None) except IOError: # Skip on 3.2 pass def testSslCertValidation(self): # Test that we get an ssl.SSLError when specifying a non-existent CA # certs file. http = httplib2.Http(ca_certs='/nosuchfile') self.assertRaises(IOError, http.request, "https://www.google.com/", "GET") # Test that we get a SSLHandshakeError if we try to access # https://www.google.com, using a CA cert file that doesn't contain # the CA Gogole uses (i.e., simulating a cert that's not signed by a # trusted CA). 
other_ca_certs = os.path.join( os.path.dirname(os.path.abspath(httplib2.__file__ )), "test", "other_cacerts.txt") http = httplib2.Http(ca_certs=other_ca_certs) self.assertRaises(ssl.SSLError, http.request,"https://www.google.com/", "GET") def testSniHostnameValidation(self): self.http.request("https://google.com/", method="GET") def testGet303(self): # Do a follow-up GET on a Location: header # returned from a POST that gave a 303. uri = urllib.parse.urljoin(base, "303/303.cgi") (response, content) = self.http.request(uri, "POST", " ") self.assertEqual(response.status, 200) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 303) def testGet303NoRedirect(self): # Do a follow-up GET on a Location: header # returned from a POST that gave a 303. self.http.follow_redirects = False uri = urllib.parse.urljoin(base, "303/303.cgi") (response, content) = self.http.request(uri, "POST", " ") self.assertEqual(response.status, 303) def test303ForDifferentMethods(self): # Test that all methods can be used uri = urllib.parse.urljoin(base, "303/redirect-to-reflector.cgi") for (method, method_on_303) in [("PUT", "GET"), ("DELETE", "GET"), ("POST", "GET"), ("GET", "GET"), ("HEAD", "GET")]: (response, content) = self.http.request(uri, method, body=b" ") self.assertEqual(response['x-method'], method_on_303) def testGet304(self): # Test that we use ETags properly to validate our cache uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = self.http.request(uri, "GET") self.assertNotEqual(response['etag'], "") (response, content) = self.http.request(uri, "GET") (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'must-revalidate'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) cache_file_name = os.path.join(cacheDirName, httplib2.safename(httplib2.urlnorm(uri)[-1])) f = open(cache_file_name, "r") status_line = f.readline() f.close() 
self.assertTrue(status_line.startswith("status:")) (response, content) = self.http.request(uri, "HEAD") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) (response, content) = self.http.request(uri, "GET", headers = {'range': 'bytes=0-0'}) self.assertEqual(response.status, 206) self.assertEqual(response.fromcache, False) def testGetIgnoreEtag(self): # Test that we can forcibly ignore ETags uri = urllib.parse.urljoin(base, "reflector/reflector.cgi") (response, content) = self.http.request(uri, "GET") self.assertNotEqual(response['etag'], "") (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'}) d = self.reflector(content) self.assertTrue('HTTP_IF_NONE_MATCH' in d) self.http.ignore_etag = True (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'}) d = self.reflector(content) self.assertEqual(response.fromcache, False) self.assertFalse('HTTP_IF_NONE_MATCH' in d) def testOverrideEtag(self): # Test that we can forcibly ignore ETags uri = urllib.parse.urljoin(base, "reflector/reflector.cgi") (response, content) = self.http.request(uri, "GET") self.assertNotEqual(response['etag'], "") (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0'}) d = self.reflector(content) self.assertTrue('HTTP_IF_NONE_MATCH' in d) self.assertNotEqual(d['HTTP_IF_NONE_MATCH'], "fred") (response, content) = self.http.request(uri, "GET", headers = {'cache-control': 'max-age=0', 'if-none-match': 'fred'}) d = self.reflector(content) self.assertTrue('HTTP_IF_NONE_MATCH' in d) self.assertEqual(d['HTTP_IF_NONE_MATCH'], "fred") #MAP-commented this out because it consistently fails # def testGet304EndToEnd(self): # # Test that end to end headers get overwritten in the cache # uri = urllib.parse.urljoin(base, "304/end2end.cgi") # (response, content) = self.http.request(uri, "GET") # self.assertNotEqual(response['etag'], "") # old_date = response['date'] # 
time.sleep(2) # # (response, content) = self.http.request(uri, "GET", headers = {'Cache-Control': 'max-age=0'}) # # The response should be from the cache, but the Date: header should be updated. # new_date = response['date'] # self.assertNotEqual(new_date, old_date) # self.assertEqual(response.status, 200) # self.assertEqual(response.fromcache, True) def testGet304LastModified(self): # Test that we can still handle a 304 # by only using the last-modified cache validator. uri = urllib.parse.urljoin(base, "304/last-modified-only/last-modified-only.txt") (response, content) = self.http.request(uri, "GET") self.assertNotEqual(response['last-modified'], "") (response, content) = self.http.request(uri, "GET") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) def testGet307(self): # Test that we do follow 307 redirects but # do not cache the 307 uri = urllib.parse.urljoin(base, "307/onestep.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 307) self.assertEqual(response.previous.fromcache, False) (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) self.assertEqual(content, b"This is the final destination.\n") self.assertEqual(response.previous.status, 307) self.assertEqual(response.previous.fromcache, False) def testGet410(self): # Test that we pass 410's through uri = urllib.parse.urljoin(base, "410/410.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 410) def testVaryHeaderSimple(self): """ RFC 2616 13.6 When the cache receives a subsequent request whose Request-URI specifies one or more cache entries including a Vary header field, the cache MUST NOT use such a cache entry to construct a response to the new 
request unless all of the selecting request-headers present in the new request match the corresponding stored request-headers in the original request. """ # test that the vary header is sent uri = urllib.parse.urljoin(base, "vary/accept.asis") (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'}) self.assertEqual(response.status, 200) self.assertTrue('vary' in response) # get the resource again, from the cache since accept header in this # request is the same as the request (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True, msg="Should be from cache") # get the resource again, not from cache since Accept headers does not match (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/html'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False, msg="Should not be from cache") # get the resource again, without any Accept header, so again no match (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False, msg="Should not be from cache") def testNoVary(self): pass # when there is no vary, a different Accept header (e.g.) 
should not # impact if the cache is used # test that the vary header is not sent # uri = urllib.parse.urljoin(base, "vary/no-vary.asis") # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'}) # self.assertEqual(response.status, 200) # self.assertFalse('vary' in response) # # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'}) # self.assertEqual(response.status, 200) # self.assertEqual(response.fromcache, True, msg="Should be from cache") # # (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/html'}) # self.assertEqual(response.status, 200) # self.assertEqual(response.fromcache, True, msg="Should be from cache") def testVaryHeaderDouble(self): uri = urllib.parse.urljoin(base, "vary/accept-double.asis") (response, content) = self.http.request(uri, "GET", headers={ 'Accept': 'text/plain', 'Accept-Language': 'da, en-gb;q=0.8, en;q=0.7'}) self.assertEqual(response.status, 200) self.assertTrue('vary' in response) # we are from cache (response, content) = self.http.request(uri, "GET", headers={ 'Accept': 'text/plain', 'Accept-Language': 'da, en-gb;q=0.8, en;q=0.7'}) self.assertEqual(response.fromcache, True, msg="Should be from cache") (response, content) = self.http.request(uri, "GET", headers={'Accept': 'text/plain'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) # get the resource again, not from cache, varied headers don't match exact (response, content) = self.http.request(uri, "GET", headers={'Accept-Language': 'da'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False, msg="Should not be from cache") def testVaryUnusedHeader(self): # A header's value is not considered to vary if it's not used at all. 
uri = urllib.parse.urljoin(base, "vary/unused-header.asis") (response, content) = self.http.request(uri, "GET", headers={ 'Accept': 'text/plain'}) self.assertEqual(response.status, 200) self.assertTrue('vary' in response) # we are from cache (response, content) = self.http.request(uri, "GET", headers={ 'Accept': 'text/plain',}) self.assertEqual(response.fromcache, True, msg="Should be from cache") def testHeadGZip(self): # Test that we don't try to decompress a HEAD response uri = urllib.parse.urljoin(base, "gzip/final-destination.txt") (response, content) = self.http.request(uri, "HEAD") self.assertEqual(response.status, 200) self.assertNotEqual(int(response['content-length']), 0) self.assertEqual(content, b"") def testGetGZip(self): # Test that we support gzip compression uri = urllib.parse.urljoin(base, "gzip/final-destination.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertFalse('content-encoding' in response) self.assertTrue('-content-encoding' in response) self.assertEqual(int(response['content-length']), len(b"This is the final destination.\n")) self.assertEqual(content, b"This is the final destination.\n") def testPostAndGZipResponse(self): uri = urllib.parse.urljoin(base, "gzip/post.cgi") (response, content) = self.http.request(uri, "POST", body=" ") self.assertEqual(response.status, 200) self.assertFalse('content-encoding' in response) self.assertTrue('-content-encoding' in response) def testGetGZipFailure(self): # Test that we raise a good exception when the gzip fails self.http.force_exception_to_status_code = False uri = urllib.parse.urljoin(base, "gzip/failed-compression.asis") try: (response, content) = self.http.request(uri, "GET") self.fail("Should never reach here") except httplib2.FailedToDecompressContent: pass except Exception: self.fail("Threw wrong kind of exception") # Re-run the test with out the exceptions self.http.force_exception_to_status_code = True (response, content) = 
self.http.request(uri, "GET") self.assertEqual(response.status, 500) self.assertTrue(response.reason.startswith("Content purported")) def testIndividualTimeout(self): uri = urllib.parse.urljoin(base, "timeout/timeout.cgi") http = httplib2.Http(timeout=1) http.force_exception_to_status_code = True (response, content) = http.request(uri) self.assertEqual(response.status, 408) self.assertTrue(response.reason.startswith("Request Timeout")) self.assertTrue(content.startswith(b"Request Timeout")) def testGetDeflate(self): # Test that we support deflate compression uri = urllib.parse.urljoin(base, "deflate/deflated.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertFalse('content-encoding' in response) self.assertEqual(int(response['content-length']), len("This is the final destination.")) self.assertEqual(content, b"This is the final destination.") def testGetDeflateFailure(self): # Test that we raise a good exception when the deflate fails self.http.force_exception_to_status_code = False uri = urllib.parse.urljoin(base, "deflate/failed-compression.asis") try: (response, content) = self.http.request(uri, "GET") self.fail("Should never reach here") except httplib2.FailedToDecompressContent: pass except Exception: self.fail("Threw wrong kind of exception") # Re-run the test with out the exceptions self.http.force_exception_to_status_code = True (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 500) self.assertTrue(response.reason.startswith("Content purported")) def testGetDuplicateHeaders(self): # Test that duplicate headers get concatenated via ',' uri = urllib.parse.urljoin(base, "duplicate-headers/multilink.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(content, b"This is content\n") self.assertEqual(response['link'].split(",")[0], '<http://bitworking.org>; rel="home"; title="BitWorking"') def 
testGetCacheControlNoCache(self): # Test Cache-Control: no-cache on requests uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = self.http.request(uri, "GET") self.assertNotEqual(response['etag'], "") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-cache'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) def testGetCacheControlPragmaNoCache(self): # Test Pragma: no-cache on requests uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = self.http.request(uri, "GET") self.assertNotEqual(response['etag'], "") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) (response, content) = self.http.request(uri, "GET", headers={'Pragma': 'no-cache'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) def testGetCacheControlNoStoreRequest(self): # A no-store request means that the response should not be stored. uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) def testGetCacheControlNoStoreResponse(self): # A no-store response means that the response should not be stored. 
uri = urllib.parse.urljoin(base, "no-store/no-store.asis") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) def testGetCacheControlNoCacheNoStoreRequest(self): # Test that a no-store, no-cache clears the entry from the cache # even if it was cached previously. uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = self.http.request(uri, "GET") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.fromcache, True) (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store, no-cache'}) (response, content) = self.http.request(uri, "GET", headers={'Cache-Control': 'no-store, no-cache'}) self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) def testUpdateInvalidatesCache(self): # Test that calling PUT or DELETE on a # URI that is cache invalidates that cache. 
uri = urllib.parse.urljoin(base, "304/test_etag.txt") (response, content) = self.http.request(uri, "GET") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.fromcache, True) (response, content) = self.http.request(uri, "DELETE") self.assertEqual(response.status, 405) (response, content) = self.http.request(uri, "GET") self.assertEqual(response.fromcache, False) def testUpdateUsesCachedETag(self): # Test that we natively support http://www.w3.org/1999/04/Editing/ uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) (response, content) = self.http.request(uri, "PUT", body="foo") self.assertEqual(response.status, 200) (response, content) = self.http.request(uri, "PUT", body="foo") self.assertEqual(response.status, 412) def testUpdatePatchUsesCachedETag(self): # Test that we natively support http://www.w3.org/1999/04/Editing/ uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) (response, content) = self.http.request(uri, "PATCH", body="foo") self.assertEqual(response.status, 200) (response, content) = self.http.request(uri, "PATCH", body="foo") self.assertEqual(response.status, 412) def testUpdateUsesCachedETagAndOCMethod(self): # Test that we natively support http://www.w3.org/1999/04/Editing/ uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) 
(response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) self.http.optimistic_concurrency_methods.append("DELETE") (response, content) = self.http.request(uri, "DELETE") self.assertEqual(response.status, 200) def testUpdateUsesCachedETagOverridden(self): # Test that we natively support http://www.w3.org/1999/04/Editing/ uri = urllib.parse.urljoin(base, "conditional-updates/test.cgi") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, False) (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) self.assertEqual(response.fromcache, True) (response, content) = self.http.request(uri, "PUT", body="foo", headers={'if-match': 'fred'}) self.assertEqual(response.status, 412) def testBasicAuth(self): # Test Basic Authentication uri = urllib.parse.urljoin(base, "basic/file.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) uri = urllib.parse.urljoin(base, "basic/") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) self.http.add_credentials('joe', 'password') (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) uri = urllib.parse.urljoin(base, "basic/file.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) def testBasicAuthWithDomain(self): # Test Basic Authentication uri = urllib.parse.urljoin(base, "basic/file.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) uri = urllib.parse.urljoin(base, "basic/") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) self.http.add_credentials('joe', 'password', "example.org") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) uri = urllib.parse.urljoin(base, 
"basic/file.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) domain = urllib.parse.urlparse(base)[1] self.http.add_credentials('joe', 'password', domain) (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) uri = urllib.parse.urljoin(base, "basic/file.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) def testBasicAuthTwoDifferentCredentials(self): # Test Basic Authentication with multiple sets of credentials uri = urllib.parse.urljoin(base, "basic2/file.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) uri = urllib.parse.urljoin(base, "basic2/") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) self.http.add_credentials('fred', 'barney') (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) uri = urllib.parse.urljoin(base, "basic2/file.txt") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) def testBasicAuthNested(self): # Test Basic Authentication with resources # that are nested uri = urllib.parse.urljoin(base, "basic-nested/") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) uri = urllib.parse.urljoin(base, "basic-nested/subdir") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) # Now add in credentials one at a time and test. 
self.http.add_credentials('joe', 'password') uri = urllib.parse.urljoin(base, "basic-nested/") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) uri = urllib.parse.urljoin(base, "basic-nested/subdir") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) self.http.add_credentials('fred', 'barney') uri = urllib.parse.urljoin(base, "basic-nested/") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) uri = urllib.parse.urljoin(base, "basic-nested/subdir") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) def testDigestAuth(self): # Test that we support Digest Authentication uri = urllib.parse.urljoin(base, "digest/") (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 401) self.http.add_credentials('joe', 'password') (response, content) = self.http.request(uri, "GET") self.assertEqual(response.status, 200) uri = urllib.parse.urljoin(base, "digest/file.txt") (response, content) = self.http.request(uri, "GET") def testDigestAuthNextNonceAndNC(self): # Test that if the server sets nextnonce that we reset # the nonce count back to 1 uri = urllib.parse.urljoin(base, "digest/file.txt") self.http.add_credentials('joe', 'password') (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"}) info = httplib2._parse_www_authenticate(response, 'authentication-info') self.assertEqual(response.status, 200) (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"}) info2 = httplib2._parse_www_authenticate(response, 'authentication-info') self.assertEqual(response.status, 200) if 'nextnonce' in info: self.assertEqual(info2['nc'], 1) def testDigestAuthStale(self): # Test that we can handle a nonce becoming stale uri = urllib.parse.urljoin(base, "digest-expire/file.txt") self.http.add_credentials('joe', 'password') (response, 
content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"}) info = httplib2._parse_www_authenticate(response, 'authentication-info') self.assertEqual(response.status, 200) time.sleep(3) # Sleep long enough that the nonce becomes stale (response, content) = self.http.request(uri, "GET", headers = {"cache-control":"no-cache"}) self.assertFalse(response.fromcache) self.assertTrue(response._stale_digest) info3 = httplib2._parse_www_authenticate(response, 'authentication-info') self.assertEqual(response.status, 200) def reflector(self, content): return dict( [tuple(x.split("=", 1)) for x in content.decode('utf-8').strip().split("\n")] ) def testReflector(self): uri = urllib.parse.urljoin(base, "reflector/reflector.cgi") (response, content) = self.http.request(uri, "GET") d = self.reflector(content) self.assertTrue('HTTP_USER_AGENT' in d) def testConnectionClose(self): uri = "http://www.google.com/" (response, content) = self.http.request(uri, "GET") for c in self.http.connections.values(): self.assertNotEqual(None, c.sock) (response, content) = self.http.request(uri, "GET", headers={"connection": "close"}) for c in self.http.connections.values(): self.assertEqual(None, c.sock) def testPickleHttp(self): pickled_http = pickle.dumps(self.http) new_http = pickle.loads(pickled_http) self.assertEqual(sorted(new_http.__dict__.keys()), sorted(self.http.__dict__.keys())) for key in new_http.__dict__: if key in ('certificates', 'credentials'): self.assertEqual(new_http.__dict__[key].credentials, self.http.__dict__[key].credentials) elif key == 'cache': self.assertEqual(new_http.__dict__[key].cache, self.http.__dict__[key].cache) else: self.assertEqual(new_http.__dict__[key], self.http.__dict__[key]) def testPickleHttpWithConnection(self): self.http.request('http://bitworking.org', connection_type=_MyHTTPConnection) pickled_http = pickle.dumps(self.http) new_http = pickle.loads(pickled_http) self.assertEqual(list(self.http.connections.keys()), 
['http:bitworking.org']) self.assertEqual(new_http.connections, {}) def testPickleCustomRequestHttp(self): def dummy_request(*args, **kwargs): return new_request(*args, **kwargs) dummy_request.dummy_attr = 'dummy_value' self.http.request = dummy_request pickled_http = pickle.dumps(self.http) self.assertFalse(b"S'request'" in pickled_http) try: import memcache class HttpTestMemCached(HttpTest): def setUp(self): self.cache = memcache.Client(['127.0.0.1:11211'], debug=0) #self.cache = memcache.Client(['10.0.0.4:11211'], debug=1) self.http = httplib2.Http(self.cache) self.cache.flush_all() # Not exactly sure why the sleep is needed here, but # if not present then some unit tests that rely on caching # fail. Memcached seems to lose some sets immediately # after a flush_all if the set is to a value that # was previously cached. (Maybe the flush is handled async?) time.sleep(1) self.http.clear_credentials() except: pass # ------------------------------------------------------------------------ class HttpPrivateTest(unittest.TestCase): def testParseCacheControl(self): # Test that we can parse the Cache-Control header self.assertEqual({}, httplib2._parse_cache_control({})) self.assertEqual({'no-cache': 1}, httplib2._parse_cache_control({'cache-control': ' no-cache'})) cc = httplib2._parse_cache_control({'cache-control': ' no-cache, max-age = 7200'}) self.assertEqual(cc['no-cache'], 1) self.assertEqual(cc['max-age'], '7200') cc = httplib2._parse_cache_control({'cache-control': ' , '}) self.assertEqual(cc[''], 1) try: cc = httplib2._parse_cache_control({'cache-control': 'Max-age=3600;post-check=1800,pre-check=3600'}) self.assertTrue("max-age" in cc) except: self.fail("Should not throw exception") def testNormalizeHeaders(self): # Test that we normalize headers to lowercase h = httplib2._normalize_headers({'Cache-Control': 'no-cache', 'Other': 'Stuff'}) self.assertTrue('cache-control' in h) self.assertTrue('other' in h) self.assertEqual('Stuff', h['other']) def 
testExpirationModelTransparent(self): # Test that no-cache makes our request TRANSPARENT response_headers = { 'cache-control': 'max-age=7200' } request_headers = { 'cache-control': 'no-cache' } self.assertEqual("TRANSPARENT", httplib2._entry_disposition(response_headers, request_headers)) def testMaxAgeNonNumeric(self): # Test that no-cache makes our request TRANSPARENT response_headers = { 'cache-control': 'max-age=fred, min-fresh=barney' } request_headers = { } self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers)) def testExpirationModelNoCacheResponse(self): # The date and expires point to an entry that should be # FRESH, but the no-cache over-rides that. now = time.time() response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)), 'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+4)), 'cache-control': 'no-cache' } request_headers = { } self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers)) def testExpirationModelStaleRequestMustReval(self): # must-revalidate forces STALE self.assertEqual("STALE", httplib2._entry_disposition({}, {'cache-control': 'must-revalidate'})) def testExpirationModelStaleResponseMustReval(self): # must-revalidate forces STALE self.assertEqual("STALE", httplib2._entry_disposition({'cache-control': 'must-revalidate'}, {})) def testExpirationModelFresh(self): response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()), 'cache-control': 'max-age=2' } request_headers = { } self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers)) time.sleep(3) self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers)) def testExpirationMaxAge0(self): response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()), 'cache-control': 'max-age=0' } request_headers = { } self.assertEqual("STALE", 
httplib2._entry_disposition(response_headers, request_headers)) def testExpirationModelDateAndExpires(self): now = time.time() response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)), 'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+2)), } request_headers = { } self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers)) time.sleep(3) self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers)) def testExpiresZero(self): now = time.time() response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)), 'expires': "0", } request_headers = { } self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers)) def testExpirationModelDateOnly(self): now = time.time() response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+3)), } request_headers = { } self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers)) def testExpirationModelOnlyIfCached(self): response_headers = { } request_headers = { 'cache-control': 'only-if-cached', } self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers)) def testExpirationModelMaxAgeBoth(self): now = time.time() response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)), 'cache-control': 'max-age=2' } request_headers = { 'cache-control': 'max-age=0' } self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers)) def testExpirationModelDateAndExpiresMinFresh1(self): now = time.time() response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)), 'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+2)), } request_headers = { 'cache-control': 'min-fresh=2' } self.assertEqual("STALE", httplib2._entry_disposition(response_headers, request_headers)) def 
testExpirationModelDateAndExpiresMinFresh2(self): now = time.time() response_headers = { 'date': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now)), 'expires': time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(now+4)), } request_headers = { 'cache-control': 'min-fresh=2' } self.assertEqual("FRESH", httplib2._entry_disposition(response_headers, request_headers)) def testParseWWWAuthenticateEmpty(self): res = httplib2._parse_www_authenticate({}) self.assertEqual(len(list(res.keys())), 0) def testParseWWWAuthenticate(self): # different uses of spaces around commas res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Test realm="test realm" , foo=foo ,bar="bar", baz=baz,qux=qux'}) self.assertEqual(len(list(res.keys())), 1) self.assertEqual(len(list(res['test'].keys())), 5) # tokens with non-alphanum res = httplib2._parse_www_authenticate({ 'www-authenticate': 'T*!%#st realm=to*!%#en, to*!%#en="quoted string"'}) self.assertEqual(len(list(res.keys())), 1) self.assertEqual(len(list(res['t*!%#st'].keys())), 2) # quoted string with quoted pairs res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Test realm="a \\"test\\" realm"'}) self.assertEqual(len(list(res.keys())), 1) self.assertEqual(res['test']['realm'], 'a "test" realm') def testParseWWWAuthenticateStrict(self): httplib2.USE_WWW_AUTH_STRICT_PARSING = 1; self.testParseWWWAuthenticate(); httplib2.USE_WWW_AUTH_STRICT_PARSING = 0; def testParseWWWAuthenticateBasic(self): res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me"'}) basic = res['basic'] self.assertEqual('me', basic['realm']) res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me", algorithm="MD5"'}) basic = res['basic'] self.assertEqual('me', basic['realm']) self.assertEqual('MD5', basic['algorithm']) res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me", algorithm=MD5'}) basic = res['basic'] self.assertEqual('me', basic['realm']) self.assertEqual('MD5', 
basic['algorithm']) def testParseWWWAuthenticateBasic2(self): res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic realm="me",other="fred" '}) basic = res['basic'] self.assertEqual('me', basic['realm']) self.assertEqual('fred', basic['other']) def testParseWWWAuthenticateBasic3(self): res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Basic REAlm="me" '}) basic = res['basic'] self.assertEqual('me', basic['realm']) def testParseWWWAuthenticateDigest(self): res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41"'}) digest = res['digest'] self.assertEqual('testrealm@host.com', digest['realm']) self.assertEqual('auth,auth-int', digest['qop']) def testParseWWWAuthenticateMultiple(self): res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41" Basic REAlm="me" '}) digest = res['digest'] self.assertEqual('testrealm@host.com', digest['realm']) self.assertEqual('auth,auth-int', digest['qop']) self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce']) self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque']) basic = res['basic'] self.assertEqual('me', basic['realm']) def testParseWWWAuthenticateMultiple2(self): # Handle an added comma between challenges, which might get thrown in if the challenges were # originally sent in separate www-authenticate headers. 
res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me" '}) digest = res['digest'] self.assertEqual('testrealm@host.com', digest['realm']) self.assertEqual('auth,auth-int', digest['qop']) self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce']) self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque']) basic = res['basic'] self.assertEqual('me', basic['realm']) def testParseWWWAuthenticateMultiple3(self): # Handle an added comma between challenges, which might get thrown in if the challenges were # originally sent in separate www-authenticate headers. res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me", WSSE realm="foo", profile="UsernameToken"'}) digest = res['digest'] self.assertEqual('testrealm@host.com', digest['realm']) self.assertEqual('auth,auth-int', digest['qop']) self.assertEqual('dcd98b7102dd2f0e8b11d0f600bfb0c093', digest['nonce']) self.assertEqual('5ccc069c403ebaf9f0171e9517f40e41', digest['opaque']) basic = res['basic'] self.assertEqual('me', basic['realm']) wsse = res['wsse'] self.assertEqual('foo', wsse['realm']) self.assertEqual('UsernameToken', wsse['profile']) def testParseWWWAuthenticateMultiple4(self): res = httplib2._parse_www_authenticate({ 'www-authenticate': 'Digest realm="test-real.m@host.com", qop \t=\t"\tauth,auth-int", nonce="(*)&^&$%#",opaque="5ccc069c403ebaf9f0171e9517f40e41", Basic REAlm="me", WSSE realm="foo", profile="UsernameToken"'}) digest = res['digest'] self.assertEqual('test-real.m@host.com', digest['realm']) self.assertEqual('\tauth,auth-int', digest['qop']) self.assertEqual('(*)&^&$%#', digest['nonce']) def testParseWWWAuthenticateMoreQuoteCombos(self): res = 
httplib2._parse_www_authenticate({'www-authenticate':'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true'}) digest = res['digest'] self.assertEqual('myrealm', digest['realm']) def testParseWWWAuthenticateMalformed(self): try: res = httplib2._parse_www_authenticate({'www-authenticate':'OAuth "Facebook Platform" "invalid_token" "Invalid OAuth access token."'}) self.fail("should raise an exception") except httplib2.MalformedHeader: pass def testDigestObject(self): credentials = ('joe', 'password') host = None request_uri = '/projects/httplib2/test/digest/' headers = {} response = { 'www-authenticate': 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth"' } content = b"" d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None) d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46") our_request = "authorization: %s" % headers['authorization'] working_request = 'authorization: Digest username="joe", realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", uri="/projects/httplib2/test/digest/", algorithm=MD5, response="97ed129401f7cdc60e5db58a80f3ea8b", qop=auth, nc=00000001, cnonce="33033375ec278a46"' self.assertEqual(our_request, working_request) def testDigestObjectWithOpaque(self): credentials = ('joe', 'password') host = None request_uri = '/projects/httplib2/test/digest/' headers = {} response = { 'www-authenticate': 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", opaque="atestopaque"' } content = "" d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None) d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46") our_request = "authorization: %s" % headers['authorization'] working_request = 'authorization: Digest username="joe", 
realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", uri="/projects/httplib2/test/digest/", algorithm=MD5, response="97ed129401f7cdc60e5db58a80f3ea8b", qop=auth, nc=00000001, cnonce="33033375ec278a46", opaque="atestopaque"' self.assertEqual(our_request, working_request) def testDigestObjectStale(self): credentials = ('joe', 'password') host = None request_uri = '/projects/httplib2/test/digest/' headers = {} response = httplib2.Response({ }) response['www-authenticate'] = 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true' response.status = 401 content = b"" d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None) # Returns true to force a retry self.assertTrue( d.response(response, content) ) def testDigestObjectAuthInfo(self): credentials = ('joe', 'password') host = None request_uri = '/projects/httplib2/test/digest/' headers = {} response = httplib2.Response({ }) response['www-authenticate'] = 'Digest realm="myrealm", nonce="Ygk86AsKBAA=3516200d37f9a3230352fde99977bd6d472d4306", algorithm=MD5, qop="auth", stale=true' response['authentication-info'] = 'nextnonce="fred"' content = b"" d = httplib2.DigestAuthentication(credentials, host, request_uri, headers, response, content, None) # Returns true to force a retry self.assertFalse( d.response(response, content) ) self.assertEqual('fred', d.challenge['nonce']) self.assertEqual(1, d.challenge['nc']) def testWsseAlgorithm(self): digest = httplib2._wsse_username_token("d36e316282959a9ed4c89851497a717f", "2003-12-15T14:43:07Z", "taadtaadpstcsm") expected = b"quR/EWLAV4xLf9Zqyw4pDmfV9OY=" self.assertEqual(expected, digest) def testEnd2End(self): # one end to end header response = {'content-type': 'application/atom+xml', 'te': 'deflate'} end2end = httplib2._get_end2end_headers(response) self.assertTrue('content-type' in end2end) self.assertTrue('te' not in end2end) 
self.assertTrue('connection' not in end2end) # one end to end header that gets eliminated response = {'connection': 'content-type', 'content-type': 'application/atom+xml', 'te': 'deflate'} end2end = httplib2._get_end2end_headers(response) self.assertTrue('content-type' not in end2end) self.assertTrue('te' not in end2end) self.assertTrue('connection' not in end2end) # Degenerate case of no headers response = {} end2end = httplib2._get_end2end_headers(response) self.assertEqual(0, len(end2end)) # Degenerate case of connection referrring to a header not passed in response = {'connection': 'content-type'} end2end = httplib2._get_end2end_headers(response) self.assertEqual(0, len(end2end)) class TestProxyInfo(unittest.TestCase): def setUp(self): self.orig_env = dict(os.environ) def tearDown(self): os.environ.clear() os.environ.update(self.orig_env) def test_from_url(self): pi = httplib2.proxy_info_from_url('http://myproxy.example.com') self.assertEqual(pi.proxy_host, 'myproxy.example.com') self.assertEqual(pi.proxy_port, 80) self.assertEqual(pi.proxy_user, None) def test_from_url_ident(self): pi = httplib2.proxy_info_from_url('http://zoidberg:fish@someproxy:99') self.assertEqual(pi.proxy_host, 'someproxy') self.assertEqual(pi.proxy_port, 99) self.assertEqual(pi.proxy_user, 'zoidberg') self.assertEqual(pi.proxy_pass, 'fish') def test_from_env(self): os.environ['http_proxy'] = 'http://myproxy.example.com:8080' pi = httplib2.proxy_info_from_environment() self.assertEqual(pi.proxy_host, 'myproxy.example.com') self.assertEqual(pi.proxy_port, 8080) def test_from_env_no_proxy(self): os.environ['http_proxy'] = 'http://myproxy.example.com:80' os.environ['https_proxy'] = 'http://myproxy.example.com:81' pi = httplib2.proxy_info_from_environment('https') self.assertEqual(pi.proxy_host, 'myproxy.example.com') self.assertEqual(pi.proxy_port, 81) def test_from_env_none(self): os.environ.clear() pi = httplib2.proxy_info_from_environment() self.assertEqual(pi, None) if __name__ == 
'__main__': unittest.main()
apache-2.0
daannijkamp/sketchy
sketchy/__init__.py
3
5091
#!/usr/bin/env python # Copyright 2014 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Imports import logging import os from functools import wraps from logging import Formatter from celery import Celery, Task from flask.ext.restful import Api from flask.ext.sqlalchemy import SQLAlchemy from flask.ext.restful import Resource, reqparse from flask import Flask, send_from_directory, request, abort from sqlalchemy.exc import IntegrityError from sketchy.loggers import sketchy_logger from logging.handlers import RotatingFileHandler from werkzeug import secure_filename # App Config app = Flask(__name__) # Specify which config file to load (config-test or config-default) app.config.from_object('config-default') db = SQLAlchemy(app) sketchy_logger(app) # Model Imports from sketchy.models.capture import Capture from sketchy.models.static import Static def make_celery(app): """Make a celery object that extends Flask context.""" celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL']) celery.conf.update(app.config) TaskBase = celery.Task class ContextTask(TaskBase): abstract = True def __call__(self, *args, **kwargs): with app.app_context(): return TaskBase.__call__(self, *args, **kwargs) def after_return(self, status, retval, task_id, args, kwargs, einfo): db.session.remove() def on_failure(self, exc, task_id, args, kwargs, einfo): from sketchy.controllers.tasks import finisher # Check if the failures was on a capture or a static capture try: if 
kwargs['model'] == 'capture': the_record = Capture.query.filter(Capture.id == kwargs['capture_id']).first() else: the_record = Static.query.filter(Static.id == kwargs['capture_id']).first() the_record.job_status = 'FAILURE' if str(exc): the_record.capture_status = str(exc) app.logger.error(exc) db.session.add(the_record) db.session.commit() except IntegrityError, exc: app.logger.error(exc) if the_record and the_record.callback: finisher(the_record) celery.Task = ContextTask return celery # Instantiate celery object for associated tasks celery = make_celery(app) def app_key_check(view_function): """ Token auth decorator returns 401 if token is not specified or incorrect """ @wraps(view_function) def decorated_function(*args, **kwargs): if app.config['REQUIRE_AUTH'] == True and app.config['AUTH_TOKEN']: if request.headers.get('Token') == app.config['AUTH_TOKEN'] or request.args.get('token') == app.config['AUTH_TOKEN']: return view_function(*args, **kwargs) else: app.logger.error("Missing required 'TOKEN'") abort(401) else: return view_function(*args, **kwargs) return decorated_function # If Token Auth is required, apply to Flask API flask_api = Api(app, decorators=[app_key_check]) # Setup API calls for sketching urls or html files from controllers.controller import CaptureView, CaptureViewList, CaptureViewLast, Eager from controllers.static_upload import StaticView, StaticViewList, StaticViewLast flask_api.add_resource(CaptureView, '/api/v1.0/capture/<int:id>') flask_api.add_resource(CaptureViewList, '/api/v1.0/capture') flask_api.add_resource(CaptureViewLast, '/api/v1.0/capture/last') flask_api.add_resource(Eager, '/eager') flask_api.add_resource(StaticView, '/api/v1.0/static/<int:id>') flask_api.add_resource(StaticViewList, '/api/v1.0/static') flask_api.add_resource(StaticViewLast, '/api/v1.0/static/last') # Setup Screenshot directory if not os.path.exists(app.config['LOCAL_STORAGE_FOLDER']): os.makedirs(app.config['LOCAL_STORAGE_FOLDER']) 
@app.route('/files/<filename>') @app_key_check def uploaded_file(filename): """ Route to retrieve a sketch, scrape, or html file when requested. If token auth is required, run the app_key_check decorator. """ return send_from_directory(app.config['LOCAL_STORAGE_FOLDER'], filename, as_attachment=True) # Healthcheck @app.route('/healthcheck') def home(): return 'Ok' # Launch. if __name__ == '__main__': PORT = int(os.environ.get('PORT', 5000)) app.run(host='0.0.0.0', port=PORT)
apache-2.0
WeAreWizards/proppy
proppy/tests/test_proposal.py
1
4982
from proppy.proposal import Proposal def _get_config(): return { 'customer': { 'company': 'ACME', 'email': 'boss@acme.com', 'person': 'Bob Marley' }, 'project': { 'name': 'Project X', 'description': 'This is what we are supposed', 'currency': '£', 'discount': 5, 'worker': 1, 'start': '2015/03/18', 'end': '2015/03/23', 'uat_start': '2015/03/27', 'uat_end': '2015/03/31', 'rates': [ { 'amount': 800, 'name': 'dev' }, { 'amount': 700, 'name': 'pm' }, { 'amount': 750, 'name': 'design' } ], 'deliverables': [ { 'name': 'Fix Facebook and Twitter integration', 'estimate': 2, 'rate': 'dev', 'free': False, 'description': 'Ensure those are working properly' }, { 'name': 'Add a badge list page', 'estimate': 0.5, 'rate': 'design', 'free': True, 'description': 'Add a badge list page in the profile' } ] } } def test_fetch_value(): config = _get_config() proposal = Proposal(config=config) customer = config['customer'] assert proposal._fetch_value('customer') == customer assert proposal._fetch_value('customer.company') == customer['company'] assert proposal._fetch_value('project.rates') == config['project']['rates'] assert proposal._fetch_value('customer.nonexistent') is None def test_basic_validation_one_error(): wrong_config = _get_config() del wrong_config['customer']['email'] proposal = Proposal(config=wrong_config) proposal.basic_validation() assert len(proposal._errors) == 1 assert "Field customer.email is missing" == proposal._errors[0] def test_basic_several_several_errors_in_a_field(): """ Validation should stop at first failure for a field """ wrong_config = _get_config() wrong_config['project']['currency'] = '' proposal = Proposal(config=wrong_config) proposal.basic_validation() assert len(proposal._errors) == 1 assert "Field project.currency is missing" == proposal._errors[0] def test_logic_validation_not_doing_everything_free(): wrong_config = _get_config() wrong_config['project']['deliverables'][0]['free'] = True proposal = Proposal(config=wrong_config) 
proposal.logic_validation() assert len(proposal._errors) == 1 assert "Can't have all deliverables set to free" == proposal._errors[0] def test_logic_validation_unknown_rate_in_deliverable(): wrong_config = _get_config() wrong_config['project']['deliverables'][1]['rate'] = 'magic' proposal = Proposal(config=wrong_config) proposal.logic_validation() assert len(proposal._errors) == 1 assert "An unknown rate was used in a deliverable" == proposal._errors[0] def test_logic_validation_timeline_too_short(): wrong_config = _get_config() wrong_config['project']['start'] = '2015/03/25' proposal = Proposal(config=wrong_config) proposal.logic_validation() assert len(proposal._errors) == 1 assert "Project take more time than the timeline allows" == proposal._errors[0] # NOQA def test_logic_validation_timeline_too_long(): wrong_config = _get_config() wrong_config['project']['start'] = '2015/02/25' proposal = Proposal(config=wrong_config) proposal.logic_validation() assert len(proposal._errors) == 1 assert "Project take way less time than the timeline shows" == proposal._errors[0] # NOQA def test_logic_validation_uat_starting_during_project(): wrong_config = _get_config() wrong_config['project']['uat_start'] = '2015/03/19' proposal = Proposal(config=wrong_config) proposal.logic_validation() assert len(proposal._errors) == 1 assert "UAT can't start before the end of the project" == proposal._errors[0] # NOQA def test_logic_validation_uat_too_long(): wrong_config = _get_config() wrong_config['project']['uat_end'] = '2015/05/19' proposal = Proposal(config=wrong_config) proposal.logic_validation() assert len(proposal._errors) == 1 assert "UAT can't take longer than two weeks" == proposal._errors[0] # NOQA def test_logic_validation_discount_too_high(): wrong_config = _get_config() wrong_config['project']['discount'] = 51 proposal = Proposal(config=wrong_config) proposal.logic_validation() assert len(proposal._errors) == 1 assert "Discount is set too high" == proposal._errors[0] # NOQA
mit
habedi/Emacs-theme-creator
venv/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/tests/test_sandbox.py
204
1724
"""develop tests """ import sys import os import shutil import unittest import tempfile from setuptools.sandbox import DirectorySandbox, SandboxViolation def has_win32com(): """ Run this to determine if the local machine has win32com, and if it does, include additional tests. """ if not sys.platform.startswith('win32'): return False try: mod = __import__('win32com') except ImportError: return False return True class TestSandbox(unittest.TestCase): def setUp(self): self.dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.dir) def test_devnull(self): if sys.version < '2.4': return sandbox = DirectorySandbox(self.dir) sandbox.run(self._file_writer(os.devnull)) def _file_writer(path): def do_write(): f = open(path, 'w') f.write('xxx') f.close() return do_write _file_writer = staticmethod(_file_writer) if has_win32com(): def test_win32com(self): """ win32com should not be prevented from caching COM interfaces in gen_py. """ import win32com gen_py = win32com.__gen_path__ target = os.path.join(gen_py, 'test_write') sandbox = DirectorySandbox(self.dir) try: try: sandbox.run(self._file_writer(target)) except SandboxViolation: self.fail("Could not create gen_py file due to SandboxViolation") finally: if os.path.exists(target): os.remove(target) if __name__ == '__main__': unittest.main()
gpl-3.0
pixbuffer/django-cms
cms/migrations/0034_auto__chg_field_title_language__chg_field_cmsplugin_language__add_fiel.py
385
19523
# -*- coding: utf-8 -*-
"""South schema migration for the ``cms`` app.

Both ``forwards`` and ``backwards`` are intentionally no-ops ("dummy"
migration): the step exists only to record this point in the migration
history and to carry the frozen ORM snapshot below.
"""
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Dummy migration: no schema changes are applied in the forward
        # direction.
        pass

    def backwards(self, orm):
        # Dummy migration: nothing to undo, so the backward direction is
        # also a no-op.
        pass

    # Frozen ORM snapshot, auto-generated by South. This is pure data — a
    # description of every model/field as of this migration — and is read
    # by South to reconstruct the historical ORM; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.globalpagepermission': {
            'Meta': {'object_name': 'GlobalPagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cms.page': {
            'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.pagemoderator': {
            'Meta': {'object_name': 'PageModerator'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderate_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderate_descendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderate_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cms.pagemoderatorstate': {
            'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'cms.pagepermission': {
            'Meta': {'object_name': 'PagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'cms.pageuser': {
            'Meta': {'object_name': 'PageUser', '_ormbases': ['auth.User']},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['auth.User']"}),
            'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.pageusergroup': {
            'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['auth.User']"}),
            'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cms.title': {
            'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
            'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
            'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    # Apps whose migration histories are completed by this migration.
    complete_apps = ['cms']
bsd-3-clause
ddrmanxbxfr/servo
tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/abort_by_user_wsh.py
496
1798
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from mod_pywebsocket import handshake


def _abort(message):
    # Both handler entry points fail the same way: raising
    # AbortedByUserException tells the pywebsocket framework to abort the
    # connection on the handler's behalf.
    raise handshake.AbortedByUserException(message)


def web_socket_do_extra_handshake(request):
    """Test handler hook: abort during the opening handshake."""
    _abort("abort for test")


def web_socket_transfer_data(request):
    """Test handler hook: abort during the data-transfer phase."""
    _abort("abort for test")


# vi:sts=4 sw=4 et
mpl-2.0
yoer/hue
desktop/core/ext-py/boto-2.38.0/boto/glacier/layer1.py
121
60796
# -*- coding: utf-8 -*- # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import os import boto.glacier from boto.compat import json from boto.connection import AWSAuthConnection from boto.glacier.exceptions import UnexpectedHTTPResponseError from boto.glacier.response import GlacierResponse from boto.glacier.utils import ResettingFileSender class Layer1(AWSAuthConnection): """ Amazon Glacier is a storage solution for "cold data." Amazon Glacier is an extremely low-cost storage service that provides secure, durable and easy-to-use storage for data backup and archival. With Amazon Glacier, customers can store their data cost effectively for months, years, or decades. 
Amazon Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations. Amazon Glacier is a great storage choice when low storage cost is paramount, your data is rarely retrieved, and retrieval latency of several hours is acceptable. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, go to `Amazon Simple Storage Service (Amazon S3)`_. You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Amazon Glacier. If you are a first-time user of Amazon Glacier, we recommend that you begin by reading the following sections in the Amazon Glacier Developer Guide : + `What is Amazon Glacier`_ - This section of the Developer Guide describes the underlying data model, the operations it supports, and the AWS SDKs that you can use to interact with the service. + `Getting Started with Amazon Glacier`_ - The Getting Started section walks you through the process of creating a vault, uploading archives, creating jobs to download archives, retrieving the job output, and deleting archives. 
""" Version = '2012-06-01' def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, account_id='-', is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, path='/', provider='aws', security_token=None, suppress_consec_slashes=True, region=None, region_name='us-east-1', profile_name=None): if not region: for reg in boto.glacier.regions(): if reg.name == region_name: region = reg break self.region = region self.account_id = account_id super(Layer1, self).__init__(region.endpoint, aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, debug, https_connection_factory, path, provider, security_token, suppress_consec_slashes, profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] def make_request(self, verb, resource, headers=None, data='', ok_responses=(200,), params=None, sender=None, response_headers=None): if headers is None: headers = {} headers['x-amz-glacier-version'] = self.Version uri = '/%s/%s' % (self.account_id, resource) response = super(Layer1, self).make_request(verb, uri, params=params, headers=headers, sender=sender, data=data) if response.status in ok_responses: return GlacierResponse(response, response_headers) else: # create glacier-specific exceptions raise UnexpectedHTTPResponseError(ok_responses, response) # Vaults def list_vaults(self, limit=None, marker=None): """ This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name. By default, this operation returns up to 1,000 items. If there are more vaults to list, the response `marker` field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the `marker` field is `null`. 
To return a list of vaults that begins at a specific vault, set the `marker` request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the `limit` parameter in the request. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Retrieving Vault Metadata in Amazon Glacier`_ and `List Vaults `_ in the Amazon Glacier Developer Guide . :type marker: string :param marker: A string used for pagination. The marker specifies the vault ARN after which the listing of vaults should begin. :type limit: string :param limit: The maximum number of items returned in the response. If you don't specify a value, the List Vaults operation returns up to 1,000 items. """ params = {} if limit: params['limit'] = limit if marker: params['marker'] = marker return self.make_request('GET', 'vaults', params=params) def describe_vault(self, vault_name): """ This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon Glacier generates vault inventories approximately daily. For more information, see `Downloading a Vault Inventory in Amazon Glacier`_. 
An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Retrieving Vault Metadata in Amazon Glacier`_ and `Describe Vault `_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. """ uri = 'vaults/%s' % vault_name return self.make_request('GET', uri) def create_vault(self, vault_name): """ This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon Glacier. You must use the following guidelines when naming a vault. + Names can be between 1 and 255 characters long. + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period). This operation is idempotent. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Creating a Vault in Amazon Glacier`_ and `Create Vault `_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. """ uri = 'vaults/%s' % vault_name return self.make_request('PUT', uri, ok_responses=(201,), response_headers=[('Location', 'Location')]) def delete_vault(self, vault_name): """ This operation deletes a vault. 
Amazon Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use `Initiate a Job (POST jobs)`_ to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using `Delete Archive (DELETE archive)`_. This operation is idempotent. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Deleting a Vault in Amazon Glacier`_ and `Delete Vault `_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. """ uri = 'vaults/%s' % vault_name return self.make_request('DELETE', uri, ok_responses=(204,)) def get_vault_notifications(self, vault_name): """ This operation retrieves the `notification-configuration` subresource of the specified vault. For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a `404 Not Found` error. For more information about vault notifications, see `Configuring Vault Notifications in Amazon Glacier`_. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. 
For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Configuring Vault Notifications in Amazon Glacier`_ and `Get Vault Notification Configuration `_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. """ uri = 'vaults/%s/notification-configuration' % vault_name return self.make_request('GET', uri) def set_vault_notifications(self, vault_name, notification_config): """ This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications. To configure vault notifications, send a PUT request to the `notification-configuration` subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon Glacier to send notifications to the topic. Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events: + **ArchiveRetrievalCompleted** This event occurs when a job that was initiated for an archive retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob. + **InventoryRetrievalCompleted** This event occurs when a job that was initiated for an inventory retrieval is completed (InitiateJob). The status of the completed job can be "Succeeded" or "Failed". The notification sent to the SNS topic is the same output as returned from DescribeJob. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. 
For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Configuring Vault Notifications in Amazon Glacier`_ and `Set Vault Notification Configuration `_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. :type vault_notification_config: dict :param vault_notification_config: Provides options for specifying notification configuration. The format of the dictionary is: {'SNSTopic': 'mytopic', 'Events': [event1,...]} """ uri = 'vaults/%s/notification-configuration' % vault_name json_config = json.dumps(notification_config) return self.make_request('PUT', uri, data=json_config, ok_responses=(204,)) def delete_vault_notifications(self, vault_name): """ This operation deletes the notification configuration set for a vault. The operation is eventually consistent;that is, it might take some time for Amazon Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Configuring Vault Notifications in Amazon Glacier`_ and `Delete Vault Notification Configuration `_ in the Amazon Glacier Developer Guide. :type vault_name: string :param vault_name: The name of the vault. 
""" uri = 'vaults/%s/notification-configuration' % vault_name return self.make_request('DELETE', uri, ok_responses=(204,)) # Jobs def list_jobs(self, vault_name, completed=None, status_code=None, limit=None, marker=None): """ This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists. To retrieve an archive or retrieve a vault inventory from Amazon Glacier, you first initiate a job, and after the job completes, you download the data. For an archive retrieval, the output is the archive data, and for an inventory retrieval, it is the inventory list. The List Job operation returns a list of these jobs sorted by job initiation time. This List Jobs operation supports pagination. By default, this operation returns up to 1,000 jobs in the response. You should always check the response for a `marker` at which to continue the list; if there are no more items the `marker` is `null`. To return a list of jobs that begins at a specific job, set the `marker` request parameter to the value you obtained from a previous List Jobs request. You can also limit the number of jobs returned in the response by specifying the `limit` parameter in the request. Additionally, you can filter the jobs list returned by specifying an optional `statuscode` (InProgress, Succeeded, or Failed) and `completed` (true, false) parameter. 
The `statuscode` allows you to specify that only jobs that match a specified status are returned. The `completed` parameter allows you to specify that only jobs in a specific completion state are returned. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For the underlying REST API, go to `List Jobs `_ :type vault_name: string :param vault_name: The name of the vault. :type limit: string :param limit: Specifies that the response be limited to the specified number of items or fewer. If not specified, the List Jobs operation returns up to 1,000 jobs. :type marker: string :param marker: An opaque string used for pagination. This value specifies the job at which the listing of jobs should begin. Get the marker value from a previous List Jobs response. You need only include the marker if you are continuing the pagination of results started in a previous List Jobs request. :type statuscode: string :param statuscode: Specifies the type of job status to return. You can specify the following values: "InProgress", "Succeeded", or "Failed". :type completed: string :param completed: Specifies the state of the jobs to return. You can specify `True` or `False`. 
""" params = {} if limit: params['limit'] = limit if marker: params['marker'] = marker if status_code: params['statuscode'] = status_code if completed is not None: params['completed'] = 'true' if completed else 'false' uri = 'vaults/%s/jobs' % vault_name return self.make_request('GET', uri, params=params) def describe_job(self, vault_name, job_id): """ This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier completes the job. For more information about initiating a job, see InitiateJob. This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Amazon Glacier can notify the topic after it completes the job. A job ID will not expire for at least 24 hours after Amazon Glacier completes the job. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For information about the underlying REST API, go to `Working with Archives in Amazon Glacier`_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. :type job_id: string :param job_id: The ID of the job to describe. """ uri = 'vaults/%s/jobs/%s' % (vault_name, job_id) return self.make_request('GET', uri, ok_responses=(200,)) def initiate_job(self, vault_name, job_data): """ This operation initiates a job of the specified type. In this release, you can initiate a job to retrieve either an archive or a vault inventory (a list of archives in a vault). Retrieving data from Amazon Glacier is a two-step process: #. 
Initiate a retrieval job. #. After the job completes, download the bytes. The retrieval request is executed asynchronously. When you initiate a retrieval job, Amazon Glacier creates a job and returns a job ID in the response. When Amazon Glacier completes the job, you can get the job output (archive or inventory data). For information about getting job output, see GetJobOutput operation. The job must complete before you can get its output. To determine when a job is complete, you have the following options: + **Use Amazon SNS Notification** You can specify an Amazon Simple Notification Service (Amazon SNS) topic to which Amazon Glacier can post a notification after the job is completed. You can specify an SNS topic per job request. The notification is sent only after Amazon Glacier completes the job. In addition to specifying an SNS topic per job request, you can configure vault notifications for a vault so that job notifications are always sent. For more information, see SetVaultNotifications. + **Get job details** You can make a DescribeJob request to obtain job status information while a job is in progress. However, it is more efficient to use an Amazon SNS notification to determine when a job is complete. The information you get via notification is same that you get by calling DescribeJob. If for a specific event, you add both the notification configuration on the vault and also specify an SNS topic in your initiate job request, Amazon Glacier sends both notifications. For more information, see SetVaultNotifications. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. **About the Vault Inventory** Amazon Glacier prepares an inventory for each vault periodically, every 24 hours. 
When you initiate a job for a vault inventory, Amazon Glacier returns the last inventory for the vault. The inventory data you get might be up to a day or two days old. Also, the initiate inventory job might take some time to complete before you can download the vault inventory. So you do not want to retrieve a vault inventory for each vault operation. However, in some scenarios, you might find the vault inventory useful. For example, when you upload an archive, you can provide an archive description but not an archive name. Amazon Glacier provides you a unique archive ID, an opaque string of characters. So, you might maintain your own database that maps archive names to their corresponding Amazon Glacier assigned archive IDs. You might find the vault inventory useful in the event you need to reconcile information in your database with the actual vault inventory. **About Ranged Archive Retrieval** You can initiate an archive retrieval for the whole archive or a range of the archive. In the case of ranged archive retrieval, you specify a byte range to return or the whole archive. The range specified must be megabyte (MB) aligned, that is the range start value must be divisible by 1 MB and range end value plus 1 must be divisible by 1 MB or equal the end of the archive. If the ranged archive retrieval is not megabyte aligned, this operation returns a 400 response. Furthermore, to ensure you get checksum values for data you download using Get Job Output API, the range must be tree hash aligned. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. 
For conceptual information and the underlying REST API, go to `Initiate a Job`_ and `Downloading a Vault Inventory`_ :type account_id: string :param account_id: The `AccountId` is the AWS Account ID. You can specify either the AWS Account ID or optionally a '-', in which case Amazon Glacier uses the AWS Account ID associated with the credentials used to sign the request. If you specify your Account ID, do not include hyphens in it. :type vault_name: string :param vault_name: The name of the vault. :type job_parameters: dict :param job_parameters: Provides options for specifying job information. The dictionary can contain the following attributes: * ArchiveId - The ID of the archive you want to retrieve. This field is required only if the Type is set to archive-retrieval. * Description - The optional description for the job. * Format - When initiating a job to retrieve a vault inventory, you can optionally add this parameter to specify the output format. Valid values are: CSV|JSON. * SNSTopic - The Amazon SNS topic ARN where Amazon Glacier sends a notification when the job is completed and the output is ready for you to download. * Type - The job type. Valid values are: archive-retrieval|inventory-retrieval * RetrievalByteRange - Optionally specify the range of bytes to retrieve. * InventoryRetrievalParameters: Optional job parameters * Format - The output format, like "JSON" * StartDate - ISO8601 starting date string * EndDate - ISO8601 ending date string * Limit - Maximum number of entries * Marker - A unique string used for pagination """ uri = 'vaults/%s/jobs' % vault_name response_headers = [('x-amz-job-id', u'JobId'), ('Location', u'Location')] json_job_data = json.dumps(job_data) return self.make_request('POST', uri, data=json_job_data, ok_responses=(202,), response_headers=response_headers) def get_job_output(self, vault_name, job_id, byte_range=None): """ This operation downloads the output of the job you initiated using InitiateJob. 
Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory. A job ID will not expire for at least 24 hours after Amazon Glacier completes the job. That is, you can download the job output within the 24 hours period after Amazon Glacier completes the job. If the job output is large, then you can use the `Range` request header to retrieve a portion of the output. This allows you to download the entire output in smaller chunks of bytes. For example, suppose you have 1 GB of job output you want to download and you decide to download 128 MB chunks of data at a time, which is a total of eight Get Job Output requests. You use the following process to download the job output: #. Download a 128 MB chunk of output by specifying the appropriate byte range using the `Range` header. #. Along with the data, the response includes a checksum of the payload. You compute the checksum of the payload on the client and compare it with the checksum you received in the response to ensure you received all the expected data. #. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each time specifying the appropriate byte range. #. After downloading all the parts of the job output, you have a list of eight checksum values. Compute the tree hash of these values to find the checksum of the entire output. Using the Describe Job API, obtain job information of the job that provided you the output. The response includes the checksum of the entire archive stored in Amazon Glacier. You compare this value with the checksum you computed to ensure you have downloaded the entire archive content with no errors. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. 
For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and the underlying REST API, go to `Downloading a Vault Inventory`_, `Downloading an Archive`_, and `Get Job Output `_ :type account_id: string :param account_id: The `AccountId` is the AWS Account ID. You can specify either the AWS Account ID or optionally a '-', in which case Amazon Glacier uses the AWS Account ID associated with the credentials used to sign the request. If you specify your Account ID, do not include hyphens in it. :type vault_name: string :param vault_name: The name of the vault. :type job_id: string :param job_id: The job ID whose data is downloaded. :type byte_range: string :param byte_range: The range of bytes to retrieve from the output. For example, if you want to download the first 1,048,576 bytes, specify "Range: bytes=0-1048575". By default, this operation downloads the entire output. """ response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'), ('Content-Range', u'ContentRange'), ('Content-Type', u'ContentType')] headers = None if byte_range: headers = {'Range': 'bytes=%d-%d' % byte_range} uri = 'vaults/%s/jobs/%s/output' % (vault_name, job_id) response = self.make_request('GET', uri, headers=headers, ok_responses=(200, 206), response_headers=response_headers) return response # Archives def upload_archive(self, vault_name, archive, linear_hash, tree_hash, description=None): """ This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon Glacier returns the archive ID in the `x-amz-archive-id` header of the response. You must use the archive ID to access your data in Amazon Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. 
You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob. You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see `Computing Checksums`_. You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list. Archives are immutable. After you upload an archive, you cannot edit the archive or its description. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Uploading an Archive in Amazon Glacier`_ and `Upload Archive`_ in the Amazon Glacier Developer Guide . :type vault_name: str :param vault_name: The name of the vault :type archive: bytes :param archive: The data to upload. :type linear_hash: str :param linear_hash: The SHA256 checksum (a linear hash) of the payload. :type tree_hash: str :param tree_hash: The user-computed SHA256 tree hash of the payload. For more information on computing the tree hash, see http://goo.gl/u7chF. :type description: str :param description: The optional description of the archive you are uploading. 
""" response_headers = [('x-amz-archive-id', u'ArchiveId'), ('Location', u'Location'), ('x-amz-sha256-tree-hash', u'TreeHash')] uri = 'vaults/%s/archives' % vault_name try: content_length = str(len(archive)) except (TypeError, AttributeError): # If a file like object is provided, try to retrieve # the file size via fstat. content_length = str(os.fstat(archive.fileno()).st_size) headers = {'x-amz-content-sha256': linear_hash, 'x-amz-sha256-tree-hash': tree_hash, 'Content-Length': content_length} if description: headers['x-amz-archive-description'] = description if self._is_file_like(archive): sender = ResettingFileSender(archive) else: sender = None return self.make_request('POST', uri, headers=headers, sender=sender, data=archive, ok_responses=(201,), response_headers=response_headers) def _is_file_like(self, archive): return hasattr(archive, 'seek') and hasattr(archive, 'tell') def delete_archive(self, vault_name, archive_id): """ This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios: + If the archive retrieval job is actively preparing the data for download when Amazon Glacier receives the delete archive request, the archival retrieval operation might fail. + If the archive retrieval job has successfully prepared the archive for download when Amazon Glacier receives the delete archive request, you will be able to download the output. This operation is idempotent. Attempting to delete an already- deleted archive does not result in an error. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. 
For conceptual information and underlying REST API, go to `Deleting an Archive in Amazon Glacier`_ and `Delete Archive`_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. :type archive_id: string :param archive_id: The ID of the archive to delete. """ uri = 'vaults/%s/archives/%s' % (vault_name, archive_id) return self.make_request('DELETE', uri, ok_responses=(204,)) # Multipart def initiate_multipart_upload(self, vault_name, part_size, description=None): """ This operation initiates a multipart upload. Amazon Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart). When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2-for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB. Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB. You don't need to know the size of the archive when you start a multipart upload because Amazon Glacier does not require you to specify the overall archive size. After you complete the multipart upload, Amazon Glacier removes the multipart upload resource referenced by the ID. Amazon Glacier also removes the multipart upload resource if you cancel the multipart upload or it may be removed if there is no activity for a period of 24 hours. An AWS account has full permission to perform all operations (actions). 
However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Uploading Large Archives in Parts (Multipart Upload)`_ and `Initiate Multipart Upload`_ in the Amazon Glacier Developer Guide . The part size must be a megabyte (1024 KB) multiplied by a power of 2, for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB (4096 MB). :type vault_name: str :param vault_name: The name of the vault. :type description: str :param description: The archive description that you are uploading in parts. :type part_size: int :param part_size: The size of each part except the last, in bytes. The last part can be smaller than this part size. """ response_headers = [('x-amz-multipart-upload-id', u'UploadId'), ('Location', u'Location')] headers = {'x-amz-part-size': str(part_size)} if description: headers['x-amz-archive-description'] = description uri = 'vaults/%s/multipart-uploads' % vault_name response = self.make_request('POST', uri, headers=headers, ok_responses=(201,), response_headers=response_headers) return response def complete_multipart_upload(self, vault_name, upload_id, sha256_treehash, archive_size): """ You call this operation to inform Amazon Glacier that all the archive parts have been uploaded and that Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. 
You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob. In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see `Computing Checksums`_. On the server side, Amazon Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Amazon Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue. Additionally, Amazon Glacier also checks for any missing content ranges when assembling the archive, if missing content ranges are found, Amazon Glacier returns an error and the operation fails. Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. 
For conceptual information and underlying REST API, go to `Uploading Large Archives in Parts (Multipart Upload)`_ and `Complete Multipart Upload`_ in the Amazon Glacier Developer Guide . :type checksum: string :param checksum: The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 tree hash of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon Glacier, Amazon Glacier returns an error and the request fails. :type vault_name: str :param vault_name: The name of the vault. :type upload_id: str :param upload_id: The upload ID of the multipart upload. :type sha256_treehash: str :param sha256_treehash: The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 tree hash of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon Glacier, Amazon Glacier returns an error and the request fails. :type archive_size: int :param archive_size: The total size, in bytes, of the entire archive. This value should be the sum of all the sizes of the individual parts that you uploaded. """ response_headers = [('x-amz-archive-id', u'ArchiveId'), ('Location', u'Location')] headers = {'x-amz-sha256-tree-hash': sha256_treehash, 'x-amz-archive-size': str(archive_size)} uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) response = self.make_request('POST', uri, headers=headers, ok_responses=(201,), response_headers=response_headers) return response def abort_multipart_upload(self, vault_name, upload_id): """ This operation aborts a multipart upload identified by the upload ID. After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. 
For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload. This operation is idempotent. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Working with Archives in Amazon Glacier`_ and `Abort Multipart Upload`_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. :type upload_id: string :param upload_id: The upload ID of the multipart upload to delete. """ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) return self.make_request('DELETE', uri, ok_responses=(204,)) def list_multipart_uploads(self, vault_name, limit=None, marker=None): """ This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order. The List Multipart Uploads operation supports pagination. By default, this operation returns up to 1,000 multipart uploads in the response. You should always check the response for a `marker` at which to continue the list; if there are no more items the `marker` is `null`. To return a list of multipart uploads that begins at a specific upload, set the `marker` request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the `limit` parameter in the request. Note the difference between this operation and listing parts (ListParts). 
The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and the underlying REST API, go to `Working with Archives in Amazon Glacier`_ and `List Multipart Uploads `_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. :type limit: string :param limit: Specifies the maximum number of uploads returned in the response body. If this value is not specified, the List Uploads operation returns up to 1,000 uploads. :type marker: string :param marker: An opaque string used for pagination. This value specifies the upload at which the listing of uploads should begin. Get the marker value from a previous List Uploads response. You need only include the marker if you are continuing the pagination of results started in a previous List Uploads request. """ params = {} if limit: params['limit'] = limit if marker: params['marker'] = marker uri = 'vaults/%s/multipart-uploads' % vault_name return self.make_request('GET', uri, params=params) def list_parts(self, vault_name, upload_id, limit=None, marker=None): """ This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload. List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range. The List Parts operation supports pagination. 
By default, this operation returns up to 1,000 uploaded parts in the response. You should always check the response for a `marker` at which to continue the list; if there are no more items the `marker` is `null`. To return a list of parts that begins at a specific part, set the `marker` request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the `limit` parameter in the request. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and the underlying REST API, go to `Working with Archives in Amazon Glacier`_ and `List Parts`_ in the Amazon Glacier Developer Guide . :type vault_name: string :param vault_name: The name of the vault. :type upload_id: string :param upload_id: The upload ID of the multipart upload. :type marker: string :param marker: An opaque string used for pagination. This value specifies the part at which the listing of parts should begin. Get the marker value from the response of a previous List Parts response. You need only include the marker if you are continuing the pagination of results started in a previous List Parts request. :type limit: string :param limit: Specifies the maximum number of parts returned in the response body. If this value is not specified, the List Parts operation returns up to 1,000 uploads. 
""" params = {} if limit: params['limit'] = limit if marker: params['marker'] = marker uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) return self.make_request('GET', uri, params=params) def upload_part(self, vault_name, upload_id, linear_hash, tree_hash, byte_range, part_data): """ This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload. Amazon Glacier rejects your upload part request if any of the following conditions is true: + **SHA256 tree hash does not match**To ensure that part data is not corrupted in transmission, you compute a SHA256 tree hash of the part and include it in your request. Upon receiving the part data, Amazon Glacier also computes a SHA256 tree hash. If these hash values don't match, the operation fails. For information about computing a SHA256 tree hash, see `Computing Checksums`_. + **Part size does not match**The size of each part except the last must match the size specified in the corresponding InitiateMultipartUpload request. The size of the last part must be the same size as, or smaller than, the specified size. If you upload a part whose size is smaller than the part size you specified in your initiate multipart upload request and that part is not the last part, then the upload part request will succeed. However, the subsequent Complete Multipart Upload request will fail. + **Range does not align**The byte range value in the request does not align with the part size specified in the corresponding initiate request. For example, if you specify a part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid part ranges. However, if you set a range value of 2 MB to 6 MB, the range does not align with the part size and the upload will fail. This operation is idempotent. 
If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data. An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see `Access Control Using AWS Identity and Access Management (IAM)`_. For conceptual information and underlying REST API, go to `Uploading Large Archives in Parts (Multipart Upload)`_ and `Upload Part `_ in the Amazon Glacier Developer Guide . :type vault_name: str :param vault_name: The name of the vault. :type linear_hash: str :param linear_hash: The SHA256 checksum (a linear hash) of the payload. :type tree_hash: str :param tree_hash: The user-computed SHA256 tree hash of the payload. For more information on computing the tree hash, see http://goo.gl/u7chF. :type upload_id: str :param upload_id: The unique ID associated with this upload operation. :type byte_range: tuple of ints :param byte_range: Identifies the range of bytes in the assembled archive that will be uploaded in this part. Amazon Glacier uses this information to assemble the archive in the proper sequence. The format of this header follows RFC 2616. An example header is Content-Range:bytes 0-4194303/*. :type part_data: bytes :param part_data: The data to be uploaded for the part """ headers = {'x-amz-content-sha256': linear_hash, 'x-amz-sha256-tree-hash': tree_hash, 'Content-Range': 'bytes %d-%d/*' % byte_range} response_headers = [('x-amz-sha256-tree-hash', u'TreeHash')] uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) return self.make_request('PUT', uri, headers=headers, data=part_data, ok_responses=(204,), response_headers=response_headers)
apache-2.0
chiefspace/udemy-rest-api
udemy_rest_api_section5/code/env/lib/python3.4/site-packages/setuptools/tests/__init__.py
412
12582
"""Tests for the 'setuptools' package""" import sys import os import unittest from setuptools.tests import doctest import distutils.core import distutils.cmd from distutils.errors import DistutilsOptionError, DistutilsPlatformError from distutils.errors import DistutilsSetupError from distutils.core import Extension from distutils.version import LooseVersion from setuptools.compat import func_code from setuptools.compat import func_code import setuptools.dist import setuptools.depends as dep from setuptools import Feature from setuptools.depends import Require def additional_tests(): import doctest, unittest suite = unittest.TestSuite(( doctest.DocFileSuite( os.path.join('tests', 'api_tests.txt'), optionflags=doctest.ELLIPSIS, package='pkg_resources', ), )) if sys.platform == 'win32': suite.addTest(doctest.DocFileSuite('win_script_wrapper.txt')) return suite def makeSetup(**args): """Return distribution from 'setup(**args)', without executing commands""" distutils.core._setup_stop_after = "commandline" # Don't let system command line leak into tests! 
args.setdefault('script_args',['install']) try: return setuptools.setup(**args) finally: distutils.core._setup_stop_after = None class DependsTests(unittest.TestCase): def testExtractConst(self): if not hasattr(dep, 'extract_constant'): # skip on non-bytecode platforms return def f1(): global x, y, z x = "test" y = z fc = func_code(f1) # unrecognized name self.assertEqual(dep.extract_constant(fc,'q', -1), None) # constant assigned self.assertEqual(dep.extract_constant(fc,'x', -1), "test") # expression assigned self.assertEqual(dep.extract_constant(fc,'y', -1), -1) # recognized name, not assigned self.assertEqual(dep.extract_constant(fc,'z', -1), None) def testFindModule(self): self.assertRaises(ImportError, dep.find_module, 'no-such.-thing') self.assertRaises(ImportError, dep.find_module, 'setuptools.non-existent') f,p,i = dep.find_module('setuptools.tests') f.close() def testModuleExtract(self): if not hasattr(dep, 'get_module_constant'): # skip on non-bytecode platforms return from email import __version__ self.assertEqual( dep.get_module_constant('email','__version__'), __version__ ) self.assertEqual( dep.get_module_constant('sys','version'), sys.version ) self.assertEqual( dep.get_module_constant('setuptools.tests','__doc__'),__doc__ ) def testRequire(self): if not hasattr(dep, 'extract_constant'): # skip on non-bytecode platformsh return req = Require('Email','1.0.3','email') self.assertEqual(req.name, 'Email') self.assertEqual(req.module, 'email') self.assertEqual(req.requested_version, '1.0.3') self.assertEqual(req.attribute, '__version__') self.assertEqual(req.full_name(), 'Email-1.0.3') from email import __version__ self.assertEqual(req.get_version(), __version__) self.assertTrue(req.version_ok('1.0.9')) self.assertTrue(not req.version_ok('0.9.1')) self.assertTrue(not req.version_ok('unknown')) self.assertTrue(req.is_present()) self.assertTrue(req.is_current()) req = Require('Email 3000','03000','email',format=LooseVersion) 
self.assertTrue(req.is_present()) self.assertTrue(not req.is_current()) self.assertTrue(not req.version_ok('unknown')) req = Require('Do-what-I-mean','1.0','d-w-i-m') self.assertTrue(not req.is_present()) self.assertTrue(not req.is_current()) req = Require('Tests', None, 'tests', homepage="http://example.com") self.assertEqual(req.format, None) self.assertEqual(req.attribute, None) self.assertEqual(req.requested_version, None) self.assertEqual(req.full_name(), 'Tests') self.assertEqual(req.homepage, 'http://example.com') paths = [os.path.dirname(p) for p in __path__] self.assertTrue(req.is_present(paths)) self.assertTrue(req.is_current(paths)) class DistroTests(unittest.TestCase): def setUp(self): self.e1 = Extension('bar.ext',['bar.c']) self.e2 = Extension('c.y', ['y.c']) self.dist = makeSetup( packages=['a', 'a.b', 'a.b.c', 'b', 'c'], py_modules=['b.d','x'], ext_modules = (self.e1, self.e2), package_dir = {}, ) def testDistroType(self): self.assertTrue(isinstance(self.dist,setuptools.dist.Distribution)) def testExcludePackage(self): self.dist.exclude_package('a') self.assertEqual(self.dist.packages, ['b','c']) self.dist.exclude_package('b') self.assertEqual(self.dist.packages, ['c']) self.assertEqual(self.dist.py_modules, ['x']) self.assertEqual(self.dist.ext_modules, [self.e1, self.e2]) self.dist.exclude_package('c') self.assertEqual(self.dist.packages, []) self.assertEqual(self.dist.py_modules, ['x']) self.assertEqual(self.dist.ext_modules, [self.e1]) # test removals from unspecified options makeSetup().exclude_package('x') def testIncludeExclude(self): # remove an extension self.dist.exclude(ext_modules=[self.e1]) self.assertEqual(self.dist.ext_modules, [self.e2]) # add it back in self.dist.include(ext_modules=[self.e1]) self.assertEqual(self.dist.ext_modules, [self.e2, self.e1]) # should not add duplicate self.dist.include(ext_modules=[self.e1]) self.assertEqual(self.dist.ext_modules, [self.e2, self.e1]) def testExcludePackages(self): 
self.dist.exclude(packages=['c','b','a']) self.assertEqual(self.dist.packages, []) self.assertEqual(self.dist.py_modules, ['x']) self.assertEqual(self.dist.ext_modules, [self.e1]) def testEmpty(self): dist = makeSetup() dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2]) dist = makeSetup() dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2]) def testContents(self): self.assertTrue(self.dist.has_contents_for('a')) self.dist.exclude_package('a') self.assertTrue(not self.dist.has_contents_for('a')) self.assertTrue(self.dist.has_contents_for('b')) self.dist.exclude_package('b') self.assertTrue(not self.dist.has_contents_for('b')) self.assertTrue(self.dist.has_contents_for('c')) self.dist.exclude_package('c') self.assertTrue(not self.dist.has_contents_for('c')) def testInvalidIncludeExclude(self): self.assertRaises(DistutilsSetupError, self.dist.include, nonexistent_option='x' ) self.assertRaises(DistutilsSetupError, self.dist.exclude, nonexistent_option='x' ) self.assertRaises(DistutilsSetupError, self.dist.include, packages={'x':'y'} ) self.assertRaises(DistutilsSetupError, self.dist.exclude, packages={'x':'y'} ) self.assertRaises(DistutilsSetupError, self.dist.include, ext_modules={'x':'y'} ) self.assertRaises(DistutilsSetupError, self.dist.exclude, ext_modules={'x':'y'} ) self.assertRaises(DistutilsSetupError, self.dist.include, package_dir=['q'] ) self.assertRaises(DistutilsSetupError, self.dist.exclude, package_dir=['q'] ) class FeatureTests(unittest.TestCase): def setUp(self): self.req = Require('Distutils','1.0.3','distutils') self.dist = makeSetup( features={ 'foo': Feature("foo",standard=True,require_features=['baz',self.req]), 'bar': Feature("bar", standard=True, packages=['pkg.bar'], py_modules=['bar_et'], remove=['bar.ext'], ), 'baz': Feature( "baz", optional=False, packages=['pkg.baz'], scripts = ['scripts/baz_it'], libraries=[('libfoo','foo/foofoo.c')] ), 'dwim': Feature("DWIM", available=False, remove='bazish'), }, 
script_args=['--without-bar', 'install'], packages = ['pkg.bar', 'pkg.foo'], py_modules = ['bar_et', 'bazish'], ext_modules = [Extension('bar.ext',['bar.c'])] ) def testDefaults(self): self.assertTrue(not Feature( "test",standard=True,remove='x',available=False ).include_by_default() ) self.assertTrue( Feature("test",standard=True,remove='x').include_by_default() ) # Feature must have either kwargs, removes, or require_features self.assertRaises(DistutilsSetupError, Feature, "test") def testAvailability(self): self.assertRaises( DistutilsPlatformError, self.dist.features['dwim'].include_in, self.dist ) def testFeatureOptions(self): dist = self.dist self.assertTrue( ('with-dwim',None,'include DWIM') in dist.feature_options ) self.assertTrue( ('without-dwim',None,'exclude DWIM (default)') in dist.feature_options ) self.assertTrue( ('with-bar',None,'include bar (default)') in dist.feature_options ) self.assertTrue( ('without-bar',None,'exclude bar') in dist.feature_options ) self.assertEqual(dist.feature_negopt['without-foo'],'with-foo') self.assertEqual(dist.feature_negopt['without-bar'],'with-bar') self.assertEqual(dist.feature_negopt['without-dwim'],'with-dwim') self.assertTrue(not 'without-baz' in dist.feature_negopt) def testUseFeatures(self): dist = self.dist self.assertEqual(dist.with_foo,1) self.assertEqual(dist.with_bar,0) self.assertEqual(dist.with_baz,1) self.assertTrue(not 'bar_et' in dist.py_modules) self.assertTrue(not 'pkg.bar' in dist.packages) self.assertTrue('pkg.baz' in dist.packages) self.assertTrue('scripts/baz_it' in dist.scripts) self.assertTrue(('libfoo','foo/foofoo.c') in dist.libraries) self.assertEqual(dist.ext_modules,[]) self.assertEqual(dist.require_features, [self.req]) # If we ask for bar, it should fail because we explicitly disabled # it on the command line self.assertRaises(DistutilsOptionError, dist.include_feature, 'bar') def testFeatureWithInvalidRemove(self): self.assertRaises( SystemExit, makeSetup, features = {'x':Feature('x', 
remove='y')} ) class TestCommandTests(unittest.TestCase): def testTestIsCommand(self): test_cmd = makeSetup().get_command_obj('test') self.assertTrue(isinstance(test_cmd, distutils.cmd.Command)) def testLongOptSuiteWNoDefault(self): ts1 = makeSetup(script_args=['test','--test-suite=foo.tests.suite']) ts1 = ts1.get_command_obj('test') ts1.ensure_finalized() self.assertEqual(ts1.test_suite, 'foo.tests.suite') def testDefaultSuite(self): ts2 = makeSetup(test_suite='bar.tests.suite').get_command_obj('test') ts2.ensure_finalized() self.assertEqual(ts2.test_suite, 'bar.tests.suite') def testDefaultWModuleOnCmdLine(self): ts3 = makeSetup( test_suite='bar.tests', script_args=['test','-m','foo.tests'] ).get_command_obj('test') ts3.ensure_finalized() self.assertEqual(ts3.test_module, 'foo.tests') self.assertEqual(ts3.test_suite, 'foo.tests.test_suite') def testConflictingOptions(self): ts4 = makeSetup( script_args=['test','-m','bar.tests', '-s','foo.tests.suite'] ).get_command_obj('test') self.assertRaises(DistutilsOptionError, ts4.ensure_finalized) def testNoSuite(self): ts5 = makeSetup().get_command_obj('test') ts5.ensure_finalized() self.assertEqual(ts5.test_suite, None)
gpl-2.0
sahildua2305/eden
modules/tests/core/core_dataTable.py
2
12072
# -*- coding: utf-8 -*- __all__ = ["dt_filter", "dt_row_cnt", "dt_data", "dt_data_item", "dt_find", "dt_links", "dt_action", ] # @ToDo: There are performance issues # - need to profile and find out in which functions are the bottlenecks import time from gluon import current # ----------------------------------------------------------------------------- def convert_repr_number (number): """ Helper function to convert a string representation back to a number. Assumptions: * It may have a thousand separator * It may have a decimal point * If it has a thousand separator then it will have a decimal point It will return false is the number doesn't look valid """ sep = "" dec = "" part_one = "0" part_two = "" for digit in number: if digit.isdigit(): if sep == "": part_one += digit else: part_two += digit else: if digit == "-" and part_one == "0": part_one = "-0" elif sep == "" and sep != digit: sep = digit elif dec == "": dec = digit part_two += "." else: # Doesn't look like a valid number repr so return return False if dec == "": return float("%s.%s" % (part_one, part_two)) else: return float("%s%s" % (part_one, part_two)) # ----------------------------------------------------------------------------- def dt_filter(reporter, search_string=" ", forceClear = True, quiet = True): """ Filter the dataTable """ if forceClear: if not dt_filter(reporter, forceClear = False, quiet = quiet): return False config = current.test_config browser = config.browser sleep_limit = 10 elem = browser.find_element_by_css_selector('label > input[type="text"]') elem.clear() elem.send_keys(search_string) time.sleep(1) # give time for the list_processing element to appear waiting_elem = browser.find_element_by_id("datatable_processing") sleep_time = 0 while (waiting_elem.value_of_css_property("visibility") == "visible"): time.sleep(1) sleep_time += 1 if sleep_time > sleep_limit: if not quiet: reporter("DataTable filter didn't respond within %d seconds" % sleep_limit) return False return True # 
----------------------------------------------------------------------------- def dt_row_cnt(reporter, check = (), quiet = True, utObj = None): """ return the rows that are being displayed and the total rows in the dataTable """ config = current.test_config browser = config.browser elem = browser.find_element_by_id("datatable_info") details = elem.text if not quiet: reporter(details) words = details.split() start = int(words[1]) end = int(words[3]) length = int(words[5]) filtered = None if len(words) > 10: filtered = int(words[9]) if check != (): if len(check ) == 3: expected = "Showing %d to %d of %d entries" % check actual = "Showing %d to %d of %d entries" % (start, end, length) msg = "Expected result of '%s' doesn't equal '%s'" % (expected, actual) if utObj != None: utObj.assertEqual((start, end, length) == check, msg) else: assert (start, end, length) == check, msg elif len(check) == 4: expected = "Showing %d to %d of %d entries (filtered from %d total entries)" % check if filtered: actual = "Showing %d to %d of %d entries (filtered from %d total entries)" % (start, end, length, filtered) else: actual = "Showing %d to %d of %d entries" % (start, end, length) msg = "Expected result of '%s' doesn't equal '%s'" % (expected, actual) if utObj != None: utObj.assertEqual((start, end, length) == check, msg) else: assert (start, end, length, filtered) == check, msg if len(words) > 10: return (start, end, length, filtered) else: return (start, end, length) # ----------------------------------------------------------------------------- def dt_data(row_list = None, add_header = False): """ return the data in the displayed dataTable """ config = current.test_config browser = config.browser cell = browser.find_element_by_id("table-container") text = cell.text parts = text.splitlines() records = [] cnt = 0 lastrow = "" header = "" for row in parts: if row.startswith("Detail"): header = lastrow row = row[8:] if row_list == None or cnt in row_list: records.append(row) cnt += 1 
else: lastrow = row if add_header: return [header] + records return records # ----------------------------------------------------------------------------- def dt_data_item(row = 1, column = 1, tableID = "datatable", ): """ Returns the data found in the cell of the dataTable """ config = current.test_config browser = config.browser td = ".//*[@id='%s']/tbody/tr[%s]/td[%s]" % (tableID, row, column) try: elem = browser.find_element_by_xpath(td) return elem.text except: return False # ----------------------------------------------------------------------------- def dt_find(search = "", row = None, column = None, cellList = None, tableID = "datatable", first = False, ): """ Find the cells where search is found in the dataTable search: the string to search for. If you pass in a number (int, float) then the function will attempt to convert all text values to a float for comparison by using the convert_repr_number helper function row: The row or list of rows to search along column: The column or list of columns to search down cellList: This is a list of cells which may be returned from a previous call, these cells will be searched again for the search string. However if a row or column value is also provided then for each cell in cellList the column or row will be offset. 
For example cellList = [(3,1)] and column = 5, means rather than looking in cell (3,1) the function will look in cell (3,5) tableID: The HTML id of the table first: Stop on the first match, or find all matches Example of use (test url: /inv/warehouse/n/inv_item {where n is the warehouse id} ): match = dt_find("Plastic Sheets") if match: if not dt_find(4200, cellList=match, column=5, first=True): assert 0, "Unable to find 4200 Plastic Sheets" else: assert 0, "Unable to find any Plastic Sheets" """ config = current.test_config browser = config.browser def find_match(search, tableID, r, c): td = ".//*[@id='%s']/tbody/tr[%s]/td[%s]" % (tableID, r, c) try: elem = browser.find_element_by_xpath(td) text = elem.text if isinstance(search,(int, float)): text = convert_repr_number(text) if text == search: return (r, c) except: return False result = [] if cellList: for cell in cellList: if row: r = row else: r = cell[0] if column: c = column else: c = cell[1] found = find_match(search, tableID, r, c) if found: result.append(found) if first: return result else: # Calculate the rows that need to be navigated along to find the search string colList = [] rowList = [] if row == None: r = 1 while True: tr = ".//*[@id='%s']/tbody/tr[%s]" % (tableID, r) try: elem = browser.find_element_by_xpath(tr) rowList.append(r) r += 1 except: break elif isinstance(row, int): rowList = [row] else: rowList = row # Calculate the columns that need to be navigated down to find the search string if column == None: c = 1 while True: td = ".//*[@id='%s']/tbody/tr[1]/td[%s]" % (tableID, c) try: elem = browser.find_element_by_xpath(td) colList.append(c) c += 1 except: break elif isinstance(column, int): colList = [column] else: colList = column # Now try and find a match for r in rowList: for c in colList: found = find_match(search, tableID, r, c) if found: result.append(found) if first: return result return result # ----------------------------------------------------------------------------- def 
dt_links(reporter, row = 1, tableID = "datatable", quiet = True ): """ Returns a list of links in the given row of the dataTable """ config = current.test_config browser = config.browser links = [] # loop through each column column = 1 while True: td = ".//*[@id='%s']/tbody/tr[%s]/td[%s]" % (tableID, row, column) try: elem = browser.find_element_by_xpath(td) except: break # loop through looking for links in the cell cnt = 1 while True: link = ".//*[@id='%s']/tbody/tr[%s]/td[%s]/a[%s]" % (tableID, row, column, cnt) try: elem = browser.find_element_by_xpath(link) except: break cnt += 1 if not quiet: reporter("%2d) %s" % (column, elem.text)) links.append([column,elem.text]) column += 1 return links # ----------------------------------------------------------------------------- def dt_action(row = 1, action = None, column = 1, tableID = "datatable", ): """ click the action button in the dataTable """ config = current.test_config browser = config.browser # What looks like a fairly fragile xpath, but it should work unless DataTable changes if action: button = ".//*[@id='%s']/tbody/tr[%s]/td[%s]/a[contains(text(),'%s')]" % (tableID, row, column, action) else: button = ".//*[@id='%s']/tbody/tr[%s]/td[%s]/a" % (tableID, row, column) giveup = 0.0 sleeptime = 0.2 while giveup < 10.0: try: element = browser.find_element_by_xpath(button) url = element.get_attribute("href") if url: browser.get(url) return True except Exception as inst: print "%s with %s" % (type(inst), button) time.sleep(sleeptime) giveup += sleeptime return False # END =========================================================================
mit
pjg101/SickRage
lib/sqlalchemy/util/langhelpers.py
75
37513
# util/langhelpers.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Routines to help with the creation, loading and introspection of modules, classes, hierarchies, attributes, functions, and methods. """ import itertools import inspect import operator import re import sys import types import warnings from functools import update_wrapper from .. import exc import hashlib from . import compat from . import _collections def md5_hex(x): if compat.py3k: x = x.encode('utf-8') m = hashlib.md5() m.update(x) return m.hexdigest() class safe_reraise(object): """Reraise an exception after invoking some handler code. Stores the existing exception info before invoking so that it is maintained across a potential coroutine context switch. e.g.:: try: sess.commit() except: with safe_reraise(): sess.rollback() """ def __enter__(self): self._exc_info = sys.exc_info() def __exit__(self, type_, value, traceback): # see #2703 for notes if type_ is None: exc_type, exc_value, exc_tb = self._exc_info self._exc_info = None # remove potential circular references compat.reraise(exc_type, exc_value, exc_tb) else: self._exc_info = None # remove potential circular references compat.reraise(type_, value, traceback) def decode_slice(slc): """decode a slice object as sent to __getitem__. takes into account the 2.5 __index__() method, basically. 
""" ret = [] for x in slc.start, slc.stop, slc.step: if hasattr(x, '__index__'): x = x.__index__() ret.append(x) return tuple(ret) def _unique_symbols(used, *bases): used = set(used) for base in bases: pool = itertools.chain((base,), compat.itertools_imap(lambda i: base + str(i), range(1000))) for sym in pool: if sym not in used: used.add(sym) yield sym break else: raise NameError("exhausted namespace for symbol base %s" % base) def decorator(target): """A signature-matching decorator factory.""" def decorate(fn): if not inspect.isfunction(fn): raise Exception("not a decoratable function") spec = compat.inspect_getfullargspec(fn) names = tuple(spec[0]) + spec[1:3] + (fn.__name__,) targ_name, fn_name = _unique_symbols(names, 'target', 'fn') metadata = dict(target=targ_name, fn=fn_name) metadata.update(format_argspec_plus(spec, grouped=False)) metadata['name'] = fn.__name__ code = """\ def %(name)s(%(args)s): return %(target)s(%(fn)s, %(apply_kw)s) """ % metadata decorated = _exec_code_in_env(code, {targ_name: target, fn_name: fn}, fn.__name__) decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__ decorated.__wrapped__ = fn return update_wrapper(decorated, fn) return update_wrapper(decorate, target) def _exec_code_in_env(code, env, fn_name): exec(code, env) return env[fn_name] def public_factory(target, location): """Produce a wrapping function for the given cls or classmethod. Rationale here is so that the __init__ method of the class can serve as documentation for the function. """ if isinstance(target, type): fn = target.__init__ callable_ = target doc = "Construct a new :class:`.%s` object. \n\n"\ "This constructor is mirrored as a public API function; see :func:`~%s` "\ "for a full usage and argument description." % ( target.__name__, location, ) else: fn = callable_ = target doc = "This function is mirrored; see :func:`~%s` "\ "for a description of arguments." 
% location location_name = location.split(".")[-1] spec = compat.inspect_getfullargspec(fn) del spec[0][0] metadata = format_argspec_plus(spec, grouped=False) metadata['name'] = location_name code = """\ def %(name)s(%(args)s): return cls(%(apply_kw)s) """ % metadata env = {'cls': callable_, 'symbol': symbol} exec(code, env) decorated = env[location_name] decorated.__doc__ = fn.__doc__ if compat.py2k or hasattr(fn, '__func__'): fn.__func__.__doc__ = doc else: fn.__doc__ = doc return decorated class PluginLoader(object): def __init__(self, group, auto_fn=None): self.group = group self.impls = {} self.auto_fn = auto_fn def load(self, name): if name in self.impls: return self.impls[name]() if self.auto_fn: loader = self.auto_fn(name) if loader: self.impls[name] = loader return loader() try: import pkg_resources except ImportError: pass else: for impl in pkg_resources.iter_entry_points( self.group, name): self.impls[name] = impl.load return impl.load() raise exc.NoSuchModuleError( "Can't load plugin: %s:%s" % (self.group, name)) def register(self, name, modulepath, objname): def load(): mod = compat.import_(modulepath) for token in modulepath.split(".")[1:]: mod = getattr(mod, token) return getattr(mod, objname) self.impls[name] = load def get_cls_kwargs(cls, _set=None): """Return the full set of inherited kwargs for the given `cls`. Probes a class's __init__ method, collecting all named arguments. If the __init__ defines a \**kwargs catch-all, then the constructor is presumed to pass along unrecognized keywords to it's base classes, and the collection process is repeated recursively on each of the bases. Uses a subset of inspect.getargspec() to cut down on method overhead. No anonymous tuple arguments please ! 
""" toplevel = _set == None if toplevel: _set = set() ctr = cls.__dict__.get('__init__', False) has_init = ctr and isinstance(ctr, types.FunctionType) and \ isinstance(ctr.__code__, types.CodeType) if has_init: names, has_kw = inspect_func_args(ctr) _set.update(names) if not has_kw and not toplevel: return None if not has_init or has_kw: for c in cls.__bases__: if get_cls_kwargs(c, _set) is None: break _set.discard('self') return _set try: # TODO: who doesn't have this constant? from inspect import CO_VARKEYWORDS def inspect_func_args(fn): co = fn.__code__ nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) has_kw = bool(co.co_flags & CO_VARKEYWORDS) return args, has_kw except ImportError: def inspect_func_args(fn): names, _, has_kw, _ = inspect.getargspec(fn) return names, bool(has_kw) def get_func_kwargs(func): """Return the set of legal kwargs for the given `func`. Uses getargspec so is safe to call for methods, functions, etc. """ return compat.inspect_getargspec(func)[0] def get_callable_argspec(fn, no_self=False, _is_init=False): """Return the argument signature for any callable. All pure-Python callables are accepted, including functions, methods, classes, objects with __call__; builtins and other edge cases like functools.partial() objects raise a TypeError. 
""" if inspect.isbuiltin(fn): raise TypeError("Can't inspect builtin: %s" % fn) elif inspect.isfunction(fn): if _is_init and no_self: spec = compat.inspect_getargspec(fn) return compat.ArgSpec(spec.args[1:], spec.varargs, spec.keywords, spec.defaults) else: return compat.inspect_getargspec(fn) elif inspect.ismethod(fn): if no_self and (_is_init or fn.__self__): spec = compat.inspect_getargspec(fn.__func__) return compat.ArgSpec(spec.args[1:], spec.varargs, spec.keywords, spec.defaults) else: return compat.inspect_getargspec(fn.__func__) elif inspect.isclass(fn): return get_callable_argspec(fn.__init__, no_self=no_self, _is_init=True) elif hasattr(fn, '__func__'): return compat.inspect_getargspec(fn.__func__) elif hasattr(fn, '__call__'): if inspect.ismethod(fn.__call__): return get_callable_argspec(fn.__call__, no_self=no_self) else: raise TypeError("Can't inspect callable: %s" % fn) else: raise TypeError("Can't inspect callable: %s" % fn) def format_argspec_plus(fn, grouped=True): """Returns a dictionary of formatted, introspected function arguments. A enhanced variant of inspect.formatargspec to support code generation. fn An inspectable callable or tuple of inspect getargspec() results. grouped Defaults to True; include (parens, around, argument) lists Returns: args Full inspect.formatargspec for fn self_arg The name of the first positional argument, varargs[0], or None if the function defines no positional arguments. apply_pos args, re-written in calling rather than receiving syntax. Arguments are passed positionally. apply_kw Like apply_pos, except keyword-ish args are passed as keywords. Example:: >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123) {'args': '(self, a, b, c=3, **d)', 'self_arg': 'self', 'apply_kw': '(self, a, b, c=c, **d)', 'apply_pos': '(self, a, b, c, **d)'} """ if compat.callable(fn): spec = compat.inspect_getfullargspec(fn) else: # we accept an existing argspec... 
spec = fn args = inspect.formatargspec(*spec) if spec[0]: self_arg = spec[0][0] elif spec[1]: self_arg = '%s[0]' % spec[1] else: self_arg = None if compat.py3k: apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2], None, spec[4]) num_defaults = 0 if spec[3]: num_defaults += len(spec[3]) if spec[4]: num_defaults += len(spec[4]) name_args = spec[0] + spec[4] else: apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2]) num_defaults = 0 if spec[3]: num_defaults += len(spec[3]) name_args = spec[0] if num_defaults: defaulted_vals = name_args[0 - num_defaults:] else: defaulted_vals = () apply_kw = inspect.formatargspec(name_args, spec[1], spec[2], defaulted_vals, formatvalue=lambda x: '=' + x) if grouped: return dict(args=args, self_arg=self_arg, apply_pos=apply_pos, apply_kw=apply_kw) else: return dict(args=args[1:-1], self_arg=self_arg, apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1]) def format_argspec_init(method, grouped=True): """format_argspec_plus with considerations for typical __init__ methods Wraps format_argspec_plus with error handling strategies for typical __init__ cases:: object.__init__ -> (self) other unreflectable (usually C) -> (self, *args, **kwargs) """ if method is object.__init__: args = grouped and '(self)' or 'self' else: try: return format_argspec_plus(method, grouped=grouped) except TypeError: args = (grouped and '(self, *args, **kwargs)' or 'self, *args, **kwargs') return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args) def getargspec_init(method): """inspect.getargspec with considerations for typical __init__ methods Wraps inspect.getargspec with error handling for typical __init__ cases:: object.__init__ -> (self) other unreflectable (usually C) -> (self, *args, **kwargs) """ try: return inspect.getargspec(method) except TypeError: if method is object.__init__: return (['self'], None, None, None) else: return (['self'], 'args', 'kwargs', None) def unbound_method_to_callable(func_or_cls): """Adjust the incoming 
callable such that a 'self' argument is not required. """ if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__: return func_or_cls.__func__ else: return func_or_cls def generic_repr(obj, additional_kw=(), to_inspect=None): """Produce a __repr__() based on direct association of the __init__() specification vs. same-named attributes present. """ if to_inspect is None: to_inspect = [obj] else: to_inspect = _collections.to_list(to_inspect) missing = object() pos_args = [] kw_args = _collections.OrderedDict() vargs = None for i, insp in enumerate(to_inspect): try: (_args, _vargs, vkw, defaults) = \ inspect.getargspec(insp.__init__) except TypeError: continue else: default_len = defaults and len(defaults) or 0 if i == 0: if _vargs: vargs = _vargs if default_len: pos_args.extend(_args[1:-default_len]) else: pos_args.extend(_args[1:]) else: kw_args.update([ (arg, missing) for arg in _args[1:-default_len] ]) if default_len: kw_args.update([ (arg, default) for arg, default in zip(_args[-default_len:], defaults) ]) output = [] output.extend(repr(getattr(obj, arg, None)) for arg in pos_args) if vargs is not None and hasattr(obj, vargs): output.extend([repr(val) for val in getattr(obj, vargs)]) for arg, defval in kw_args.items(): try: val = getattr(obj, arg, missing) if val is not missing and val != defval: output.append('%s=%r' % (arg, val)) except: pass if additional_kw: for arg, defval in additional_kw: try: val = getattr(obj, arg, missing) if val is not missing and val != defval: output.append('%s=%r' % (arg, val)) except: pass return "%s(%s)" % (obj.__class__.__name__, ", ".join(output)) class portable_instancemethod(object): """Turn an instancemethod into a (parent, name) pair to produce a serializable callable. 
""" def __init__(self, meth): self.target = meth.__self__ self.name = meth.__name__ def __call__(self, *arg, **kw): return getattr(self.target, self.name)(*arg, **kw) def class_hierarchy(cls): """Return an unordered sequence of all classes related to cls. Traverses diamond hierarchies. Fibs slightly: subclasses of builtin types are not returned. Thus class_hierarchy(class A(object)) returns (A, object), not A plus every class systemwide that derives from object. Old-style classes are discarded and hierarchies rooted on them will not be descended. """ if compat.py2k: if isinstance(cls, types.ClassType): return list() hier = set([cls]) process = list(cls.__mro__) while process: c = process.pop() if compat.py2k: if isinstance(c, types.ClassType): continue bases = (_ for _ in c.__bases__ if _ not in hier and not isinstance(_, types.ClassType)) else: bases = (_ for _ in c.__bases__ if _ not in hier) for b in bases: process.append(b) hier.add(b) if compat.py3k: if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'): continue else: if c.__module__ == '__builtin__' or not hasattr(c, '__subclasses__'): continue for s in [_ for _ in c.__subclasses__() if _ not in hier]: process.append(s) hier.add(s) return list(hier) def iterate_attributes(cls): """iterate all the keys and attributes associated with a class, without using getattr(). Does not use getattr() so that class-sensitive descriptors (i.e. property.__get__()) are not called. 
""" keys = dir(cls) for key in keys: for c in cls.__mro__: if key in c.__dict__: yield (key, c.__dict__[key]) break def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None, name='self.proxy', from_instance=None): """Automates delegation of __specials__ for a proxying type.""" if only: dunders = only else: if skip is None: skip = ('__slots__', '__del__', '__getattribute__', '__metaclass__', '__getstate__', '__setstate__') dunders = [m for m in dir(from_cls) if (m.startswith('__') and m.endswith('__') and not hasattr(into_cls, m) and m not in skip)] for method in dunders: try: fn = getattr(from_cls, method) if not hasattr(fn, '__call__'): continue fn = getattr(fn, 'im_func', fn) except AttributeError: continue try: spec = inspect.getargspec(fn) fn_args = inspect.formatargspec(spec[0]) d_args = inspect.formatargspec(spec[0][1:]) except TypeError: fn_args = '(self, *args, **kw)' d_args = '(*args, **kw)' py = ("def %(method)s%(fn_args)s: " "return %(name)s.%(method)s%(d_args)s" % locals()) env = from_instance is not None and {name: from_instance} or {} compat.exec_(py, env) try: env[method].__defaults__ = fn.__defaults__ except AttributeError: pass setattr(into_cls, method, env[method]) def methods_equivalent(meth1, meth2): """Return True if the two methods are the same implementation.""" return getattr(meth1, '__func__', meth1) is getattr(meth2, '__func__', meth2) def as_interface(obj, cls=None, methods=None, required=None): """Ensure basic interface compliance for an instance or dict of callables. Checks that ``obj`` implements public methods of ``cls`` or has members listed in ``methods``. If ``required`` is not supplied, implementing at least one interface method is sufficient. Methods present on ``obj`` that are not in the interface are ignored. If ``obj`` is a dict and ``dict`` does not meet the interface requirements, the keys of the dictionary are inspected. Keys present in ``obj`` that are not in the interface will raise TypeErrors. 
    Raises TypeError if ``obj`` does not meet the interface criteria.

    In all passing cases, an object with callable members is returned.  In the
    simple case, ``obj`` is returned as-is; if dict processing kicks in then
    an anonymous class is returned.

    obj
      A type, instance, or dictionary of callables.

    cls
      Optional, a type.  All public methods of cls are considered the
      interface.  An ``obj`` instance of cls will always pass, ignoring
      ``required``.

    methods
      Optional, a sequence of method names to consider as the interface.

    required
      Optional, a sequence of mandatory implementations. If omitted, an
      ``obj`` that provides at least one interface method is considered
      sufficient.  As a convenience, required may be a type, in which case
      all public methods of the type are required.

    """
    if not cls and not methods:
        raise TypeError('a class or collection of method names are required')

    # Instances of cls pass unconditionally.
    if isinstance(cls, type) and isinstance(obj, cls):
        return obj

    interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
    implemented = set(dir(obj))

    # complies(found, required): ge = "all required present",
    # gt = "strictly more than the (empty) required set", i.e. at least one.
    complies = operator.ge
    if isinstance(required, type):
        required = interface
    elif not required:
        required = set()
        complies = operator.gt
    else:
        required = set(required)

    if complies(implemented.intersection(interface), required):
        return obj

    # No dict duck typing here.
    if not type(obj) is dict:
        qualifier = complies is operator.gt and 'any of' or 'all of'
        raise TypeError("%r does not implement %s: %s" % (
            obj, qualifier, ', '.join(interface)))

    class AnonymousInterface(object):
        """A callable-holding shell."""

    if cls:
        AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
    found = set()

    # Build the anonymous class from the dict's callables, validating each
    # key against the interface.
    for method, impl in dictlike_iteritems(obj):
        if method not in interface:
            raise TypeError("%r: unknown in this interface" % method)
        if not compat.callable(impl):
            raise TypeError("%r=%r is not callable" % (method, impl))
        setattr(AnonymousInterface, method, staticmethod(impl))
        found.add(method)

    if complies(found, required):
        return AnonymousInterface

    raise TypeError("dictionary does not contain required keys %s" %
                    ', '.join(required - found))


class memoized_property(object):
    """A read-only @property that is only evaluated once."""

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            return self
        # Store the computed value under the property's name in the instance
        # __dict__; subsequent attribute access bypasses this descriptor
        # entirely (non-data descriptor shadowed by the instance attribute).
        obj.__dict__[self.__name__] = result = self.fget(obj)
        return result

    def _reset(self, obj):
        memoized_property.reset(obj, self.__name__)

    @classmethod
    def reset(cls, obj, name):
        # Drop the memoized value so the next access recomputes it.
        obj.__dict__.pop(name, None)


class memoized_instancemethod(object):
    """Decorate a method, memoizing its return value.

    Best applied to no-arg methods: memoization is not sensitive to
    argument values, and will always return the same value even when
    called with different arguments.
""" def __init__(self, fget, doc=None): self.fget = fget self.__doc__ = doc or fget.__doc__ self.__name__ = fget.__name__ def __get__(self, obj, cls): if obj is None: return self def oneshot(*args, **kw): result = self.fget(obj, *args, **kw) memo = lambda *a, **kw: result memo.__name__ = self.__name__ memo.__doc__ = self.__doc__ obj.__dict__[self.__name__] = memo return result oneshot.__name__ = self.__name__ oneshot.__doc__ = self.__doc__ return oneshot class group_expirable_memoized_property(object): """A family of @memoized_properties that can be expired in tandem.""" def __init__(self, attributes=()): self.attributes = [] if attributes: self.attributes.extend(attributes) def expire_instance(self, instance): """Expire all memoized properties for *instance*.""" stash = instance.__dict__ for attribute in self.attributes: stash.pop(attribute, None) def __call__(self, fn): self.attributes.append(fn.__name__) return memoized_property(fn) def method(self, fn): self.attributes.append(fn.__name__) return memoized_instancemethod(fn) def dependency_for(modulename): def decorate(obj): # TODO: would be nice to improve on this import silliness, # unfortunately importlib doesn't work that great either tokens = modulename.split(".") mod = compat.import_(".".join(tokens[0:-1]), globals(), locals(), tokens[-1]) mod = getattr(mod, tokens[-1]) setattr(mod, obj.__name__, obj) return obj return decorate class dependencies(object): """Apply imported dependencies as arguments to a function. E.g.:: @util.dependencies( "sqlalchemy.sql.widget", "sqlalchemy.engine.default" ); def some_func(self, widget, default, arg1, arg2, **kw): # ... Rationale is so that the impact of a dependency cycle can be associated directly with the few functions that cause the cycle, and not pollute the module-level namespace. 
""" def __init__(self, *deps): self.import_deps = [] for dep in deps: tokens = dep.split(".") self.import_deps.append( dependencies._importlater( ".".join(tokens[0:-1]), tokens[-1] ) ) def __call__(self, fn): import_deps = self.import_deps spec = compat.inspect_getfullargspec(fn) spec_zero = list(spec[0]) hasself = spec_zero[0] in ('self', 'cls') for i in range(len(import_deps)): spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i inner_spec = format_argspec_plus(spec, grouped=False) for impname in import_deps: del spec_zero[1 if hasself else 0] spec[0][:] = spec_zero outer_spec = format_argspec_plus(spec, grouped=False) code = 'lambda %(args)s: fn(%(apply_kw)s)' % { "args": outer_spec['args'], "apply_kw": inner_spec['apply_kw'] } decorated = eval(code, locals()) decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__ return update_wrapper(decorated, fn) @classmethod def resolve_all(cls, path): for m in list(dependencies._unresolved): if m._full_path.startswith(path): m._resolve() _unresolved = set() _by_key = {} class _importlater(object): _unresolved = set() _by_key = {} def __new__(cls, path, addtl): key = path + "." + addtl if key in dependencies._by_key: return dependencies._by_key[key] else: dependencies._by_key[key] = imp = object.__new__(cls) return imp def __init__(self, path, addtl): self._il_path = path self._il_addtl = addtl dependencies._unresolved.add(self) @property def _full_path(self): return self._il_path + "." 
+ self._il_addtl

        @memoized_property
        def module(self):
            # Guard against use before resolve_all(); afterwards, return the
            # resolved attribute and memoize it.
            if self in dependencies._unresolved:
                raise ImportError(
                    "importlater.resolve_all() hasn't "
                    "been called (this is %s %s)"
                    % (self._il_path, self._il_addtl))

            return getattr(self._initial_import, self._il_addtl)

        def _resolve(self):
            dependencies._unresolved.discard(self)
            self._initial_import = compat.import_(
                self._il_path, globals(), locals(),
                [self._il_addtl])

        def __getattr__(self, key):
            if key == 'module':
                # memoized_property raised before memoizing; surface a
                # clearer error than infinite recursion.
                raise ImportError("Could not resolve module %s"
                                  % self._full_path)
            try:
                attr = getattr(self.module, key)
            except AttributeError:
                raise AttributeError(
                    "Module %s has no attribute '%s'" %
                    (self._full_path, key)
                )
            # Cache on the instance so __getattr__ is not hit again.
            self.__dict__[key] = attr
            return attr


# from paste.deploy.converters
def asbool(obj):
    """Coerce a string such as 'true'/'no'/'1' (case-insensitive) or any
    other object to a boolean; raise ValueError for unrecognized strings."""
    if isinstance(obj, compat.string_types):
        obj = obj.strip().lower()
        if obj in ['true', 'yes', 'on', 'y', 't', '1']:
            return True
        elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
            return False
        else:
            raise ValueError("String is not true/false: %r" % obj)
    return bool(obj)


def bool_or_str(*text):
    """Return a callable that will evaluate a string as
    boolean, or one of a set of "alternate" string values.

    """
    def bool_or_value(obj):
        # Pass listed sentinel strings through untouched; everything else is
        # coerced by asbool().
        if obj in text:
            return obj
        else:
            return asbool(obj)
    return bool_or_value


def asint(value):
    """Coerce to integer, passing None through unchanged."""

    if value is None:
        return value
    return int(value)


def coerce_kw_type(kw, key, type_, flexi_bool=True):
    """If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
    necessary.  If 'flexi_bool' is True, the string '0' is considered false
    when coercing to boolean.
    """

    if key in kw and type(kw[key]) is not type_ and kw[key] is not None:
        if type_ is bool and flexi_bool:
            kw[key] = asbool(kw[key])
        else:
            kw[key] = type_(kw[key])


def constructor_copy(obj, cls, **kw):
    """Instantiate cls using the __dict__ of obj as constructor arguments.

    Uses inspect to match the named arguments of ``cls``.

    """
    names = get_cls_kwargs(cls)
    # Explicit kw entries win over attributes copied from obj? No —
    # kw.update overwrites: attributes from obj take precedence here.
    kw.update(
        (k, obj.__dict__[k]) for k in names if k in obj.__dict__)
    return cls(**kw)


def counter():
    """Return a threadsafe counter function."""

    lock = compat.threading.Lock()
    counter = itertools.count(1)

    # avoid the 2to3 "next" transformation...
    def _next():
        lock.acquire()
        try:
            return next(counter)
        finally:
            lock.release()

    return _next


def duck_type_collection(specimen, default=None):
    """Given an instance or class, guess if it is or is acting as one of
    the basic collection types: list, set and dict.  If the __emulates__
    property is present, return that preferentially.
    """

    if hasattr(specimen, '__emulates__'):
        # canonicalize set vs sets.Set to a standard: the builtin set
        if (specimen.__emulates__ is not None and
                issubclass(specimen.__emulates__, set)):
            return set
        else:
            return specimen.__emulates__

    # Works on both classes (issubclass) and instances (isinstance).
    isa = isinstance(specimen, type) and issubclass or isinstance
    if isa(specimen, list):
        return list
    elif isa(specimen, set):
        return set
    elif isa(specimen, dict):
        return dict

    # Fall back to duck-typing by characteristic mutator method.
    if hasattr(specimen, 'append'):
        return list
    elif hasattr(specimen, 'add'):
        return set
    elif hasattr(specimen, 'set'):
        return dict
    else:
        return default


def assert_arg_type(arg, argtype, name):
    """Return ``arg`` if it is an instance of ``argtype`` (a type or tuple
    of types); otherwise raise ArgumentError naming parameter ``name``."""
    if isinstance(arg, argtype):
        return arg
    else:
        if isinstance(argtype, tuple):
            raise exc.ArgumentError(
                "Argument '%s' is expected to be one of type %s, got '%s'" %
                (name, ' or '.join("'%s'" % a for a in argtype), type(arg)))
        else:
            raise exc.ArgumentError(
                "Argument '%s' is expected to be of type '%s', got '%s'" %
                (name, argtype, type(arg)))


def dictlike_iteritems(dictlike):
    """Return a (key, value) iterator for almost any dict-like object."""

    if compat.py3k:
        if hasattr(dictlike, 'items'):
            return list(dictlike.items())
    else:
        if hasattr(dictlike, 'iteritems'):
            return dictlike.iteritems()
        elif hasattr(dictlike, 'items'):
            return iter(dictlike.items())

    # No items()-style protocol: fall back to key iteration plus a getter.
    getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
    if getter is None:
        raise TypeError(
            "Object '%r' is not dict-like" %
dictlike)

    if hasattr(dictlike, 'iterkeys'):
        def iterator():
            for key in dictlike.iterkeys():
                yield key, getter(key)
        return iterator()
    elif hasattr(dictlike, 'keys'):
        return iter((key, getter(key)) for key in dictlike.keys())
    else:
        raise TypeError(
            "Object '%r' is not dict-like" % dictlike)


class classproperty(property):
    """A decorator that behaves like @property except that operates
    on classes rather than instances.

    The decorator is currently special when using the declarative
    module, but note that the
    :class:`~.sqlalchemy.ext.declarative.declared_attr`
    decorator should be used for this purpose with declarative.

    """

    def __init__(self, fget, *arg, **kw):
        super(classproperty, self).__init__(fget, *arg, **kw)
        self.__doc__ = fget.__doc__

    def __get__(desc, self, cls):
        # Invoke the getter with the owning class, not the instance.
        return desc.fget(cls)


class hybridmethod(object):
    """Decorate a function as cls- or instance- level."""

    def __init__(self, func, expr=None):
        self.func = func

    def __get__(self, instance, owner):
        # Bind to the class when accessed on the class, to the instance
        # when accessed on an instance.
        if instance is None:
            return self.func.__get__(owner, owner.__class__)
        else:
            return self.func.__get__(instance, owner)


class _symbol(int):
    def __new__(self, name, doc=None, canonical=None):
        """Construct a new named symbol."""
        assert isinstance(name, compat.string_types)
        if canonical is None:
            # Default integer identity derives from the name's hash.
            canonical = hash(name)
        v = int.__new__(_symbol, canonical)
        v.name = name
        if doc:
            v.__doc__ = doc
        return v

    def __reduce__(self):
        # Pickle round-trips through symbol(), preserving interning.
        return symbol, (self.name, "x", int(self))

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "symbol(%r)" % self.name

_symbol.__name__ = 'symbol'


class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    symbol('foo')

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  They are also singletons.

    Repeated calls of symbol('name') will all return the same instance.

    The optional ``doc`` argument assigns to ``__doc__``.

    This is strictly so that Sphinx autoattr picks up the docstring we want
    (it doesn't appear to pick up the in-module docstring if the datamember
    is in a different module - autoattribute also blows up completely).
    If Sphinx fixes/improves this then we would no longer need
    ``doc`` here.

    """
    symbols = {}
    _lock = compat.threading.Lock()

    def __new__(cls, name, doc=None, canonical=None):
        # Lock guards the shared symbols registry against concurrent
        # construction of the same name.
        cls._lock.acquire()
        try:
            sym = cls.symbols.get(name)
            if sym is None:
                cls.symbols[name] = sym = _symbol(name, doc, canonical)
            return sym
        finally:
            symbol._lock.release()


_creation_order = 1


def set_creation_order(instance):
    """Assign a '_creation_order' sequence to the given instance.

    This allows multiple instances to be sorted in order of creation
    (typically within a single thread; the counter is not particularly
    threadsafe).

    """
    global _creation_order
    instance._creation_order = _creation_order
    _creation_order += 1


def warn_exception(func, *args, **kwargs):
    """Execute the given function, catching all exceptions and converting
    them to a warning.

    """
    try:
        return func(*args, **kwargs)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit,
    # turning them into warnings — confirm this is intentional.
    except:
        warn("%s('%s') ignored" % sys.exc_info()[0:2])


def warn(msg, stacklevel=3):
    """Issue a warning.

    If msg is a string, :class:`.exc.SAWarning` is used as
    the category.

    .. note::

       This function is swapped out when the test suite
       runs, with a compatible version that uses
       warnings.warn_explicit, so that the warnings registry can
       be controlled.
""" if isinstance(msg, compat.string_types): warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel) else: warnings.warn(msg, stacklevel=stacklevel) def only_once(fn): """Decorate the given function to be a no-op after it is called exactly once.""" once = [fn] def go(*arg, **kw): if once: once_fn = once.pop() return once_fn(*arg, **kw) return go _SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py') _UNITTEST_RE = re.compile(r'unit(?:2|test2?/)') def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE): """Chop extraneous lines off beginning and end of a traceback. :param tb: a list of traceback lines as returned by ``traceback.format_stack()`` :param exclude_prefix: a regular expression object matching lines to skip at beginning of ``tb`` :param exclude_suffix: a regular expression object matching lines to skip at end of ``tb`` """ start = 0 end = len(tb) - 1 while start <= end and exclude_prefix.search(tb[start]): start += 1 while start <= end and exclude_suffix.search(tb[end]): end -= 1 return tb[start:end + 1] NoneType = type(None)
gpl-3.0
DataDog/integrations-core
datadog_checks_dev/datadog_checks/dev/tooling/commands/env/check.py
1
3151
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json

import click

from ....fs import read_file
from ...e2e import create_interface, get_configured_envs
from ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success


@click.command('check', context_settings=CONTEXT_SETTINGS, short_help='Run an Agent check')
@click.argument('check')
@click.argument('env', required=False)
@click.option(
    '--rate', '-r', is_flag=True, help='Compute rates by running the check twice with a pause between each run'
)
@click.option('--times', '-t', type=click.INT, help='Number of times to run the check')
@click.option('--pause', type=click.INT, help='Number of milliseconds to pause between multiple check runs')
@click.option(
    '--delay',
    '-d',
    type=click.INT,
    help='Delay in milliseconds between running the check and grabbing what was collected',
)
@click.option('--log-level', '-l', help='Set the log level (default `off`)')
@click.option('--json', 'as_json', is_flag=True, help='Format the aggregator and check runner output as JSON')
@click.option('--table', 'as_table', is_flag=True, help='Format the aggregator and check runner output as tabular')
@click.option(
    '--breakpoint',
    '-b',
    'break_point',
    type=click.INT,
    help='Line number to start a PDB session (0: first line, -1: last line)',
)
@click.option('--config', 'config_file', help='Path to a JSON check configuration to use')
@click.option('--jmx-list', 'jmx_list', help='JMX metrics listing method')
def check_run(check, env, rate, times, pause, delay, log_level, as_json, as_table, break_point, config_file, jmx_list):
    """Run an Agent check."""
    # An environment must already be started (`ddev env start`); bail out
    # with guidance otherwise.  abort() terminates the command.
    envs = get_configured_envs(check)
    if not envs:
        echo_failure(f'No active environments found for `{check}`.')
        echo_info(f'See what is available to start via `ddev env ls {check}`.')
        abort()

    if not env:
        # ENV argument omitted: only unambiguous when exactly one is active.
        if len(envs) > 1:
            echo_failure(f'Multiple active environments found for `{check}`, please specify one.')
            echo_info('See what is active via `ddev env ls`.')
            abort()

        env = envs[0]

    if env not in envs:
        echo_failure(f'`{env}` is not an active environment.')
        echo_info('See what is active via `ddev env ls`.')
        abort()

    environment = create_interface(check, env)

    # All run options are forwarded verbatim to the environment interface.
    check_args = dict(
        rate=rate,
        times=times,
        pause=pause,
        delay=delay,
        log_level=log_level,
        as_json=as_json,
        as_table=as_table,
        break_point=break_point,
        jmx_list=jmx_list,
    )
    if config_file:
        # Temporarily swap in the user-supplied JSON config for this run.
        config = json.loads(read_file(config_file))
        with environment.use_config(config):
            environment.run_check(**check_args)
    else:
        environment.run_check(**check_args)

    if not rate and not as_json:
        # Human-readable runs get a hint about rate computation.
        echo_success('Note: ', nl=False)
        echo_info(
            'If some metrics are missing, you may want to try again with the -r / --rate flag '
            'for a classic integration.'
        )
bsd-3-clause