code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Functions
=========

Contains core functions and data for use by :mod:`pyfarm.models`
"""

from uuid import UUID
from datetime import datetime
from textwrap import dedent

from pyfarm.core.enums import STRING_TYPES
from pyfarm.master.application import db
from pyfarm.master.config import config
from pyfarm.models.core.types import (
    id_column, IDTypeWork, IPAddress, WorkStateEnum)

# Default priority assigned to new work; sourced from the queue configuration.
DEFAULT_PRIORITY = config.get("queue_default_priority")


def modelfor(model, table):
    """
    Returns True if the given `model` object is for the expected `table`.

    >>> from pyfarm.master.config import config
    >>> from pyfarm.models.agent import Agent
    >>> modelfor(Agent("foo", "10.56.0.0", "255.0.0.0"), config.get("table_agent"))
    True
    """
    try:
        return model.__tablename__ == table
    except AttributeError:
        # objects without ``__tablename__`` are simply not models
        return False


def getuuid(value, table, table_attrib, error_tail):
    """
    Returns the proper value for the given input.

    Depending on the type being provided this will return
    one of the following:

        * None
        * the value from an attribute
        * string from a UUID
        * the original value (after validating it's a UUID)

    :arg string value:
        the value to validate and returning data from

    :arg string table:
        the table which the provided `value` belongs to

    :arg string table_attrib:
        the attribute to use when attempting to pull data off of a model
        object

    :arg string error_tail:
        added to the end of error messages

    :exception ValueError:
        raised when the provided input is invalid, blank, or otherwise
        unexpected
    """
    if value is None:
        return value

    elif modelfor(value, table):
        value = getattr(value, table_attrib, None)
        if value is None:
            raise ValueError("null id provided for %s" % error_tail)
        return value

    # if a string was provided then we should
    # try to convert it into a uuid first to
    # be sure it's valid
    elif isinstance(value, STRING_TYPES):
        UUID(value)  # raises ValueError for malformed input
        return value

    elif isinstance(value, UUID):
        return str(value)

    else:
        raise ValueError("failed to determine %s" % error_tail)


def work_columns(state_default, priority_default):
    """
    Produces some default columns which are used by models
    which produce work.
    """
    return (
        # id
        id_column(IDTypeWork),

        # state
        db.Column(WorkStateEnum, default=state_default,
                  doc=dedent("""
                  The state of the job with a value provided by
                  :class:`.WorkState`""")),

        # priority
        db.Column(db.Integer, default=DEFAULT_PRIORITY,
                  doc=dedent("""
                  The priority of the job relative to others in the
                  queue.  This is not the same as task priority.

                  **configured by**: `%s`""" % priority_default)),

        # time_submitted
        db.Column(db.DateTime, default=datetime.utcnow,
                  doc=dedent("""
                  The time the job was submitted.  By default this
                  defaults to using :meth:`datetime.datetime.utcnow`
                  as the source of submission time.

                  This value will not be set more than once and will
                  not change even after a job is requeued.""")),

        # time_started
        db.Column(db.DateTime,
                  doc=dedent("""
                  The time this job was started.  By default this value is
                  set when :attr:`state` is changed to an appropriate value
                  or when a job is requeued.""")),

        # time_finished
        db.Column(db.DateTime,
                  doc=dedent("""
                  Time the job was finished.  This will be set when the last
                  task finishes and reset if a job is requeued.""")))


def split_and_extend(items):
    """
    Takes a list of input elements and splits them before producing an
    extended set containing every dotted prefix.

    **Example**

    >>> split_and_extend(["root.admin", "admin"])
    set(['admin', 'root.admin', 'root'])
    """
    if not items:
        # preserve the falsy input unchanged (None stays None, [] stays [])
        return items

    output = set()
    for item in items:
        parts = item.split(".")
        # add every prefix: "a.b.c" -> "a", "a.b", "a.b.c"
        for end in range(1, len(parts) + 1):
            output.add(".".join(parts[:end]))

    return output


def repr_ip(value):
    """properly formats an :class:`.IPAddress` object"""
    if isinstance(value, IPAddress):
        value = value.format()
    return repr(value)


def repr_enum(value, enum=None):
    """
    Produces the string representation of an enum value.

    :exception KeyError:
        raised when `value` does not map to any key in `enum`
    """
    assert enum is not None, "`enum` required"

    # NOTE: the original implementation shadowed ``value`` with the loop
    # variable and compared ``value == value`` (always True), so it returned
    # the first key regardless of input.  ``iteritems()`` was also Python 2
    # only; ``items()`` works on both.
    for key, enum_value in enum._asdict().items():
        if enum_value == value:
            return repr(key)

    raise KeyError(
        "%s does not map to a key in %s" % (repr(value), enum.__class__))
unknown
codeparrot/codeparrot-clean
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import sys
import time

from eventlet import event
from eventlet import greenthread

from nova.openstack.common._i18n import _LE, _LW

LOG = logging.getLogger(__name__)

# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
#              with time.time() called in the standard logging module
#              during unittests.
_ts = lambda: time.time()


class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCallBase.

    The poll-function passed to LoopingCallBase can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the
    exception; this return-value will be returned by LoopingCallBase.wait()

    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCallBase.wait() should return."""
        self.retvalue = retvalue


class LoopingCallBase(object):
    """Shared state and control surface for the looping-call variants below.

    ``f`` is the callable invoked each iteration with the stored positional
    and keyword arguments; subclasses implement ``start`` and are expected
    to assign ``self.done`` (an eventlet Event) before spawning the loop.
    """

    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        # flag checked by the running greenthread; stop() flips it
        self._running = False
        # eventlet.event.Event set by start(); wait() blocks on it
        self.done = None

    def stop(self):
        # Cooperative stop: the loop exits at its next _running check.
        self._running = False

    def wait(self):
        # Blocks until the loop finishes; returns the value sent on done
        # (True on normal exit, or LoopingCallDone.retvalue).
        return self.done.wait()


class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        """Spawn the loop; ``f`` is invoked every ``interval`` seconds.

        :param interval: target seconds between successive invocation starts
        :param initial_delay: optional seconds to sleep before the first call
        :returns: the eventlet Event callers can wait() on
        """
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = _ts()
                    self.f(*self.args, **self.kw)
                    end = _ts()
                    if not self._running:
                        break
                    # delay > 0 means f() ran longer than the interval;
                    # warn and start the next iteration immediately.
                    delay = end - start - interval
                    if delay > 0:
                        LOG.warning(_LW('task %(func_name)r run outlasted '
                                        'interval by %(delay).2f sec'),
                                    {'func_name': self.f, 'delay': delay})
                    # negative delay == time remaining in the interval;
                    # sleep(0) still yields to other greenthreads.
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                # Normal, caller-requested termination with a return value.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                # Propagate the failure to whoever is blocked in wait().
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done


class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.

    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        """Spawn the loop; each call to ``f`` returns the next sleep length.

        :param initial_delay: optional seconds to sleep before the first call
        :param periodic_interval_max: optional upper bound on the sleep
            requested by ``f``
        :returns: the eventlet Event callers can wait() on
        """
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    # f() decides how long to idle before its next run
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
                              'for %(idle).02f seconds',
                              {'func_name': self.f, 'idle': idle})
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                # Normal, caller-requested termination with a return value.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # Copyright: (c) 2018, Kevin Breit <kevin.breit@kevinbreit.net> # All rights reserved. # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os

from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native, to_bytes, to_text


def meraki_argument_spec():
    """Return the argument spec shared by all Meraki modules."""
    return dict(auth_key=dict(type='str', no_log=True, fallback=(env_fallback, ['MERAKI_KEY'])),
                host=dict(type='str', default='api.meraki.com'),
                use_proxy=dict(type='bool', default=False),
                use_https=dict(type='bool', default=True),
                validate_certs=dict(type='bool', default=True),
                output_level=dict(type='str', default='normal', choices=['normal', 'debug']),
                timeout=dict(type='int', default=30),
                org_name=dict(type='str', aliases=['organization']),
                org_id=dict(type='str'),
                )


class MerakiModule(object):
    """Helper wrapped around AnsibleModule for talking to the Meraki
    Dashboard REST API (v0): URL construction, org/network lookups,
    request execution and result/error reporting.
    """

    def __init__(self, module, function=None):
        self.module = module
        self.params = module.params
        self.result = dict(changed=False)
        self.headers = dict()
        self.function = function
        self.orgs = None
        self.nets = None
        self.org_id = None
        self.net_id = None

        # normal output
        self.existing = None

        # info output
        self.config = dict()
        self.original = None
        self.proposed = dict()
        self.merged = None

        # debug output
        self.filter_string = ''
        self.method = None
        self.path = None
        self.response = None
        self.status = None
        self.url = None

        # If URLs need to be modified or added for specific purposes,
        # use .update() on the url_catalog dictionary
        self.get_urls = {'organizations': '/organizations',
                         'network': '/organizations/{org_id}/networks',
                         'admins': '/organizations/{org_id}/admins',
                         'configTemplates': '/organizations/{org_id}/configTemplates',
                         'samlRoles': '/organizations/{org_id}/samlRoles',
                         'ssids': '/networks/{net_id}/ssids',
                         'groupPolicies': '/networks/{net_id}/groupPolicies',
                         'staticRoutes': '/networks/{net_id}/staticRoutes',
                         'vlans': '/networks/{net_id}/vlans',
                         'devices': '/networks/{net_id}/devices',
                         }

        # Used to retrieve only one item
        self.get_one_urls = {'organizations': '/organizations/{org_id}',
                             'network': '/networks/{net_id}',
                             }

        # Modules should add the URLs they require to the appropriate
        # category via .update()
        self.url_catalog = {'get_all': self.get_urls,
                            'get_one': self.get_one_urls,
                            'create': None,
                            'update': None,
                            'delete': None,
                            'misc': None,
                            }

        if self.module._debug or self.params['output_level'] == 'debug':
            self.module.warn('Enable debug output because ANSIBLE_DEBUG was set or output_level is set to debug.')

        # TODO: This should be removed as org_name isn't always required
        self.module.required_if = [('state', 'present', ['org_name']),
                                   ('state', 'absent', ['org_name']),
                                   ]
        self.modifiable_methods = ['POST', 'PUT', 'DELETE']

        self.headers = {'Content-Type': 'application/json',
                        'X-Cisco-Meraki-API-Key': module.params['auth_key'],
                        }

    def define_protocol(self):
        """Set protocol based on use_https parameters."""
        if self.params['use_https'] is True:
            self.params['protocol'] = 'https'
        else:
            self.params['protocol'] = 'http'

    def is_update_required(self, original, proposed, optional_ignore=None):
        """Compare original and proposed data to see if an update is needed.

        Keys in ``optional_ignore`` (plus 'id'/'organizationId') are skipped;
        a proposed key missing from ``original`` counts as a change unless
        its value is the empty string.
        """
        is_changed = False
        ignored_keys = ('id', 'organizationId')
        if not optional_ignore:
            # FIX: previously ('') — an empty *string*, not a tuple — which
            # made the membership test a substring check by accident.
            optional_ignore = ()

        for k, v in proposed.items():
            try:
                if k not in ignored_keys and k not in optional_ignore:
                    if v != original[k]:
                        is_changed = True
            except KeyError:
                if v != '':
                    is_changed = True
        return is_changed

    def get_orgs(self):
        """Downloads all organizations for a user."""
        response = self.request('/organizations', method='GET')
        if self.status != 200:
            self.fail_json(msg='Organization lookup failed')
        self.orgs = response
        return self.orgs

    def is_org_valid(self, data, org_name=None, org_id=None):
        """Checks whether a specific org exists and is duplicated.

        Returns the number of matching organizations:
        0 = doesn't exist, 1 = exists and not duplicated, >1 = duplicated.
        """
        org_count = 0
        if org_name is not None:
            for o in data:
                if o['name'] == org_name:
                    org_count += 1
        if org_id is not None:
            for o in data:
                if o['id'] == org_id:
                    org_count += 1
        return org_count

    def get_org_id(self, org_name):
        """Returns an organization id based on organization name, only if unique.

        If org_id is specified as parameter, return that instead of a lookup.
        """
        orgs = self.get_orgs()
        if self.params['org_id'] is not None:
            # FIX: is_org_valid() returns an int count, so the previous
            # ``is True`` identity test could never succeed and the
            # org_id shortcut was dead code.
            if self.is_org_valid(orgs, org_id=self.params['org_id']) > 0:
                return self.params['org_id']
        org_count = self.is_org_valid(orgs, org_name=org_name)
        if org_count == 0:
            self.fail_json(msg='There are no organizations with the name {org_name}'.format(org_name=org_name))
        if org_count > 1:
            self.fail_json(msg='There are multiple organizations with the name {org_name}'.format(org_name=org_name))
        elif org_count == 1:
            for i in orgs:
                if org_name == i['name']:
                    return str(i['id'])

    def get_nets(self, org_name=None, org_id=None):
        """Downloads all networks in an organization.

        Configuration templates are appended to the result so callers can
        match against either kind of network.
        """
        if org_name:
            org_id = self.get_org_id(org_name)
        path = self.construct_path('get_all', org_id=org_id, function='network')
        r = self.request(path, method='GET')
        if self.status != 200:
            self.fail_json(msg='Network lookup failed')
        self.nets = r
        templates = self.get_config_templates(org_id)
        for t in templates:
            self.nets.append(t)
        return self.nets

    def get_net(self, org_name, net_name, org_id=None, data=None):
        """Return network information for ``net_name`` or False if absent."""
        if not data:
            if not org_id:
                org_id = self.get_org_id(org_name)
            data = self.get_nets(org_id=org_id)
        for n in data:
            if n['name'] == net_name:
                return n
        return False

    def get_net_id(self, org_name=None, net_name=None, data=None):
        """Return network id from lookup or existing data."""
        if data is None:
            self.fail_json(msg='Must implement lookup')
        for n in data:
            if n['name'] == net_name:
                return n['id']
        self.fail_json(msg='No network found with the name {0}'.format(net_name))

    def get_config_templates(self, org_id):
        """Download all configuration templates in an organization."""
        path = self.construct_path('get_all', function='configTemplates', org_id=org_id)
        response = self.request(path, 'GET')
        if self.status != 200:
            self.fail_json(msg='Unable to get configuration templates')
        return response

    def get_template_id(self, name, data):
        """Return the id of the configuration template called ``name``."""
        for template in data:
            if name == template['name']:
                return template['id']
        self.fail_json(msg='No configuration template named {0} found'.format(name))

    def construct_path(self, action, function=None, org_id=None, net_id=None, org_name=None, custom=None):
        """Build a path from the URL catalog.

        Uses function property from class for catalog lookup when no
        explicit ``function`` is given; ``custom`` supplies any extra
        format placeholders the catalog entry uses.
        """
        if function is None:
            built_path = self.url_catalog[action][self.function]
        else:
            built_path = self.url_catalog[action][function]
        if org_name:
            org_id = self.get_org_id(org_name)
        if custom:
            built_path = built_path.format(org_id=org_id, net_id=net_id, **custom)
        else:
            built_path = built_path.format(org_id=org_id, net_id=net_id)
        return built_path

    def request(self, path, method=None, payload=None):
        """Generic HTTP method for Meraki requests.

        Fails the module on HTTP >= 300; otherwise returns the decoded
        JSON body, or None when the body is empty/not JSON.
        """
        self.path = path
        self.define_protocol()

        if method is not None:
            self.method = method
        self.url = '{protocol}://{host}/api/v0/{path}'.format(path=self.path.lstrip('/'), **self.params)
        resp, info = fetch_url(self.module, self.url,
                               headers=self.headers,
                               data=payload,
                               method=self.method,
                               timeout=self.params['timeout'],
                               use_proxy=self.params['use_proxy'],
                               )
        self.response = info['msg']
        self.status = info['status']

        if self.status >= 500:
            self.fail_json(msg='Request failed for {url}: {status} - {msg}'.format(**info))
        elif self.status >= 300:
            self.fail_json(msg='Request failed for {url}: {status} - {msg}'.format(**info),
                           body=json.loads(to_native(info['body'])))
        try:
            return json.loads(to_native(resp.read()))
        except Exception:
            # Deliberate best-effort: some successful calls (e.g. DELETE)
            # return an empty body; treat undecodable bodies as no data.
            pass

    def exit_json(self, **kwargs):
        """Custom written method to exit from module."""
        self.result['response'] = self.response
        self.result['status'] = self.status
        # Return the gory details when we need it
        if self.params['output_level'] == 'debug':
            self.result['method'] = self.method
            self.result['url'] = self.url

        self.result.update(**kwargs)
        self.module.exit_json(**self.result)

    def fail_json(self, msg, **kwargs):
        """Custom written method to return info on failure."""
        self.result['response'] = self.response
        self.result['status'] = self.status

        if self.params['output_level'] == 'debug':
            if self.url is not None:
                self.result['method'] = self.method
                self.result['url'] = self.url

        self.result.update(**kwargs)
        self.module.fail_json(msg=msg, **self.result)
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals

from functools import partial

from tastypie import fields
from tastypie.resources import Resource
from tastypie.exceptions import ApiFieldError

from django.db import models
from django.core.exceptions import ObjectDoesNotExist

from .resources import GenericResource


class GenericForeignKeyField(fields.ToOneField):
    """
    Provides access to GenericForeignKey objects from the django
    content_types framework.

    ``to`` is a mapping of django model classes to the tastypie Resource
    class that should render instances of that model; the concrete resource
    is resolved per related instance in :meth:`get_related_resource`.
    """

    def __init__(self, to, attribute, **kwargs):
        # Validate the model -> resource mapping eagerly so configuration
        # mistakes surface at field definition time, not at request time.
        if not isinstance(to, dict):
            raise ValueError('to field must be a dictionary in GenericForeignKeyField')

        if len(to) <= 0:
            raise ValueError('to field must have some values')

        for k, v in to.items():
            if not issubclass(k, models.Model) or not issubclass(v, Resource):
                raise ValueError('to field must map django models to tastypie resources')

        super(GenericForeignKeyField, self).__init__(to, attribute, **kwargs)

    def get_related_resource(self, related_instance):
        """Resolve and cache the resource class for this instance's model.

        Raises TypeError when the instance's model has no entry in ``to``.
        """
        self._to_class = self.to.get(type(related_instance), None)

        if self._to_class is None:
            raise TypeError('no resource for model %s' % type(related_instance))

        return super(GenericForeignKeyField, self).get_related_resource(related_instance)

    @property
    def to_class(self):
        # Use the concrete resource resolved by get_related_resource when
        # one is cached; otherwise fall back to a GenericResource over all
        # candidate resources (e.g. during deserialization, before the
        # target model is known).
        # NOTE(review): the argument order of issubclass() here reads as
        # "GenericResource is a subclass of _to_class", which looks
        # inverted — confirm against the intended semantics before relying
        # on this branch.
        if self._to_class and not issubclass(GenericResource, self._to_class):
            return self._to_class

        return partial(GenericResource, resources=self.to.values())

    def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
        """Hydrate a related object from its resource URI.

        Re-dispatches to the concrete resource for the object's model, and
        converts a missing object into ApiFieldError.
        """
        try:
            obj = fk_resource.get_via_uri(uri, request=request)
            fk_resource = self.get_related_resource(obj)
            return super(GenericForeignKeyField, self).resource_from_uri(
                fk_resource, uri, request, related_obj, related_name)
        except ObjectDoesNotExist:
            raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri)

    def build_related_resource(self, *args, **kwargs):
        # Clear the cached resolution so to_class falls back to the
        # GenericResource until the concrete model is known again.
        self._to_class = None
        return super(GenericForeignKeyField, self).build_related_resource(*args, **kwargs)
unknown
codeparrot/codeparrot-clean
#!python
"""Bootstrap setuptools installation

If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::

    from ez_setup import use_setuptools
    use_setuptools()

If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.

This file can also be run as a script to install or upgrade setuptools.

NOTE(review): this is a Python 2-only bootstrap script (``print >>``,
``except X, e`` syntax, urllib2); it will not run under Python 3.
"""
import sys
DEFAULT_VERSION = "0.6c9"
# Mirror path keyed on the running interpreter's major.minor version.
DEFAULT_URL     = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]

# Known-good MD5 digests per egg filename; updated via --md5update below.
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}

import sys, os
# hashlib.md5 on modern interpreters; the md5 module on very old ones.
try: from hashlib import md5
except ImportError: from md5 import md5

def _validate_md5(egg_name, data):
    """Exit with an error unless ``data`` matches the registered digest.

    Eggs not present in md5_data pass through unvalidated.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed!  (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data

def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where setuptools will be downloaded,
    if it is not already available.  If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required.  If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # If either module was already imported, we can't safely replace an
    # incompatible version in-process; report and abort instead.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Drop the stale modules so the downloaded egg wins on re-import.
            del pkg_resources, sys.modules['pkg_resources']    # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()

def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help).  I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.

(Note: if this machine does not have network access, please obtain the file

   %s

and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)

def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg])   # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        # setuptools 0.0.1 was an unrelated placeholder package; it must be
        # removed manually before this bootstrap can proceed.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed.  Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)

    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'

def update_md5(filenames):
    """Update our built-in md5 registry"""

    import re

    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()

    data = ["    %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)

    # Rewrite this script's own source, replacing the md5_data literal.
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()

    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)

    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()


if __name__=='__main__':
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
unknown
codeparrot/codeparrot-clean
/* * Copyright (C) 2008 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.collect.testing; import static com.google.common.collect.testing.DerivedCollectionGenerators.keySetGenerator; import static com.google.common.collect.testing.Helpers.copyToSet; import com.google.common.annotations.GwtIncompatible; import com.google.common.collect.testing.DerivedCollectionGenerators.MapEntrySetGenerator; import com.google.common.collect.testing.DerivedCollectionGenerators.MapValueCollectionGenerator; import com.google.common.collect.testing.features.CollectionFeature; import com.google.common.collect.testing.features.CollectionSize; import com.google.common.collect.testing.features.Feature; import com.google.common.collect.testing.features.MapFeature; import com.google.common.collect.testing.testers.MapClearTester; import com.google.common.collect.testing.testers.MapContainsKeyTester; import com.google.common.collect.testing.testers.MapContainsValueTester; import com.google.common.collect.testing.testers.MapCreationTester; import com.google.common.collect.testing.testers.MapEntrySetTester; import com.google.common.collect.testing.testers.MapEqualsTester; import com.google.common.collect.testing.testers.MapGetTester; import com.google.common.collect.testing.testers.MapHashCodeTester; import com.google.common.collect.testing.testers.MapIsEmptyTester; import com.google.common.collect.testing.testers.MapPutAllTester; import 
com.google.common.collect.testing.testers.MapPutTester; import com.google.common.collect.testing.testers.MapRemoveTester; import com.google.common.collect.testing.testers.MapSerializationTester; import com.google.common.collect.testing.testers.MapSizeTester; import com.google.common.collect.testing.testers.MapToStringTester; import com.google.common.testing.SerializableTester; import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import junit.framework.TestSuite; /** * Creates, based on your criteria, a JUnit test suite that exhaustively tests a Map implementation. * * @author George van den Driessche */ @GwtIncompatible public class MapTestSuiteBuilder<K, V> extends PerCollectionSizeTestSuiteBuilder< MapTestSuiteBuilder<K, V>, TestMapGenerator<K, V>, Map<K, V>, Entry<K, V>> { public static <K, V> MapTestSuiteBuilder<K, V> using(TestMapGenerator<K, V> generator) { return new MapTestSuiteBuilder<K, V>().usingGenerator(generator); } @SuppressWarnings("rawtypes") // class literals @Override protected List<Class<? extends AbstractTester>> getTesters() { return Arrays.asList( MapClearTester.class, MapContainsKeyTester.class, MapContainsValueTester.class, MapCreationTester.class, MapEntrySetTester.class, MapEqualsTester.class, MapGetTester.class, MapHashCodeTester.class, MapIsEmptyTester.class, MapPutTester.class, MapPutAllTester.class, MapRemoveTester.class, MapSerializationTester.class, MapSizeTester.class, MapToStringTester.class); } @Override protected List<TestSuite> createDerivedSuites( FeatureSpecificTestSuiteBuilder< ?, ? extends OneSizeTestContainerGenerator<Map<K, V>, Entry<K, V>>> parentBuilder) { // TODO: Once invariant support is added, supply invariants to each of the // derived suites, to check that mutations to the derived collections are // reflected in the underlying map. 
List<TestSuite> derivedSuites = super.createDerivedSuites(parentBuilder); if (parentBuilder.getFeatures().contains(CollectionFeature.SERIALIZABLE)) { derivedSuites.add( MapTestSuiteBuilder.using( new ReserializedMapGenerator<K, V>(parentBuilder.getSubjectGenerator())) .withFeatures(computeReserializedMapFeatures(parentBuilder.getFeatures())) .named(parentBuilder.getName() + " reserialized") .suppressing(parentBuilder.getSuppressedTests()) .withSetUp(parentBuilder.getSetUp()) .withTearDown(parentBuilder.getTearDown()) .createTestSuite()); } derivedSuites.add( createDerivedEntrySetSuite( new MapEntrySetGenerator<K, V>(parentBuilder.getSubjectGenerator())) .withFeatures(computeEntrySetFeatures(parentBuilder.getFeatures())) .named(parentBuilder.getName() + " entrySet") .suppressing(parentBuilder.getSuppressedTests()) .withSetUp(parentBuilder.getSetUp()) .withTearDown(parentBuilder.getTearDown()) .createTestSuite()); derivedSuites.add( createDerivedKeySetSuite(keySetGenerator(parentBuilder.getSubjectGenerator())) .withFeatures(computeKeySetFeatures(parentBuilder.getFeatures())) .named(parentBuilder.getName() + " keys") .suppressing(parentBuilder.getSuppressedTests()) .withSetUp(parentBuilder.getSetUp()) .withTearDown(parentBuilder.getTearDown()) .createTestSuite()); derivedSuites.add( createDerivedValueCollectionSuite( new MapValueCollectionGenerator<K, V>(parentBuilder.getSubjectGenerator())) .named(parentBuilder.getName() + " values") .withFeatures(computeValuesCollectionFeatures(parentBuilder.getFeatures())) .suppressing(parentBuilder.getSuppressedTests()) .withSetUp(parentBuilder.getSetUp()) .withTearDown(parentBuilder.getTearDown()) .createTestSuite()); return derivedSuites; } protected SetTestSuiteBuilder<Entry<K, V>> createDerivedEntrySetSuite( TestSetGenerator<Entry<K, V>> entrySetGenerator) { return SetTestSuiteBuilder.using(entrySetGenerator); } protected SetTestSuiteBuilder<K> createDerivedKeySetSuite(TestSetGenerator<K> keySetGenerator) { return 
SetTestSuiteBuilder.using(keySetGenerator); } protected CollectionTestSuiteBuilder<V> createDerivedValueCollectionSuite( TestCollectionGenerator<V> valueCollectionGenerator) { return CollectionTestSuiteBuilder.using(valueCollectionGenerator); } private static Set<Feature<?>> computeReserializedMapFeatures(Set<Feature<?>> mapFeatures) { Set<Feature<?>> derivedFeatures = copyToSet(mapFeatures); derivedFeatures.remove(CollectionFeature.SERIALIZABLE); derivedFeatures.remove(CollectionFeature.SERIALIZABLE_INCLUDING_VIEWS); return derivedFeatures; } private static Set<Feature<?>> computeEntrySetFeatures(Set<Feature<?>> mapFeatures) { Set<Feature<?>> entrySetFeatures = computeCommonDerivedCollectionFeatures(mapFeatures); if (mapFeatures.contains(MapFeature.ALLOWS_NULL_ENTRY_QUERIES)) { entrySetFeatures.add(CollectionFeature.ALLOWS_NULL_QUERIES); } return entrySetFeatures; } private static Set<Feature<?>> computeKeySetFeatures(Set<Feature<?>> mapFeatures) { Set<Feature<?>> keySetFeatures = computeCommonDerivedCollectionFeatures(mapFeatures); // TODO(lowasser): make this trigger only if the map is a submap // currently, the KeySetGenerator won't work properly for a subset of a keyset of a submap keySetFeatures.add(CollectionFeature.SUBSET_VIEW); if (mapFeatures.contains(MapFeature.ALLOWS_NULL_KEYS)) { keySetFeatures.add(CollectionFeature.ALLOWS_NULL_VALUES); } else if (mapFeatures.contains(MapFeature.ALLOWS_NULL_KEY_QUERIES)) { keySetFeatures.add(CollectionFeature.ALLOWS_NULL_QUERIES); } return keySetFeatures; } private static Set<Feature<?>> computeValuesCollectionFeatures(Set<Feature<?>> mapFeatures) { Set<Feature<?>> valuesCollectionFeatures = computeCommonDerivedCollectionFeatures(mapFeatures); if (mapFeatures.contains(MapFeature.ALLOWS_NULL_VALUE_QUERIES)) { valuesCollectionFeatures.add(CollectionFeature.ALLOWS_NULL_QUERIES); } if (mapFeatures.contains(MapFeature.ALLOWS_NULL_VALUES)) { valuesCollectionFeatures.add(CollectionFeature.ALLOWS_NULL_VALUES); } return 
valuesCollectionFeatures; } public static Set<Feature<?>> computeCommonDerivedCollectionFeatures( Set<Feature<?>> mapFeatures) { mapFeatures = new HashSet<>(mapFeatures); Set<Feature<?>> derivedFeatures = new HashSet<>(); mapFeatures.remove(CollectionFeature.SERIALIZABLE); if (mapFeatures.remove(CollectionFeature.SERIALIZABLE_INCLUDING_VIEWS)) { derivedFeatures.add(CollectionFeature.SERIALIZABLE); } if (mapFeatures.contains(MapFeature.SUPPORTS_REMOVE)) { derivedFeatures.add(CollectionFeature.SUPPORTS_REMOVE); } if (mapFeatures.contains(MapFeature.REJECTS_DUPLICATES_AT_CREATION)) { derivedFeatures.add(CollectionFeature.REJECTS_DUPLICATES_AT_CREATION); } if (mapFeatures.contains(MapFeature.FAILS_FAST_ON_CONCURRENT_MODIFICATION)) { derivedFeatures.add(CollectionFeature.FAILS_FAST_ON_CONCURRENT_MODIFICATION); } // add the intersection of CollectionFeature.values() and mapFeatures for (CollectionFeature feature : CollectionFeature.values()) { if (mapFeatures.contains(feature)) { derivedFeatures.add(feature); } } // add the intersection of CollectionSize.values() and mapFeatures for (CollectionSize size : CollectionSize.values()) { if (mapFeatures.contains(size)) { derivedFeatures.add(size); } } return derivedFeatures; } private static final class ReserializedMapGenerator<K, V> implements TestMapGenerator<K, V> { private final OneSizeTestContainerGenerator<Map<K, V>, Entry<K, V>> mapGenerator; ReserializedMapGenerator(OneSizeTestContainerGenerator<Map<K, V>, Entry<K, V>> mapGenerator) { this.mapGenerator = mapGenerator; } @Override public SampleElements<Entry<K, V>> samples() { return mapGenerator.samples(); } @Override public Entry<K, V>[] createArray(int length) { return mapGenerator.createArray(length); } @Override public Iterable<Entry<K, V>> order(List<Entry<K, V>> insertionOrder) { return mapGenerator.order(insertionOrder); } @Override public Map<K, V> create(Object... 
elements) { return SerializableTester.reserialize(mapGenerator.create(elements)); } @Override public K[] createKeyArray(int length) { return ((TestMapGenerator<K, V>) mapGenerator.getInnerGenerator()).createKeyArray(length); } @Override public V[] createValueArray(int length) { return ((TestMapGenerator<K, V>) mapGenerator.getInnerGenerator()).createValueArray(length); } } }
java
github
https://github.com/google/guava
android/guava-testlib/src/com/google/common/collect/testing/MapTestSuiteBuilder.java
"""RFC-822 message manipulation class. XXX This is only a very rough sketch of a full RFC-822 parser; in particular the tokenizing of addresses does not adhere to all the quoting rules. Directions for use: To create a Message object: first open a file, e.g.: fp = open(file, 'r') You can use any other legal way of getting an open file object, e.g. use sys.stdin or call os.popen(). Then pass the open file object to the Message() constructor: m = Message(fp) This class can work with any input object that supports a readline method. If the input object has seek and tell capability, the rewindbody method will work; also illegal lines will be pushed back onto the input stream. If the input object lacks seek but has an `unread' method that can push back a line of input, Message will use that to push back illegal lines. Thus this class can be used to parse messages coming from a buffered stream. The optional `seekable' argument is provided as a workaround for certain stdio libraries in which tell() discards buffered data before discovering that the lseek() system call doesn't work. For maximum portability, you should set the seekable argument to zero to prevent that initial \code{tell} when passing in an unseekable object such as a a file object created from a socket object. If it is 1 on entry -- which it is by default -- the tell() method of the open file object is called once; if this raises an exception, seekable is reset to 0. For other nonzero values of seekable, this test is not made. To get the text of a particular header there are several methods: str = m.getheader(name) str = m.getrawheader(name) where name is the name of the header, e.g. 'Subject'. The difference is that getheader() strips the leading and trailing whitespace, while getrawheader() doesn't. Both functions retain embedded whitespace (including newlines) exactly as they are specified in the header, and leave the case of the text unchanged. 
For addresses and address lists there are functions realname, mailaddress = m.getaddr(name) and list = m.getaddrlist(name) where the latter returns a list of (realname, mailaddr) tuples. There is also a method time = m.getdate(name) which parses a Date-like field and returns a time-compatible tuple, i.e. a tuple such as returned by time.localtime() or accepted by time.mktime(). See the class definition for lower level access methods. There are also some utility functions here. """ # Cleanup and extensions by Eric S. Raymond <esr@thyrsus.com> import time __all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"] _blanklines = ('\r\n', '\n') # Optimization for islast() class Message: """Represents a single RFC-822-compliant message.""" def __init__(self, fp, seekable = 1): """Initialize the class instance and read the headers.""" if seekable == 1: # Exercise tell() to make sure it works # (and then assume seek() works, too) try: fp.tell() except: seekable = 0 else: seekable = 1 self.fp = fp self.seekable = seekable self.startofheaders = None self.startofbody = None # if self.seekable: try: self.startofheaders = self.fp.tell() except IOError: self.seekable = 0 # self.readheaders() # if self.seekable: try: self.startofbody = self.fp.tell() except IOError: self.seekable = 0 def rewindbody(self): """Rewind the file to the start of the body (if seekable).""" if not self.seekable: raise IOError, "unseekable file" self.fp.seek(self.startofbody) def readheaders(self): """Read header lines. Read header lines up to the entirely blank line that terminates them. The (normally blank) line that ends the headers is skipped, but not included in the returned list. If a non-header line ends the headers, (which is an error), an attempt is made to backspace over it; it is never included in the returned list. The variable self.status is set to the empty string if all went well, otherwise it is an error message. 
The variable self.headers is a completely uninterpreted list of lines contained in the header (so printing them will reproduce the header exactly as it appears in the file). """ self.dict = {} self.unixfrom = '' self.headers = list = [] self.status = '' headerseen = "" firstline = 1 startofline = unread = tell = None if hasattr(self.fp, 'unread'): unread = self.fp.unread elif self.seekable: tell = self.fp.tell while 1: if tell: try: startofline = tell() except IOError: startofline = tell = None self.seekable = 0 line = self.fp.readline() if not line: self.status = 'EOF in headers' break # Skip unix From name time lines if firstline and line.startswith('From '): self.unixfrom = self.unixfrom + line continue firstline = 0 if headerseen and line[0] in ' \t': # It's a continuation line. list.append(line) x = (self.dict[headerseen] + "\n " + line.strip()) self.dict[headerseen] = x.strip() continue elif self.iscomment(line): # It's a comment. Ignore it. continue elif self.islast(line): # Note! No pushback here! The delimiter line gets eaten. break headerseen = self.isheader(line) if headerseen: # It's a legal header line, save it. list.append(line) self.dict[headerseen] = line[len(headerseen)+1:].strip() continue else: # It's not a header line; throw it back and stop here. if not self.dict: self.status = 'No headers' else: self.status = 'Non-header line where header expected' # Try to undo the read. if unread: unread(line) elif tell: self.fp.seek(startofline) else: self.status = self.status + '; bad seek' break def isheader(self, line): """Determine whether a given line is a legal header. This method should return the header name, suitably canonicalized. You may override this method in order to use Message parsing on tagged data in RFC822-like formats with special header formats. """ i = line.find(':') if i > 0: return line[:i].lower() else: return None def islast(self, line): """Determine whether a line is a legal end of RFC-822 headers. 
You may override this method if your application wants to bend the rules, e.g. to strip trailing whitespace, or to recognize MH template separators ('--------'). For convenience (e.g. for code reading from sockets) a line consisting of \r\n also matches. """ return line in _blanklines def iscomment(self, line): """Determine whether a line should be skipped entirely. You may override this method in order to use Message parsing on tagged data in RFC822-like formats that support embedded comments or free-text data. """ return None def getallmatchingheaders(self, name): """Find all header lines matching a given header name. Look through the list of headers and find all lines matching a given header name (and their continuation lines). A list of the lines is returned, without interpretation. If the header does not occur, an empty list is returned. If the header occurs multiple times, all occurrences are returned. Case is not important in the header name. """ name = name.lower() + ':' n = len(name) list = [] hit = 0 for line in self.headers: if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: list.append(line) return list def getfirstmatchingheader(self, name): """Get the first header line matching name. This is similar to getallmatchingheaders, but it returns only the first matching header (and its continuation lines). """ name = name.lower() + ':' n = len(name) list = [] hit = 0 for line in self.headers: if hit: if not line[:1].isspace(): break elif line[:n].lower() == name: hit = 1 if hit: list.append(line) return list def getrawheader(self, name): """A higher-level interface to getfirstmatchingheader(). Return a string containing the literal text of the header but with the keyword stripped. All leading, trailing and embedded whitespace is kept in the string, however. Return None if the header does not occur. 
""" list = self.getfirstmatchingheader(name) if not list: return None list[0] = list[0][len(name) + 1:] return ''.join(list) def getheader(self, name, default=None): """Get the header value for a name. This is the normal interface: it returns a stripped version of the header value for a given header name, or None if it doesn't exist. This uses the dictionary version which finds the *last* such header. """ try: return self.dict[name.lower()] except KeyError: return default get = getheader def getheaders(self, name): """Get all values for a header. This returns a list of values for headers given more than once; each value in the result list is stripped in the same way as the result of getheader(). If the header is not given, return an empty list. """ result = [] current = '' have_header = 0 for s in self.getallmatchingheaders(name): if s[0].isspace(): if current: current = "%s\n %s" % (current, s.strip()) else: current = s.strip() else: if have_header: result.append(current) current = s[s.find(":") + 1:].strip() have_header = 1 if have_header: result.append(current) return result def getaddr(self, name): """Get a single address from a header, as a tuple. An example return value: ('Guido van Rossum', 'guido@cwi.nl') """ # New, by Ben Escoto alist = self.getaddrlist(name) if alist: return alist[0] else: return (None, None) def getaddrlist(self, name): """Get a list of addresses from a header. Retrieves a list of addresses from a header, where each address is a tuple as returned by getaddr(). Scans all named headers, so it works properly with multiple To: or Cc: headers for example. """ raw = [] for h in self.getallmatchingheaders(name): if h[0] in ' \t': raw.append(h) else: if raw: raw.append(', ') i = h.find(':') if i > 0: addr = h[i+1:] raw.append(addr) alladdrs = ''.join(raw) a = AddrlistClass(alladdrs) return a.getaddrlist() def getdate(self, name): """Retrieve a date field from a header. 
Retrieves a date field from the named header, returning a tuple compatible with time.mktime(). """ try: data = self[name] except KeyError: return None return parsedate(data) def getdate_tz(self, name): """Retrieve a date field from a header as a 10-tuple. The first 9 elements make up a tuple compatible with time.mktime(), and the 10th is the offset of the poster's time zone from GMT/UTC. """ try: data = self[name] except KeyError: return None return parsedate_tz(data) # Access as a dictionary (only finds *last* header of each type): def __len__(self): """Get the number of headers in a message.""" return len(self.dict) def __getitem__(self, name): """Get a specific header, as from a dictionary.""" return self.dict[name.lower()] def __setitem__(self, name, value): """Set the value of a header. Note: This is not a perfect inversion of __getitem__, because any changed headers get stuck at the end of the raw-headers list rather than where the altered header was. """ del self[name] # Won't fail if it doesn't exist self.dict[name.lower()] = value text = name + ": " + value lines = text.split("\n") for line in lines: self.headers.append(line + "\n") def __delitem__(self, name): """Delete all occurrences of a specific header, if it is present.""" name = name.lower() if not self.dict.has_key(name): return del self.dict[name] name = name + ':' n = len(name) list = [] hit = 0 for i in range(len(self.headers)): line = self.headers[i] if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: list.append(i) list.reverse() for i in list: del self.headers[i] def has_key(self, name): """Determine whether a message contains the named header.""" return self.dict.has_key(name.lower()) def keys(self): """Get all of a message's header field names.""" return self.dict.keys() def values(self): """Get all of a message's header field values.""" return self.dict.values() def items(self): """Get all of a message's headers. Returns a list of name, value tuples. 
""" return self.dict.items() def __str__(self): str = '' for hdr in self.headers: str = str + hdr return str # Utility functions # ----------------- # XXX Should fix unquote() and quote() to be really conformant. # XXX The inverses of the parse functions may also be useful. def unquote(str): """Remove quotes from a string.""" if len(str) > 1: if str[0] == '"' and str[-1:] == '"': return str[1:-1] if str[0] == '<' and str[-1:] == '>': return str[1:-1] return str def quote(str): """Add quotes around a string.""" return str.replace('\\', '\\\\').replace('"', '\\"') def parseaddr(address): """Parse an address into a (realname, mailaddr) tuple.""" a = AddrlistClass(address) list = a.getaddrlist() if not list: return (None, None) else: return list[0] class AddrlistClass: """Address parser class by Ben Escoto. To understand what this class does, it helps to have a copy of RFC-822 in front of you. Note: this class interface is deprecated and may be removed in the future. Use rfc822.AddressList instead. """ def __init__(self, field): """Initialize a new instance. `field' is an unparsed address header field, containing one or more addresses. """ self.specials = '()<>@,:;.\"[]' self.pos = 0 self.LWS = ' \t' self.CR = '\r\n' self.atomends = self.specials + self.LWS + self.CR self.field = field self.commentlist = [] def gotonext(self): """Parse up to the start of the next address.""" while self.pos < len(self.field): if self.field[self.pos] in self.LWS + '\n\r': self.pos = self.pos + 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) else: break def getaddrlist(self): """Parse all addresses. Returns a list containing all of the addresses. 
""" ad = self.getaddress() if ad: return ad + self.getaddrlist() else: return [] def getaddress(self): """Parse the next address.""" self.commentlist = [] self.gotonext() oldpos = self.pos oldcl = self.commentlist plist = self.getphraselist() self.gotonext() returnlist = [] if self.pos >= len(self.field): # Bad email address technically, no domain. if plist: returnlist = [(' '.join(self.commentlist), plist[0])] elif self.field[self.pos] in '.@': # email address is just an addrspec # this isn't very efficient since we start over self.pos = oldpos self.commentlist = oldcl addrspec = self.getaddrspec() returnlist = [(' '.join(self.commentlist), addrspec)] elif self.field[self.pos] == ':': # address is a group returnlist = [] fieldlen = len(self.field) self.pos = self.pos + 1 while self.pos < len(self.field): self.gotonext() if self.pos < fieldlen and self.field[self.pos] == ';': self.pos = self.pos + 1 break returnlist = returnlist + self.getaddress() elif self.field[self.pos] == '<': # Address is a phrase then a route addr routeaddr = self.getrouteaddr() if self.commentlist: returnlist = [(' '.join(plist) + ' (' + \ ' '.join(self.commentlist) + ')', routeaddr)] else: returnlist = [(' '.join(plist), routeaddr)] else: if plist: returnlist = [(' '.join(self.commentlist), plist[0])] elif self.field[self.pos] in self.specials: self.pos = self.pos + 1 self.gotonext() if self.pos < len(self.field) and self.field[self.pos] == ',': self.pos = self.pos + 1 return returnlist def getrouteaddr(self): """Parse a route address (Return-path value). This method just skips all the route stuff and returns the addrspec. 
""" if self.field[self.pos] != '<': return expectroute = 0 self.pos = self.pos + 1 self.gotonext() adlist = None while self.pos < len(self.field): if expectroute: self.getdomain() expectroute = 0 elif self.field[self.pos] == '>': self.pos = self.pos + 1 break elif self.field[self.pos] == '@': self.pos = self.pos + 1 expectroute = 1 elif self.field[self.pos] == ':': self.pos = self.pos + 1 expectaddrspec = 1 else: adlist = self.getaddrspec() self.pos = self.pos + 1 break self.gotonext() return adlist def getaddrspec(self): """Parse an RFC-822 addr-spec.""" aslist = [] self.gotonext() while self.pos < len(self.field): if self.field[self.pos] == '.': aslist.append('.') self.pos = self.pos + 1 elif self.field[self.pos] == '"': aslist.append('"%s"' % self.getquote()) elif self.field[self.pos] in self.atomends: break else: aslist.append(self.getatom()) self.gotonext() if self.pos >= len(self.field) or self.field[self.pos] != '@': return ''.join(aslist) aslist.append('@') self.pos = self.pos + 1 self.gotonext() return ''.join(aslist) + self.getdomain() def getdomain(self): """Get the complete domain name from an address.""" sdlist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos = self.pos + 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] == '[': sdlist.append(self.getdomainliteral()) elif self.field[self.pos] == '.': self.pos = self.pos + 1 sdlist.append('.') elif self.field[self.pos] in self.atomends: break else: sdlist.append(self.getatom()) return ''.join(sdlist) def getdelimited(self, beginchar, endchars, allowcomments = 1): """Parse a header fragment delimited by special characters. `beginchar' is the start character for the fragment. If self is not looking at an instance of `beginchar' then getdelimited returns the empty string. `endchars' is a sequence of allowable end-delimiting characters. Parsing stops when one of these is encountered. 
If `allowcomments' is non-zero, embedded RFC-822 comments are allowed within the parsed fragment. """ if self.field[self.pos] != beginchar: return '' slist = [''] quote = 0 self.pos = self.pos + 1 while self.pos < len(self.field): if quote == 1: slist.append(self.field[self.pos]) quote = 0 elif self.field[self.pos] in endchars: self.pos = self.pos + 1 break elif allowcomments and self.field[self.pos] == '(': slist.append(self.getcomment()) elif self.field[self.pos] == '\\': quote = 1 else: slist.append(self.field[self.pos]) self.pos = self.pos + 1 return ''.join(slist) def getquote(self): """Get a quote-delimited fragment from self's field.""" return self.getdelimited('"', '"\r', 0) def getcomment(self): """Get a parenthesis-delimited fragment from self's field.""" return self.getdelimited('(', ')\r', 1) def getdomainliteral(self): """Parse an RFC-822 domain-literal.""" return '[%s]' % self.getdelimited('[', ']\r', 0) def getatom(self): """Parse an RFC-822 atom.""" atomlist = [''] while self.pos < len(self.field): if self.field[self.pos] in self.atomends: break else: atomlist.append(self.field[self.pos]) self.pos = self.pos + 1 return ''.join(atomlist) def getphraselist(self): """Parse a sequence of RFC-822 phrases. A phrase is a sequence of words, which are in turn either RFC-822 atoms or quoted-strings. Phrases are canonicalized by squeezing all runs of continuous whitespace into one space. 
""" plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos = self.pos + 1 elif self.field[self.pos] == '"': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.atomends: break else: plist.append(self.getatom()) return plist class AddressList(AddrlistClass): """An AddressList encapsulates a list of parsed RFC822 addresses.""" def __init__(self, field): AddrlistClass.__init__(self, field) if field: self.addresslist = self.getaddrlist() else: self.addresslist = [] def __len__(self): return len(self.addresslist) def __str__(self): return ", ".join(map(dump_address_pair, self.addresslist)) def __add__(self, other): # Set union newaddr = AddressList(None) newaddr.addresslist = self.addresslist[:] for x in other.addresslist: if not x in self.addresslist: newaddr.addresslist.append(x) return newaddr def __iadd__(self, other): # Set union, in-place for x in other.addresslist: if not x in self.addresslist: self.addresslist.append(x) return self def __sub__(self, other): # Set difference newaddr = AddressList(None) for x in self.addresslist: if not x in other.addresslist: newaddr.addresslist.append(x) return newaddr def __isub__(self, other): # Set difference, in-place for x in other.addresslist: if x in self.addresslist: self.addresslist.remove(x) return self def __getitem__(self, index): # Make indexing, slices, and 'in' work return self.addresslist[index] def dump_address_pair(pair): """Dump a (name, address) pair in a canonicalized form.""" if pair[0]: return '"' + pair[0] + '" <' + pair[1] + '>' else: return pair[1] # Parse a date field _monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december'] _daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] # The timezone table does not 
include the military time zones defined # in RFC822, other than Z. According to RFC1123, the description in # RFC822 gets the signs wrong, so we can't rely on any such time # zones. RFC1123 recommends that numeric timezone indicators be used # instead of timezone names. _timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, 'AST': -400, 'ADT': -300, # Atlantic (used in Canada) 'EST': -500, 'EDT': -400, # Eastern 'CST': -600, 'CDT': -500, # Central 'MST': -700, 'MDT': -600, # Mountain 'PST': -800, 'PDT': -700 # Pacific } def parsedate_tz(data): """Convert a date string to a time tuple. Accounts for military timezones. """ data = data.split() if data[0][-1] in (',', '.') or data[0].lower() in _daynames: # There's a dayname here. Skip it del data[0] if len(data) == 3: # RFC 850 date, deprecated stuff = data[0].split('-') if len(stuff) == 3: data = stuff + data[1:] if len(data) == 4: s = data[3] i = s.find('+') if i > 0: data[3:] = [s[:i], s[i+1:]] else: data.append('') # Dummy tz if len(data) < 5: return None data = data[:5] [dd, mm, yy, tm, tz] = data mm = mm.lower() if not mm in _monthnames: dd, mm = mm, dd.lower() if not mm in _monthnames: return None mm = _monthnames.index(mm)+1 if mm > 12: mm = mm - 12 if dd[-1] == ',': dd = dd[:-1] i = yy.find(':') if i > 0: yy, tm = tm, yy if yy[-1] == ',': yy = yy[:-1] if not yy[0].isdigit(): yy, tz = tz, yy if tm[-1] == ',': tm = tm[:-1] tm = tm.split(':') if len(tm) == 2: [thh, tmm] = tm tss = '0' elif len(tm) == 3: [thh, tmm, tss] = tm else: return None try: yy = int(yy) dd = int(dd) thh = int(thh) tmm = int(tmm) tss = int(tss) except ValueError: return None tzoffset = None tz = tz.upper() if _timezones.has_key(tz): tzoffset = _timezones[tz] else: try: tzoffset = int(tz) except ValueError: pass # Convert a timezone offset into seconds ; -0500 -> -18000 if tzoffset: if tzoffset < 0: tzsign = -1 tzoffset = -tzoffset else: tzsign = 1 tzoffset = tzsign * ( (tzoffset/100)*3600 + (tzoffset % 100)*60) tuple = (yy, mm, dd, thh, tmm, tss, 
0, 0, 0, tzoffset) return tuple def parsedate(data): """Convert a time string to a time tuple.""" t = parsedate_tz(data) if type(t) == type( () ): return t[:9] else: return t def mktime_tz(data): """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp.""" if data[9] is None: # No zone info, so localtime is better assumption than GMT return time.mktime(data[:8] + (-1,)) else: t = time.mktime(data[:8] + (0,)) return t - data[9] - time.timezone def formatdate(timeval=None): """Returns time format preferred for Internet standards. Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 """ if timeval is None: timeval = time.time() return "%s" % time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(timeval)) # When used as script, run a small test program. # The first command line argument must be a filename containing one # message in RFC-822 format. if __name__ == '__main__': import sys, os file = os.path.join(os.environ['HOME'], 'Mail/inbox/1') if sys.argv[1:]: file = sys.argv[1] f = open(file, 'r') m = Message(f) print 'From:', m.getaddr('from') print 'To:', m.getaddrlist('to') print 'Subject:', m.getheader('subject') print 'Date:', m.getheader('date') date = m.getdate_tz('date') tz = date[-1] date = time.localtime(mktime_tz(date)) if date: print 'ParsedDate:', time.asctime(date), hhmmss = tz hhmm, ss = divmod(hhmmss, 60) hh, mm = divmod(hhmm, 60) print "%+03d%02d" % (hh, mm), if ss: print ".%02d" % ss, print else: print 'ParsedDate:', None m.rewindbody() n = 0 while f.readline(): n = n + 1 print 'Lines:', n print '-'*70 print 'len =', len(m) if m.has_key('Date'): print 'Date =', m['Date'] if m.has_key('X-Nonsense'): pass print 'keys =', m.keys() print 'values =', m.values() print 'items =', m.items()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields
from openerp.osv import osv
from openerp.tools.translate import _


class account_analytic_line(osv.osv):
    """Extend analytic lines with accounting links (product, UoM, general
    account, journal, move line) and currency-related fields mirrored from
    the linked move line."""
    _inherit = 'account.analytic.line'
    _description = 'Analytic Line'
    _columns = {
        'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
        'product_id': fields.many2one('product.product', 'Product'),
        # 'restrict' prevents deleting an account/journal still referenced
        # by analytic lines.
        'general_account_id': fields.many2one('account.account', 'General Account', required=True, ondelete='restrict'),
        'move_id': fields.many2one('account.move.line', 'Move Line', ondelete='cascade', select=True),
        'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal', required=True, ondelete='restrict', select=True),
        'code': fields.char('Code', size=8),
        'ref': fields.char('Ref.', size=64),
        # Related (stored) fields mirrored from the linked move line.
        'currency_id': fields.related('move_id', 'currency_id', type='many2one', relation='res.currency', string='Account Currency', store=True, help="The related account currency if not equal to the company one.", readonly=True),
        'amount_currency': fields.related('move_id', 'amount_currency', type='float', string='Amount Currency', store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
    }
    _defaults = {
        # Default company: the user's default company for this model.
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
    }
    _order = 'date desc'

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Standard search, additionally constrained to the date range given
        by the optional ``from_date``/``to_date`` context keys (inclusive)."""
        if context is None:
            context = {}
        if context.get('from_date',False):
            args.append(['date', '>=', context['from_date']])
        if context.get('to_date',False):
            args.append(['date','<=', context['to_date']])
        return super(account_analytic_line, self).search(cr, uid, args, offset, limit, order, context=context, count=count)

    def _check_company(self, cr, uid, ids, context=None):
        """Constraint helper: return False when a line's analytic account and
        its move line's account belong to different companies."""
        lines = self.browse(cr, uid, ids, context=context)
        for l in lines:
            if l.move_id and not l.account_id.company_id.id == l.move_id.account_id.company_id.id:
                return False
        return True

    # Compute the cost based on the price type define into company
    # property_valuation_price_type property
    def on_change_unit_amount(self, cr, uid, id, prod_id, quantity, company_id, unit=False, journal_id=False, context=None):
        """Onchange for product/quantity: return the computed ``amount``
        (negative for costs, positive for sales), the ``general_account_id``
        (expense or income account of the product or its category) and the
        ``product_uom_id`` resolved from the journal type.

        :raises osv.except_osv: when no expense/income account is configured
            for the product or its category.
        """
        if context==None:
            context={}
        # Fall back to the first purchase analytic journal when none given.
        if not journal_id:
            j_ids = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=','purchase')])
            journal_id = j_ids and j_ids[0] or False
        if not journal_id or not prod_id:
            return {}
        product_obj = self.pool.get('product.product')
        analytic_journal_obj =self.pool.get('account.analytic.journal')
        product_price_type_obj = self.pool.get('product.price.type')
        j_id = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
        prod = product_obj.browse(cr, uid, prod_id, context=context)
        result = 0.0
        if prod_id:
            # Purchases use the purchase UoM; everything else the default UoM.
            unit = prod.uom_id.id
            if j_id.type == 'purchase':
                unit = prod.uom_po_id.id
        if j_id.type <> 'sale':
            # Cost side: product expense account, else its category's.
            a = prod.property_account_expense.id
            if not a:
                a = prod.categ_id.property_account_expense_categ.id
            if not a:
                raise osv.except_osv(_('Error!'),
                            _('There is no expense account defined ' \
                                    'for this product: "%s" (id:%d).') % \
                                    (prod.name, prod.id,))
        else:
            # Revenue side: product income account, else its category's.
            a = prod.property_account_income.id
            if not a:
                a = prod.categ_id.property_account_income_categ.id
            if not a:
                raise osv.except_osv(_('Error!'),
                            _('There is no income account defined ' \
                                    'for this product: "%s" (id:%d).') % \
                                    (prod.name, prod_id,))

        flag = False
        # Compute based on pricetype
        product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','standard_price')], context=context)
        pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
        if journal_id:
            # Sale journals value the line at list price instead of cost.
            journal = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
            if journal.type == 'sale':
                product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
                if product_price_type_ids:
                    pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
        # Take the company currency as the reference one
        if pricetype.field == 'list_price':
            flag = True
        ctx = context.copy()
        if unit:
            # price_get() will respect a 'uom' in its context, in order
            # to return a default price for those units
            ctx['uom'] = unit
        amount_unit = prod.price_get(pricetype.field, context=ctx)[prod.id]
        prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        amount = amount_unit * quantity or 0.0
        result = round(amount, prec)
        # Costs (anything priced at standard_price) are stored negative.
        if not flag:
            result *= -1
        return {'value': {
            'amount': result,
            'general_account_id': a,
            'product_uom_id': unit
            }
        }

    def view_header_get(self, cr, user, view_id, view_type, context=None):
        """Return an 'Entries: <account name>' view header when an
        ``account_id`` is present in the context, otherwise False."""
        if context is None:
            context = {}
        if context.get('account_id', False):
            # account_id in context may also be pointing to an account.account.id
            cr.execute('select name from account_analytic_account where id=%s', (context['account_id'],))
            res = cr.fetchone()
            if res:
                res = _('Entries: ')+ (res[0] or '')
            return res
        return False


class res_partner(osv.osv):
    """ Inherits partner and adds contract information in the partner form """
    _inherit = 'res.partner'

    _columns = {
        # Analytic accounts (contracts) owned by this partner, shown read-only.
        'contract_ids': fields.one2many('account.analytic.account', \
            'partner_id', 'Contracts', readonly=True),
    }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
import sys, os
from jsonrpc import ServiceHandler, ServiceException


class ServiceImplementaionNotFound(ServiceException):
    # NOTE(review): name is misspelled ("Implementaion") but is part of the
    # public API of this module, so it must stay as-is.
    pass


class ModPyServiceHandler(ServiceHandler):
    """JSON-RPC service handler bridging the jsonrpc package to mod_python.

    The service implementation is resolved lazily from the Python module
    that shares the requested file's name (request for ``/foo.py`` loads
    module ``foo``).
    """

    def __init__(self, req):
        self.req = req
        # The service object is bound later, in findServiceEndpoint.
        ServiceHandler.__init__(self, None)

    def findServiceEndpoint(self, name):
        """Load the service module for the current request, bind it as the
        service object, then delegate endpoint lookup to the base class."""
        request = self.req
        module_dir, script_name = os.path.split(request.filename)
        module_name = os.path.splitext(script_name)[0]

        # Guard: the implementation module must exist next to the request.
        if not os.path.exists(os.path.join(module_dir, module_name + ".py")):
            raise ServiceImplementaionNotFound()

        if not module_dir in sys.path:
            sys.path.insert(0, module_dir)

        # Import via mod_python so module reloads are handled for us.
        from mod_python import apache
        module = apache.import_module(module_name, log=1)

        # Accept, in order of preference: a ready-made `service` instance,
        # a `Service` class to instantiate, or the module itself.
        if hasattr(module, "service"):
            self.service = module.service
        elif hasattr(module, "Service"):
            self.service = module.Service()
        else:
            self.service = module

        return ServiceHandler.findServiceEndpoint(self, name)

    def handleRequest(self, data):
        """Read the JSON-RPC payload from the request body, dispatch it, and
        write the response back.

        The ``data`` argument is ignored by design: the real payload is
        always re-read from the mod_python request object.
        """
        self.req.content_type = "text/plain"
        data = self.req.read()
        response = ServiceHandler.handleRequest(self, data)
        self.req.write(response)
        self.req.flush()


def handler(req):
    """mod_python entry point: dispatch the request and report success."""
    from mod_python import apache
    ModPyServiceHandler(req).handleRequest(req)
    return apache.OK
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # # A MongoDB Nagios check script # # Script idea taken from a Tag1 script I found and I modified it a lot # # Main Author # - Mike Zupan <mike@zcentric.com> # Contributers # - Frank Brandewiede <brande@travel-iq.com> <brande@bfiw.de> <brande@novolab.de> # - Sam Perman <sam@brightcove.com> # - Shlomo Priymak <shlomoid@gmail.com> # - @jhoff909 on github # - @jbraeuer on github # - Dag Stockstad <dag.stockstad@gmail.com> # - @Andor on github # # License: BSD # # Copyright (c) 2012, Mike Zupan <mike@zcentric.com> # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # README: https://github.com/mzupan/nagios-plugin-mongodb/blob/master/LICENSE import sys import time import optparse import textwrap import re import os try: import pymongo except ImportError, e: print e sys.exit(2) # As of pymongo v 1.9 the SON API is part of the BSON package, therefore attempt # to import from there and fall back to pymongo in cases of older pymongo if pymongo.version >= "1.9": import bson.son as son else: import pymongo.son as son # # thanks to http://stackoverflow.com/a/1229667/72987 # def optional_arg(arg_default): def func(option,opt_str,value,parser): if parser.rargs and not parser.rargs[0].startswith('-'): val=parser.rargs[0] parser.rargs.pop(0) else: val=arg_default setattr(parser.values,option.dest,val) return func def performance_data(perf_data,params): data='' if perf_data: data= " |" for p in params: p+=(None,None,None,None) param,param_name,warning,critical=p[0:4]; data +=" %s=%s" % (param_name,str(param)) if warning or critical: warning=warning or 0 critical=critical or 0 data+=";%s;%s"%(warning,critical) return data def numeric_type(param): if ((type(param)==float or type(param)==int or param==None)): return True return False def check_levels(param, warning, critical,message,ok=[]): if (numeric_type(critical) and numeric_type(warning)): if param >= critical: print "CRITICAL - " + message sys.exit(2) elif param >= warning: print "WARNING - " + message sys.exit(1) else: print "OK - " + message sys.exit(0) else: if param in critical: print "CRITICAL - " + message sys.exit(2) if param in warning: print "WARNING - " + message sys.exit(1) if param in ok: print "OK - " + message sys.exit(0) # unexpected param value print "CRITICAL - Unexpected value : %d" % param + "; " + message return 2 def get_server_status(con): try: set_read_preference(con.admin) data = con.admin.command(pymongo.son_manipulator.SON([('serverStatus', 1)])) except: data = con.admin.command(son.SON([('serverStatus', 1)])) return data def main(argv): p = 
optparse.OptionParser(conflict_handler="resolve", description= "This Nagios plugin checks the health of mongodb.") p.add_option('-H', '--host', action='store', type='string', dest='host', default='127.0.0.1', help='The hostname you want to connect to') p.add_option('-P', '--port', action='store', type='int', dest='port', default=27017, help='The port mongodb is runnung on') p.add_option('-u', '--user', action='store', type='string', dest='user', default=None, help='The username you want to login as') p.add_option('-p', '--pass', action='store', type='string', dest='passwd', default=None, help='The password you want to use for that user') p.add_option('-W', '--warning', action='store', dest='warning', default=None, help='The warning threshold we want to set') p.add_option('-C', '--critical', action='store', dest='critical', default=None, help='The critical threshold we want to set') p.add_option('-A', '--action', action='store', type='choice', dest='action', default='connect', help='The action you want to take', choices=['connect', 'connections', 'replication_lag', 'replication_lag_percent', 'replset_state', 'memory', 'memory_mapped', 'lock', 'flushing', 'last_flush_time', 'index_miss_ratio', 'databases', 'collections', 'database_size','queues','oplog','journal_commits_in_wl', 'write_data_files','journaled','opcounters','current_lock','replica_primary','page_faults','asserts', 'queries_per_second', 'page_faults', 'chunks_balance', 'connect_primary', 'collection_state', 'row_count']) p.add_option('--max-lag',action='store_true',dest='max_lag',default=False,help='Get max replication lag (for replication_lag action only)') p.add_option('--mapped-memory',action='store_true',dest='mapped_memory',default=False,help='Get mapped memory instead of resident (if resident memory can not be read)') p.add_option('-D', '--perf-data', action='store_true', dest='perf_data', default=False, help='Enable output of Nagios performance data') p.add_option('-d', '--database', 
action='store', dest='database', default='admin', help='Specify the database to check') p.add_option('--all-databases', action='store_true', dest='all_databases', default=False, help='Check all databases (action database_size)') p.add_option('-s', '--ssl', dest='ssl', default=False, action='callback', callback=optional_arg(True), help='Connect using SSL') p.add_option('-r', '--replicaset', dest='replicaset', default=None, action='callback', callback=optional_arg(True), help='Connect to replicaset') p.add_option('-q', '--querytype', action='store', dest='query_type', default='query', help='The query type to check [query|insert|update|delete|getmore|command] from queries_per_second') p.add_option('-c', '--collection', action='store', dest='collection', default='admin', help='Specify the collection to check') p.add_option('-T', '--time', action='store', type='int', dest='sample_time', default=1, help='Time used to sample number of pages faults') options, arguments = p.parse_args() host = options.host port = options.port user = options.user passwd = options.passwd query_type = options.query_type collection = options.collection sample_time = options.sample_time if (options.action=='replset_state'): warning = str(options.warning or "") critical = str(options.critical or "") else: warning = float(options.warning or 0) critical = float(options.critical or 0) action = options.action perf_data = options.perf_data max_lag = options.max_lag database = options.database ssl = options.ssl replicaset=options.replicaset if action == 'replica_primary' and replicaset is None: return "replicaset must be passed in when using replica_primary check" elif not action == 'replica_primary' and replicaset: return "passing a replicaset while not checking replica_primary does not work" # # moving the login up here and passing in the connection # start = time.time() err,con=mongo_connect(host, port,ssl, user,passwd, replicaset) if err!=0: return err; conn_time = time.time() - start conn_time = 
round(conn_time, 0) if action == "connections": return check_connections(con, warning, critical, perf_data) elif action == "replication_lag": return check_rep_lag(con, host, warning, critical, False, perf_data,max_lag,user,passwd,ssl) elif action == "replication_lag_percent": return check_rep_lag(con, host, warning, critical, True, perf_data,max_lag,user,passwd,ssl) elif action == "replset_state": return check_replset_state(con,perf_data, warning , critical ) elif action == "memory": return check_memory(con, warning, critical, perf_data,options.mapped_memory) elif action == "memory_mapped": return check_memory_mapped(con, warning, critical, perf_data) elif action == "queues": return check_queues(con, warning, critical, perf_data) elif action == "lock": return check_lock(con, warning, critical, perf_data) elif action == "current_lock": return check_current_lock(con, host,warning, critical, perf_data) elif action == "flushing": return check_flushing(con, warning, critical, True, perf_data) elif action == "last_flush_time": return check_flushing(con, warning, critical, False, perf_data) elif action == "index_miss_ratio": index_miss_ratio(con, warning, critical, perf_data) elif action == "databases": return check_databases(con, warning, critical,perf_data) elif action == "collections": return check_collections(con, warning, critical,perf_data) elif action == "oplog": return check_oplog(con, warning, critical,perf_data) elif action == "journal_commits_in_wl": return check_journal_commits_in_wl(con, warning, critical,perf_data) elif action == "database_size": if options.all_databases: return check_all_databases_size(con,warning, critical, perf_data) else: return check_database_size(con, database, warning, critical, perf_data) elif action == "journaled": return check_journaled(con, warning, critical,perf_data) elif action == "write_data_files": return check_write_to_datafiles(con, warning, critical,perf_data) elif action == "opcounters": return check_opcounters(con,host, 
warning, critical,perf_data) elif action == "asserts": return check_asserts(con,host, warning, critical,perf_data) elif action == "replica_primary": return check_replica_primary(con,host, warning, critical,perf_data) elif action == "queries_per_second": return check_queries_per_second(con, query_type, warning, critical, perf_data) elif action == "page_faults": check_page_faults(con, sample_time, warning, critical, perf_data) elif action == "chunks_balance": chunks_balance(con, database, collection, warning, critical) elif action == "connect_primary": return check_connect_primary(con, warning, critical, perf_data) elif action == "collection_state": return check_collection_state(con, database, collection) elif action == "row_count": return check_row_count(con, database, collection, warning, critical, perf_data) else: return check_connect(host, port, warning, critical, perf_data, user, passwd, conn_time) def mongo_connect(host=None, port=None,ssl=False, user=None,passwd=None,replica=None): try: # ssl connection for pymongo > 2.1 if pymongo.version >= "2.1": if replica is None: con = pymongo.Connection(host, port, read_preference=pymongo.ReadPreference.SECONDARY, ssl=ssl, network_timeout=10) else: con = pymongo.Connection(host, port, read_preference=pymongo.ReadPreference.SECONDARY, ssl=ssl, replicaSet=replica, network_timeout=10) else: if replica is None: con = pymongo.Connection(host, port, slave_okay=True, network_timeout=10) else: con = pymongo.Connection(host, port, slave_okay=True, replicaSet=replica, network_timeout=10) if user and passwd: db = con["admin"] if not db.authenticate(user, passwd): sys.exit("Username/Password incorrect") except Exception, e: if isinstance(e,pymongo.errors.AutoReconnect) and str(e).find(" is an arbiter") != -1: # We got a pymongo AutoReconnect exception that tells us we connected to an Arbiter Server # This means: Arbiter is reachable and can answer requests/votes - this is all we need to know from an arbiter print "OK - State: 7 
(Arbiter)" sys.exit(0) return exit_with_general_critical(e),None return 0,con def exit_with_general_warning(e): if isinstance(e, SystemExit): return e else: print "WARNING - General MongoDB warning:", e return 1 def exit_with_general_critical(e): if isinstance(e, SystemExit): return e else: print "CRITICAL - General MongoDB Error:", e return 2 def set_read_preference(db): if pymongo.version >= "2.1": db.read_preference = pymongo.ReadPreference.SECONDARY def check_connect(host, port, warning, critical, perf_data, user, passwd, conn_time): warning = warning or 3 critical = critical or 6 message = "Connection took %i seconds" % conn_time message += performance_data(perf_data,[(conn_time,"connection_time",warning,critical)]) return check_levels(conn_time,warning,critical,message) def check_connections(con, warning, critical, perf_data): warning = warning or 80 critical = critical or 95 try: data=get_server_status(con) current = float(data['connections']['current']) available = float(data['connections']['available']) used_percent = int(float(current / (available + current)) * 100) message = "%i percent (%i of %i connections) used" % (used_percent, current, current + available) message += performance_data(perf_data,[(used_percent,"used_percent",warning, critical), (current,"current_connections"), (available,"available_connections")]) return check_levels(used_percent,warning,critical,message) except Exception, e: return exit_with_general_critical(e) def check_rep_lag(con, host, warning, critical, percent, perf_data,max_lag, user, passwd, ssl=False): if percent: warning = warning or 50 critical = critical or 75 else: warning = warning or 600 critical = critical or 3600 rs_status = {} slaveDelays = {} try: set_read_preference(con.admin) # Get replica set status try: rs_status = con.admin.command("replSetGetStatus") except pymongo.errors.OperationFailure,e : if e.code == None and str(e).find('failed: not running with --replSet"'): print "OK - Not running with replSet" return 
0 serverVersion = tuple(con.server_info()['version'].split('.')) if serverVersion >= tuple("2.0.0".split(".")): # # check for version greater then 2.0 # rs_conf = con.local.system.replset.find_one() for member in rs_conf['members']: if member.get('slaveDelay') is not None: slaveDelays[member['host']] = member.get('slaveDelay') else: slaveDelays[member['host']] = 0 # Find the primary and/or the current node primary_node = None host_node = None for member in rs_status["members"]: if member["stateStr"] == "PRIMARY": primary_node = member if member["name"].split(':')[0] == host: host_node = member # Check if we're in the middle of an election and don't have a primary if primary_node is None: print "WARNING - No primary defined. In an election?" return 1 # Check if we failed to find the current host # below should never happen if host_node is None: print "CRITICAL - Unable to find host '" + host + "' in replica set." return 2 # Is the specified host the primary? if host_node["stateStr"] == "PRIMARY": if max_lag==False: print "OK - This is the primary." 
return 0 else: #get the maximal replication lag data = "" maximal_lag = 0 for member in rs_status['members']: if not member['stateStr'] == "ARBITER": lastSlaveOpTime = member['optimeDate'] replicationLag = abs(primary_node["optimeDate"] - lastSlaveOpTime).seconds - slaveDelays[member['name']] data = data + member['name'] + " lag=%d;" % replicationLag maximal_lag = max(maximal_lag, replicationLag) if percent: err, con=mongo_connect(primary_node['name'].split(':')[0], int(primary_node['name'].split(':')[1]), ssl, user, passwd) if err!=0: return err primary_timediff=replication_get_time_diff(con) maximal_lag=int(float(maximal_lag)/float(primary_timediff)*100) message = "Maximal lag is "+str( maximal_lag) + " percents" message += performance_data(perf_data,[(maximal_lag,"replication_lag_percent",warning, critical)]) else: message = "Maximal lag is "+str( maximal_lag) + " seconds" message += performance_data(perf_data,[(maximal_lag,"replication_lag",warning, critical)]) return check_levels(maximal_lag,warning,critical,message) elif host_node["stateStr"] == "ARBITER": print "OK - This is an arbiter" return 0 # Find the difference in optime between current node and PRIMARY optime_lag = abs(primary_node["optimeDate"] - host_node["optimeDate"]) if host_node['name'] in slaveDelays: slave_delay = slaveDelays[host_node['name']] elif host_node['name'].endswith(':27017') and host_node['name'][:-len(":27017")] in slaveDelays: slave_delay = slaveDelays[host_node['name'][:-len(":27017")]] else: raise Exception("Unable to determine slave delay for {0}".format(host_node['name'])) try: #work starting from python2.7 lag = optime_lag.total_seconds() except: lag = float(optime_lag.seconds + optime_lag.days * 24 * 3600) if percent: err, con=mongo_connect(primary_node['name'].split(':')[0], int(primary_node['name'].split(':')[1]), ssl, user,passwd) if err!=0: return err primary_timediff=replication_get_time_diff(con) if primary_timediff!=0: lag=int(float(lag)/float(primary_timediff)*100) 
else: lag=0 message = "Lag is "+str(lag) + " percents" message += performance_data(perf_data,[(lag,"replication_lag_percent",warning, critical)]) else: message = "Lag is "+ str(lag) + " seconds" message += performance_data(perf_data,[(lag,"replication_lag",warning, critical)]) return check_levels(lag,warning+slaveDelays[host_node['name']],critical+slaveDelays[host_node['name']],message) else: # # less than 2.0 check # # Get replica set status rs_status = con.admin.command("replSetGetStatus") # Find the primary and/or the current node primary_node = None host_node = None for member in rs_status["members"]: if member["stateStr"] == "PRIMARY": primary_node = (member["name"], member["optimeDate"]) if member["name"].split(":")[0].startswith(host): host_node = member # Check if we're in the middle of an election and don't have a primary if primary_node is None: print "WARNING - No primary defined. In an election?" sys.exit(1) # Is the specified host the primary? if host_node["stateStr"] == "PRIMARY": print "OK - This is the primary." 
sys.exit(0) # Find the difference in optime between current node and PRIMARY optime_lag = abs(primary_node[1] - host_node["optimeDate"]) lag = optime_lag.seconds if percent: err, con=mongo_connect(primary_node['name'].split(':')[0], int(primary_node['name'].split(':')[1])) if err!=0: return err primary_timediff=replication_get_time_diff(con) lag=int(float(lag)/float(primary_timediff)*100) message = "Lag is "+str(lag) + " percents" message += performance_data(perf_data,[(lag,"replication_lag_percent",warning, critical)]) else: message = "Lag is "+ str(lag) + " seconds" message += performance_data(perf_data, [(lag, "replication_lag", warning, critical)]) return check_levels(lag, warning, critical, message) except Exception, e: return exit_with_general_critical(e) def check_memory(con, warning, critical, perf_data,mapped_memory): # # These thresholds are basically meaningless, and must be customized to your system's ram # warning = warning or 8 critical = critical or 16 try: data=get_server_status(con) if not data['mem']['supported'] and not mapped_memory: print "OK - Platform not supported for memory info" return 0 # # convert to gigs # message = "Memory Usage:" try: mem_resident =float(data['mem']['resident']) / 1024.0 message += " %.2fGB resident,"%( mem_resident) except: mem_resident = 0 message +=" resident unsupported," try: mem_virtual = float(data['mem']['virtual']) / 1024.0 message +=" %.2fGB virtual," % mem_virtual except: mem_virtual=0 message +=" virtual unsupported," try: mem_mapped = float(data['mem']['mapped']) / 1024.0 message +=" %.2fGB mapped," % mem_mapped except: mem_mapped = 0 message +=" mapped unsupported," try: mem_mapped_journal = float(data['mem']['mappedWithJournal']) / 1024.0 message +=" %.2fGB mappedWithJournal" % mem_mapped_journal except: mem_mapped_journal = 0 message +=performance_data(perf_data,[("%.2f" % mem_resident,"memory_usage",warning, critical), ("%.2f" % mem_mapped,"memory_mapped"),("%.2f" % 
mem_virtual,"memory_virtual"),("%.2f" %mem_mapped_journal,"mappedWithJournal")]) #added for unsupported systems like Solaris if mapped_memory and mem_resident==0: return check_levels(mem_mapped,warning,critical,message) else: return check_levels(mem_resident,warning,critical,message) except Exception, e: return exit_with_general_critical(e) def check_memory_mapped(con, warning, critical, perf_data): # # These thresholds are basically meaningless, and must be customized to your application # warning = warning or 8 critical = critical or 16 try: data=get_server_status(con) if not data['mem']['supported']: print "OK - Platform not supported for memory info" return 0 # # convert to gigs # message = "Memory Usage:" try: mem_mapped = float(data['mem']['mapped']) / 1024.0 message +=" %.2fGB mapped," % mem_mapped except: mem_mapped = -1 message +=" mapped unsupported," try: mem_mapped_journal = float(data['mem']['mappedWithJournal']) / 1024.0 message +=" %.2fGB mappedWithJournal" % mem_mapped_journal except: mem_mapped_journal = 0 message +=performance_data(perf_data,[("%.2f" % mem_mapped,"memory_mapped"),("%.2f" %mem_mapped_journal,"mappedWithJournal")]) if not mem_mapped==-1: return check_levels(mem_mapped,warning,critical,message) else: print "OK - Server does not provide mem.mapped info" return 0 except Exception, e: return exit_with_general_critical(e) def check_lock(con, warning, critical, perf_data): warning = warning or 10 critical = critical or 30 try: data=get_server_status(con) # # calculate percentage # lock_percentage = float(data['globalLock']['lockTime']) / float(data['globalLock']['totalTime']) * 100 message = "Lock Percentage: %.2f%%" % lock_percentage message+=performance_data(perf_data,[("%.2f" % lock_percentage,"lock_percentage",warning,critical)]) return check_levels(lock_percentage,warning,critical,message) except Exception, e: return exit_with_general_critical(e) def check_flushing(con, warning, critical, avg, perf_data): # # These thresholds mean 
it's taking 5 seconds to perform a background flush to issue a warning # and 10 seconds to issue a critical. # warning = warning or 5000 critical = critical or 15000 try: data=get_server_status(con) if avg: flush_time = float(data['backgroundFlushing']['average_ms']) stat_type = "Average" else: flush_time = float(data['backgroundFlushing']['last_ms']) stat_type = "Last" message = "%s Flush Time: %.2fms" % (stat_type, flush_time) message+=performance_data(perf_data,[("%.2fms" %flush_time,"%s_flush_time" % stat_type.lower(),warning,critical)]) return check_levels(flush_time,warning,critical,message) except Exception, e: return exit_with_general_critical(e) def index_miss_ratio(con, warning, critical, perf_data): warning = warning or 10 critical = critical or 30 try: data=get_server_status(con) try: miss_ratio = float(data['indexCounters']['btree']['missRatio']) except KeyError: not_supported_msg = "not supported on this platform" if data['indexCounters']['note'] == not_supported_msg: print "OK - MongoDB says: " + not_supported_msg return 0 else: print "WARNING - Can't get counter from MongoDB" return 1 message = "Miss Ratio: %.2f" % miss_ratio message+=performance_data(perf_data,[("%.2f" % miss_ratio,"index_miss_ratio" ,warning,critical)]) return check_levels(miss_ratio,warning,critical,message) except Exception, e: return exit_with_general_critical(e) def check_replset_state(con,perf_data,warning="",critical=""): try: warning = [int(x) for x in warning.split(",")] except : warning = [0,3,5] try: critical= [int(x) for x in critical.split(",") ] except : critical=[8,4,-1] ok = range(-1,8) #should include the range of all posiible values try: try: try: set_read_preference(con.admin) data = con.admin.command(pymongo.son_manipulator.SON([('replSetGetStatus', 1)])) except: data = con.admin.command(son.SON([('replSetGetStatus', 1)])) state = int(data['myState']) except pymongo.errors.OperationFailure,e : if e.code==None and str(e).find('failed: not running with 
--replSet"'):
            state=-1

        # Map the numeric replica-set member state to a human-readable
        # Nagios status message.
        if state == 8:
            message="State: %i (Down)" % state
        elif state == 4:
            message="State: %i (Fatal error)" % state
        elif state == 0:
            message="State: %i (Starting up, phase1)" % state
        elif state == 3:
            message="State: %i (Recovering)" % state
        elif state == 5:
            message="State: %i (Starting up, phase2)" % state
        elif state == 1:
            message="State: %i (Primary)" % state
        elif state == 2:
            message="State: %i (Secondary)" % state
        elif state == 7:
            message="State: %i (Arbiter)" % state
        elif state==-1:
            message="Not running with replSet"
        else:
            message="State: %i (Unknown state)" % state
        message+=performance_data(perf_data,[(state,"state")])
        return check_levels(state,warning,critical,message,ok)
    except Exception, e:
        return exit_with_general_critical(e)


def check_databases(con, warning, critical,perf_data=None):
    """Check the number of databases on the server against the thresholds."""
    try:
        try:
            # Prefer the pymongo son_manipulator SON; fall back to the
            # plain bson son module on older/newer pymongo layouts.
            set_read_preference(con.admin)
            data = con.admin.command(pymongo.son_manipulator.SON([('listDatabases', 1)]))
        except:
            data = con.admin.command(son.SON([('listDatabases', 1)]))

        count = len(data['databases'])
        message="Number of DBs: %.0f" % count
        message+=performance_data(perf_data,[(count,"databases",warning,critical,message)])
        return check_levels(count,warning,critical,message)
    except Exception, e:
        return exit_with_general_critical(e)


def check_collections(con, warning, critical,perf_data=None):
    """Check the total collection count across all databases."""
    try:
        try:
            set_read_preference(con.admin)
            data = con.admin.command(pymongo.son_manipulator.SON([('listDatabases', 1)]))
        except:
            data = con.admin.command(son.SON([('listDatabases', 1)]))

        count = 0
        for db in data['databases']:
            dbname = db['name']
            count += len(con[dbname].collection_names())

        message="Number of collections: %.0f" % count
        message+=performance_data(perf_data,[(count,"collections",warning,critical,message)])
        return check_levels(count,warning,critical,message)
    except Exception, e:
        return exit_with_general_critical(e)


def check_all_databases_size(con, warning, critical, perf_data):
    """Check the summed storageSize (MB) of every database on the server.

    Defaults: warning 100 MB, critical 1000 MB.
    """
    warning = warning or 100
    critical = critical or 1000
    try:
        set_read_preference(con.admin)
        all_dbs_data = con.admin.command(pymongo.son_manipulator.SON([('listDatabases', 1)]))
    except:
        all_dbs_data = con.admin.command(son.SON([('listDatabases', 1)]))

    total_storage_size=0
    message=""
    # Slot 0 is reserved for the aggregate entry and filled in after the loop.
    perf_data_param=[()]
    for db in all_dbs_data['databases']:
        database = db['name']
        data = con[database].command('dbstats')
        storage_size = round(data['storageSize'] / 1024 / 1024,1)
        message+="; Database %s size: %.0f MB"%(database,storage_size)
        perf_data_param.append((storage_size,database+"_database_size"))
        total_storage_size+=storage_size

    perf_data_param[0]=(total_storage_size,"total_size",warning,critical)
    message+=performance_data(perf_data,perf_data_param)
    message="Total size: %.0f MB" % total_storage_size + message
    return check_levels(total_storage_size,warning,critical,message)


def check_database_size(con, database, warning, critical, perf_data):
    """Check a single database's storageSize (MB); prints and returns a
    Nagios exit code directly (0/1/2) instead of going through check_levels.

    Defaults: warning 100 MB, critical 1000 MB.
    """
    warning = warning or 100
    critical = critical or 1000
    perfdata = ""
    try:
        set_read_preference(con.admin)
        data = con[database].command('dbstats')
        storage_size = data['storageSize'] / 1024 / 1024
        if perf_data:
            perfdata += " | database_size=%i;%i;%i" % (storage_size, warning, critical)
            #perfdata += " database=%s" %(database)

        if storage_size >= critical:
            print "CRITICAL - Database size: %.0f MB, Database: %s%s" % (storage_size, database, perfdata)
            return 2
        elif storage_size >= warning:
            print "WARNING - Database size: %.0f MB, Database: %s%s" % (storage_size, database, perfdata)
            return 1
        else:
            print "OK - Database size: %.0f MB, Database: %s%s" % (storage_size, database, perfdata)
            return 0
    except Exception, e:
        return exit_with_general_critical(e)


def check_queues(con, warning, critical, perf_data):
    """Check globalLock.currentQueue totals (threads queued for a lock)."""
    warning = warning or 10
    critical = critical or 30
    try:
        data=get_server_status(con)

        total_queues = float(data['globalLock']['currentQueue']['total'])
        readers_queues = float(data['globalLock']['currentQueue']['readers'])
        writers_queues = float(data['globalLock']['currentQueue']['writers'])
        message = "Current queue is : total = %d, readers = %d, writers = %d" % (total_queues, readers_queues, writers_queues)
        message+=performance_data(perf_data,[(total_queues, "total_queues",warning,critical),(readers_queues, "readers_queues"),(writers_queues,"writers_queues")])
        return check_levels(total_queues,warning,critical,message)
    except Exception, e:
        return exit_with_general_critical(e)


def check_queries_per_second(con, query_type, warning, critical, perf_data):
    """Check the rate of one opcounter type, computed against the counter
    value persisted in the server-side `nagios_check` collection on the
    previous run.  The first run stores a baseline and reports OK.
    """
    warning = warning or 250
    critical = critical or 500

    if query_type not in ['insert', 'query', 'update', 'delete', 'getmore', 'command']:
        return exit_with_general_critical("The query type of '%s' is not valid" % query_type)

    try:
        db = con.local
        data = get_server_status(con)

        # grab the count
        num = int(data['opcounters'][query_type])

        # do the math
        last_count = db.nagios_check.find_one({'check': 'query_counts'})
        try:
            ts = int(time.time())
            diff_query = num - last_count['data'][query_type]['count']
            diff_ts = ts - last_count['data'][query_type]['ts']

            query_per_sec = float(diff_query) / float(diff_ts)

            # update the count now
            db.nagios_check.update(last_count, {'$set': {"data.%s" % query_type : {'count': num, 'ts': int(time.time())}}})

            message = "Queries / Sec: %f" % query_per_sec
            message += performance_data(perf_data,[(query_per_sec,"%s_per_sec" % query_type,warning,critical,message)])
        except KeyError:
            # KeyError: the stored document exists but has no entry for
            # this query_type yet -- seed it.
            query_per_sec = 0
            message = "First run of check.. no data"
            db.nagios_check.update(last_count, {'$set': {"data.%s" % query_type : {'count': num, 'ts': int(time.time())}}})
        except TypeError:
            # TypeError: find_one returned None (no document at all) --
            # insert the baseline document.
            query_per_sec = 0
            message = "First run of check.. no data"
            db.nagios_check.insert({'check': 'query_counts', 'data': {query_type: {'count': num, 'ts': int(time.time())}}})

        return check_levels(query_per_sec,warning,critical,message)
    except Exception, e:
        return exit_with_general_critical(e)


def check_oplog(con, warning, critical, perf_data):
    """ Checking the oplog time - the time of the log currently saved in the oplog collection
    defaults:
            critical 4 hours
            warning 24 hours
    those can be changed as usual with -C and -W parameters"""
    warning = warning or 24
    critical = critical or 4
    try:
        db = con.local
        # Detect replication mode: replica set uses oplog.rs, legacy
        # master/slave uses oplog.$main; neither present -> not replicating.
        ol=db.system.namespaces.find_one({"name":"local.oplog.rs"})
        if (db.system.namespaces.find_one({"name":"local.oplog.rs"}) != None) :
            oplog = "oplog.rs";
        else :
            ol=db.system.namespaces.find_one({"name":"local.oplog.$main"})
            if (db.system.namespaces.find_one({"name":"local.oplog.$main"}) != None) :
                oplog = "oplog.$main";
            else :
                message = "neither master/slave nor replica set replication detected";
                return check_levels(None,warning,critical,message)

        try:
            set_read_preference(con.admin)
            data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))
        except:
            data = con.admin.command(son.SON([('collstats',oplog)]))

        ol_size=data['size']
        ol_storage_size=data['storageSize']
        ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)
        ol=con.local[oplog]
        # Oldest and newest entry timestamps bound the time window the
        # oplog currently covers.
        firstc = ol.find().sort("$natural",pymongo.ASCENDING).limit(1)[0]['ts']
        lastc = ol.find().sort("$natural",pymongo.DESCENDING).limit(1)[0]['ts']
        time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())
        message="Oplog saves "+ str(time_in_oplog) + " %d%% used" %ol_used_storage
        try:  #work starting from python2.7
            hours_in_oplog= time_in_oplog.total_seconds()/60/60
        except:
            hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60
        approx_level=hours_in_oplog*100/ol_used_storage
        message+=performance_data(perf_data,[("%.2f" % hours_in_oplog,'oplog_time',warning,critical),("%.2f " % approx_level, 'oplog_time_100_percent_used')])
        # Negated: a SMALLER projected window is worse, so invert the
        # comparison performed by check_levels.
        return check_levels(-approx_level,-warning,-critical,message)
    except Exception, e:
        return exit_with_general_critical(e)


def check_journal_commits_in_wl(con, warning, critical,perf_data):
    """  Checking the number of commits which occurred in the db's write lock.
Most commits are performed outside of this lock; committed while in the write lock is undesirable.
Under very high write situations it is normal for this value to be nonzero.  """
    warning = warning or 10
    critical = critical or 40
    try:
        data=get_server_status(con)
        j_commits_in_wl = data['dur']['commitsInWriteLock']
        message="Journal commits in DB write lock : %d" % j_commits_in_wl
        message+=performance_data(perf_data,[(j_commits_in_wl,"j_commits_in_wl",warning, critical)])
        return check_levels(j_commits_in_wl,warning, critical, message)
    except Exception, e:
        return exit_with_general_critical(e)


def check_journaled(con, warning, critical,perf_data):
    """ Checking the average amount of data in megabytes written to the recovery log in the last four seconds"""
    warning = warning or 20
    critical = critical or 40
    try:
        data=get_server_status(con)
        journaled = data['dur']['journaledMB']
        message="Journaled : %.2f MB" % journaled
        message+=performance_data(perf_data,[("%.2f"%journaled,"journaled",warning, critical)])
        return check_levels(journaled,warning, critical, message)
    except Exception, e:
        return exit_with_general_critical(e)


def check_write_to_datafiles(con, warning, critical,perf_data):
    """    Checking the average amount of data in megabytes written to the databases datafiles in the last four seconds.
As these writes are already journaled, they can occur lazily, and thus the number indicated here may be lower
than the amount physically written to disk."""
    warning = warning or 20
    critical = critical or 40
    try:
        data=get_server_status(con)
        writes = data['dur']['writeToDataFilesMB']
        message="Write to data files : %.2f MB" % writes
        message+=performance_data(perf_data,[("%.2f" % writes,"write_to_data_files",warning, critical)])
        return check_levels(writes,warning, critical, message)
    except Exception, e:
        return exit_with_general_critical(e)


def get_opcounters(data,opcounters_name,host):
    """Extract the six opcounter values from serverStatus output and feed
    them (plus their sum) through maintain_delta.

    Returns (err, values) like maintain_delta does.
    NOTE(review): on KeyError this returns err=0 with a list of zeros,
    which callers treat as success -- confirm that is intentional.
    """
    try :
        insert=data[opcounters_name]['insert']
        query=data[opcounters_name]['query']
        update=data[opcounters_name]['update']
        delete=data[opcounters_name]['delete']
        getmore=data[opcounters_name]['getmore']
        command=data[opcounters_name]['command']
    except KeyError,e:
        return 0, [0]*100
    total_commands=insert+query+update+delete+getmore+command
    new_vals= [total_commands,insert,query,update,delete,getmore,command]
    return  maintain_delta(new_vals, host,opcounters_name)


def check_opcounters(con, host, warning, critical,perf_data):
    """ A function to get all opcounters delta per minute. In case of a replication - gets the opcounters+opcountersRepl"""
    warning=warning or 10000
    critical=critical or 15000

    data=get_server_status(con)
    err1,delta_opcounters=get_opcounters(data,'opcounters',host)
    err2,delta_opcounters_repl=get_opcounters(data,'opcountersRepl',host)
    if err1==0 and err2==0:
        delta=[(x+y) for x,y in zip(delta_opcounters ,delta_opcounters_repl) ]
        delta[0]=delta_opcounters[0]#only the time delta shouldn't be summarized
        per_minute_delta=[int(x/delta[0]*60) for x in delta[1:]]
        # NOTE(review): dead assignment -- this message is overwritten on
        # the very next line.
        message="Test succeeded , old values missing"
        message= "Opcounters: total=%d,insert=%d,query=%d,update=%d,delete=%d,getmore=%d,command=%d" % tuple(per_minute_delta)
        # NOTE(review): per_minute_delta[5] is emitted for BOTH "delete"
        # and "getmore"; "delete" presumably should be index 4 -- confirm.
        message+=performance_data(perf_data,([(per_minute_delta[0],"total",warning,critical),(per_minute_delta[1],"insert"),
            (per_minute_delta[2],"query"), (per_minute_delta[3],"update"),(per_minute_delta[5],"delete"),
            (per_minute_delta[5],"getmore"),(per_minute_delta[6],"command")]))
        return check_levels(per_minute_delta[0],warning,critical,message)
    else :
        return exit_with_general_critical("problem reading data from temp file")


def check_current_lock(con, host, warning, critical,perf_data):
    """ A function to get current lock percentage and not a global one, as check_lock function does"""
    warning = warning or 10
    critical = critical or 30
    data=get_server_status(con)

    lockTime=float(data['globalLock']['lockTime'])
    totalTime=float(data['globalLock']['totalTime'])

    err,delta=maintain_delta([totalTime,lockTime],host,"locktime")
    if err==0:
        # delta layout: [time_delta, totalTime_delta, lockTime_delta]
        lock_percentage = delta[2]/delta[1]*100     #lockTime/totalTime*100
        message = "Current Lock Percentage: %.2f%%" % lock_percentage
        message+=performance_data(perf_data,[("%.2f" % lock_percentage,"current_lock_percentage",warning,critical)])
        return check_levels(lock_percentage,warning,critical,message)
    else :
        return exit_with_general_warning("problem reading data from temp file")


def check_page_faults(con, host, warning, critical,perf_data):
    """ A function to get page_faults per second from the system"""
    warning = warning or 10
    critical = critical or 30
    data=get_server_status(con)

    try:
        page_faults=float(data['extra_info']['page_faults'])
    except:
        # page_faults unsupported on the underlying system
        return exit_with_general_critical("page_faults unsupported on the underlaying system")

    err,delta=maintain_delta([page_faults],host,"page_faults")
    if err==0:
        # delta layout: [time_delta, page_faults_delta]
        page_faults_ps=delta[1]/delta[0]
        message = "Page faults : %.2f ps" % page_faults_ps
        message+=performance_data(perf_data,[("%.2f" %page_faults_ps,"page_faults_ps",warning,critical)])
        return check_levels(page_faults_ps,warning,critical,message)
    else:
        return exit_with_general_warning("problem reading data from temp file")


def check_asserts(con, host, warning, critical,perf_data):
    """ A function to get asserts from the system"""
    warning = warning or 1
    critical = critical or 10
    data=get_server_status(con)

    asserts=data['asserts']
    #{ "regular" : 0, "warning" : 6, "msg" : 0, "user" : 12, "rollovers" : 0 }
    regular=asserts['regular']
    warning_asserts=asserts['warning']
    msg=asserts['msg']
    user=asserts['user']
    rollovers=asserts['rollovers']

    err,delta=maintain_delta([regular,warning_asserts,msg,user,rollovers],host,"asserts")

    if err==0:
        if delta[5]!=0:
            #the number of rollovers were increased
            warning=-1 # no matter the metrics this situation should raise a warning
            # if this is normal rollover - the warning will not appear again, but if there will be a lot of asserts
            # the warning will stay for a long period of time
            # although this is not a usual situation

        # delta layout: [time, regular, warning, msg, user, rollovers]
        regular_ps=delta[1]/delta[0]
        warning_ps=delta[2]/delta[0]
        msg_ps=delta[3]/delta[0]
        user_ps=delta[4]/delta[0]
        rollovers_ps=delta[5]/delta[0]
        total_ps=regular_ps+warning_ps+msg_ps+user_ps
        message = "Total asserts : %.2f ps" % total_ps
        message+=performance_data(perf_data,[(total_ps,"asserts_ps",warning,critical),(regular_ps,"regular"),
            (warning_ps,"warning"),(msg_ps,"msg"),(user_ps,"user")])
        return check_levels(total_ps,warning,critical,message)
    else:
        return exit_with_general_warning("problem reading data from temp file")


def get_stored_primary_server_name(db):
    """ get the stored primary server name from db. """
    if "last_primary_server" in db.collection_names():
        stored_primary_server = db.last_primary_server.find_one()["server"]
    else:
        stored_primary_server = None

    return stored_primary_server


def check_replica_primary(con,host, warning, critical,perf_data):
    """ A function to check if the primary server of a replica set has changed """
    if warning is None and critical is None:
        warning=1
    warning=warning or 2
    critical=critical or 2

    primary_status=0
    message="Primary server has not changed"
    db=con["nagios"]
    data=get_server_status(con)
    current_primary=data['repl'].get('primary')
    saved_primary=get_stored_primary_server_name(db)
    if current_primary is None:
        current_primary = "None"
    if saved_primary is None:
        saved_primary = "None"
    if current_primary != saved_primary:
        # Primary changed: persist the new one and flag the change.
        last_primary_server_record = {"server": current_primary}
        db.last_primary_server.update({"_id": "last_primary"}, {"$set" : last_primary_server_record} , upsert=True, safe=True)
        message = "Primary server has changed from %s to %s" % (saved_primary, current_primary)
        primary_status=1
    return check_levels(primary_status,warning,critical,message)


# NOTE(review): this second `check_page_faults` definition SHADOWS the
# host-based one defined above (same module-level name, different
# signature: sample_time instead of host) -- confirm which one callers
# expect; only this one is reachable by name at runtime.
def check_page_faults(con, sample_time, warning, critical, perf_data):
    """Sample extra_info.page_faults twice, sample_time seconds apart, and
    report the per-second fault rate (Linux-only counter)."""
    warning = warning or 10
    critical = critical or 20
    try:
        try:
            set_read_preference(con.admin)
            data1 = con.admin.command(pymongo.son_manipulator.SON([('serverStatus', 1)]))
            time.sleep(sample_time)
            data2 = con.admin.command(pymongo.son_manipulator.SON([('serverStatus', 1)]))
        except:
            data1 = con.admin.command(son.SON([('serverStatus', 1)]))
            time.sleep(sample_time)
            data2 = con.admin.command(son.SON([('serverStatus', 1)]))

        try:
            #on linux servers only
            page_faults = (int(data2['extra_info']['page_faults']) - int(data1['extra_info']['page_faults']))/sample_time
        except KeyError:
            print "WARNING - Can't get extra_info.page_faults counter from MongoDB"
            sys.exit(1)

        message = "Page Faults: %i" % (page_faults)

        message+=performance_data(perf_data,[(page_faults, "page_faults",warning,critical)])
        check_levels(page_faults, warning, critical, message)
    except Exception, e:
        exit_with_general_critical(e)


def chunks_balance(con, database, collection, warning, critical):
    """Check that a sharded namespace's chunks are evenly distributed:
    warn/critical when any shard deviates from the per-shard average by
    more than warning%/critical% of that average.  Exits directly via
    sys.exit rather than returning a status.
    """
    warning = warning or 10
    critical = critical or 20
    nsfilter = database+"."+collection
    try:
        try:
            set_read_preference(con.admin)
            col = con.config.chunks
            nscount = col.find({"ns":nsfilter}).count()
            shards = col.distinct("shard")
        except:
            print "WARNING - Can't get chunks infos from MongoDB"
            sys.exit(1)

        if nscount == 0 :
            print "WARNING - Namespace %s is not sharded" % (nsfilter)
            sys.exit(1)

        avgchunksnb = nscount/len(shards)
        warningnb = avgchunksnb * warning / 100
        criticalnb = avgchunksnb * critical / 100

        for shard in shards:
            delta = abs(avgchunksnb - col.find({"ns":nsfilter,"shard":shard}).count())
            message = "Namespace: %s, Shard name: %s, Chunk delta: %i" % (nsfilter,shard,delta)

            if delta >= criticalnb and delta > 0 :
                print "CRITICAL - Chunks not well balanced " + message
                sys.exit(2)
            elif delta >= warningnb  and delta > 0 :
                print "WARNING - Chunks not well balanced " + message
                sys.exit(1)

        print "OK - Chunks well balanced across shards"
        sys.exit(0)
    except Exception, e:
        exit_with_general_critical(e)

    # NOTE(review): this trailing OK/exit duplicates the one inside the
    # try and is reached only if exit_with_general_critical returns
    # without exiting -- confirm whether it should report OK in that case.
    print "OK - Chunks well balanced across shards"
    sys.exit(0)


def check_connect_primary(con, warning, critical, perf_data):
    """If this server is a secondary, time a connection to the current
    primary and check that connect time against the thresholds."""
    warning = warning or 3
    critical = critical or 6

    try:
        try:
            set_read_preference(con.admin)
            data = con.admin.command(pymongo.son_manipulator.SON([('isMaster', 1)]))
        except:
            data = con.admin.command(son.SON([('isMaster', 1)]))

        if data['ismaster'] == True :
            print "OK - This server is primary"
            return 0

        phost = data['primary'].split(':')[0]
        pport = int(data['primary'].split(':')[1])
        start = time.time()
        err,con=mongo_connect(phost, pport)
        if err!=0:
            return err
        pconn_time = time.time() - start
        pconn_time = round(pconn_time, 0)

        message = "Connection to primary server "+data['primary']+" took %i seconds" % pconn_time
        message += performance_data(perf_data,[(pconn_time,"connection_time",warning,critical)])
        return check_levels(pconn_time,warning,critical,message)
    except Exception, e:
        return exit_with_general_critical(e)


def check_collection_state(con, database, collection):
    """Check a collection is readable by issuing a find_one against it."""
    try:
        con[database][collection].find_one()
        print "OK - Collection %s.%s is reachable " % (database, collection)
        return 0
    except Exception, e:
        return exit_with_general_critical(e)


def check_row_count(con, database, collection, warning, critical, perf_data):
    """Check a collection's document count against the thresholds."""
    try:
        count = con[database][collection].count()
        message = "Row count: %i" % (count)
        message += performance_data(perf_data,[(count,"row_count",warning,critical)])
        return check_levels(count,warning,critical,message)
    except Exception, e:
        return exit_with_general_critical(e)


def build_file_name(host, action):
    """Build the /tmp state-file path used to persist counters between runs."""
    #done this way so it will work when run independently and from shell
    module_name=re.match('(.*//*)*(.*)\..*',__file__).group(2)
    return "/tmp/"+module_name+"_data/"+host+"-"+action+".data"


def ensure_dir(f):
    """Create the parent directory of path f if it does not exist."""
    d = os.path.dirname(f)
    if not os.path.exists(d):
        os.makedirs(d)


def write_values(file_name,string):
    """Write string to file_name, creating the parent directory on demand.

    Returns 0 on success; re-raises non-ENOENT IOErrors.
    """
    f=None
    try:
        f = open(file_name, 'w')
    except IOError,e:
        #try creating
        if (e.errno==2):
            ensure_dir(file_name)
            f = open(file_name, 'w')
        else :
            raise IOError(e);
    f.write(string)
    f.close()
    return 0


def read_values(file_name):
    """Read the previous run's state file.

    Returns (0, data) on success, (1, '') when no file exists yet,
    (2, None) on any other failure.
    """
    data=None
    try:
        f = open(file_name, 'r')
        data= f.read()
        f.close()
        return 0,data
    except IOError,e:
        if (e.errno==2):
            #no previous data
            return 1,''
    except Exception, e:
        return 2,None


def calc_delta(old,new):
    """Element-wise new-minus-old difference of two equal-length lists.

    A negative difference (counter reset) is replaced by the new raw
    value.  Returns (0, deltas); raises on length mismatch.
    """
    delta=[]
    if (len(old)!=len(new)):
        raise Exception("unequal number of parameters")
    for i in range(0,len(old)):
        val=float(new[i])-float(old[i])
        if val<0:
            val=new[i]
        delta.append(val)
    return 0, delta


def maintain_delta(new_vals,host,action):
    """Compute deltas against the values stored on the previous run and
    persist the current values (timestamp-prefixed) for the next run.

    Returns (err, delta) where err is nonzero when no usable previous
    data existed or the state file could not be written.
    """
    file_name=build_file_name(host,action)
    err,data=read_values(file_name)
    old_vals=data.split(';')
    # Prepend the wall-clock timestamp so delta[0] is the elapsed time.
    new_vals=[str(int(time.time()))]+new_vals
    delta=None
    try:
        err,delta= calc_delta(old_vals,new_vals)
    except:
        err=2
    write_res=write_values(file_name,";".join(str(x) for x in new_vals))
    return err+write_res,delta


def replication_get_time_diff(con):
    """Return the time span (seconds) covered by the local oplog, using
    oplog.$main for master/slave setups and oplog.rs otherwise."""
    col='oplog.rs'
    local=con.local
    ol=local.system.namespaces.find_one({"name": "local.oplog.$main"})
    if ol:
        col='oplog.$main'
    firstc=local[col].find().sort("$natural",1).limit(1)
    lastc=local[col].find().sort("$natural",-1).limit(1)
    first=firstc.next()
    last=lastc.next()
    tfirst=first["ts"]
    tlast=last["ts"]
    delta=tlast.time-tfirst.time
    return delta

#
# main app
#
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
unknown
codeparrot/codeparrot-clean
# fdmb EM-fit regression test.
#
# Loads a pre-generated two-dimensional AR(2) observation series (see
# ar2Data.README), runs fdmb.emfit on it, and checks the estimated
# parameters against known-good reference values to 8 decimal places.
import itertools

import numpy as np

import fdmb

# Header
print('fdmb test for the EM fit implementation')
print('Documentation on the data can be found in ar2Data.README', end='\n\n')

# Model/fit parameters handed to fdmb.emfit (semantics per the fdmb API).
obs = np.loadtxt('ar2DataObs.dat')
nData = 5000
dim = 2
order = 2
aThresh = 1e-8
pThresh = 1e-10
maxIter = np.int32(1e6)
estError = False

# Expected result
targetA = np.array([[[1.30550248, 0.28797118],
                     [-0.02384243, 1.71119078]],
                    [[-8.02262977e-01, 5.75953420e-03],
                     [-4.26190044e-04, -7.90536237e-01]]])
targetQ = np.array([[1.09429302, -0.00408784],
                    [-0.00408784, 0.88341843]])
targetR = np.array([[10.8341601, -0.11942269],
                    [-0.11942269, 11.0743018]])

# Fit and results
print('Start fit')
arCoeff = fdmb.emfit(obs, nData, dim, order,
                     aThresh, maxIter, pThresh,
                     estError)
estA = np.asanyarray(arCoeff[0])
estQ = arCoeff[1]
estR = arCoeff[2]


def _matches(target, est):
    """True when *target* and *est* agree after rounding to 8 decimals.

    BUG FIX: the original compared the *signed* difference
    ``(target - est) < eps``, which accepted any over-estimate
    (negative difference) no matter how large.  Comparing the rounded
    arrays for exact equality implements the intended tolerance check.
    """
    return np.array_equal(np.around(target, 8), np.around(est, 8))


# Compare expectation to actual result
passA = _matches(targetA, estA)
passQ = _matches(targetQ, estQ)
passR = _matches(targetR, estR)

labels = ['A: Transition matrix',
          'Q: Driving noise covariance',
          'R: Observational noise covariance']
failIdx = [not passA, not passQ, not passR]

if not np.any(failIdx):
    print('EM fit installation OK')
else:
    print('EM fit test failed for')
    failMsg = list(itertools.compress(data=labels, selectors=failIdx))
    for msg in failMsg:
        print('\t%s' % msg)

# Print the fitted parameters for manual inspection either way.
print('\nResult of the fit')
print(labels[0])
print(estA, end='\n\n')
print(labels[1])
print(estQ, end='\n\n')
print(labels[2])
print(estR)
unknown
codeparrot/codeparrot-clean
// Test fixture config: declares the compiler error expected for this
// sample -- code `rune_renamed` with a message directing users from the
// removed `$effect.active` rune to `$effect.tracking`.
import { test } from '../../test';

export default test({
	error: {
		code: 'rune_renamed',
		message: '`$effect.active` is now `$effect.tracking`'
	}
});
javascript
github
https://github.com/sveltejs/svelte
packages/svelte/tests/compiler-errors/samples/effect-active-rune/_config.js
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""v2.1 server-metadata API: CRUD operations on instance metadata."""

import six
from webob import exc

from nova.api.openstack import common
from nova.api.openstack.compute.schemas import server_metadata
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _

ALIAS = 'server-metadata'
authorize = extensions.os_compute_authorizer(ALIAS)


class ServerMetadataController(wsgi.Controller):
    """The server metadata API controller for the OpenStack API."""

    def __init__(self):
        self.compute_api = compute.API(skip_policy_check=True)
        super(ServerMetadataController, self).__init__()

    def _get_metadata(self, context, server_id):
        """Return the instance's metadata as a plain dict.

        Raises HTTPNotFound when the instance cannot be found.
        """
        server = common.get_instance(self.compute_api, context, server_id)
        try:
            # NOTE(mikal): get_instance_metadata sometimes returns
            # InstanceNotFound in unit tests, even though the instance is
            # fetched on the line above. I blame mocking.
            meta = self.compute_api.get_instance_metadata(context, server)
        except exception.InstanceNotFound:
            msg = _('Server does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        meta_dict = {}
        for key, value in six.iteritems(meta):
            meta_dict[key] = value
        return meta_dict

    @extensions.expected_errors(404)
    def index(self, req, server_id):
        """Returns the list of metadata for a given instance."""
        context = req.environ['nova.context']
        authorize(context, action='index')
        return {'metadata': self._get_metadata(context, server_id)}

    @extensions.expected_errors((400, 403, 404, 409, 413))
    # NOTE(gmann): Returns 200 for backwards compatibility but should be 201
    # as this operation complete the creation of metadata.
    @validation.schema(server_metadata.create)
    def create(self, req, server_id, body):
        """Merge the supplied metadata into the instance's metadata."""
        metadata = body['metadata']
        context = req.environ['nova.context']
        authorize(context, action='create')
        new_metadata = self._update_instance_metadata(context,
                                                      server_id,
                                                      metadata,
                                                      delete=False)

        return {'metadata': new_metadata}

    @extensions.expected_errors((400, 403, 404, 409, 413))
    @validation.schema(server_metadata.update)
    def update(self, req, server_id, id, body):
        """Update a single metadata item; the URI id must match the body key."""
        context = req.environ['nova.context']
        authorize(context, action='update')
        meta_item = body['meta']
        if id not in meta_item:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)

        self._update_instance_metadata(context,
                                       server_id,
                                       meta_item,
                                       delete=False)

        return {'meta': meta_item}

    @extensions.expected_errors((400, 403, 404, 409, 413))
    @validation.schema(server_metadata.update_all)
    def update_all(self, req, server_id, body):
        """Replace the instance's metadata wholesale (delete=True)."""
        context = req.environ['nova.context']
        authorize(context, action='update_all')
        metadata = body['metadata']
        new_metadata = self._update_instance_metadata(context,
                                                      server_id,
                                                      metadata,
                                                      delete=True)

        return {'metadata': new_metadata}

    def _update_instance_metadata(self, context, server_id, metadata,
                                  delete=False):
        """Apply a metadata change, translating nova exceptions to HTTP errors."""
        try:
            server = common.get_instance(self.compute_api, context, server_id)
            return self.compute_api.update_instance_metadata(context,
                                                             server,
                                                             metadata,
                                                             delete)
        except exception.InstanceUnknownCell as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.QuotaError as error:
            raise exc.HTTPForbidden(explanation=error.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'update metadata', server_id)

    @extensions.expected_errors(404)
    def show(self, req, server_id, id):
        """Return a single metadata item."""
        context = req.environ['nova.context']
        authorize(context, action='show')
        data = self._get_metadata(context, server_id)
        try:
            return {'meta': {id: data[id]}}
        except KeyError:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

    @extensions.expected_errors((404, 409))
    @wsgi.response(204)
    def delete(self, req, server_id, id):
        """Deletes an existing metadata."""
        context = req.environ['nova.context']
        authorize(context, action='delete')
        metadata = self._get_metadata(context, server_id)

        if id not in metadata:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

        server = common.get_instance(self.compute_api, context, server_id)
        try:
            self.compute_api.delete_instance_metadata(context, server, id)
        except exception.InstanceUnknownCell as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'delete metadata', server_id)


class ServerMetadata(extensions.V21APIExtensionBase):
    """Server Metadata API."""
    name = "ServerMetadata"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Register the /servers/{id}/metadata sub-resource."""
        parent = {'member_name': 'server',
                  'collection_name': 'servers'}
        resources = [extensions.ResourceExtension('metadata',
                                                  ServerMetadataController(),
                                                  member_name='server_meta',
                                                  parent=parent,
                                                  custom_routes_fn=
                                                  self.server_metadata_map
                                                  )]
        return resources

    def get_controller_extensions(self):
        return []

    def server_metadata_map(self, mapper, wsgi_resource):
        """Wire PUT .../metadata to update_all (with and without project_id)."""
        mapper.connect("metadata",
                       "/{project_id}/servers/{server_id}/metadata",
                       controller=wsgi_resource,
                       action='update_all', conditions={"method": ['PUT']})
        # Also connect the non project_id routes
        mapper.connect("metadata",
                       "/servers/{server_id}/metadata",
                       controller=wsgi_resource,
                       action='update_all', conditions={"method": ['PUT']})
unknown
codeparrot/codeparrot-clean
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html import "sync" // All entities that do not end with ';' are 6 or fewer bytes long. const longestEntityWithoutSemicolon = 6 // entityMaps returns entity and entity2. // // entity is a map from HTML entity names to their values. The semicolon matters: // https://html.spec.whatwg.org/multipage/named-characters.html // lists both "amp" and "amp;" as two separate entries. // Note that the HTML5 list is larger than the HTML4 list at // http://www.w3.org/TR/html4/sgml/entities.html // // entity2 is a map of HTML entities to two unicode codepoints. var entityMaps = sync.OnceValues(func() (entity map[string]rune, entity2 map[string][2]rune) { entity = map[string]rune{ "AElig;": '\U000000C6', "AMP;": '\U00000026', "Aacute;": '\U000000C1', "Abreve;": '\U00000102', "Acirc;": '\U000000C2', "Acy;": '\U00000410', "Afr;": '\U0001D504', "Agrave;": '\U000000C0', "Alpha;": '\U00000391', "Amacr;": '\U00000100', "And;": '\U00002A53', "Aogon;": '\U00000104', "Aopf;": '\U0001D538', "ApplyFunction;": '\U00002061', "Aring;": '\U000000C5', "Ascr;": '\U0001D49C', "Assign;": '\U00002254', "Atilde;": '\U000000C3', "Auml;": '\U000000C4', "Backslash;": '\U00002216', "Barv;": '\U00002AE7', "Barwed;": '\U00002306', "Bcy;": '\U00000411', "Because;": '\U00002235', "Bernoullis;": '\U0000212C', "Beta;": '\U00000392', "Bfr;": '\U0001D505', "Bopf;": '\U0001D539', "Breve;": '\U000002D8', "Bscr;": '\U0000212C', "Bumpeq;": '\U0000224E', "CHcy;": '\U00000427', "COPY;": '\U000000A9', "Cacute;": '\U00000106', "Cap;": '\U000022D2', "CapitalDifferentialD;": '\U00002145', "Cayleys;": '\U0000212D', "Ccaron;": '\U0000010C', "Ccedil;": '\U000000C7', "Ccirc;": '\U00000108', "Cconint;": '\U00002230', "Cdot;": '\U0000010A', "Cedilla;": '\U000000B8', "CenterDot;": '\U000000B7', "Cfr;": '\U0000212D', "Chi;": '\U000003A7', "CircleDot;": '\U00002299', 
"CircleMinus;": '\U00002296', "CirclePlus;": '\U00002295', "CircleTimes;": '\U00002297', "ClockwiseContourIntegral;": '\U00002232', "CloseCurlyDoubleQuote;": '\U0000201D', "CloseCurlyQuote;": '\U00002019', "Colon;": '\U00002237', "Colone;": '\U00002A74', "Congruent;": '\U00002261', "Conint;": '\U0000222F', "ContourIntegral;": '\U0000222E', "Copf;": '\U00002102', "Coproduct;": '\U00002210', "CounterClockwiseContourIntegral;": '\U00002233', "Cross;": '\U00002A2F', "Cscr;": '\U0001D49E', "Cup;": '\U000022D3', "CupCap;": '\U0000224D', "DD;": '\U00002145', "DDotrahd;": '\U00002911', "DJcy;": '\U00000402', "DScy;": '\U00000405', "DZcy;": '\U0000040F', "Dagger;": '\U00002021', "Darr;": '\U000021A1', "Dashv;": '\U00002AE4', "Dcaron;": '\U0000010E', "Dcy;": '\U00000414', "Del;": '\U00002207', "Delta;": '\U00000394', "Dfr;": '\U0001D507', "DiacriticalAcute;": '\U000000B4', "DiacriticalDot;": '\U000002D9', "DiacriticalDoubleAcute;": '\U000002DD', "DiacriticalGrave;": '\U00000060', "DiacriticalTilde;": '\U000002DC', "Diamond;": '\U000022C4', "DifferentialD;": '\U00002146', "Dopf;": '\U0001D53B', "Dot;": '\U000000A8', "DotDot;": '\U000020DC', "DotEqual;": '\U00002250', "DoubleContourIntegral;": '\U0000222F', "DoubleDot;": '\U000000A8', "DoubleDownArrow;": '\U000021D3', "DoubleLeftArrow;": '\U000021D0', "DoubleLeftRightArrow;": '\U000021D4', "DoubleLeftTee;": '\U00002AE4', "DoubleLongLeftArrow;": '\U000027F8', "DoubleLongLeftRightArrow;": '\U000027FA', "DoubleLongRightArrow;": '\U000027F9', "DoubleRightArrow;": '\U000021D2', "DoubleRightTee;": '\U000022A8', "DoubleUpArrow;": '\U000021D1', "DoubleUpDownArrow;": '\U000021D5', "DoubleVerticalBar;": '\U00002225', "DownArrow;": '\U00002193', "DownArrowBar;": '\U00002913', "DownArrowUpArrow;": '\U000021F5', "DownBreve;": '\U00000311', "DownLeftRightVector;": '\U00002950', "DownLeftTeeVector;": '\U0000295E', "DownLeftVector;": '\U000021BD', "DownLeftVectorBar;": '\U00002956', "DownRightTeeVector;": '\U0000295F', "DownRightVector;": 
'\U000021C1', "DownRightVectorBar;": '\U00002957', "DownTee;": '\U000022A4', "DownTeeArrow;": '\U000021A7', "Downarrow;": '\U000021D3', "Dscr;": '\U0001D49F', "Dstrok;": '\U00000110', "ENG;": '\U0000014A', "ETH;": '\U000000D0', "Eacute;": '\U000000C9', "Ecaron;": '\U0000011A', "Ecirc;": '\U000000CA', "Ecy;": '\U0000042D', "Edot;": '\U00000116', "Efr;": '\U0001D508', "Egrave;": '\U000000C8', "Element;": '\U00002208', "Emacr;": '\U00000112', "EmptySmallSquare;": '\U000025FB', "EmptyVerySmallSquare;": '\U000025AB', "Eogon;": '\U00000118', "Eopf;": '\U0001D53C', "Epsilon;": '\U00000395', "Equal;": '\U00002A75', "EqualTilde;": '\U00002242', "Equilibrium;": '\U000021CC', "Escr;": '\U00002130', "Esim;": '\U00002A73', "Eta;": '\U00000397', "Euml;": '\U000000CB', "Exists;": '\U00002203', "ExponentialE;": '\U00002147', "Fcy;": '\U00000424', "Ffr;": '\U0001D509', "FilledSmallSquare;": '\U000025FC', "FilledVerySmallSquare;": '\U000025AA', "Fopf;": '\U0001D53D', "ForAll;": '\U00002200', "Fouriertrf;": '\U00002131', "Fscr;": '\U00002131', "GJcy;": '\U00000403', "GT;": '\U0000003E', "Gamma;": '\U00000393', "Gammad;": '\U000003DC', "Gbreve;": '\U0000011E', "Gcedil;": '\U00000122', "Gcirc;": '\U0000011C', "Gcy;": '\U00000413', "Gdot;": '\U00000120', "Gfr;": '\U0001D50A', "Gg;": '\U000022D9', "Gopf;": '\U0001D53E', "GreaterEqual;": '\U00002265', "GreaterEqualLess;": '\U000022DB', "GreaterFullEqual;": '\U00002267', "GreaterGreater;": '\U00002AA2', "GreaterLess;": '\U00002277', "GreaterSlantEqual;": '\U00002A7E', "GreaterTilde;": '\U00002273', "Gscr;": '\U0001D4A2', "Gt;": '\U0000226B', "HARDcy;": '\U0000042A', "Hacek;": '\U000002C7', "Hat;": '\U0000005E', "Hcirc;": '\U00000124', "Hfr;": '\U0000210C', "HilbertSpace;": '\U0000210B', "Hopf;": '\U0000210D', "HorizontalLine;": '\U00002500', "Hscr;": '\U0000210B', "Hstrok;": '\U00000126', "HumpDownHump;": '\U0000224E', "HumpEqual;": '\U0000224F', "IEcy;": '\U00000415', "IJlig;": '\U00000132', "IOcy;": '\U00000401', "Iacute;": '\U000000CD', 
"Icirc;": '\U000000CE', "Icy;": '\U00000418', "Idot;": '\U00000130', "Ifr;": '\U00002111', "Igrave;": '\U000000CC', "Im;": '\U00002111', "Imacr;": '\U0000012A', "ImaginaryI;": '\U00002148', "Implies;": '\U000021D2', "Int;": '\U0000222C', "Integral;": '\U0000222B', "Intersection;": '\U000022C2', "InvisibleComma;": '\U00002063', "InvisibleTimes;": '\U00002062', "Iogon;": '\U0000012E', "Iopf;": '\U0001D540', "Iota;": '\U00000399', "Iscr;": '\U00002110', "Itilde;": '\U00000128', "Iukcy;": '\U00000406', "Iuml;": '\U000000CF', "Jcirc;": '\U00000134', "Jcy;": '\U00000419', "Jfr;": '\U0001D50D', "Jopf;": '\U0001D541', "Jscr;": '\U0001D4A5', "Jsercy;": '\U00000408', "Jukcy;": '\U00000404', "KHcy;": '\U00000425', "KJcy;": '\U0000040C', "Kappa;": '\U0000039A', "Kcedil;": '\U00000136', "Kcy;": '\U0000041A', "Kfr;": '\U0001D50E', "Kopf;": '\U0001D542', "Kscr;": '\U0001D4A6', "LJcy;": '\U00000409', "LT;": '\U0000003C', "Lacute;": '\U00000139', "Lambda;": '\U0000039B', "Lang;": '\U000027EA', "Laplacetrf;": '\U00002112', "Larr;": '\U0000219E', "Lcaron;": '\U0000013D', "Lcedil;": '\U0000013B', "Lcy;": '\U0000041B', "LeftAngleBracket;": '\U000027E8', "LeftArrow;": '\U00002190', "LeftArrowBar;": '\U000021E4', "LeftArrowRightArrow;": '\U000021C6', "LeftCeiling;": '\U00002308', "LeftDoubleBracket;": '\U000027E6', "LeftDownTeeVector;": '\U00002961', "LeftDownVector;": '\U000021C3', "LeftDownVectorBar;": '\U00002959', "LeftFloor;": '\U0000230A', "LeftRightArrow;": '\U00002194', "LeftRightVector;": '\U0000294E', "LeftTee;": '\U000022A3', "LeftTeeArrow;": '\U000021A4', "LeftTeeVector;": '\U0000295A', "LeftTriangle;": '\U000022B2', "LeftTriangleBar;": '\U000029CF', "LeftTriangleEqual;": '\U000022B4', "LeftUpDownVector;": '\U00002951', "LeftUpTeeVector;": '\U00002960', "LeftUpVector;": '\U000021BF', "LeftUpVectorBar;": '\U00002958', "LeftVector;": '\U000021BC', "LeftVectorBar;": '\U00002952', "Leftarrow;": '\U000021D0', "Leftrightarrow;": '\U000021D4', "LessEqualGreater;": '\U000022DA', 
"LessFullEqual;": '\U00002266', "LessGreater;": '\U00002276', "LessLess;": '\U00002AA1', "LessSlantEqual;": '\U00002A7D', "LessTilde;": '\U00002272', "Lfr;": '\U0001D50F', "Ll;": '\U000022D8', "Lleftarrow;": '\U000021DA', "Lmidot;": '\U0000013F', "LongLeftArrow;": '\U000027F5', "LongLeftRightArrow;": '\U000027F7', "LongRightArrow;": '\U000027F6', "Longleftarrow;": '\U000027F8', "Longleftrightarrow;": '\U000027FA', "Longrightarrow;": '\U000027F9', "Lopf;": '\U0001D543', "LowerLeftArrow;": '\U00002199', "LowerRightArrow;": '\U00002198', "Lscr;": '\U00002112', "Lsh;": '\U000021B0', "Lstrok;": '\U00000141', "Lt;": '\U0000226A', "Map;": '\U00002905', "Mcy;": '\U0000041C', "MediumSpace;": '\U0000205F', "Mellintrf;": '\U00002133', "Mfr;": '\U0001D510', "MinusPlus;": '\U00002213', "Mopf;": '\U0001D544', "Mscr;": '\U00002133', "Mu;": '\U0000039C', "NJcy;": '\U0000040A', "Nacute;": '\U00000143', "Ncaron;": '\U00000147', "Ncedil;": '\U00000145', "Ncy;": '\U0000041D', "NegativeMediumSpace;": '\U0000200B', "NegativeThickSpace;": '\U0000200B', "NegativeThinSpace;": '\U0000200B', "NegativeVeryThinSpace;": '\U0000200B', "NestedGreaterGreater;": '\U0000226B', "NestedLessLess;": '\U0000226A', "NewLine;": '\U0000000A', "Nfr;": '\U0001D511', "NoBreak;": '\U00002060', "NonBreakingSpace;": '\U000000A0', "Nopf;": '\U00002115', "Not;": '\U00002AEC', "NotCongruent;": '\U00002262', "NotCupCap;": '\U0000226D', "NotDoubleVerticalBar;": '\U00002226', "NotElement;": '\U00002209', "NotEqual;": '\U00002260', "NotExists;": '\U00002204', "NotGreater;": '\U0000226F', "NotGreaterEqual;": '\U00002271', "NotGreaterLess;": '\U00002279', "NotGreaterTilde;": '\U00002275', "NotLeftTriangle;": '\U000022EA', "NotLeftTriangleEqual;": '\U000022EC', "NotLess;": '\U0000226E', "NotLessEqual;": '\U00002270', "NotLessGreater;": '\U00002278', "NotLessTilde;": '\U00002274', "NotPrecedes;": '\U00002280', "NotPrecedesSlantEqual;": '\U000022E0', "NotReverseElement;": '\U0000220C', "NotRightTriangle;": '\U000022EB', 
"NotRightTriangleEqual;": '\U000022ED', "NotSquareSubsetEqual;": '\U000022E2', "NotSquareSupersetEqual;": '\U000022E3', "NotSubsetEqual;": '\U00002288', "NotSucceeds;": '\U00002281', "NotSucceedsSlantEqual;": '\U000022E1', "NotSupersetEqual;": '\U00002289', "NotTilde;": '\U00002241', "NotTildeEqual;": '\U00002244', "NotTildeFullEqual;": '\U00002247', "NotTildeTilde;": '\U00002249', "NotVerticalBar;": '\U00002224', "Nscr;": '\U0001D4A9', "Ntilde;": '\U000000D1', "Nu;": '\U0000039D', "OElig;": '\U00000152', "Oacute;": '\U000000D3', "Ocirc;": '\U000000D4', "Ocy;": '\U0000041E', "Odblac;": '\U00000150', "Ofr;": '\U0001D512', "Ograve;": '\U000000D2', "Omacr;": '\U0000014C', "Omega;": '\U000003A9', "Omicron;": '\U0000039F', "Oopf;": '\U0001D546', "OpenCurlyDoubleQuote;": '\U0000201C', "OpenCurlyQuote;": '\U00002018', "Or;": '\U00002A54', "Oscr;": '\U0001D4AA', "Oslash;": '\U000000D8', "Otilde;": '\U000000D5', "Otimes;": '\U00002A37', "Ouml;": '\U000000D6', "OverBar;": '\U0000203E', "OverBrace;": '\U000023DE', "OverBracket;": '\U000023B4', "OverParenthesis;": '\U000023DC', "PartialD;": '\U00002202', "Pcy;": '\U0000041F', "Pfr;": '\U0001D513', "Phi;": '\U000003A6', "Pi;": '\U000003A0', "PlusMinus;": '\U000000B1', "Poincareplane;": '\U0000210C', "Popf;": '\U00002119', "Pr;": '\U00002ABB', "Precedes;": '\U0000227A', "PrecedesEqual;": '\U00002AAF', "PrecedesSlantEqual;": '\U0000227C', "PrecedesTilde;": '\U0000227E', "Prime;": '\U00002033', "Product;": '\U0000220F', "Proportion;": '\U00002237', "Proportional;": '\U0000221D', "Pscr;": '\U0001D4AB', "Psi;": '\U000003A8', "QUOT;": '\U00000022', "Qfr;": '\U0001D514', "Qopf;": '\U0000211A', "Qscr;": '\U0001D4AC', "RBarr;": '\U00002910', "REG;": '\U000000AE', "Racute;": '\U00000154', "Rang;": '\U000027EB', "Rarr;": '\U000021A0', "Rarrtl;": '\U00002916', "Rcaron;": '\U00000158', "Rcedil;": '\U00000156', "Rcy;": '\U00000420', "Re;": '\U0000211C', "ReverseElement;": '\U0000220B', "ReverseEquilibrium;": '\U000021CB', 
"ReverseUpEquilibrium;": '\U0000296F', "Rfr;": '\U0000211C', "Rho;": '\U000003A1', "RightAngleBracket;": '\U000027E9', "RightArrow;": '\U00002192', "RightArrowBar;": '\U000021E5', "RightArrowLeftArrow;": '\U000021C4', "RightCeiling;": '\U00002309', "RightDoubleBracket;": '\U000027E7', "RightDownTeeVector;": '\U0000295D', "RightDownVector;": '\U000021C2', "RightDownVectorBar;": '\U00002955', "RightFloor;": '\U0000230B', "RightTee;": '\U000022A2', "RightTeeArrow;": '\U000021A6', "RightTeeVector;": '\U0000295B', "RightTriangle;": '\U000022B3', "RightTriangleBar;": '\U000029D0', "RightTriangleEqual;": '\U000022B5', "RightUpDownVector;": '\U0000294F', "RightUpTeeVector;": '\U0000295C', "RightUpVector;": '\U000021BE', "RightUpVectorBar;": '\U00002954', "RightVector;": '\U000021C0', "RightVectorBar;": '\U00002953', "Rightarrow;": '\U000021D2', "Ropf;": '\U0000211D', "RoundImplies;": '\U00002970', "Rrightarrow;": '\U000021DB', "Rscr;": '\U0000211B', "Rsh;": '\U000021B1', "RuleDelayed;": '\U000029F4', "SHCHcy;": '\U00000429', "SHcy;": '\U00000428', "SOFTcy;": '\U0000042C', "Sacute;": '\U0000015A', "Sc;": '\U00002ABC', "Scaron;": '\U00000160', "Scedil;": '\U0000015E', "Scirc;": '\U0000015C', "Scy;": '\U00000421', "Sfr;": '\U0001D516', "ShortDownArrow;": '\U00002193', "ShortLeftArrow;": '\U00002190', "ShortRightArrow;": '\U00002192', "ShortUpArrow;": '\U00002191', "Sigma;": '\U000003A3', "SmallCircle;": '\U00002218', "Sopf;": '\U0001D54A', "Sqrt;": '\U0000221A', "Square;": '\U000025A1', "SquareIntersection;": '\U00002293', "SquareSubset;": '\U0000228F', "SquareSubsetEqual;": '\U00002291', "SquareSuperset;": '\U00002290', "SquareSupersetEqual;": '\U00002292', "SquareUnion;": '\U00002294', "Sscr;": '\U0001D4AE', "Star;": '\U000022C6', "Sub;": '\U000022D0', "Subset;": '\U000022D0', "SubsetEqual;": '\U00002286', "Succeeds;": '\U0000227B', "SucceedsEqual;": '\U00002AB0', "SucceedsSlantEqual;": '\U0000227D', "SucceedsTilde;": '\U0000227F', "SuchThat;": '\U0000220B', "Sum;": 
'\U00002211', "Sup;": '\U000022D1', "Superset;": '\U00002283', "SupersetEqual;": '\U00002287', "Supset;": '\U000022D1', "THORN;": '\U000000DE', "TRADE;": '\U00002122', "TSHcy;": '\U0000040B', "TScy;": '\U00000426', "Tab;": '\U00000009', "Tau;": '\U000003A4', "Tcaron;": '\U00000164', "Tcedil;": '\U00000162', "Tcy;": '\U00000422', "Tfr;": '\U0001D517', "Therefore;": '\U00002234', "Theta;": '\U00000398', "ThinSpace;": '\U00002009', "Tilde;": '\U0000223C', "TildeEqual;": '\U00002243', "TildeFullEqual;": '\U00002245', "TildeTilde;": '\U00002248', "Topf;": '\U0001D54B', "TripleDot;": '\U000020DB', "Tscr;": '\U0001D4AF', "Tstrok;": '\U00000166', "Uacute;": '\U000000DA', "Uarr;": '\U0000219F', "Uarrocir;": '\U00002949', "Ubrcy;": '\U0000040E', "Ubreve;": '\U0000016C', "Ucirc;": '\U000000DB', "Ucy;": '\U00000423', "Udblac;": '\U00000170', "Ufr;": '\U0001D518', "Ugrave;": '\U000000D9', "Umacr;": '\U0000016A', "UnderBar;": '\U0000005F', "UnderBrace;": '\U000023DF', "UnderBracket;": '\U000023B5', "UnderParenthesis;": '\U000023DD', "Union;": '\U000022C3', "UnionPlus;": '\U0000228E', "Uogon;": '\U00000172', "Uopf;": '\U0001D54C', "UpArrow;": '\U00002191', "UpArrowBar;": '\U00002912', "UpArrowDownArrow;": '\U000021C5', "UpDownArrow;": '\U00002195', "UpEquilibrium;": '\U0000296E', "UpTee;": '\U000022A5', "UpTeeArrow;": '\U000021A5', "Uparrow;": '\U000021D1', "Updownarrow;": '\U000021D5', "UpperLeftArrow;": '\U00002196', "UpperRightArrow;": '\U00002197', "Upsi;": '\U000003D2', "Upsilon;": '\U000003A5', "Uring;": '\U0000016E', "Uscr;": '\U0001D4B0', "Utilde;": '\U00000168', "Uuml;": '\U000000DC', "VDash;": '\U000022AB', "Vbar;": '\U00002AEB', "Vcy;": '\U00000412', "Vdash;": '\U000022A9', "Vdashl;": '\U00002AE6', "Vee;": '\U000022C1', "Verbar;": '\U00002016', "Vert;": '\U00002016', "VerticalBar;": '\U00002223', "VerticalLine;": '\U0000007C', "VerticalSeparator;": '\U00002758', "VerticalTilde;": '\U00002240', "VeryThinSpace;": '\U0000200A', "Vfr;": '\U0001D519', "Vopf;": '\U0001D54D', 
"Vscr;": '\U0001D4B1', "Vvdash;": '\U000022AA', "Wcirc;": '\U00000174', "Wedge;": '\U000022C0', "Wfr;": '\U0001D51A', "Wopf;": '\U0001D54E', "Wscr;": '\U0001D4B2', "Xfr;": '\U0001D51B', "Xi;": '\U0000039E', "Xopf;": '\U0001D54F', "Xscr;": '\U0001D4B3', "YAcy;": '\U0000042F', "YIcy;": '\U00000407', "YUcy;": '\U0000042E', "Yacute;": '\U000000DD', "Ycirc;": '\U00000176', "Ycy;": '\U0000042B', "Yfr;": '\U0001D51C', "Yopf;": '\U0001D550', "Yscr;": '\U0001D4B4', "Yuml;": '\U00000178', "ZHcy;": '\U00000416', "Zacute;": '\U00000179', "Zcaron;": '\U0000017D', "Zcy;": '\U00000417', "Zdot;": '\U0000017B', "ZeroWidthSpace;": '\U0000200B', "Zeta;": '\U00000396', "Zfr;": '\U00002128', "Zopf;": '\U00002124', "Zscr;": '\U0001D4B5', "aacute;": '\U000000E1', "abreve;": '\U00000103', "ac;": '\U0000223E', "acd;": '\U0000223F', "acirc;": '\U000000E2', "acute;": '\U000000B4', "acy;": '\U00000430', "aelig;": '\U000000E6', "af;": '\U00002061', "afr;": '\U0001D51E', "agrave;": '\U000000E0', "alefsym;": '\U00002135', "aleph;": '\U00002135', "alpha;": '\U000003B1', "amacr;": '\U00000101', "amalg;": '\U00002A3F', "amp;": '\U00000026', "and;": '\U00002227', "andand;": '\U00002A55', "andd;": '\U00002A5C', "andslope;": '\U00002A58', "andv;": '\U00002A5A', "ang;": '\U00002220', "ange;": '\U000029A4', "angle;": '\U00002220', "angmsd;": '\U00002221', "angmsdaa;": '\U000029A8', "angmsdab;": '\U000029A9', "angmsdac;": '\U000029AA', "angmsdad;": '\U000029AB', "angmsdae;": '\U000029AC', "angmsdaf;": '\U000029AD', "angmsdag;": '\U000029AE', "angmsdah;": '\U000029AF', "angrt;": '\U0000221F', "angrtvb;": '\U000022BE', "angrtvbd;": '\U0000299D', "angsph;": '\U00002222', "angst;": '\U000000C5', "angzarr;": '\U0000237C', "aogon;": '\U00000105', "aopf;": '\U0001D552', "ap;": '\U00002248', "apE;": '\U00002A70', "apacir;": '\U00002A6F', "ape;": '\U0000224A', "apid;": '\U0000224B', "apos;": '\U00000027', "approx;": '\U00002248', "approxeq;": '\U0000224A', "aring;": '\U000000E5', "ascr;": '\U0001D4B6', "ast;": 
'\U0000002A', "asymp;": '\U00002248', "asympeq;": '\U0000224D', "atilde;": '\U000000E3', "auml;": '\U000000E4', "awconint;": '\U00002233', "awint;": '\U00002A11', "bNot;": '\U00002AED', "backcong;": '\U0000224C', "backepsilon;": '\U000003F6', "backprime;": '\U00002035', "backsim;": '\U0000223D', "backsimeq;": '\U000022CD', "barvee;": '\U000022BD', "barwed;": '\U00002305', "barwedge;": '\U00002305', "bbrk;": '\U000023B5', "bbrktbrk;": '\U000023B6', "bcong;": '\U0000224C', "bcy;": '\U00000431', "bdquo;": '\U0000201E', "becaus;": '\U00002235', "because;": '\U00002235', "bemptyv;": '\U000029B0', "bepsi;": '\U000003F6', "bernou;": '\U0000212C', "beta;": '\U000003B2', "beth;": '\U00002136', "between;": '\U0000226C', "bfr;": '\U0001D51F', "bigcap;": '\U000022C2', "bigcirc;": '\U000025EF', "bigcup;": '\U000022C3', "bigodot;": '\U00002A00', "bigoplus;": '\U00002A01', "bigotimes;": '\U00002A02', "bigsqcup;": '\U00002A06', "bigstar;": '\U00002605', "bigtriangledown;": '\U000025BD', "bigtriangleup;": '\U000025B3', "biguplus;": '\U00002A04', "bigvee;": '\U000022C1', "bigwedge;": '\U000022C0', "bkarow;": '\U0000290D', "blacklozenge;": '\U000029EB', "blacksquare;": '\U000025AA', "blacktriangle;": '\U000025B4', "blacktriangledown;": '\U000025BE', "blacktriangleleft;": '\U000025C2', "blacktriangleright;": '\U000025B8', "blank;": '\U00002423', "blk12;": '\U00002592', "blk14;": '\U00002591', "blk34;": '\U00002593', "block;": '\U00002588', "bnot;": '\U00002310', "bopf;": '\U0001D553', "bot;": '\U000022A5', "bottom;": '\U000022A5', "bowtie;": '\U000022C8', "boxDL;": '\U00002557', "boxDR;": '\U00002554', "boxDl;": '\U00002556', "boxDr;": '\U00002553', "boxH;": '\U00002550', "boxHD;": '\U00002566', "boxHU;": '\U00002569', "boxHd;": '\U00002564', "boxHu;": '\U00002567', "boxUL;": '\U0000255D', "boxUR;": '\U0000255A', "boxUl;": '\U0000255C', "boxUr;": '\U00002559', "boxV;": '\U00002551', "boxVH;": '\U0000256C', "boxVL;": '\U00002563', "boxVR;": '\U00002560', "boxVh;": '\U0000256B', 
"boxVl;": '\U00002562', "boxVr;": '\U0000255F', "boxbox;": '\U000029C9', "boxdL;": '\U00002555', "boxdR;": '\U00002552', "boxdl;": '\U00002510', "boxdr;": '\U0000250C', "boxh;": '\U00002500', "boxhD;": '\U00002565', "boxhU;": '\U00002568', "boxhd;": '\U0000252C', "boxhu;": '\U00002534', "boxminus;": '\U0000229F', "boxplus;": '\U0000229E', "boxtimes;": '\U000022A0', "boxuL;": '\U0000255B', "boxuR;": '\U00002558', "boxul;": '\U00002518', "boxur;": '\U00002514', "boxv;": '\U00002502', "boxvH;": '\U0000256A', "boxvL;": '\U00002561', "boxvR;": '\U0000255E', "boxvh;": '\U0000253C', "boxvl;": '\U00002524', "boxvr;": '\U0000251C', "bprime;": '\U00002035', "breve;": '\U000002D8', "brvbar;": '\U000000A6', "bscr;": '\U0001D4B7', "bsemi;": '\U0000204F', "bsim;": '\U0000223D', "bsime;": '\U000022CD', "bsol;": '\U0000005C', "bsolb;": '\U000029C5', "bsolhsub;": '\U000027C8', "bull;": '\U00002022', "bullet;": '\U00002022', "bump;": '\U0000224E', "bumpE;": '\U00002AAE', "bumpe;": '\U0000224F', "bumpeq;": '\U0000224F', "cacute;": '\U00000107', "cap;": '\U00002229', "capand;": '\U00002A44', "capbrcup;": '\U00002A49', "capcap;": '\U00002A4B', "capcup;": '\U00002A47', "capdot;": '\U00002A40', "caret;": '\U00002041', "caron;": '\U000002C7', "ccaps;": '\U00002A4D', "ccaron;": '\U0000010D', "ccedil;": '\U000000E7', "ccirc;": '\U00000109', "ccups;": '\U00002A4C', "ccupssm;": '\U00002A50', "cdot;": '\U0000010B', "cedil;": '\U000000B8', "cemptyv;": '\U000029B2', "cent;": '\U000000A2', "centerdot;": '\U000000B7', "cfr;": '\U0001D520', "chcy;": '\U00000447', "check;": '\U00002713', "checkmark;": '\U00002713', "chi;": '\U000003C7', "cir;": '\U000025CB', "cirE;": '\U000029C3', "circ;": '\U000002C6', "circeq;": '\U00002257', "circlearrowleft;": '\U000021BA', "circlearrowright;": '\U000021BB', "circledR;": '\U000000AE', "circledS;": '\U000024C8', "circledast;": '\U0000229B', "circledcirc;": '\U0000229A', "circleddash;": '\U0000229D', "cire;": '\U00002257', "cirfnint;": '\U00002A10', "cirmid;": 
'\U00002AEF', "cirscir;": '\U000029C2', "clubs;": '\U00002663', "clubsuit;": '\U00002663', "colon;": '\U0000003A', "colone;": '\U00002254', "coloneq;": '\U00002254', "comma;": '\U0000002C', "commat;": '\U00000040', "comp;": '\U00002201', "compfn;": '\U00002218', "complement;": '\U00002201', "complexes;": '\U00002102', "cong;": '\U00002245', "congdot;": '\U00002A6D', "conint;": '\U0000222E', "copf;": '\U0001D554', "coprod;": '\U00002210', "copy;": '\U000000A9', "copysr;": '\U00002117', "crarr;": '\U000021B5', "cross;": '\U00002717', "cscr;": '\U0001D4B8', "csub;": '\U00002ACF', "csube;": '\U00002AD1', "csup;": '\U00002AD0', "csupe;": '\U00002AD2', "ctdot;": '\U000022EF', "cudarrl;": '\U00002938', "cudarrr;": '\U00002935', "cuepr;": '\U000022DE', "cuesc;": '\U000022DF', "cularr;": '\U000021B6', "cularrp;": '\U0000293D', "cup;": '\U0000222A', "cupbrcap;": '\U00002A48', "cupcap;": '\U00002A46', "cupcup;": '\U00002A4A', "cupdot;": '\U0000228D', "cupor;": '\U00002A45', "curarr;": '\U000021B7', "curarrm;": '\U0000293C', "curlyeqprec;": '\U000022DE', "curlyeqsucc;": '\U000022DF', "curlyvee;": '\U000022CE', "curlywedge;": '\U000022CF', "curren;": '\U000000A4', "curvearrowleft;": '\U000021B6', "curvearrowright;": '\U000021B7', "cuvee;": '\U000022CE', "cuwed;": '\U000022CF', "cwconint;": '\U00002232', "cwint;": '\U00002231', "cylcty;": '\U0000232D', "dArr;": '\U000021D3', "dHar;": '\U00002965', "dagger;": '\U00002020', "daleth;": '\U00002138', "darr;": '\U00002193', "dash;": '\U00002010', "dashv;": '\U000022A3', "dbkarow;": '\U0000290F', "dblac;": '\U000002DD', "dcaron;": '\U0000010F', "dcy;": '\U00000434', "dd;": '\U00002146', "ddagger;": '\U00002021', "ddarr;": '\U000021CA', "ddotseq;": '\U00002A77', "deg;": '\U000000B0', "delta;": '\U000003B4', "demptyv;": '\U000029B1', "dfisht;": '\U0000297F', "dfr;": '\U0001D521', "dharl;": '\U000021C3', "dharr;": '\U000021C2', "diam;": '\U000022C4', "diamond;": '\U000022C4', "diamondsuit;": '\U00002666', "diams;": '\U00002666', "die;": 
'\U000000A8', "digamma;": '\U000003DD', "disin;": '\U000022F2', "div;": '\U000000F7', "divide;": '\U000000F7', "divideontimes;": '\U000022C7', "divonx;": '\U000022C7', "djcy;": '\U00000452', "dlcorn;": '\U0000231E', "dlcrop;": '\U0000230D', "dollar;": '\U00000024', "dopf;": '\U0001D555', "dot;": '\U000002D9', "doteq;": '\U00002250', "doteqdot;": '\U00002251', "dotminus;": '\U00002238', "dotplus;": '\U00002214', "dotsquare;": '\U000022A1', "doublebarwedge;": '\U00002306', "downarrow;": '\U00002193', "downdownarrows;": '\U000021CA', "downharpoonleft;": '\U000021C3', "downharpoonright;": '\U000021C2', "drbkarow;": '\U00002910', "drcorn;": '\U0000231F', "drcrop;": '\U0000230C', "dscr;": '\U0001D4B9', "dscy;": '\U00000455', "dsol;": '\U000029F6', "dstrok;": '\U00000111', "dtdot;": '\U000022F1', "dtri;": '\U000025BF', "dtrif;": '\U000025BE', "duarr;": '\U000021F5', "duhar;": '\U0000296F', "dwangle;": '\U000029A6', "dzcy;": '\U0000045F', "dzigrarr;": '\U000027FF', "eDDot;": '\U00002A77', "eDot;": '\U00002251', "eacute;": '\U000000E9', "easter;": '\U00002A6E', "ecaron;": '\U0000011B', "ecir;": '\U00002256', "ecirc;": '\U000000EA', "ecolon;": '\U00002255', "ecy;": '\U0000044D', "edot;": '\U00000117', "ee;": '\U00002147', "efDot;": '\U00002252', "efr;": '\U0001D522', "eg;": '\U00002A9A', "egrave;": '\U000000E8', "egs;": '\U00002A96', "egsdot;": '\U00002A98', "el;": '\U00002A99', "elinters;": '\U000023E7', "ell;": '\U00002113', "els;": '\U00002A95', "elsdot;": '\U00002A97', "emacr;": '\U00000113', "empty;": '\U00002205', "emptyset;": '\U00002205', "emptyv;": '\U00002205', "emsp;": '\U00002003', "emsp13;": '\U00002004', "emsp14;": '\U00002005', "eng;": '\U0000014B', "ensp;": '\U00002002', "eogon;": '\U00000119', "eopf;": '\U0001D556', "epar;": '\U000022D5', "eparsl;": '\U000029E3', "eplus;": '\U00002A71', "epsi;": '\U000003B5', "epsilon;": '\U000003B5', "epsiv;": '\U000003F5', "eqcirc;": '\U00002256', "eqcolon;": '\U00002255', "eqsim;": '\U00002242', "eqslantgtr;": 
'\U00002A96', "eqslantless;": '\U00002A95', "equals;": '\U0000003D', "equest;": '\U0000225F', "equiv;": '\U00002261', "equivDD;": '\U00002A78', "eqvparsl;": '\U000029E5', "erDot;": '\U00002253', "erarr;": '\U00002971', "escr;": '\U0000212F', "esdot;": '\U00002250', "esim;": '\U00002242', "eta;": '\U000003B7', "eth;": '\U000000F0', "euml;": '\U000000EB', "euro;": '\U000020AC', "excl;": '\U00000021', "exist;": '\U00002203', "expectation;": '\U00002130', "exponentiale;": '\U00002147', "fallingdotseq;": '\U00002252', "fcy;": '\U00000444', "female;": '\U00002640', "ffilig;": '\U0000FB03', "fflig;": '\U0000FB00', "ffllig;": '\U0000FB04', "ffr;": '\U0001D523', "filig;": '\U0000FB01', "flat;": '\U0000266D', "fllig;": '\U0000FB02', "fltns;": '\U000025B1', "fnof;": '\U00000192', "fopf;": '\U0001D557', "forall;": '\U00002200', "fork;": '\U000022D4', "forkv;": '\U00002AD9', "fpartint;": '\U00002A0D', "frac12;": '\U000000BD', "frac13;": '\U00002153', "frac14;": '\U000000BC', "frac15;": '\U00002155', "frac16;": '\U00002159', "frac18;": '\U0000215B', "frac23;": '\U00002154', "frac25;": '\U00002156', "frac34;": '\U000000BE', "frac35;": '\U00002157', "frac38;": '\U0000215C', "frac45;": '\U00002158', "frac56;": '\U0000215A', "frac58;": '\U0000215D', "frac78;": '\U0000215E', "frasl;": '\U00002044', "frown;": '\U00002322', "fscr;": '\U0001D4BB', "gE;": '\U00002267', "gEl;": '\U00002A8C', "gacute;": '\U000001F5', "gamma;": '\U000003B3', "gammad;": '\U000003DD', "gap;": '\U00002A86', "gbreve;": '\U0000011F', "gcirc;": '\U0000011D', "gcy;": '\U00000433', "gdot;": '\U00000121', "ge;": '\U00002265', "gel;": '\U000022DB', "geq;": '\U00002265', "geqq;": '\U00002267', "geqslant;": '\U00002A7E', "ges;": '\U00002A7E', "gescc;": '\U00002AA9', "gesdot;": '\U00002A80', "gesdoto;": '\U00002A82', "gesdotol;": '\U00002A84', "gesles;": '\U00002A94', "gfr;": '\U0001D524', "gg;": '\U0000226B', "ggg;": '\U000022D9', "gimel;": '\U00002137', "gjcy;": '\U00000453', "gl;": '\U00002277', "glE;": '\U00002A92', 
"gla;": '\U00002AA5', "glj;": '\U00002AA4', "gnE;": '\U00002269', "gnap;": '\U00002A8A', "gnapprox;": '\U00002A8A', "gne;": '\U00002A88', "gneq;": '\U00002A88', "gneqq;": '\U00002269', "gnsim;": '\U000022E7', "gopf;": '\U0001D558', "grave;": '\U00000060', "gscr;": '\U0000210A', "gsim;": '\U00002273', "gsime;": '\U00002A8E', "gsiml;": '\U00002A90', "gt;": '\U0000003E', "gtcc;": '\U00002AA7', "gtcir;": '\U00002A7A', "gtdot;": '\U000022D7', "gtlPar;": '\U00002995', "gtquest;": '\U00002A7C', "gtrapprox;": '\U00002A86', "gtrarr;": '\U00002978', "gtrdot;": '\U000022D7', "gtreqless;": '\U000022DB', "gtreqqless;": '\U00002A8C', "gtrless;": '\U00002277', "gtrsim;": '\U00002273', "hArr;": '\U000021D4', "hairsp;": '\U0000200A', "half;": '\U000000BD', "hamilt;": '\U0000210B', "hardcy;": '\U0000044A', "harr;": '\U00002194', "harrcir;": '\U00002948', "harrw;": '\U000021AD', "hbar;": '\U0000210F', "hcirc;": '\U00000125', "hearts;": '\U00002665', "heartsuit;": '\U00002665', "hellip;": '\U00002026', "hercon;": '\U000022B9', "hfr;": '\U0001D525', "hksearow;": '\U00002925', "hkswarow;": '\U00002926', "hoarr;": '\U000021FF', "homtht;": '\U0000223B', "hookleftarrow;": '\U000021A9', "hookrightarrow;": '\U000021AA', "hopf;": '\U0001D559', "horbar;": '\U00002015', "hscr;": '\U0001D4BD', "hslash;": '\U0000210F', "hstrok;": '\U00000127', "hybull;": '\U00002043', "hyphen;": '\U00002010', "iacute;": '\U000000ED', "ic;": '\U00002063', "icirc;": '\U000000EE', "icy;": '\U00000438', "iecy;": '\U00000435', "iexcl;": '\U000000A1', "iff;": '\U000021D4', "ifr;": '\U0001D526', "igrave;": '\U000000EC', "ii;": '\U00002148', "iiiint;": '\U00002A0C', "iiint;": '\U0000222D', "iinfin;": '\U000029DC', "iiota;": '\U00002129', "ijlig;": '\U00000133', "imacr;": '\U0000012B', "image;": '\U00002111', "imagline;": '\U00002110', "imagpart;": '\U00002111', "imath;": '\U00000131', "imof;": '\U000022B7', "imped;": '\U000001B5', "in;": '\U00002208', "incare;": '\U00002105', "infin;": '\U0000221E', "infintie;": 
'\U000029DD', "inodot;": '\U00000131', "int;": '\U0000222B', "intcal;": '\U000022BA', "integers;": '\U00002124', "intercal;": '\U000022BA', "intlarhk;": '\U00002A17', "intprod;": '\U00002A3C', "iocy;": '\U00000451', "iogon;": '\U0000012F', "iopf;": '\U0001D55A', "iota;": '\U000003B9', "iprod;": '\U00002A3C', "iquest;": '\U000000BF', "iscr;": '\U0001D4BE', "isin;": '\U00002208', "isinE;": '\U000022F9', "isindot;": '\U000022F5', "isins;": '\U000022F4', "isinsv;": '\U000022F3', "isinv;": '\U00002208', "it;": '\U00002062', "itilde;": '\U00000129', "iukcy;": '\U00000456', "iuml;": '\U000000EF', "jcirc;": '\U00000135', "jcy;": '\U00000439', "jfr;": '\U0001D527', "jmath;": '\U00000237', "jopf;": '\U0001D55B', "jscr;": '\U0001D4BF', "jsercy;": '\U00000458', "jukcy;": '\U00000454', "kappa;": '\U000003BA', "kappav;": '\U000003F0', "kcedil;": '\U00000137', "kcy;": '\U0000043A', "kfr;": '\U0001D528', "kgreen;": '\U00000138', "khcy;": '\U00000445', "kjcy;": '\U0000045C', "kopf;": '\U0001D55C', "kscr;": '\U0001D4C0', "lAarr;": '\U000021DA', "lArr;": '\U000021D0', "lAtail;": '\U0000291B', "lBarr;": '\U0000290E', "lE;": '\U00002266', "lEg;": '\U00002A8B', "lHar;": '\U00002962', "lacute;": '\U0000013A', "laemptyv;": '\U000029B4', "lagran;": '\U00002112', "lambda;": '\U000003BB', "lang;": '\U000027E8', "langd;": '\U00002991', "langle;": '\U000027E8', "lap;": '\U00002A85', "laquo;": '\U000000AB', "larr;": '\U00002190', "larrb;": '\U000021E4', "larrbfs;": '\U0000291F', "larrfs;": '\U0000291D', "larrhk;": '\U000021A9', "larrlp;": '\U000021AB', "larrpl;": '\U00002939', "larrsim;": '\U00002973', "larrtl;": '\U000021A2', "lat;": '\U00002AAB', "latail;": '\U00002919', "late;": '\U00002AAD', "lbarr;": '\U0000290C', "lbbrk;": '\U00002772', "lbrace;": '\U0000007B', "lbrack;": '\U0000005B', "lbrke;": '\U0000298B', "lbrksld;": '\U0000298F', "lbrkslu;": '\U0000298D', "lcaron;": '\U0000013E', "lcedil;": '\U0000013C', "lceil;": '\U00002308', "lcub;": '\U0000007B', "lcy;": '\U0000043B', "ldca;": 
'\U00002936', "ldquo;": '\U0000201C', "ldquor;": '\U0000201E', "ldrdhar;": '\U00002967', "ldrushar;": '\U0000294B', "ldsh;": '\U000021B2', "le;": '\U00002264', "leftarrow;": '\U00002190', "leftarrowtail;": '\U000021A2', "leftharpoondown;": '\U000021BD', "leftharpoonup;": '\U000021BC', "leftleftarrows;": '\U000021C7', "leftrightarrow;": '\U00002194', "leftrightarrows;": '\U000021C6', "leftrightharpoons;": '\U000021CB', "leftrightsquigarrow;": '\U000021AD', "leftthreetimes;": '\U000022CB', "leg;": '\U000022DA', "leq;": '\U00002264', "leqq;": '\U00002266', "leqslant;": '\U00002A7D', "les;": '\U00002A7D', "lescc;": '\U00002AA8', "lesdot;": '\U00002A7F', "lesdoto;": '\U00002A81', "lesdotor;": '\U00002A83', "lesges;": '\U00002A93', "lessapprox;": '\U00002A85', "lessdot;": '\U000022D6', "lesseqgtr;": '\U000022DA', "lesseqqgtr;": '\U00002A8B', "lessgtr;": '\U00002276', "lesssim;": '\U00002272', "lfisht;": '\U0000297C', "lfloor;": '\U0000230A', "lfr;": '\U0001D529', "lg;": '\U00002276', "lgE;": '\U00002A91', "lhard;": '\U000021BD', "lharu;": '\U000021BC', "lharul;": '\U0000296A', "lhblk;": '\U00002584', "ljcy;": '\U00000459', "ll;": '\U0000226A', "llarr;": '\U000021C7', "llcorner;": '\U0000231E', "llhard;": '\U0000296B', "lltri;": '\U000025FA', "lmidot;": '\U00000140', "lmoust;": '\U000023B0', "lmoustache;": '\U000023B0', "lnE;": '\U00002268', "lnap;": '\U00002A89', "lnapprox;": '\U00002A89', "lne;": '\U00002A87', "lneq;": '\U00002A87', "lneqq;": '\U00002268', "lnsim;": '\U000022E6', "loang;": '\U000027EC', "loarr;": '\U000021FD', "lobrk;": '\U000027E6', "longleftarrow;": '\U000027F5', "longleftrightarrow;": '\U000027F7', "longmapsto;": '\U000027FC', "longrightarrow;": '\U000027F6', "looparrowleft;": '\U000021AB', "looparrowright;": '\U000021AC', "lopar;": '\U00002985', "lopf;": '\U0001D55D', "loplus;": '\U00002A2D', "lotimes;": '\U00002A34', "lowast;": '\U00002217', "lowbar;": '\U0000005F', "loz;": '\U000025CA', "lozenge;": '\U000025CA', "lozf;": '\U000029EB', "lpar;": 
'\U00000028', "lparlt;": '\U00002993', "lrarr;": '\U000021C6', "lrcorner;": '\U0000231F', "lrhar;": '\U000021CB', "lrhard;": '\U0000296D', "lrm;": '\U0000200E', "lrtri;": '\U000022BF', "lsaquo;": '\U00002039', "lscr;": '\U0001D4C1', "lsh;": '\U000021B0', "lsim;": '\U00002272', "lsime;": '\U00002A8D', "lsimg;": '\U00002A8F', "lsqb;": '\U0000005B', "lsquo;": '\U00002018', "lsquor;": '\U0000201A', "lstrok;": '\U00000142', "lt;": '\U0000003C', "ltcc;": '\U00002AA6', "ltcir;": '\U00002A79', "ltdot;": '\U000022D6', "lthree;": '\U000022CB', "ltimes;": '\U000022C9', "ltlarr;": '\U00002976', "ltquest;": '\U00002A7B', "ltrPar;": '\U00002996', "ltri;": '\U000025C3', "ltrie;": '\U000022B4', "ltrif;": '\U000025C2', "lurdshar;": '\U0000294A', "luruhar;": '\U00002966', "mDDot;": '\U0000223A', "macr;": '\U000000AF', "male;": '\U00002642', "malt;": '\U00002720', "maltese;": '\U00002720', "map;": '\U000021A6', "mapsto;": '\U000021A6', "mapstodown;": '\U000021A7', "mapstoleft;": '\U000021A4', "mapstoup;": '\U000021A5', "marker;": '\U000025AE', "mcomma;": '\U00002A29', "mcy;": '\U0000043C', "mdash;": '\U00002014', "measuredangle;": '\U00002221', "mfr;": '\U0001D52A', "mho;": '\U00002127', "micro;": '\U000000B5', "mid;": '\U00002223', "midast;": '\U0000002A', "midcir;": '\U00002AF0', "middot;": '\U000000B7', "minus;": '\U00002212', "minusb;": '\U0000229F', "minusd;": '\U00002238', "minusdu;": '\U00002A2A', "mlcp;": '\U00002ADB', "mldr;": '\U00002026', "mnplus;": '\U00002213', "models;": '\U000022A7', "mopf;": '\U0001D55E', "mp;": '\U00002213', "mscr;": '\U0001D4C2', "mstpos;": '\U0000223E', "mu;": '\U000003BC', "multimap;": '\U000022B8', "mumap;": '\U000022B8', "nLeftarrow;": '\U000021CD', "nLeftrightarrow;": '\U000021CE', "nRightarrow;": '\U000021CF', "nVDash;": '\U000022AF', "nVdash;": '\U000022AE', "nabla;": '\U00002207', "nacute;": '\U00000144', "nap;": '\U00002249', "napos;": '\U00000149', "napprox;": '\U00002249', "natur;": '\U0000266E', "natural;": '\U0000266E', "naturals;": 
'\U00002115', "nbsp;": '\U000000A0', "ncap;": '\U00002A43', "ncaron;": '\U00000148', "ncedil;": '\U00000146', "ncong;": '\U00002247', "ncup;": '\U00002A42', "ncy;": '\U0000043D', "ndash;": '\U00002013', "ne;": '\U00002260', "neArr;": '\U000021D7', "nearhk;": '\U00002924', "nearr;": '\U00002197', "nearrow;": '\U00002197', "nequiv;": '\U00002262', "nesear;": '\U00002928', "nexist;": '\U00002204', "nexists;": '\U00002204', "nfr;": '\U0001D52B', "nge;": '\U00002271', "ngeq;": '\U00002271', "ngsim;": '\U00002275', "ngt;": '\U0000226F', "ngtr;": '\U0000226F', "nhArr;": '\U000021CE', "nharr;": '\U000021AE', "nhpar;": '\U00002AF2', "ni;": '\U0000220B', "nis;": '\U000022FC', "nisd;": '\U000022FA', "niv;": '\U0000220B', "njcy;": '\U0000045A', "nlArr;": '\U000021CD', "nlarr;": '\U0000219A', "nldr;": '\U00002025', "nle;": '\U00002270', "nleftarrow;": '\U0000219A', "nleftrightarrow;": '\U000021AE', "nleq;": '\U00002270', "nless;": '\U0000226E', "nlsim;": '\U00002274', "nlt;": '\U0000226E', "nltri;": '\U000022EA', "nltrie;": '\U000022EC', "nmid;": '\U00002224', "nopf;": '\U0001D55F', "not;": '\U000000AC', "notin;": '\U00002209', "notinva;": '\U00002209', "notinvb;": '\U000022F7', "notinvc;": '\U000022F6', "notni;": '\U0000220C', "notniva;": '\U0000220C', "notnivb;": '\U000022FE', "notnivc;": '\U000022FD', "npar;": '\U00002226', "nparallel;": '\U00002226', "npolint;": '\U00002A14', "npr;": '\U00002280', "nprcue;": '\U000022E0', "nprec;": '\U00002280', "nrArr;": '\U000021CF', "nrarr;": '\U0000219B', "nrightarrow;": '\U0000219B', "nrtri;": '\U000022EB', "nrtrie;": '\U000022ED', "nsc;": '\U00002281', "nsccue;": '\U000022E1', "nscr;": '\U0001D4C3', "nshortmid;": '\U00002224', "nshortparallel;": '\U00002226', "nsim;": '\U00002241', "nsime;": '\U00002244', "nsimeq;": '\U00002244', "nsmid;": '\U00002224', "nspar;": '\U00002226', "nsqsube;": '\U000022E2', "nsqsupe;": '\U000022E3', "nsub;": '\U00002284', "nsube;": '\U00002288', "nsubseteq;": '\U00002288', "nsucc;": '\U00002281', "nsup;": 
'\U00002285', "nsupe;": '\U00002289', "nsupseteq;": '\U00002289', "ntgl;": '\U00002279', "ntilde;": '\U000000F1', "ntlg;": '\U00002278', "ntriangleleft;": '\U000022EA', "ntrianglelefteq;": '\U000022EC', "ntriangleright;": '\U000022EB', "ntrianglerighteq;": '\U000022ED', "nu;": '\U000003BD', "num;": '\U00000023', "numero;": '\U00002116', "numsp;": '\U00002007', "nvDash;": '\U000022AD', "nvHarr;": '\U00002904', "nvdash;": '\U000022AC', "nvinfin;": '\U000029DE', "nvlArr;": '\U00002902', "nvrArr;": '\U00002903', "nwArr;": '\U000021D6', "nwarhk;": '\U00002923', "nwarr;": '\U00002196', "nwarrow;": '\U00002196', "nwnear;": '\U00002927', "oS;": '\U000024C8', "oacute;": '\U000000F3', "oast;": '\U0000229B', "ocir;": '\U0000229A', "ocirc;": '\U000000F4', "ocy;": '\U0000043E', "odash;": '\U0000229D', "odblac;": '\U00000151', "odiv;": '\U00002A38', "odot;": '\U00002299', "odsold;": '\U000029BC', "oelig;": '\U00000153', "ofcir;": '\U000029BF', "ofr;": '\U0001D52C', "ogon;": '\U000002DB', "ograve;": '\U000000F2', "ogt;": '\U000029C1', "ohbar;": '\U000029B5', "ohm;": '\U000003A9', "oint;": '\U0000222E', "olarr;": '\U000021BA', "olcir;": '\U000029BE', "olcross;": '\U000029BB', "oline;": '\U0000203E', "olt;": '\U000029C0', "omacr;": '\U0000014D', "omega;": '\U000003C9', "omicron;": '\U000003BF', "omid;": '\U000029B6', "ominus;": '\U00002296', "oopf;": '\U0001D560', "opar;": '\U000029B7', "operp;": '\U000029B9', "oplus;": '\U00002295', "or;": '\U00002228', "orarr;": '\U000021BB', "ord;": '\U00002A5D', "order;": '\U00002134', "orderof;": '\U00002134', "ordf;": '\U000000AA', "ordm;": '\U000000BA', "origof;": '\U000022B6', "oror;": '\U00002A56', "orslope;": '\U00002A57', "orv;": '\U00002A5B', "oscr;": '\U00002134', "oslash;": '\U000000F8', "osol;": '\U00002298', "otilde;": '\U000000F5', "otimes;": '\U00002297', "otimesas;": '\U00002A36', "ouml;": '\U000000F6', "ovbar;": '\U0000233D', "par;": '\U00002225', "para;": '\U000000B6', "parallel;": '\U00002225', "parsim;": '\U00002AF3', 
"parsl;": '\U00002AFD', "part;": '\U00002202', "pcy;": '\U0000043F', "percnt;": '\U00000025', "period;": '\U0000002E', "permil;": '\U00002030', "perp;": '\U000022A5', "pertenk;": '\U00002031', "pfr;": '\U0001D52D', "phi;": '\U000003C6', "phiv;": '\U000003D5', "phmmat;": '\U00002133', "phone;": '\U0000260E', "pi;": '\U000003C0', "pitchfork;": '\U000022D4', "piv;": '\U000003D6', "planck;": '\U0000210F', "planckh;": '\U0000210E', "plankv;": '\U0000210F', "plus;": '\U0000002B', "plusacir;": '\U00002A23', "plusb;": '\U0000229E', "pluscir;": '\U00002A22', "plusdo;": '\U00002214', "plusdu;": '\U00002A25', "pluse;": '\U00002A72', "plusmn;": '\U000000B1', "plussim;": '\U00002A26', "plustwo;": '\U00002A27', "pm;": '\U000000B1', "pointint;": '\U00002A15', "popf;": '\U0001D561', "pound;": '\U000000A3', "pr;": '\U0000227A', "prE;": '\U00002AB3', "prap;": '\U00002AB7', "prcue;": '\U0000227C', "pre;": '\U00002AAF', "prec;": '\U0000227A', "precapprox;": '\U00002AB7', "preccurlyeq;": '\U0000227C', "preceq;": '\U00002AAF', "precnapprox;": '\U00002AB9', "precneqq;": '\U00002AB5', "precnsim;": '\U000022E8', "precsim;": '\U0000227E', "prime;": '\U00002032', "primes;": '\U00002119', "prnE;": '\U00002AB5', "prnap;": '\U00002AB9', "prnsim;": '\U000022E8', "prod;": '\U0000220F', "profalar;": '\U0000232E', "profline;": '\U00002312', "profsurf;": '\U00002313', "prop;": '\U0000221D', "propto;": '\U0000221D', "prsim;": '\U0000227E', "prurel;": '\U000022B0', "pscr;": '\U0001D4C5', "psi;": '\U000003C8', "puncsp;": '\U00002008', "qfr;": '\U0001D52E', "qint;": '\U00002A0C', "qopf;": '\U0001D562', "qprime;": '\U00002057', "qscr;": '\U0001D4C6', "quaternions;": '\U0000210D', "quatint;": '\U00002A16', "quest;": '\U0000003F', "questeq;": '\U0000225F', "quot;": '\U00000022', "rAarr;": '\U000021DB', "rArr;": '\U000021D2', "rAtail;": '\U0000291C', "rBarr;": '\U0000290F', "rHar;": '\U00002964', "racute;": '\U00000155', "radic;": '\U0000221A', "raemptyv;": '\U000029B3', "rang;": '\U000027E9', "rangd;": 
'\U00002992', "range;": '\U000029A5', "rangle;": '\U000027E9', "raquo;": '\U000000BB', "rarr;": '\U00002192', "rarrap;": '\U00002975', "rarrb;": '\U000021E5', "rarrbfs;": '\U00002920', "rarrc;": '\U00002933', "rarrfs;": '\U0000291E', "rarrhk;": '\U000021AA', "rarrlp;": '\U000021AC', "rarrpl;": '\U00002945', "rarrsim;": '\U00002974', "rarrtl;": '\U000021A3', "rarrw;": '\U0000219D', "ratail;": '\U0000291A', "ratio;": '\U00002236', "rationals;": '\U0000211A', "rbarr;": '\U0000290D', "rbbrk;": '\U00002773', "rbrace;": '\U0000007D', "rbrack;": '\U0000005D', "rbrke;": '\U0000298C', "rbrksld;": '\U0000298E', "rbrkslu;": '\U00002990', "rcaron;": '\U00000159', "rcedil;": '\U00000157', "rceil;": '\U00002309', "rcub;": '\U0000007D', "rcy;": '\U00000440', "rdca;": '\U00002937', "rdldhar;": '\U00002969', "rdquo;": '\U0000201D', "rdquor;": '\U0000201D', "rdsh;": '\U000021B3', "real;": '\U0000211C', "realine;": '\U0000211B', "realpart;": '\U0000211C', "reals;": '\U0000211D', "rect;": '\U000025AD', "reg;": '\U000000AE', "rfisht;": '\U0000297D', "rfloor;": '\U0000230B', "rfr;": '\U0001D52F', "rhard;": '\U000021C1', "rharu;": '\U000021C0', "rharul;": '\U0000296C', "rho;": '\U000003C1', "rhov;": '\U000003F1', "rightarrow;": '\U00002192', "rightarrowtail;": '\U000021A3', "rightharpoondown;": '\U000021C1', "rightharpoonup;": '\U000021C0', "rightleftarrows;": '\U000021C4', "rightleftharpoons;": '\U000021CC', "rightrightarrows;": '\U000021C9', "rightsquigarrow;": '\U0000219D', "rightthreetimes;": '\U000022CC', "ring;": '\U000002DA', "risingdotseq;": '\U00002253', "rlarr;": '\U000021C4', "rlhar;": '\U000021CC', "rlm;": '\U0000200F', "rmoust;": '\U000023B1', "rmoustache;": '\U000023B1', "rnmid;": '\U00002AEE', "roang;": '\U000027ED', "roarr;": '\U000021FE', "robrk;": '\U000027E7', "ropar;": '\U00002986', "ropf;": '\U0001D563', "roplus;": '\U00002A2E', "rotimes;": '\U00002A35', "rpar;": '\U00000029', "rpargt;": '\U00002994', "rppolint;": '\U00002A12', "rrarr;": '\U000021C9', "rsaquo;": 
'\U0000203A', "rscr;": '\U0001D4C7', "rsh;": '\U000021B1', "rsqb;": '\U0000005D', "rsquo;": '\U00002019', "rsquor;": '\U00002019', "rthree;": '\U000022CC', "rtimes;": '\U000022CA', "rtri;": '\U000025B9', "rtrie;": '\U000022B5', "rtrif;": '\U000025B8', "rtriltri;": '\U000029CE', "ruluhar;": '\U00002968', "rx;": '\U0000211E', "sacute;": '\U0000015B', "sbquo;": '\U0000201A', "sc;": '\U0000227B', "scE;": '\U00002AB4', "scap;": '\U00002AB8', "scaron;": '\U00000161', "sccue;": '\U0000227D', "sce;": '\U00002AB0', "scedil;": '\U0000015F', "scirc;": '\U0000015D', "scnE;": '\U00002AB6', "scnap;": '\U00002ABA', "scnsim;": '\U000022E9', "scpolint;": '\U00002A13', "scsim;": '\U0000227F', "scy;": '\U00000441', "sdot;": '\U000022C5', "sdotb;": '\U000022A1', "sdote;": '\U00002A66', "seArr;": '\U000021D8', "searhk;": '\U00002925', "searr;": '\U00002198', "searrow;": '\U00002198', "sect;": '\U000000A7', "semi;": '\U0000003B', "seswar;": '\U00002929', "setminus;": '\U00002216', "setmn;": '\U00002216', "sext;": '\U00002736', "sfr;": '\U0001D530', "sfrown;": '\U00002322', "sharp;": '\U0000266F', "shchcy;": '\U00000449', "shcy;": '\U00000448', "shortmid;": '\U00002223', "shortparallel;": '\U00002225', "shy;": '\U000000AD', "sigma;": '\U000003C3', "sigmaf;": '\U000003C2', "sigmav;": '\U000003C2', "sim;": '\U0000223C', "simdot;": '\U00002A6A', "sime;": '\U00002243', "simeq;": '\U00002243', "simg;": '\U00002A9E', "simgE;": '\U00002AA0', "siml;": '\U00002A9D', "simlE;": '\U00002A9F', "simne;": '\U00002246', "simplus;": '\U00002A24', "simrarr;": '\U00002972', "slarr;": '\U00002190', "smallsetminus;": '\U00002216', "smashp;": '\U00002A33', "smeparsl;": '\U000029E4', "smid;": '\U00002223', "smile;": '\U00002323', "smt;": '\U00002AAA', "smte;": '\U00002AAC', "softcy;": '\U0000044C', "sol;": '\U0000002F', "solb;": '\U000029C4', "solbar;": '\U0000233F', "sopf;": '\U0001D564', "spades;": '\U00002660', "spadesuit;": '\U00002660', "spar;": '\U00002225', "sqcap;": '\U00002293', "sqcup;": 
'\U00002294', "sqsub;": '\U0000228F', "sqsube;": '\U00002291', "sqsubset;": '\U0000228F', "sqsubseteq;": '\U00002291', "sqsup;": '\U00002290', "sqsupe;": '\U00002292', "sqsupset;": '\U00002290', "sqsupseteq;": '\U00002292', "squ;": '\U000025A1', "square;": '\U000025A1', "squarf;": '\U000025AA', "squf;": '\U000025AA', "srarr;": '\U00002192', "sscr;": '\U0001D4C8', "ssetmn;": '\U00002216', "ssmile;": '\U00002323', "sstarf;": '\U000022C6', "star;": '\U00002606', "starf;": '\U00002605', "straightepsilon;": '\U000003F5', "straightphi;": '\U000003D5', "strns;": '\U000000AF', "sub;": '\U00002282', "subE;": '\U00002AC5', "subdot;": '\U00002ABD', "sube;": '\U00002286', "subedot;": '\U00002AC3', "submult;": '\U00002AC1', "subnE;": '\U00002ACB', "subne;": '\U0000228A', "subplus;": '\U00002ABF', "subrarr;": '\U00002979', "subset;": '\U00002282', "subseteq;": '\U00002286', "subseteqq;": '\U00002AC5', "subsetneq;": '\U0000228A', "subsetneqq;": '\U00002ACB', "subsim;": '\U00002AC7', "subsub;": '\U00002AD5', "subsup;": '\U00002AD3', "succ;": '\U0000227B', "succapprox;": '\U00002AB8', "succcurlyeq;": '\U0000227D', "succeq;": '\U00002AB0', "succnapprox;": '\U00002ABA', "succneqq;": '\U00002AB6', "succnsim;": '\U000022E9', "succsim;": '\U0000227F', "sum;": '\U00002211', "sung;": '\U0000266A', "sup;": '\U00002283', "sup1;": '\U000000B9', "sup2;": '\U000000B2', "sup3;": '\U000000B3', "supE;": '\U00002AC6', "supdot;": '\U00002ABE', "supdsub;": '\U00002AD8', "supe;": '\U00002287', "supedot;": '\U00002AC4', "suphsol;": '\U000027C9', "suphsub;": '\U00002AD7', "suplarr;": '\U0000297B', "supmult;": '\U00002AC2', "supnE;": '\U00002ACC', "supne;": '\U0000228B', "supplus;": '\U00002AC0', "supset;": '\U00002283', "supseteq;": '\U00002287', "supseteqq;": '\U00002AC6', "supsetneq;": '\U0000228B', "supsetneqq;": '\U00002ACC', "supsim;": '\U00002AC8', "supsub;": '\U00002AD4', "supsup;": '\U00002AD6', "swArr;": '\U000021D9', "swarhk;": '\U00002926', "swarr;": '\U00002199', "swarrow;": '\U00002199', 
"swnwar;": '\U0000292A', "szlig;": '\U000000DF', "target;": '\U00002316', "tau;": '\U000003C4', "tbrk;": '\U000023B4', "tcaron;": '\U00000165', "tcedil;": '\U00000163', "tcy;": '\U00000442', "tdot;": '\U000020DB', "telrec;": '\U00002315', "tfr;": '\U0001D531', "there4;": '\U00002234', "therefore;": '\U00002234', "theta;": '\U000003B8', "thetasym;": '\U000003D1', "thetav;": '\U000003D1', "thickapprox;": '\U00002248', "thicksim;": '\U0000223C', "thinsp;": '\U00002009', "thkap;": '\U00002248', "thksim;": '\U0000223C', "thorn;": '\U000000FE', "tilde;": '\U000002DC', "times;": '\U000000D7', "timesb;": '\U000022A0', "timesbar;": '\U00002A31', "timesd;": '\U00002A30', "tint;": '\U0000222D', "toea;": '\U00002928', "top;": '\U000022A4', "topbot;": '\U00002336', "topcir;": '\U00002AF1', "topf;": '\U0001D565', "topfork;": '\U00002ADA', "tosa;": '\U00002929', "tprime;": '\U00002034', "trade;": '\U00002122', "triangle;": '\U000025B5', "triangledown;": '\U000025BF', "triangleleft;": '\U000025C3', "trianglelefteq;": '\U000022B4', "triangleq;": '\U0000225C', "triangleright;": '\U000025B9', "trianglerighteq;": '\U000022B5', "tridot;": '\U000025EC', "trie;": '\U0000225C', "triminus;": '\U00002A3A', "triplus;": '\U00002A39', "trisb;": '\U000029CD', "tritime;": '\U00002A3B', "trpezium;": '\U000023E2', "tscr;": '\U0001D4C9', "tscy;": '\U00000446', "tshcy;": '\U0000045B', "tstrok;": '\U00000167', "twixt;": '\U0000226C', "twoheadleftarrow;": '\U0000219E', "twoheadrightarrow;": '\U000021A0', "uArr;": '\U000021D1', "uHar;": '\U00002963', "uacute;": '\U000000FA', "uarr;": '\U00002191', "ubrcy;": '\U0000045E', "ubreve;": '\U0000016D', "ucirc;": '\U000000FB', "ucy;": '\U00000443', "udarr;": '\U000021C5', "udblac;": '\U00000171', "udhar;": '\U0000296E', "ufisht;": '\U0000297E', "ufr;": '\U0001D532', "ugrave;": '\U000000F9', "uharl;": '\U000021BF', "uharr;": '\U000021BE', "uhblk;": '\U00002580', "ulcorn;": '\U0000231C', "ulcorner;": '\U0000231C', "ulcrop;": '\U0000230F', "ultri;": '\U000025F8', 
"umacr;": '\U0000016B', "uml;": '\U000000A8', "uogon;": '\U00000173', "uopf;": '\U0001D566', "uparrow;": '\U00002191', "updownarrow;": '\U00002195', "upharpoonleft;": '\U000021BF', "upharpoonright;": '\U000021BE', "uplus;": '\U0000228E', "upsi;": '\U000003C5', "upsih;": '\U000003D2', "upsilon;": '\U000003C5', "upuparrows;": '\U000021C8', "urcorn;": '\U0000231D', "urcorner;": '\U0000231D', "urcrop;": '\U0000230E', "uring;": '\U0000016F', "urtri;": '\U000025F9', "uscr;": '\U0001D4CA', "utdot;": '\U000022F0', "utilde;": '\U00000169', "utri;": '\U000025B5', "utrif;": '\U000025B4', "uuarr;": '\U000021C8', "uuml;": '\U000000FC', "uwangle;": '\U000029A7', "vArr;": '\U000021D5', "vBar;": '\U00002AE8', "vBarv;": '\U00002AE9', "vDash;": '\U000022A8', "vangrt;": '\U0000299C', "varepsilon;": '\U000003F5', "varkappa;": '\U000003F0', "varnothing;": '\U00002205', "varphi;": '\U000003D5', "varpi;": '\U000003D6', "varpropto;": '\U0000221D', "varr;": '\U00002195', "varrho;": '\U000003F1', "varsigma;": '\U000003C2', "vartheta;": '\U000003D1', "vartriangleleft;": '\U000022B2', "vartriangleright;": '\U000022B3', "vcy;": '\U00000432', "vdash;": '\U000022A2', "vee;": '\U00002228', "veebar;": '\U000022BB', "veeeq;": '\U0000225A', "vellip;": '\U000022EE', "verbar;": '\U0000007C', "vert;": '\U0000007C', "vfr;": '\U0001D533', "vltri;": '\U000022B2', "vopf;": '\U0001D567', "vprop;": '\U0000221D', "vrtri;": '\U000022B3', "vscr;": '\U0001D4CB', "vzigzag;": '\U0000299A', "wcirc;": '\U00000175', "wedbar;": '\U00002A5F', "wedge;": '\U00002227', "wedgeq;": '\U00002259', "weierp;": '\U00002118', "wfr;": '\U0001D534', "wopf;": '\U0001D568', "wp;": '\U00002118', "wr;": '\U00002240', "wreath;": '\U00002240', "wscr;": '\U0001D4CC', "xcap;": '\U000022C2', "xcirc;": '\U000025EF', "xcup;": '\U000022C3', "xdtri;": '\U000025BD', "xfr;": '\U0001D535', "xhArr;": '\U000027FA', "xharr;": '\U000027F7', "xi;": '\U000003BE', "xlArr;": '\U000027F8', "xlarr;": '\U000027F5', "xmap;": '\U000027FC', "xnis;": 
'\U000022FB', "xodot;": '\U00002A00', "xopf;": '\U0001D569', "xoplus;": '\U00002A01', "xotime;": '\U00002A02', "xrArr;": '\U000027F9', "xrarr;": '\U000027F6', "xscr;": '\U0001D4CD', "xsqcup;": '\U00002A06', "xuplus;": '\U00002A04', "xutri;": '\U000025B3', "xvee;": '\U000022C1', "xwedge;": '\U000022C0', "yacute;": '\U000000FD', "yacy;": '\U0000044F', "ycirc;": '\U00000177', "ycy;": '\U0000044B', "yen;": '\U000000A5', "yfr;": '\U0001D536', "yicy;": '\U00000457', "yopf;": '\U0001D56A', "yscr;": '\U0001D4CE', "yucy;": '\U0000044E', "yuml;": '\U000000FF', "zacute;": '\U0000017A', "zcaron;": '\U0000017E', "zcy;": '\U00000437', "zdot;": '\U0000017C', "zeetrf;": '\U00002128', "zeta;": '\U000003B6', "zfr;": '\U0001D537', "zhcy;": '\U00000436', "zigrarr;": '\U000021DD', "zopf;": '\U0001D56B', "zscr;": '\U0001D4CF', "zwj;": '\U0000200D', "zwnj;": '\U0000200C', "AElig": '\U000000C6', "AMP": '\U00000026', "Aacute": '\U000000C1', "Acirc": '\U000000C2', "Agrave": '\U000000C0', "Aring": '\U000000C5', "Atilde": '\U000000C3', "Auml": '\U000000C4', "COPY": '\U000000A9', "Ccedil": '\U000000C7', "ETH": '\U000000D0', "Eacute": '\U000000C9', "Ecirc": '\U000000CA', "Egrave": '\U000000C8', "Euml": '\U000000CB', "GT": '\U0000003E', "Iacute": '\U000000CD', "Icirc": '\U000000CE', "Igrave": '\U000000CC', "Iuml": '\U000000CF', "LT": '\U0000003C', "Ntilde": '\U000000D1', "Oacute": '\U000000D3', "Ocirc": '\U000000D4', "Ograve": '\U000000D2', "Oslash": '\U000000D8', "Otilde": '\U000000D5', "Ouml": '\U000000D6', "QUOT": '\U00000022', "REG": '\U000000AE', "THORN": '\U000000DE', "Uacute": '\U000000DA', "Ucirc": '\U000000DB', "Ugrave": '\U000000D9', "Uuml": '\U000000DC', "Yacute": '\U000000DD', "aacute": '\U000000E1', "acirc": '\U000000E2', "acute": '\U000000B4', "aelig": '\U000000E6', "agrave": '\U000000E0', "amp": '\U00000026', "aring": '\U000000E5', "atilde": '\U000000E3', "auml": '\U000000E4', "brvbar": '\U000000A6', "ccedil": '\U000000E7', "cedil": '\U000000B8', "cent": '\U000000A2', "copy": 
'\U000000A9', "curren": '\U000000A4', "deg": '\U000000B0', "divide": '\U000000F7', "eacute": '\U000000E9', "ecirc": '\U000000EA', "egrave": '\U000000E8', "eth": '\U000000F0', "euml": '\U000000EB', "frac12": '\U000000BD', "frac14": '\U000000BC', "frac34": '\U000000BE', "gt": '\U0000003E', "iacute": '\U000000ED', "icirc": '\U000000EE', "iexcl": '\U000000A1', "igrave": '\U000000EC', "iquest": '\U000000BF', "iuml": '\U000000EF', "laquo": '\U000000AB', "lt": '\U0000003C', "macr": '\U000000AF', "micro": '\U000000B5', "middot": '\U000000B7', "nbsp": '\U000000A0', "not": '\U000000AC', "ntilde": '\U000000F1', "oacute": '\U000000F3', "ocirc": '\U000000F4', "ograve": '\U000000F2', "ordf": '\U000000AA', "ordm": '\U000000BA', "oslash": '\U000000F8', "otilde": '\U000000F5', "ouml": '\U000000F6', "para": '\U000000B6', "plusmn": '\U000000B1', "pound": '\U000000A3', "quot": '\U00000022', "raquo": '\U000000BB', "reg": '\U000000AE', "sect": '\U000000A7', "shy": '\U000000AD', "sup1": '\U000000B9', "sup2": '\U000000B2', "sup3": '\U000000B3', "szlig": '\U000000DF', "thorn": '\U000000FE', "times": '\U000000D7', "uacute": '\U000000FA', "ucirc": '\U000000FB', "ugrave": '\U000000F9', "uml": '\U000000A8', "uuml": '\U000000FC', "yacute": '\U000000FD', "yen": '\U000000A5', "yuml": '\U000000FF', } entity2 = map[string][2]rune{ // TODO(nigeltao): Handle replacements that are wider than their names. 
// "nLt;": {'\u226A', '\u20D2'}, // "nGt;": {'\u226B', '\u20D2'}, "NotEqualTilde;": {'\u2242', '\u0338'}, "NotGreaterFullEqual;": {'\u2267', '\u0338'}, "NotGreaterGreater;": {'\u226B', '\u0338'}, "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, "NotHumpDownHump;": {'\u224E', '\u0338'}, "NotHumpEqual;": {'\u224F', '\u0338'}, "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, "NotLessLess;": {'\u226A', '\u0338'}, "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, "NotNestedLessLess;": {'\u2AA1', '\u0338'}, "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, "NotRightTriangleBar;": {'\u29D0', '\u0338'}, "NotSquareSubset;": {'\u228F', '\u0338'}, "NotSquareSuperset;": {'\u2290', '\u0338'}, "NotSubset;": {'\u2282', '\u20D2'}, "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, "NotSucceedsTilde;": {'\u227F', '\u0338'}, "NotSuperset;": {'\u2283', '\u20D2'}, "ThickSpace;": {'\u205F', '\u200A'}, "acE;": {'\u223E', '\u0333'}, "bne;": {'\u003D', '\u20E5'}, "bnequiv;": {'\u2261', '\u20E5'}, "caps;": {'\u2229', '\uFE00'}, "cups;": {'\u222A', '\uFE00'}, "fjlig;": {'\u0066', '\u006A'}, "gesl;": {'\u22DB', '\uFE00'}, "gvertneqq;": {'\u2269', '\uFE00'}, "gvnE;": {'\u2269', '\uFE00'}, "lates;": {'\u2AAD', '\uFE00'}, "lesg;": {'\u22DA', '\uFE00'}, "lvertneqq;": {'\u2268', '\uFE00'}, "lvnE;": {'\u2268', '\uFE00'}, "nGg;": {'\u22D9', '\u0338'}, "nGtv;": {'\u226B', '\u0338'}, "nLl;": {'\u22D8', '\u0338'}, "nLtv;": {'\u226A', '\u0338'}, "nang;": {'\u2220', '\u20D2'}, "napE;": {'\u2A70', '\u0338'}, "napid;": {'\u224B', '\u0338'}, "nbump;": {'\u224E', '\u0338'}, "nbumpe;": {'\u224F', '\u0338'}, "ncongdot;": {'\u2A6D', '\u0338'}, "nedot;": {'\u2250', '\u0338'}, "nesim;": {'\u2242', '\u0338'}, "ngE;": {'\u2267', '\u0338'}, "ngeqq;": {'\u2267', '\u0338'}, "ngeqslant;": {'\u2A7E', '\u0338'}, "nges;": {'\u2A7E', '\u0338'}, "nlE;": {'\u2266', '\u0338'}, "nleqq;": {'\u2266', '\u0338'}, "nleqslant;": {'\u2A7D', '\u0338'}, "nles;": {'\u2A7D', '\u0338'}, "notinE;": {'\u22F9', 
'\u0338'}, "notindot;": {'\u22F5', '\u0338'}, "nparsl;": {'\u2AFD', '\u20E5'}, "npart;": {'\u2202', '\u0338'}, "npre;": {'\u2AAF', '\u0338'}, "npreceq;": {'\u2AAF', '\u0338'}, "nrarrc;": {'\u2933', '\u0338'}, "nrarrw;": {'\u219D', '\u0338'}, "nsce;": {'\u2AB0', '\u0338'}, "nsubE;": {'\u2AC5', '\u0338'}, "nsubset;": {'\u2282', '\u20D2'}, "nsubseteqq;": {'\u2AC5', '\u0338'}, "nsucceq;": {'\u2AB0', '\u0338'}, "nsupE;": {'\u2AC6', '\u0338'}, "nsupset;": {'\u2283', '\u20D2'}, "nsupseteqq;": {'\u2AC6', '\u0338'}, "nvap;": {'\u224D', '\u20D2'}, "nvge;": {'\u2265', '\u20D2'}, "nvgt;": {'\u003E', '\u20D2'}, "nvle;": {'\u2264', '\u20D2'}, "nvlt;": {'\u003C', '\u20D2'}, "nvltrie;": {'\u22B4', '\u20D2'}, "nvrtrie;": {'\u22B5', '\u20D2'}, "nvsim;": {'\u223C', '\u20D2'}, "race;": {'\u223D', '\u0331'}, "smtes;": {'\u2AAC', '\uFE00'}, "sqcaps;": {'\u2293', '\uFE00'}, "sqcups;": {'\u2294', '\uFE00'}, "varsubsetneq;": {'\u228A', '\uFE00'}, "varsubsetneqq;": {'\u2ACB', '\uFE00'}, "varsupsetneq;": {'\u228B', '\uFE00'}, "varsupsetneqq;": {'\u2ACC', '\uFE00'}, "vnsub;": {'\u2282', '\u20D2'}, "vnsup;": {'\u2283', '\u20D2'}, "vsubnE;": {'\u2ACB', '\uFE00'}, "vsubne;": {'\u228A', '\uFE00'}, "vsupnE;": {'\u2ACC', '\uFE00'}, "vsupne;": {'\u228B', '\uFE00'}, } return entity, entity2 })
go
github
https://github.com/golang/go
src/html/entity.go
# frozen_string_literal: true

require "cases/helper"

module ActiveRecord
  module Type
    # Exercises the range checks performed by the UnsignedInteger type:
    # serialized values must fit an unsigned 32-bit integer (0..4294967295).
    class UnsignedIntegerTest < ActiveRecord::TestCase
      test "unsigned int max value is in range" do
        type = UnsignedInteger.new
        assert_equal(4294967295, type.serialize(4294967295))
      end

      test "minus value is out of range" do
        type = UnsignedInteger.new
        assert_raises(ActiveModel::RangeError) { type.serialize(-1) }
      end

      test "serialize_cast_value enforces range" do
        type = UnsignedInteger.new
        # Both below-zero and above-max values must be rejected.
        [-1, 4294967296].each do |out_of_range|
          assert_raises(ActiveModel::RangeError) do
            type.serialize_cast_value(out_of_range)
          end
        end
      end
    end
  end
end
ruby
github
https://github.com/rails/rails
activerecord/test/cases/type/unsigned_integer_test.rb
# -*- coding: utf-8 -*- """ ======================== Title of Example ======================== This example <verb> <active tense> <does something>. The example uses <packages> to <do something> and <other package> to <do other thing>. Include links to referenced packages like this: `astropy.io.fits` to show the astropy.io.fits or like this `~astropy.io.fits`to show just 'fits' *By: <names>* *License: BSD* """ ############################################################################## # Make print work the same in all versions of Python, set up numpy, # matplotlib, and use a nicer set of plot parameters: import numpy as np import matplotlib.pyplot as plt from astropy.visualization import astropy_mpl_style plt.style.use(astropy_mpl_style) # uncomment if including figures: # import matplotlib.pyplot as plt # from astropy.visualization import astropy_mpl_style # plt.style.use(astropy_mpl_style) ############################################################################## # This code block is executed, although it produces no output. Lines starting # with a simple hash are code comment and get treated as part of the code # block. To include this new comment string we started the new block with a # long line of hashes. # # The sphinx-gallery parser will assume everything after this splitter and that # continues to start with a **comment hash and space** (respecting code style) # is text that has to be rendered in # html format. Keep in mind to always keep your comments always together by # comment hashes. That means to break a paragraph you still need to commend # that line break. # # In this example the next block of code produces some plotable data. Code is # executed, figure is saved and then code is presented next, followed by the # inlined figure. 
x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)
plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')

###########################################################################
# Again it is possible to continue the discussion with a new Python string.
# This time to introduce the next code block, which generates 2 separate
# figures.

plt.figure()
# Pass the colormap by name: accepted on every matplotlib version, whereas
# plt.cm.get_cmap() was deprecated in matplotlib 3.7 and removed in 3.9.
plt.imshow(z, cmap='hot')
plt.figure()
plt.imshow(z, cmap='Spectral', interpolation='none')

##########################################################################
# There's some subtle differences between rendered html rendered comment
# strings and code comment strings which I'll demonstrate below. (Some of this
# only makes sense if you look at the
# :download:`raw Python script <plot_notebook.py>`)
#
# Comments in comment blocks remain nested in the text.


def dummy():
    """Dummy function to make sure docstrings don't get rendered as text"""
    pass

# Code comments not preceded by the hash splitter are left in code blocks.

string = """
Triple-quoted string which tries to break parser but doesn't.
"""

############################################################################
# Output of the script is captured:

print('Some output from Python')

############################################################################
# Finally, I'll call ``show`` at the end just so someone running the Python
# code directly will see the plots; this is not necessary for creating the docs

plt.show()
unknown
codeparrot/codeparrot-clean
- hosts: testhost
  # The roles below should all parse and execute without errors; they
  # exercise quoting scenarios that are expected to be legitimate.
  gather_facts: False
  roles:
    - { role: test_good_parsing, tags: test_good_parsing }
unknown
github
https://github.com/ansible/ansible
test/integration/targets/parsing/good_parsing.yml
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''Uses the closure compiler to check syntax and semantics of a js module
with dependencies.'''

import os
import re
import subprocess
import sys

_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_CHROME_SOURCE_DIR = os.path.normpath(
    os.path.join(
        _SCRIPT_DIR, *[os.path.pardir] * 6))

# Compiler path.
_CLOSURE_COMPILER_JAR = os.path.join(
    _CHROME_SOURCE_DIR, 'third_party', 'closure_compiler',
    'compiler', 'compiler.jar')

# List of compilation errors to enable with the --jscomp_errors flag.
_JSCOMP_ERRORS = [
    'accessControls', 'checkTypes', 'checkVars', 'invalidCasts',
    'missingProperties', 'undefinedNames', 'undefinedVars', 'visibility'
]

# Resolved lazily by _CheckJava(); may be replaced with $JAVAHOME/bin/java.
_java_executable = 'java'


def _Error(msg):
  """Writes |msg| to stderr and aborts the script with exit status 1."""
  # sys.stderr.write works on both Python 2 and 3, unlike the original
  # "print >>sys.stderr" statement, which is Python-2-only syntax.
  sys.stderr.write('%s\n' % msg)
  sys.exit(1)


def _ExecuteCommand(args, ignore_exit_status=False):
  """Runs |args| and returns its combined stdout/stderr output.

  If the command exits non-zero, the output is returned when
  |ignore_exit_status| is set; otherwise the script aborts via _Error.
  Failure to launch the command at all also aborts the script.
  """
  try:
    return subprocess.check_output(args, stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError as e:
    if ignore_exit_status and e.returncode > 0:
      return e.output
    _Error('%s\nCommand \'%s\' returned non-zero exit status %d' %
           (e.output, ' '.join(e.cmd), e.returncode))
  except (OSError, IOError) as e:
    _Error('Error executing %s: %s' % (_java_executable, str(e)))


def _CheckJava():
  """Verifies that Java 7+ is available, honoring the JAVAHOME env var."""
  global _java_executable
  java_home = os.environ.get('JAVAHOME')
  if java_home is not None:
    _java_executable = os.path.join(java_home, 'bin', 'java')
  output = _ExecuteCommand([_java_executable, '-version'])
  # "java -version" reports e.g. 'java version "1.7.0_55"'; the captured
  # group is the minor component ("7" here).
  match = re.search(r'version "(?:\d+)\.(\d+)', output)
  if match is None or int(match.group(1)) < 7:
    _Error('Java 7 or later is required: \n%s' % output)

# Fail fast at import time if a usable Java is not present.
_CheckJava()


def RunCompiler(js_files, externs=None):
  """Compiles |js_files| with the closure compiler to check them.

  Args:
    js_files: list of paths to the JavaScript sources to check.
    externs: optional list of externs files passed via --externs.

  Returns:
    A (success, output) pair: success is True when the compiler produced
    no diagnostics, and output is the compiler's combined output.
  """
  # None sentinel instead of a mutable [] default argument.
  if externs is None:
    externs = []
  args = [_java_executable, '-jar', _CLOSURE_COMPILER_JAR]
  args.extend(['--compilation_level', 'SIMPLE_OPTIMIZATIONS'])
  args.extend(['--jscomp_error=%s' % error for error in _JSCOMP_ERRORS])
  args.extend(['--language_in', 'ECMASCRIPT5'])
  args.extend(['--externs=%s' % extern for extern in externs])
  args.extend(['--js=%s' % js for js in js_files])
  # Only diagnostics matter; discard the compiled output.
  args.extend(['--js_output_file', '/dev/null'])
  output = _ExecuteCommand(args, ignore_exit_status=True)
  success = len(output) == 0
  return success, output
unknown
codeparrot/codeparrot-clean
# vim: sw=4:expandtab:foldmethod=marker # # Copyright (c) 2007-2009, Mathieu Fenniak # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
__author__ = "Mathieu Fenniak" import datetime import decimal import struct import math from errors import (NotSupportedError, ArrayDataParseError, InternalError, ArrayContentEmptyError, ArrayContentNotHomogenousError, ArrayContentNotSupportedError, ArrayDimensionsNotConsistentError) try: from pytz import utc except ImportError: ZERO = datetime.timedelta(0) class UTC(datetime.tzinfo): def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO utc = UTC() class Bytea(str): pass class Interval(object): def __init__(self, microseconds=0, days=0, months=0): self.microseconds = microseconds self.days = days self.months = months def _setMicroseconds(self, value): if not isinstance(value, int) and not isinstance(value, long): raise TypeError("microseconds must be an int or long") elif not (min_int8 < value < max_int8): raise OverflowError("microseconds must be representable as a 64-bit integer") else: self._microseconds = value def _setDays(self, value): if not isinstance(value, int) and not isinstance(value, long): raise TypeError("days must be an int or long") elif not (min_int4 < value < max_int4): raise OverflowError("days must be representable as a 32-bit integer") else: self._days = value def _setMonths(self, value): if not isinstance(value, int) and not isinstance(value, long): raise TypeError("months must be an int or long") elif not (min_int4 < value < max_int4): raise OverflowError("months must be representable as a 32-bit integer") else: self._months = value microseconds = property(lambda self: self._microseconds, _setMicroseconds) days = property(lambda self: self._days, _setDays) months = property(lambda self: self._months, _setMonths) def __repr__(self): return "<Interval %s months %s days %s microseconds>" % (self.months, self.days, self.microseconds) def __cmp__(self, other): if other == None: return -1 c = cmp(self.months, other.months) if c != 0: return c c = cmp(self.days, other.days) if c != 0: return c 
return cmp(self.microseconds, other.microseconds) def pg_type_info(typ): value = None if isinstance(typ, dict): value = typ["value"] typ = typ["type"] data = py_types.get(typ) if data == None: raise NotSupportedError("type %r not mapped to pg type" % typ) # permit the type data to be determined by the value, if provided inspect_func = data.get("inspect") if value != None and inspect_func != None: data = inspect_func(value) type_oid = data.get("typeoid") if type_oid == None: raise InternalError("type %r has no type_oid" % typ) elif type_oid == -1: # special case: NULL values return type_oid, 0 # prefer bin, but go with whatever exists if data.get("bin_out"): format = 1 elif data.get("txt_out"): format = 0 else: raise InternalError("no conversion fuction for type %r" % typ) return type_oid, format def pg_value(value, fc, **kwargs): typ = type(value) data = py_types.get(typ) if data == None: raise NotSupportedError("type %r not mapped to pg type" % typ) # permit the type conversion to be determined by the value, if provided inspect_func = data.get("inspect") if value != None and inspect_func != None: data = inspect_func(value) # special case: NULL values if data.get("typeoid") == -1: return None if fc == 0: func = data.get("txt_out") elif fc == 1: func = data.get("bin_out") else: raise InternalError("unrecognized format code %r" % fc) if func == None: raise NotSupportedError("type %r, format code %r not supported" % (typ, fc)) return func(value, **kwargs) def py_type_info(description): type_oid = description['type_oid'] data = pg_types.get(type_oid) if data == None: raise NotSupportedError("type oid %r not mapped to py type" % type_oid) # prefer bin, but go with whatever exists if data.get("bin_in"): format = 1 elif data.get("txt_in"): format = 0 else: raise InternalError("no conversion fuction for type oid %r" % type_oid) return format def py_value(v, description, **kwargs): if v == None: # special case - NULL value return None type_oid = description['type_oid'] 
    # Tail of py_value(): look up the converter registered for this type oid
    # in the requested wire format and apply it to the raw value.
    format = description['format']
    data = pg_types.get(type_oid)
    if data == None:
        raise NotSupportedError("type oid %r not supported" % type_oid)
    if format == 0:
        func = data.get("txt_in")
    elif format == 1:
        func = data.get("bin_in")
    else:
        raise NotSupportedError("format code %r not supported" % format)
    if func == None:
        raise NotSupportedError("data response format %r, type %r not supported" % (format, type_oid))
    return func(v, **kwargs)

# --- PostgreSQL wire-format converters ------------------------------------
# *recv functions decode the binary wire format into Python values; *send
# functions encode Python values for sending.  All accept **kwargs so that
# connection-specific settings (integer_datetimes, client_encoding, ...)
# can be passed through uniformly.

def boolrecv(data, **kwargs):
    # Binary bool is a single byte: \x01 for true.
    return data == "\x01"

def boolsend(v, **kwargs):
    # Binary bool is a single byte: \x01 for true, \x00 for false.
    if v:
        return "\x01"
    else:
        return "\x00"

# Open ranges of the fixed-size PostgreSQL integer types (int2/int4/int8).
min_int2, max_int2 = -2 ** 15, 2 ** 15
min_int4, max_int4 = -2 ** 31, 2 ** 31
min_int8, max_int8 = -2 ** 63, 2 ** 63

def int_inspect(value):
    # Pick the smallest PostgreSQL integer type able to hold `value`;
    # anything outside int8 range falls back to NUMERIC.
    if min_int2 < value < max_int2:
        return {"typeoid": 21, "bin_out": int2send}
    elif min_int4 < value < max_int4:
        return {"typeoid": 23, "bin_out": int4send}
    elif min_int8 < value < max_int8:
        return {"typeoid": 20, "bin_out": int8send}
    else:
        return {"typeoid": 1700, "bin_out": numeric_send}

def int2recv(data, **kwargs):
    # Network-order signed 16-bit integer.
    return struct.unpack("!h", data)[0]

def int2send(v, **kwargs):
    return struct.pack("!h", v)

def int4recv(data, **kwargs):
    # Network-order signed 32-bit integer.
    return struct.unpack("!i", data)[0]

def int4send(v, **kwargs):
    return struct.pack("!i", v)

def int8recv(data, **kwargs):
    # Network-order signed 64-bit integer.
    return struct.unpack("!q", data)[0]

def int8send(v, **kwargs):
    return struct.pack("!q", v)

def float4recv(data, **kwargs):
    # Network-order IEEE 754 single-precision float.
    return struct.unpack("!f", data)[0]

def float8recv(data, **kwargs):
    # Network-order IEEE 754 double-precision float.
    return struct.unpack("!d", data)[0]

def float8send(v, **kwargs):
    return struct.pack("!d", v)

def datetime_inspect(value):
    # Choose the timestamp flavour based on whether the datetime is
    # timezone-aware.
    if value.tzinfo != None:
        # send as timestamptz if timezone is provided
        return {"typeoid": 1184, "bin_out": timestamptz_send}
    else:
        # otherwise send as timestamp
        return {"typeoid": 1114, "bin_out": timestamp_send}

def timestamp_recv(data, integer_datetimes, **kwargs):
    if integer_datetimes:
        # data is a 64-bit integer of MICROseconds since 2000-01-01 — the
        # original comment said "milliseconds", but the value is applied as
        # timedelta(microseconds=val) below.
        val = struct.unpack("!q", data)[0]
        return datetime.datetime(2000, 1, 1)
+ datetime.timedelta(microseconds = val) else: # data is double-precision float representing seconds since 2000-01-01 val = struct.unpack("!d", data)[0] return datetime.datetime(2000, 1, 1) + datetime.timedelta(seconds = val) # return a timezone-aware datetime instance if we're reading from a # "timestamp with timezone" type. The timezone returned will always be UTC, # but providing that additional information can permit conversion to local. def timestamptz_recv(data, **kwargs): return timestamp_recv(data, **kwargs).replace(tzinfo=utc) def timestamp_send(v, integer_datetimes, **kwargs): delta = v - datetime.datetime(2000, 1, 1) val = delta.microseconds + (delta.seconds * 1000000) + (delta.days * 86400000000) if integer_datetimes: # data is 64-bit integer representing milliseconds since 2000-01-01 return struct.pack("!q", val) else: # data is double-precision float representing seconds since 2000-01-01 return struct.pack("!d", val / 1000.0 / 1000.0) def timestamptz_send(v, **kwargs): # timestamps should be sent as UTC. If they have zone info, # convert them. return timestamp_send(v.astimezone(utc).replace(tzinfo=None), **kwargs) def date_in(data, **kwargs): year = int(data[0:4]) month = int(data[5:7]) day = int(data[8:10]) return datetime.date(year, month, day) def date_out(v, **kwargs): return v.isoformat() def time_in(data, **kwargs): hour = int(data[0:2]) minute = int(data[3:5]) sec = decimal.Decimal(data[6:]) return datetime.time(hour, minute, int(sec), int((sec - int(sec)) * 1000000)) def time_out(v, **kwargs): return v.isoformat() def numeric_in(data, **kwargs): if data.find(".") == -1: return int(data) else: return decimal.Decimal(data) def numeric_recv(data, **kwargs): num_digits, weight, sign, scale = struct.unpack("!hhhh", data[:8]) data = data[8:] digits = struct.unpack("!" 
+ ("h" * num_digits), data) weight = decimal.Decimal(weight) retval = 0 for d in digits: d = decimal.Decimal(d) retval += d * (10000 ** weight) weight -= 1 if sign: retval *= -1 return retval DEC_DIGITS = 4 def numeric_send(d, **kwargs): # This is a very straight port of src/backend/utils/adt/numeric.c set_var_from_str() s = str(d) pos = 0 sign = 0 if s[0] == '-': sign = 0x4000 # NEG pos=1 elif s[0] == '+': sign = 0 # POS pos=1 have_dp = False decdigits = [0, 0, 0, 0] dweight = -1 dscale = 0 for char in s[pos:]: if char.isdigit(): decdigits.append(int(char)) if not have_dp: dweight += 1 else: dscale += 1 pos+=1 elif char == '.': have_dp = True pos+=1 else: break if len(s) > pos: char = s[pos] if char == 'e' or char == 'E': pos+=1 exponent = int(s[pos:]) dweight += exponent dscale -= exponent if dscale < 0: dscale = 0 if dweight >= 0: weight = (dweight + 1 + DEC_DIGITS - 1) / DEC_DIGITS - 1 else: weight = -((-dweight - 1) / DEC_DIGITS + 1) offset = (weight + 1) * DEC_DIGITS - (dweight + 1) ndigits = (len(decdigits)-DEC_DIGITS + offset + DEC_DIGITS - 1) / DEC_DIGITS i = DEC_DIGITS - offset decdigits.extend([0, 0, 0]) ndigits_ = ndigits digits = '' while ndigits_ > 0: # ifdef DEC_DIGITS == 4 digits += struct.pack("!h", ((decdigits[i] * 10 + decdigits[i + 1]) * 10 + decdigits[i + 2]) * 10 + decdigits[i + 3]) ndigits_ -= 1 i += DEC_DIGITS # strip_var() if ndigits == 0: sign = 0x4000 # pos weight = 0 # ---------- retval = struct.pack("!hhhh", ndigits, weight, sign, dscale) + digits return retval def numeric_out(v, **kwargs): return str(v) # PostgreSQL encodings: # http://www.postgresql.org/docs/8.3/interactive/multibyte.html # Python encodings: # http://www.python.org/doc/2.4/lib/standard-encodings.html # # Commented out encodings don't require a name change between PostgreSQL and # Python. If the py side is None, then the encoding isn't supported. 
pg_to_py_encodings = { # Not supported: "mule_internal": None, "euc_tw": None, # Name fine as-is: #"euc_jp", #"euc_jis_2004", #"euc_kr", #"gb18030", #"gbk", #"johab", #"sjis", #"shift_jis_2004", #"uhc", #"utf8", # Different name: "euc_cn": "gb2312", "iso_8859_5": "is8859_5", "iso_8859_6": "is8859_6", "iso_8859_7": "is8859_7", "iso_8859_8": "is8859_8", "koi8": "koi8_r", "latin1": "iso8859-1", "latin2": "iso8859_2", "latin3": "iso8859_3", "latin4": "iso8859_4", "latin5": "iso8859_9", "latin6": "iso8859_10", "latin7": "iso8859_13", "latin8": "iso8859_14", "latin9": "iso8859_15", "sql_ascii": "ascii", "win866": "cp886", "win874": "cp874", "win1250": "cp1250", "win1251": "cp1251", "win1252": "cp1252", "win1253": "cp1253", "win1254": "cp1254", "win1255": "cp1255", "win1256": "cp1256", "win1257": "cp1257", "win1258": "cp1258", } def encoding_convert(encoding): return pg_to_py_encodings.get(encoding.lower(), encoding) def varcharin(data, client_encoding, **kwargs): return unicode(data, encoding_convert(client_encoding)) def textout(v, client_encoding, **kwargs): if isinstance(v, unicode): return v.encode(encoding_convert(client_encoding)) else: return v def byteasend(v, **kwargs): return str(v) def bytearecv(data, **kwargs): return Bytea(data) # interval support does not provide a Python-usable interval object yet def interval_recv(data, integer_datetimes, **kwargs): if integer_datetimes: microseconds, days, months = struct.unpack("!qii", data) else: seconds, days, months = struct.unpack("!dii", data) microseconds = int(seconds * 1000 * 1000) return Interval(microseconds, days, months) def interval_send(data, integer_datetimes, **kwargs): if integer_datetimes: return struct.pack("!qii", data.microseconds, data.days, data.months) else: return struct.pack("!dii", data.microseconds / 1000.0 / 1000.0, data.days, data.months) def array_recv(data, **kwargs): dim, hasnull, typeoid = struct.unpack("!iii", data[:12]) data = data[12:] # get type conversion method for typeoid 
conversion = pg_types[typeoid]["bin_in"] # Read dimension info dim_lengths = [] element_count = 1 for idim in range(dim): dim_len, dim_lbound = struct.unpack("!ii", data[:8]) data = data[8:] dim_lengths.append(dim_len) element_count *= dim_len # Read all array values array_values = [] for i in range(element_count): if len(data): element_len, = struct.unpack("!i", data[:4]) data = data[4:] if element_len == -1: array_values.append(None) else: array_values.append(conversion(data[:element_len], **kwargs)) data = data[element_len:] if data != "": raise ArrayDataParseError("unexpected data left over after array read") # at this point, {{1,2,3},{4,5,6}}::int[][] looks like [1,2,3,4,5,6]. # go through the dimensions and fix up the array contents to match # expected dimensions for dim_length in reversed(dim_lengths[1:]): val = [] while array_values: val.append(array_values[:dim_length]) array_values = array_values[dim_length:] array_values = val return array_values def array_inspect(value): # Check if array has any values. If not, we can't determine the proper # array typeoid. 
first_element = array_find_first_element(value) if first_element == None: raise ArrayContentEmptyError("array has no values") # supported array output typ = type(first_element) if issubclass(typ, int) or issubclass(typ, long): # special int array support -- send as smallest possible array type special_int_support = True int2_ok, int4_ok, int8_ok = True, True, True for v in array_flatten(value): if v == None: continue if min_int2 < v < max_int2: continue int2_ok = False if min_int4 < v < max_int4: continue int4_ok = False if min_int8 < v < max_int8: continue int8_ok = False if int2_ok: array_typeoid = 1005 # INT2[] elif int4_ok: array_typeoid = 1007 # INT4[] elif int8_ok: array_typeoid = 1016 # INT8[] else: raise ArrayContentNotSupportedError("numeric not supported as array contents") else: special_int_support = False array_typeoid = py_array_types.get(typ) if array_typeoid == None: raise ArrayContentNotSupportedError("type %r not supported as array contents" % typ) # check for homogenous array for v in array_flatten(value): if v != None and not (isinstance(v, typ) or (typ == long and isinstance(v, int)) or (typ == int and isinstance(v, long))): raise ArrayContentNotHomogenousError("not all array elements are of type %r" % typ) # check that all array dimensions are consistent array_check_dimensions(value) type_data = py_types[typ] if special_int_support: if array_typeoid == 1005: type_data = {"typeoid": 21, "bin_out": int2send} elif array_typeoid == 1007: type_data = {"typeoid": 23, "bin_out": int4send} elif array_typeoid == 1016: type_data = {"typeoid": 20, "bin_out": int8send} else: type_data = py_types[typ] return { "typeoid": array_typeoid, "bin_out": array_send(type_data["typeoid"], type_data["bin_out"]) } def array_find_first_element(arr): for v in array_flatten(arr): if v != None: return v return None def array_flatten(arr): for v in arr: if isinstance(v, list): for v2 in array_flatten(v): yield v2 else: yield v def array_check_dimensions(arr): v0 = arr[0] if 
isinstance(v0, list): req_len = len(v0) req_inner_lengths = array_check_dimensions(v0) for v in arr: inner_lengths = array_check_dimensions(v) if len(v) != req_len or inner_lengths != req_inner_lengths: raise ArrayDimensionsNotConsistentError("array dimensions not consistent") retval = [req_len] retval.extend(req_inner_lengths) return retval else: # make sure nothing else at this level is a list for v in arr: if isinstance(v, list): raise ArrayDimensionsNotConsistentError("array dimensions not consistent") return [] def array_has_null(arr): for v in array_flatten(arr): if v == None: return True return False def array_dim_lengths(arr): v0 = arr[0] if isinstance(v0, list): retval = [len(v0)] retval.extend(array_dim_lengths(v0)) else: return [len(arr)] return retval class array_send(object): def __init__(self, typeoid, bin_out_func): self.typeoid = typeoid self.bin_out_func = bin_out_func def __call__(self, arr, **kwargs): has_null = array_has_null(arr) dim_lengths = array_dim_lengths(arr) data = struct.pack("!iii", len(dim_lengths), has_null, self.typeoid) for i in dim_lengths: data += struct.pack("!ii", i, 1) for v in array_flatten(arr): if v == None: data += struct.pack("!i", -1) else: inner_data = self.bin_out_func(v, **kwargs) data += struct.pack("!i", len(inner_data)) data += inner_data return data py_types = { bool: {"typeoid": 16, "bin_out": boolsend}, int: {"inspect": int_inspect}, long: {"inspect": int_inspect}, str: {"typeoid": 25, "bin_out": textout}, unicode: {"typeoid": 25, "bin_out": textout}, float: {"typeoid": 701, "bin_out": float8send}, decimal.Decimal: {"typeoid": 1700, "bin_out": numeric_send}, Bytea: {"typeoid": 17, "bin_out": byteasend}, datetime.datetime: {"typeoid": 1114, "bin_out": timestamp_send, "inspect": datetime_inspect}, datetime.date: {"typeoid": 1082, "txt_out": date_out}, datetime.time: {"typeoid": 1083, "txt_out": time_out}, Interval: {"typeoid": 1186, "bin_out": interval_send}, type(None): {"typeoid": -1}, list: {"inspect": 
array_inspect}, } # py type -> pg array typeoid py_array_types = { float: 1022, bool: 1000, str: 1009, # TEXT[] unicode: 1009, # TEXT[] decimal.Decimal: 1231, # NUMERIC[] } pg_types = { 16: {"bin_in": boolrecv}, 17: {"bin_in": bytearecv}, 19: {"bin_in": varcharin}, # name type 20: {"bin_in": int8recv}, 21: {"bin_in": int2recv}, 23: {"bin_in": int4recv, "txt_in": numeric_in}, 25: {"bin_in": varcharin, "txt_in": varcharin}, # TEXT type 26: {"txt_in": numeric_in}, # oid type 142: {"bin_in": varcharin, "txt_in": varcharin}, # XML 194: {"bin_in": varcharin}, # "string representing an internal node tree" 700: {"bin_in": float4recv}, 701: {"bin_in": float8recv}, 705: {"txt_in": varcharin}, # UNKNOWN 829: {"txt_in": varcharin}, # MACADDR type 1000: {"bin_in": array_recv}, # BOOL[] 1003: {"bin_in": array_recv}, # NAME[] 1005: {"bin_in": array_recv}, # INT2[] 1007: {"bin_in": array_recv, "txt_in": varcharin}, # INT4[] 1009: {"bin_in": array_recv}, # TEXT[] 1014: {"bin_in": array_recv}, # CHAR[] 1015: {"bin_in": array_recv}, # VARCHAR[] 1016: {"bin_in": array_recv}, # INT8[] 1021: {"bin_in": array_recv}, # FLOAT4[] 1022: {"bin_in": array_recv}, # FLOAT8[] 1042: {"bin_in": varcharin}, # CHAR type 1043: {"bin_in": varcharin}, # VARCHAR type 1082: {"txt_in": date_in}, 1083: {"txt_in": time_in}, 1114: {"bin_in": timestamp_recv}, 1184: {"bin_in": timestamptz_recv}, # timestamp w/ tz 1186: {"bin_in": interval_recv}, 1231: {"bin_in": array_recv}, # NUMERIC[] 1263: {"bin_in": array_recv}, # cstring[] 1700: {"bin_in": numeric_recv}, 2275: {"bin_in": varcharin}, # cstring }
unknown
codeparrot/codeparrot-clean
# Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for BridgeTower. """ from ...processing_utils import ProcessingKwargs, ProcessorMixin from ...utils import auto_docstring class BridgeTowerProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "stride": 0, "return_overflowing_tokens": False, "return_special_tokens_mask": False, "return_offsets_mapping": False, "return_length": False, "verbose": True, }, "images_kwargs": { "do_normalize": True, "do_center_crop": True, }, } @auto_docstring class BridgeTowerProcessor(ProcessorMixin): valid_processor_kwargs = BridgeTowerProcessorKwargs def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) __all__ = ["BridgeTowerProcessor"]
python
github
https://github.com/huggingface/transformers
src/transformers/models/bridgetower/processing_bridgetower.py
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Template file used by the OPF Experiment Generator to generate the actual description.py file by replacing $XXXXXXXX tokens with desired values. This description.py file was generated by: '/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py' """ from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI from nupic.frameworks.opf.expdescriptionhelpers import ( updateConfigFromSubConfig, applyValueGettersToContainer, DeferredDictLookup) from nupic.frameworks.opf.clamodelcallbacks import * from nupic.frameworks.opf.metrics import MetricSpec from nupic.frameworks.opf.opfutils import (InferenceType, InferenceElement) from nupic.support import aggregationDivide from nupic.frameworks.opf.opftaskdriver import ( IterationPhaseSpecLearnOnly, IterationPhaseSpecInferOnly, IterationPhaseSpecLearnAndInfer) # Model Configuration Dictionary: # # Define the model parameters and adjust for any modifications if imported # from a sub-experiment. 
# # These fields might be modified by a sub-experiment; this dict is passed # between the sub-experiment and base experiment # # # NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements # within the config dictionary may be assigned futures derived from the # ValueGetterBase class, such as DeferredDictLookup. # This facility is particularly handy for enabling substitution of values in # the config dictionary from other values in the config dictionary, which is # needed by permutation.py-based experiments. These values will be resolved # during the call to applyValueGettersToContainer(), # which we call after the base experiment's config dictionary is updated from # the sub-experiment. See ValueGetterBase and # DeferredDictLookup for more details about value-getters. # # For each custom encoder parameter to be exposed to the sub-experiment/ # permutation overrides, define a variable in this section, using key names # beginning with a single underscore character to avoid collisions with # pre-defined keys (e.g., _dsEncoderFieldName2_N). # # Example: # config = dict( # _dsEncoderFieldName2_N = 70, # _dsEncoderFieldName2_W = 5, # dsEncoderSchema = [ # base=dict( # fieldname='Name2', type='ScalarEncoder', # name='Name2', minval=0, maxval=270, clipInput=True, # n=DeferredDictLookup('_dsEncoderFieldName2_N'), # w=DeferredDictLookup('_dsEncoderFieldName2_W')), # ], # ) # updateConfigFromSubConfig(config) # applyValueGettersToContainer(config) config = { # Type of model that the rest of these parameters apply to. 'model': "CLA", # Version that specifies the format of the config. 'version': 1, # Intermediate variables used to compute fields in modelParams and also # referenced from the control section. 'aggregationInfo': { 'days': 0, 'fields': [], 'hours': 0, 'microseconds': 0, 'milliseconds': 0, 'minutes': 0, 'months': 0, 'seconds': 0, 'weeks': 0, 'years': 0}, 'predictAheadTime': None, # Model parameter dictionary. 
'modelParams': { # The type of inference that this model will perform 'inferenceType': 'TemporalNextStep', 'sensorParams': { # Sensor diagnostic output verbosity control; # if > 0: sensor region will print out on screen what it's sensing # at each step 0: silent; >=1: some info; >=2: more info; # >=3: even more info (see compute() in py/regions/RecordSensor.py) 'verbosity' : 0, # Example: # dsEncoderSchema = [ # DeferredDictLookup('__field_name_encoder'), # ], # # (value generated from DS_ENCODER_SCHEMA) 'encoders': { u'attendance': { 'clipInput': True, 'fieldname': u'attendance', 'maxval': 36067, 'minval': 0, 'n': 150, 'name': u'attendance', 'type': 'AdaptiveScalarEncoder', 'w': 21}, u'daynight': { 'fieldname': u'daynight', 'n': 300, 'name': u'daynight', 'type': 'SDRCategoryEncoder', 'w': 21}, u'home_winloss': { 'clipInput': True, 'fieldname': u'home_winloss', 'maxval': 0.69999999999999996, 'minval': 0.0, 'n': 150, 'name': u'home_winloss', 'type': 'AdaptiveScalarEncoder', 'w': 21}, u'precip': { 'fieldname': u'precip', 'n': 300, 'name': u'precip', 'type': 'SDRCategoryEncoder', 'w': 21}, u'timestamp_dayOfWeek': { 'dayOfWeek': (7, 1), 'fieldname': u'timestamp', 'name': u'timestamp_dayOfWeek', 'type': 'DateEncoder'}, u'timestamp_timeOfDay': { 'fieldname': u'timestamp', 'name': u'timestamp_timeOfDay', 'timeOfDay': (7, 1), 'type': 'DateEncoder'}, u'visitor_winloss': { 'clipInput': True, 'fieldname': u'visitor_winloss', 'maxval': 0.78600000000000003, 'minval': 0.0, 'n': 150, 'name': u'visitor_winloss', 'type': 'AdaptiveScalarEncoder', 'w': 21}}, # A dictionary specifying the period for automatically-generated # resets from a RecordSensor; # # None = disable automatically-generated resets (also disabled if # all of the specified values evaluate to 0). 
# Valid keys is the desired combination of the following: # days, hours, minutes, seconds, milliseconds, microseconds, weeks # # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12), # # (value generated from SENSOR_AUTO_RESET) 'sensorAutoReset' : None, }, 'spEnable': True, 'spParams': { # SP diagnostic output verbosity control; # 0: silent; >=1: some info; >=2: more info; 'spVerbosity' : 0, 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for # SP and TP) # (see also tpNCellsPerCol) 'columnCount': 2048, 'inputWidth': 0, # SP inhibition control (absolute value); # Maximum number of active columns in the SP region's output (when # there are more, the weaker ones are suppressed) 'numActiveColumnsPerInhArea': 40, 'seed': 1956, # potentialPct # What percent of the columns's receptive field is available # for potential synapses. At initialization time, we will # choose potentialPct * (2*potentialRadius+1)^2 'potentialPct': 1.0, # The default connected threshold. Any synapse whose # permanence value is above the connected threshold is # a "connected synapse", meaning it can contribute to the # cell's firing. Typical value is 0.10. Cells whose activity # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. # (This concept applies to both SP and TP and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, 'synPermActiveInc': 0.1, 'synPermInactiveDec': 0.01, }, # Controls whether TP is enabled or disabled; # TP is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tpEnable' : True, 'tpParams': { # TP diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for # SP and TP) # (see also tpNCellsPerCol) 'columnCount': 2048, # The number of cells (i.e., states), allocated per column. 'cellsPerColumn': 32, 'inputWidth': 2048, 'seed': 1960, # Temporal Pooler implementation selector (see _getTPClass in # CLARegion.py). 'temporalImp': 'cpp', # New Synapse formation count # NOTE: If None, use spNumActivePerInhArea # # TODO: need better explanation 'newSynapseCount': 15, # Maximum number of synapses per segment # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TP # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, # Maximum number of segments per cell # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TP # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, # Initial Permanence # TODO: need better explanation 'initialPerm': 0.21, # Permanence Increment 'permanenceInc': 0.1, # Permanence Decrement # If set to None, will automatically default to tpPermanenceInc # value. 'permanenceDec' : 0.1, 'globalDecay': 0.0, 'maxAge': 0, # Minimum number of active synapses for a segment to be considered # during search for the best-matching segments. # None=use default # Replaces: tpMinThreshold 'minThreshold': 12, # Segment activation threshold. # A segment is active if it has >= tpSegmentActivationThreshold # connected synapses that are active due to infActiveState # None=use default # Replaces: tpActivationThreshold 'activationThreshold': 16, 'outputType': 'normal', # "Pay Attention Mode" length. 
This tells the TP how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 'pamLength': 1, }, 'clParams': { 'regionName' : 'CLAClassifierRegion', # Classifier diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity 'clVerbosity' : 0, # This controls how fast the classifier learns/forgets. Higher values # make it adapt faster and forget older patterns faster. 'alpha': 0.001, # This is set after the call to updateConfigFromSubConfig and is # computed from the aggregationInfo and predictAheadTime. 'steps': '1', }, 'trainSPNetOnlyIfRequested': False, }, } # end of config dictionary # Adjust base config dictionary for any modifications if imported from a # sub-experiment updateConfigFromSubConfig(config) # Compute predictionSteps based on the predictAheadTime and the aggregation # period, which may be permuted over. if config['predictAheadTime'] is not None: predictionSteps = int(round(aggregationDivide( config['predictAheadTime'], config['aggregationInfo']))) assert (predictionSteps >= 1) config['modelParams']['clParams']['steps'] = str(predictionSteps) # Adjust config by applying ValueGetterBase-derived # futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order # to support value-getter-based substitutions from the sub-experiment (if any) applyValueGettersToContainer(config) control = { # The environment that the current model is being run in "environment": 'nupic', # Input stream specification per py/nupicengine/cluster/database/StreamDef.json. # 'dataset' : { u'info': u'baseball benchmark test', u'streams': [ { u'columns': [ u'daynight', u'precip', u'home_winloss', u'visitor_winloss', u'attendance', u'timestamp'], u'info': u'OAK01.csv', u'source': u'file://extra/baseball_stadium/OAK01reformatted.csv'}], u'version': 1}, # Iteration count: maximum number of iterations. 
Each iteration corresponds # to one record from the (possibly aggregated) dataset. The task is # terminated when either number of iterations reaches iterationCount or # all records in the (possibly aggregated) database have been processed, # whichever occurs first. # # iterationCount of -1 = iterate over the entire dataset #'iterationCount' : ITERATION_COUNT, # Metrics: A list of MetricSpecs that instantiate the metrics that are # computed for this experiment 'metrics':[ MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction, metric='aae', params={'window': 1000}), MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction, metric='trivial_aae', params={'window': 1000}), MetricSpec(field=u'attendance', inferenceElement=InferenceElement.encodings, metric='nupicScore_scalar', params={'frequencyWindow': 1000, 'movingAverageWindow': 1000}), MetricSpec(field=u'attendance', inferenceElement=InferenceElement.encodings, metric='nupicScore_scalar', params={'frequencyWindow': 1000}) ], # Logged Metrics: A sequence of regular expressions that specify which of # the metrics from the Inference Specifications section MUST be logged for # every prediction. The regex's correspond to the automatically generated # metric labels. This is similar to the way the optimization metric is # specified in permutations.py. 'loggedMetrics': ['.*nupicScore.*'], } descriptionInterface = ExperimentDescriptionAPI(modelConfig=config, control=control)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # # Copyright (C) 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SpreadsheetsService extends the GDataService to streamline Google Spreadsheets operations. SpreadsheetService: Provides methods to query feeds and manipulate items. Extends GDataService. DictionaryToParamList: Function which converts a dictionary into a list of URL arguments (represented as strings). This is a utility function used in CRUD operations. """ __author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' import gdata import atom.service import gdata.service import gdata.spreadsheet import atom class Error(Exception): """Base class for exceptions in this module.""" pass class RequestError(Error): pass class SpreadsheetsService(gdata.service.GDataService): """Client for the Google Spreadsheets service.""" def __init__(self, email=None, password=None, source=None, server='spreadsheets.google.com', additional_headers=None, **kwargs): """Creates a client for the Google Spreadsheets service. Args: email: string (optional) The user's email address, used for authentication. password: string (optional) The user's password. source: string (optional) The name of the user's application. server: string (optional) The name of the server to which a connection will be opened. Default value: 'spreadsheets.google.com'. **kwargs: The other parameters to pass to gdata.service.GDataService constructor. 
""" gdata.service.GDataService.__init__( self, email=email, password=password, service='wise', source=source, server=server, additional_headers=additional_headers, **kwargs) def GetSpreadsheetsFeed(self, key=None, query=None, visibility='private', projection='full'): """Gets a spreadsheets feed or a specific entry if a key is defined Args: key: string (optional) The spreadsheet key defined in /ccc?key= query: DocumentQuery (optional) Query parameters Returns: If there is no key, then a SpreadsheetsSpreadsheetsFeed. If there is a key, then a SpreadsheetsSpreadsheet. """ base_uri = 'https://%s/feeds/spreadsheets' % self.server uri = ('%s/%s/%s' % (base_uri, visibility, projection)) if key is not None: uri = '%s/%s' % (uri, key) if query != None: query.feed = base_uri query.visibility = visibility query.projection = projection uri = query.ToUri() if key: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsSpreadsheetFromString) else: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString) def GetWorksheetsFeed(self, key, wksht_id=None, query=None, visibility='private', projection='full'): """Gets a worksheets feed or a specific entry if a wksht is defined Args: key: string The spreadsheet key defined in /ccc?key= wksht_id: string (optional) The id for a specific worksheet entry query: DocumentQuery (optional) Query parameters Returns: If there is no wksht_id, then a SpreadsheetsWorksheetsFeed. If there is a wksht_id, then a SpreadsheetsWorksheet. 
""" uri = ('https://%s/feeds/worksheets/%s/%s/%s' % (self.server, key, visibility, projection)) if wksht_id != None: uri = '%s/%s' % (uri, wksht_id) if query != None: query.feed = uri uri = query.ToUri() if wksht_id: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) else: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString) def AddWorksheet(self, title, row_count, col_count, key): """Creates a new worksheet in the desired spreadsheet. The new worksheet is appended to the end of the list of worksheets. The new worksheet will only have the available number of columns and cells specified. Args: title: str The title which will be displayed in the list of worksheets. row_count: int or str The number of rows in the new worksheet. col_count: int or str The number of columns in the new worksheet. key: str The spreadsheet key to the spreadsheet to which the new worksheet should be added. Returns: A SpreadsheetsWorksheet if the new worksheet was created succesfully. """ new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheet( title=atom.Title(text=title), row_count=gdata.spreadsheet.RowCount(text=str(row_count)), col_count=gdata.spreadsheet.ColCount(text=str(col_count))) return self.Post(new_worksheet, 'https://%s/feeds/worksheets/%s/private/full' % (self.server, key), converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) def UpdateWorksheet(self, worksheet_entry, url=None): """Changes the size and/or title of the desired worksheet. Args: worksheet_entry: SpreadsheetWorksheet The new contents of the worksheet. url: str (optional) The URL to which the edited worksheet entry should be sent. If the url is None, the edit URL from the worksheet will be used. Returns: A SpreadsheetsWorksheet with the new information about the worksheet. 
""" target_url = url or worksheet_entry.GetEditLink().href return self.Put(worksheet_entry, target_url, converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) def DeleteWorksheet(self, worksheet_entry=None, url=None): """Removes the desired worksheet from the spreadsheet Args: worksheet_entry: SpreadsheetWorksheet (optional) The worksheet to be deleted. If this is none, then the DELETE reqest is sent to the url specified in the url parameter. url: str (optaional) The URL to which the DELETE request should be sent. If left as None, the worksheet's edit URL is used. Returns: True if the worksheet was deleted successfully. """ if url: target_url = url else: target_url = worksheet_entry.GetEditLink().href return self.Delete(target_url) def GetCellsFeed(self, key, wksht_id='default', cell=None, query=None, visibility='private', projection='full'): """Gets a cells feed or a specific entry if a cell is defined Args: key: string The spreadsheet key defined in /ccc?key= wksht_id: string The id for a specific worksheet entry cell: string (optional) The R1C1 address of the cell query: DocumentQuery (optional) Query parameters Returns: If there is no cell, then a SpreadsheetsCellsFeed. If there is a cell, then a SpreadsheetsCell. 
""" uri = ('https://%s/feeds/cells/%s/%s/%s/%s' % (self.server, key, wksht_id, visibility, projection)) if cell != None: uri = '%s/%s' % (uri, cell) if query != None: query.feed = uri uri = query.ToUri() if cell: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsCellFromString) else: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString) def GetListFeed(self, key, wksht_id='default', row_id=None, query=None, visibility='private', projection='full'): """Gets a list feed or a specific entry if a row_id is defined Args: key: string The spreadsheet key defined in /ccc?key= wksht_id: string The id for a specific worksheet entry row_id: string (optional) The row_id of a row in the list query: DocumentQuery (optional) Query parameters Returns: If there is no row_id, then a SpreadsheetsListFeed. If there is a row_id, then a SpreadsheetsList. """ uri = ('https://%s/feeds/list/%s/%s/%s/%s' % (self.server, key, wksht_id, visibility, projection)) if row_id is not None: uri = '%s/%s' % (uri, row_id) if query is not None: query.feed = uri uri = query.ToUri() if row_id: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsListFromString) else: return self.Get(uri, converter=gdata.spreadsheet.SpreadsheetsListFeedFromString) def UpdateCell(self, row, col, inputValue, key, wksht_id='default'): """Updates an existing cell. Args: row: int The row the cell to be editted is in col: int The column the cell to be editted is in inputValue: str the new value of the cell key: str The key of the spreadsheet in which this cell resides. wksht_id: str The ID of the worksheet which holds this cell. 
Returns: The updated cell entry """ row = str(row) col = str(col) # make the new cell new_cell = gdata.spreadsheet.Cell(row=row, col=col, inputValue=inputValue) # get the edit uri and PUT cell = 'R%sC%s' % (row, col) entry = self.GetCellsFeed(key, wksht_id, cell) for a_link in entry.link: if a_link.rel == 'edit': entry.cell = new_cell return self.Put(entry, a_link.href, converter=gdata.spreadsheet.SpreadsheetsCellFromString) def _GenerateCellsBatchUrl(self, spreadsheet_key, worksheet_id): return ('https://spreadsheets.google.com/feeds/cells/%s/%s/' 'private/full/batch' % (spreadsheet_key, worksheet_id)) def ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None, worksheet_id=None, converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString): """Sends a batch request feed to the server. The batch request needs to be sent to the batch URL for a particular worksheet. You can specify the worksheet by providing the spreadsheet_key and worksheet_id, or by sending the URL from the cells feed's batch link. Args: batch_feed: gdata.spreadsheet.SpreadsheetsCellFeed A feed containing BatchEntry elements which contain the desired CRUD operation and any necessary data to modify a cell. url: str (optional) The batch URL for the cells feed to which these changes should be applied. This can be found by calling cells_feed.GetBatchLink().href. spreadsheet_key: str (optional) Used to generate the batch request URL if the url argument is None. If using the spreadsheet key to generate the URL, the worksheet id is also required. worksheet_id: str (optional) Used if the url is not provided, it is oart of the batch feed target URL. This is used with the spreadsheet key. converter: Function (optional) Function to be executed on the server's response. This function should take one string as a parameter. The default value is SpreadsheetsCellsFeedFromString which will turn the result into a gdata.spreadsheet.SpreadsheetsCellsFeed object. 
Returns: A gdata.BatchFeed containing the results. """ if url is None: url = self._GenerateCellsBatchUrl(spreadsheet_key, worksheet_id) return self.Post(batch_feed, url, converter=converter) def InsertRow(self, row_data, key, wksht_id='default'): """Inserts a new row with the provided data Args: uri: string The post uri of the list feed row_data: dict A dictionary of column header to row data Returns: The inserted row """ new_entry = gdata.spreadsheet.SpreadsheetsList() for k, v in row_data.items(): new_custom = gdata.spreadsheet.Custom() new_custom.column = k new_custom.text = v new_entry.custom[new_custom.column] = new_custom # Generate the post URL for the worksheet which will receive the new entry. post_url = 'https://spreadsheets.google.com/feeds/list/%s/%s/private/full'%( key, wksht_id) return self.Post(new_entry, post_url, converter=gdata.spreadsheet.SpreadsheetsListFromString) def UpdateRow(self, entry, new_row_data): """Updates a row with the provided data If you want to add additional information to a row, it is often easier to change the values in entry.custom, then use the Put method instead of UpdateRow. This UpdateRow method will replace the contents of the row with new_row_data - it will change all columns not just the columns specified in the new_row_data dict. 
Args: entry: gdata.spreadsheet.SpreadsheetsList The entry to be updated new_row_data: dict A dictionary of column header to row data Returns: The updated row """ entry.custom = {} for k, v in new_row_data.items(): new_custom = gdata.spreadsheet.Custom() new_custom.column = k new_custom.text = v entry.custom[k] = new_custom for a_link in entry.link: if a_link.rel == 'edit': return self.Put(entry, a_link.href, converter=gdata.spreadsheet.SpreadsheetsListFromString) def DeleteRow(self, entry): """Deletes a row, the provided entry Args: entry: gdata.spreadsheet.SpreadsheetsList The row to be deleted Returns: The delete response """ for a_link in entry.link: if a_link.rel == 'edit': return self.Delete(a_link.href) class DocumentQuery(gdata.service.Query): def _GetTitleQuery(self): return self['title'] def _SetTitleQuery(self, document_query): self['title'] = document_query title = property(_GetTitleQuery, _SetTitleQuery, doc="""The title query parameter""") def _GetTitleExactQuery(self): return self['title-exact'] def _SetTitleExactQuery(self, document_query): self['title-exact'] = document_query title_exact = property(_GetTitleExactQuery, _SetTitleExactQuery, doc="""The title-exact query parameter""") class CellQuery(gdata.service.Query): def _GetMinRowQuery(self): return self['min-row'] def _SetMinRowQuery(self, cell_query): self['min-row'] = cell_query min_row = property(_GetMinRowQuery, _SetMinRowQuery, doc="""The min-row query parameter""") def _GetMaxRowQuery(self): return self['max-row'] def _SetMaxRowQuery(self, cell_query): self['max-row'] = cell_query max_row = property(_GetMaxRowQuery, _SetMaxRowQuery, doc="""The max-row query parameter""") def _GetMinColQuery(self): return self['min-col'] def _SetMinColQuery(self, cell_query): self['min-col'] = cell_query min_col = property(_GetMinColQuery, _SetMinColQuery, doc="""The min-col query parameter""") def _GetMaxColQuery(self): return self['max-col'] def _SetMaxColQuery(self, cell_query): self['max-col'] = 
cell_query max_col = property(_GetMaxColQuery, _SetMaxColQuery, doc="""The max-col query parameter""") def _GetRangeQuery(self): return self['range'] def _SetRangeQuery(self, cell_query): self['range'] = cell_query range = property(_GetRangeQuery, _SetRangeQuery, doc="""The range query parameter""") def _GetReturnEmptyQuery(self): return self['return-empty'] def _SetReturnEmptyQuery(self, cell_query): self['return-empty'] = cell_query return_empty = property(_GetReturnEmptyQuery, _SetReturnEmptyQuery, doc="""The return-empty query parameter""") class ListQuery(gdata.service.Query): def _GetSpreadsheetQuery(self): return self['sq'] def _SetSpreadsheetQuery(self, list_query): self['sq'] = list_query sq = property(_GetSpreadsheetQuery, _SetSpreadsheetQuery, doc="""The sq query parameter""") def _GetOrderByQuery(self): return self['orderby'] def _SetOrderByQuery(self, list_query): self['orderby'] = list_query orderby = property(_GetOrderByQuery, _SetOrderByQuery, doc="""The orderby query parameter""") def _GetReverseQuery(self): return self['reverse'] def _SetReverseQuery(self, list_query): self['reverse'] = list_query reverse = property(_GetReverseQuery, _SetReverseQuery, doc="""The reverse query parameter""")
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility to decode a crash dump generated by untrusted_crash_dump.[ch] Currently this produces a simple stack trace. """ import json import optparse import os import posixpath import subprocess import sys class CoreDecoder(object): """Class to process core dumps.""" def __init__(self, main_nexe, nmf_filename, addr2line, library_paths, platform): """Construct and object to process core dumps. Args: main_nexe: nexe to resolve NaClMain references from. nmf_filename: nmf to resolve references from. addr2line: path to appropriate addr2line. library_paths: list of paths to search for libraries. platform: platform string to use in nmf files. """ self.main_nexe = main_nexe self.nmf_filename = nmf_filename if nmf_filename == '-': self.nmf_data = {} else: self.nmf_data = json.load(open(nmf_filename)) self.addr2line = addr2line self.library_paths = library_paths self.platform = platform def _SelectModulePath(self, filename): """Select which path to get a module from. Args: filename: filename of a module (as appears in phdrs). Returns: Full local path to the file. Derived by consulting the manifest. """ # For some names try the main nexe. # NaClMain is the argv[0] setup in sel_main.c # (null) shows up in chrome. if self.main_nexe is not None and filename in ['NaClMain', '(null)']: return self.main_nexe filepart = posixpath.basename(filename) nmf_entry = self.nmf_data.get('files', {}).get(filepart, {}) nmf_url = nmf_entry.get(self.platform, {}).get('url') # Try filename directly if not in manifest. if nmf_url is None: return filename # Look for the module relative to the manifest (if any), # then in other search paths. 
paths = [] if self.nmf_filename != '-': paths.append(os.path.dirname(self.nmf_filename)) paths.extend(self.library_paths) for path in paths: pfilename = os.path.join(path, nmf_url) if os.path.exists(pfilename): return pfilename # If nothing else, try the path directly. return filename def _DecodeAddressSegment(self, segments, address): """Convert an address to a segment relative one, plus filename. Args: segments: a list of phdr segments. address: a process wide code address. Returns: A tuple of filename and segment relative address. """ for segment in segments: for phdr in segment['dlpi_phdr']: start = segment['dlpi_addr'] + phdr['p_vaddr'] end = start + phdr['p_memsz'] if address >= start and address < end: return (segment['dlpi_name'], address - segment['dlpi_addr']) return ('(null)', address) def _Addr2Line(self, segments, address): """Use addr2line to decode a code address. Args: segments: A list of phdr segments. address: a code address. Returns: A list of dicts containing: function, filename, lineno. """ filename, address = self._DecodeAddressSegment(segments, address) filename = self._SelectModulePath(filename) if not os.path.exists(filename): return [{ 'function': 'Unknown_function', 'filename': 'unknown_file', 'lineno': -1, }] # Use address - 1 to get the call site instead of the line after. 
address -= 1 cmd = [ self.addr2line, '-f', '--inlines', '-e', filename, '0x%08x' % address, ] process = subprocess.Popen(cmd, stdout=subprocess.PIPE) process_stdout, _ = process.communicate() assert process.returncode == 0 lines = process_stdout.splitlines() assert len(lines) % 2 == 0 results = [] for index in xrange(len(lines) / 2): func = lines[index * 2] afilename, lineno = lines[index * 2 + 1].split(':', 1) results.append({ 'function': func, 'filename': afilename, 'lineno': int(lineno), }) return results def Decode(self, text): core = json.loads(text) for frame in core['frames']: frame['scopes'] = self._Addr2Line(core['segments'], frame['prog_ctr']) return core def LoadAndDecode(self, core_path): """Given a core.json file, load and embellish with decoded addresses. Args: core_path: source file containing a dump. Returns: An embellished core dump dict (decoded code addresses). """ core = json.load(open(core_path)) for frame in core['frames']: frame['scopes'] = self._Addr2Line(core['segments'], frame['prog_ctr']) return core def StackTrace(self, info): """Convert a decoded core.json dump to a simple stack trace. Args: info: core.json info with decoded code addresses. Returns: A list of dicts with filename, lineno, function (deepest first). """ trace = [] for frame in info['frames']: for scope in frame['scopes']: trace.append(scope) return trace def PrintTrace(self, trace, out): """Print a trace to a file like object. Args: trace: A list of [filename, lineno, function] (deepest first). out: file like object to output the trace to. 
""" for scope in trace: out.write('%s at %s:%d\n' % ( scope['function'], scope['filename'], scope['lineno'])) def Main(args): parser = optparse.OptionParser( usage='USAGE: %prog [options] <core.json>') parser.add_option('-m', '--main-nexe', dest='main_nexe', help='nexe to resolve NaClMain references from') parser.add_option('-n', '--nmf', dest='nmf_filename', default='-', help='nmf to resolve references from') parser.add_option('-a', '--addr2line', dest='addr2line', help='path to appropriate addr2line') parser.add_option('-L', '--library-path', dest='library_paths', action='append', default=[], help='path to search for shared libraries') parser.add_option('-p', '--platform', dest='platform', help='platform in a style match nmf files') options, args = parser.parse_args(args) if len(args) != 1: parser.print_help() sys.exit(1) decoder = CoreDecoder( main_nexe=options.main_nexe, nmf_filename=options.nmf_filename, addr2line=options.add2line, library_paths=options.library_paths, platform=options.platform) info = decoder.LoadAndDecode(args[0]) trace = decoder.StackTrace(info) decoder.PrintTrace(trace, sys.stdout) if __name__ == '__main__': Main(sys.argv[1:])
unknown
codeparrot/codeparrot-clean
<?php

namespace Illuminate\Tests\Support;

use ArrayIterator;
use Illuminate\Contracts\Support\Arrayable;
use Illuminate\Contracts\Support\Jsonable;
use IteratorAggregate;
use JsonSerializable;
use Traversable;

/**
 * Test fixture implementing Arrayable: always converts to ['foo' => 'bar'].
 */
class TestArrayableObject implements Arrayable
{
    public function toArray()
    {
        return ['foo' => 'bar'];
    }
}

/**
 * Test fixture implementing Jsonable: always serializes to '{"foo":"bar"}'.
 * The $options bitmask is accepted for interface compatibility but ignored.
 */
class TestJsonableObject implements Jsonable
{
    public function toJson($options = 0)
    {
        return '{"foo":"bar"}';
    }
}

/**
 * Test fixture implementing JsonSerializable with an array payload.
 */
class TestJsonSerializeObject implements JsonSerializable
{
    public function jsonSerialize(): array
    {
        return ['foo' => 'bar'];
    }
}

/**
 * Test fixture whose jsonSerialize() returns a scalar (string) rather than
 * an array — used to exercise scalar JSON-serialization paths.
 */
class TestJsonSerializeWithScalarValueObject implements JsonSerializable
{
    public function jsonSerialize(): string
    {
        return 'foo';
    }
}

/**
 * Test fixture that is both iterable and JSON-serializable over the same
 * backing $items payload.
 */
class TestTraversableAndJsonSerializableObject implements IteratorAggregate, JsonSerializable
{
    public $items;

    public function __construct($items = [])
    {
        $this->items = $items;
    }

    public function getIterator(): Traversable
    {
        return new ArrayIterator($this->items);
    }

    public function jsonSerialize(): array
    {
        // Round-trip through json_encode/json_decode to normalize nested
        // objects into plain associative arrays.
        return json_decode(json_encode($this->items), true);
    }
}
php
github
https://github.com/laravel/framework
tests/Support/Common.php
# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright (c) 2011-2014 OpenStack Foundation # Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A scheduler host manager which subclasses the new location in the Nova tree. This is a placeholder so that end users can gradually upgrade to use the new settings. TODO: remove in the K release """ from ironic.common import i18n from nova.openstack.common import log as logging from nova.scheduler import ironic_host_manager LOG = logging.getLogger(__name__) class IronicHostManager(ironic_host_manager.IronicHostManager): """Ironic HostManager class that subclasses the Nova in-tree version.""" def _do_deprecation_warning(self): LOG.warning(i18n._LW( 'This class (ironic.nova.scheduler.ironic_host_manager.' 'IronicHostManager) is deprecated and has moved into the Nova ' 'tree. Please set scheduler_host_manager = ' 'nova.scheduler.ironic_host_manager.IronicHostManager.')) def __init__(self): super(IronicHostManager, self).__init__() self._do_deprecation_warning()
unknown
codeparrot/codeparrot-clean
""" This module contains a quick ident server implentation: ``IdentServer``. IdentServer is a functional ident server that returns fake data and can be used with IRC bots or clients. This is useful for two reasons; it speeds up connection time, and some servers require an ident response for security purposes. """ import asyncore, asynchat import os import socket import uuid def get_operating_system(): """ Retreives an RFC-1340 compliant name of the operating system. If a name doesn't seem to be available, then UNKNOWN is returned instead. This function is limited by the possibilities of os.name values. """ os_map = { "nt": "WIN32", "posix": "UNIX", "mac": "MACOS", "os2": "OS/2", "ce": "WIN32" } return os_map.get(os.name, "UNKNOWN") def generate_fake_userid(): """ Create a fake user id. This gets used when FakeIdentServer is being used and the userid was initially set to None. It creates a random UUID as specified by RFC 4122. """ return str(uuid.uuid4()) class _IdentChannel(asynchat.async_chat): """ An instance of _IdentChannel represents a single request from a client to the IdentServer. It isn't designed to be used directly. """ def __init__(self, userid, sock, addr): """ Set up the object by specifying the terminator and initializing the input buffer and using the socket passed from the dispatcher. The terminator for the ident protocol is CR+LF. """ asynchat.async_chat.__init__(self, sock) self.set_terminator("\r\n") self.userid = userid self.collect_incoming_data = self._collect_incoming_data def found_terminator(self): """ When this is activated, it means that the terminator (\r\n) has been read. When that happens, we get the input data, clear the buffer, and then handle the data collected. """ request = "".join(self.incoming) sysos = get_operating_system() response = (request, "USERID", sysos, self.userid) self.incoming = [] self.push(":".join(response)) self.close_when_done() class IdentServer(asyncore.dispatcher): """ A quick and easy ident server. 
In order to run the ident server inline with an IRC bot or client, be sure to use ``start_all()`` instead of calling the ``start()`` method. """ def __init__(self, port=113, userid=None): asyncore.dispatcher.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.bind(("", port)) self.listen(5) self.userid = userid or generate_fake_userid() def handle_accept(self): """ Dispatch a request onto an _IdentChannel instance. """ _IdentChannel(self.userid, *self.accept()) def start(self): """ Begin serving ident requests on the port specified. """ asyncore.loop(map=self._map)
unknown
codeparrot/codeparrot-clean
""" This script performs an out of core groupby operation for different datasets. The datasets to be processed are normally in CSV files and the key and value to be used for the grouping are defined programatically via small functions (see toy_stream() and statsmodel_stream() for examples). Those datasets included in statsmodel will require this package installed (it is available in Anaconda, so it should be an easy dependency to solve). Usage: $ `script` dataset_class dataset_filename `dataset_class` can be either 'toy', 'randhie' or 'contributions'. 'toy' is a self-contained dataset and is meant for debugging mainly. The 'randhie' implements suport for the dataset with the same name included in the statsmodel package. Finally 'contributions' is meant to compute aggregations on the contributions to the different US campaigns. This latter requires a second argument (datatset_filename) which is a CSV file downloaded from: http://data.influenceexplorer.com/bulk/ """ import sys from itertools import islice import io import csv import numpy as np from dynd import nd, ndt import blz # Number of lines to read per each iteration LPC = 1000 # Max number of chars to map for a bytes or string in NumPy MAXCHARS = 64 def get_nptype(dtype, val): """Convert the `val` field in dtype into a numpy dtype.""" dytype = dtype[nd.as_py(dtype.field_names).index(val)] # strings and bytes cannot be natively represented in numpy if dytype == ndt.string: nptype = np.dtype("U%d" % MAXCHARS) elif dytype == ndt.bytes: nptype = np.dtype("S%d" % MAXCHARS) else: # There should be no problems with the rest nptype = dytype.as_numpy() return nptype def groupby(sreader, key, val, dtype, path=None, lines_per_chunk=LPC): """Group the `val` field in `sreader` stream of lines by `key` index. Parameters ---------- sreader : iterator Iterator over a stream of CSV lines. key : string The name of the field to be grouped by. val : string The field name with the values that have to be grouped. 
dtype : dynd dtype The DyND data type with all the fields of the CSV lines, including the `key` and `val` names. path : string The path of the file where the BLZ array with the final grouping will be stored. If None (default), the BLZ will be stored in-memory (and hence non-persistent). lines_per_chunk : int The number of chunks that have to be read to be grouped by in-memory. For optimal perfomance, some experimentation should be needed. The default value should work reasonably well, though. Returns ------- output : BLZ table Returns a BLZ table with column names that are the groups resulting from the groupby operation. The columns are filled with the `val` field of the lines delivered by `sreader`. """ try: nptype = get_nptype(dtype, val) except ValueError: raise ValueError("`val` should be a valid field") # Start reading chunks prev_keys = set() while True: ndbuf = nd.array(islice(sreader, lines_per_chunk), dtype) if len(ndbuf) == 0: break # CSV data exhausted # Do the groupby for this chunk keys = getattr(ndbuf, key) if val is None: vals = ndbuf else: vals = getattr(ndbuf, val) sby = nd.groupby(vals, keys) lkeys = nd.as_py(sby.groups) skeys = set(lkeys) # BLZ does not understand dynd objects (yet) sby = nd.as_py(sby.eval()) if len(prev_keys) == 0: # Add the initial keys to a BLZ table columns = [np.array(sby[i], nptype) for i in range(len(lkeys))] ssby = blz.btable(columns=columns, names=lkeys, rootdir=path, mode='w') else: # Have we new keys? 
new_keys = skeys.difference(prev_keys) for new_key in new_keys: # Get the index of the new key idx = lkeys.index(new_key) # and add the values as a new columns ssby.addcol(sby[idx], new_key, dtype=nptype) # Now fill the pre-existing keys existing_keys = skeys.intersection(prev_keys) for existing_key in existing_keys: # Get the index of the existing key idx = lkeys.index(existing_key) # and append the values here ssby[existing_key].append(sby[idx]) # Add the new keys to the existing ones prev_keys |= skeys # Before returning, flush all data into disk if path is not None: ssby.flush() return ssby # A CSV toy example csvbuf = u"""k1,v1,1,u1 k2,v2,2,u2 k3,v3,3,u3 k4,v4,4,u4 k5,v5,5,u5 k5,v6,6,u6 k4,v7,7,u7 k4,v8,8,u8 k4,v9,9,u9 k1,v10,10,u9 k5,v11,11,u11 """ def toy_stream(): sreader = csv.reader(io.StringIO(csvbuf)) # The dynd dtype for the CSV file above dt = ndt.type('{key: string, val1: string, val2: int32, val3: bytes}') # The name of the persisted table where the groupby will be stored return sreader, dt # This access different datasets in statsmodel package def statsmodel_stream(stream): import statsmodels.api as sm data = getattr(sm.datasets, stream) f = open(data.PATH, 'rb') if stream == 'randhie': # For a description of this dataset, see: # http://statsmodels.sourceforge.net/devel/datasets/generated/randhie.html f.readline() # read out the headers line dtypes = ('{mdvis: string, lncoins: float32, idp: int32,' ' lpi:float32, fmde: float32, physlm: float32,' ' disea: float32, hlthg: int32, hlthf: int32,' ' hlthp: int32}') else: raise NotImplementedError( "Importing this dataset has not been implemented yet") sreader = csv.reader(f) dtype = ndt.type(dtypes) return sreader, dtype # For contributions to state and federal US campaings. 
# CSV files can be downloaded from: # http://data.influenceexplorer.com/bulk/ def contributions_stream(stream_file): f = open(stream_file, 'rb') # Description of this dataset headers = f.readline().strip() # read out the headers line headers = headers.split(',') # The types for the different fields htypes = [ ndt.int32, ndt.int16, ndt.int16] + \ [ ndt.string ] * 4 + \ [ ndt.bool, ndt.float64 ] + \ [ ndt.string ] * 33 # Build the DyND data type dtype = ndt.make_struct(htypes, headers) sreader = csv.reader(f) return sreader, dtype if __name__ == "__main__": if len(sys.argv) == 1: print("Specify a dataset from: [toy, randhie, contributions]") sys.exit() # Which dataset do we want to group? which = sys.argv[1] if which == "toy": # Get the CSV iterator and dtype of fields sreader, dt = toy_stream() # Do the actual sortby ssby = groupby(sreader, 'key', 'val1', dtype=dt, path=None, lines_per_chunk=2) elif which == "randhie": # Get the CSV iterator and dtype of fields sreader, dt = statsmodel_stream(which) # Do the actual sortby ssby = groupby(sreader, 'mdvis', 'lncoins', dtype=dt, path=None) elif which == "contributions": # Get the CSV iterator and dtype of fields if len(sys.argv) < 3: print("Please specify a contributions file downloaded from: " "http://data.influenceexplorer.com/bulk/") sys.exit() stream_file = sys.argv[2] sreader, dt = contributions_stream(stream_file) # Do the actual sortby ssby = groupby( sreader, 'recipient_party', 'amount', dtype=dt, path='contribs.blz') else: raise NotImplementedError( "parsing for `%s` dataset not implemented" % which) # Retrieve the data in the BLZ structure #ssby = blz.from_blz(path) # open from disk, if ssby is persistent for key in ssby.names: values = ssby[key] if which in ('toy', 'randhie'): print "key:", key, values elif which == 'contributions': print "Party: '%s'\tAmount: %13.2f\t#contribs: %8d" % \ (key, values.sum(), len(values))
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: junos_facts version_added: "2.1" author: "Nathaniel Case (@qalthos)" short_description: Collect facts from remote devices running Juniper Junos description: - Collects fact information from a remote device running the Junos operating system. By default, the module will collect basic fact information from the device to be included with the hostvars. Additional fact information can be collected based on the configured set of arguments. extends_documentation_fragment: junos options: gather_subset: description: - When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all, hardware, config, and interfaces. Can specify a list of values to include a larger subset. Values can also be used with an initial C(M(!)) to specify that a specific subset should not be collected. To maintain backward compatbility old style facts can be retrieved using all value, this reqires junos-eznc to be installed as a prerequisite. required: false default: "!config" version_added: "2.3" config_format: description: - The I(config_format) argument specifies the format of the configuration when serializing output from the device. This argument is applicable only when C(config) value is present in I(gather_subset). The I(config_format) should be supported by the junos version running on device. required: false default: text choices: ['xml', 'set', 'text', 'json'] version_added: "2.3" requirements: - ncclient (>=v0.5.2) notes: - Ensure I(config_format) used to retrieve configuration from device is supported by junos version running on device. 
- This module requires the netconf system service be enabled on the remote device being managed. - Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4. """ EXAMPLES = """ - name: collect default set of facts junos_facts: - name: collect default set of facts and configuration junos_facts: gather_subset: config """ RETURN = """ ansible_facts: description: Returns the facts collect from the device returned: always type: dict """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.junos import junos_argument_spec, check_args, get_param from ansible.module_utils.junos import get_configuration from ansible.module_utils.pycompat24 import get_exception from ansible.module_utils.netconf import send_request from ansible.module_utils.six import iteritems try: from lxml.etree import Element, SubElement, tostring except ImportError: from xml.etree.ElementTree import Element, SubElement, tostring try: from jnpr.junos import Device from jnpr.junos.exception import ConnectError HAS_PYEZ = True except ImportError: HAS_PYEZ = False USE_PERSISTENT_CONNECTION = True class FactsBase(object): def __init__(self, module): self.module = module self.facts = dict() def populate(self): raise NotImplementedError def cli(self, command): reply = command(self.module, command) output = reply.find('.//output') if not output: self.module.fail_json(msg='failed to retrieve facts for command %s' % command) return str(output.text).strip() def rpc(self, rpc): return send_request(self.module, Element(rpc)) def get_text(self, ele, tag): try: return str(ele.find(tag).text).strip() except AttributeError: pass class Default(FactsBase): def populate(self): reply = self.rpc('get-software-information') data = reply.find('.//software-information') self.facts.update({ 'hostname': self.get_text(data, 'host-name'), 'version': self.get_text(data, 'junos-version'), 'model': self.get_text(data, 'product-model') }) reply = self.rpc('get-chassis-inventory') 
data = reply.find('.//chassis-inventory/chassis') self.facts['serialnum'] = self.get_text(data, 'serial-number') class Config(FactsBase): def populate(self): config_format = self.module.params['config_format'] reply = get_configuration(self.module, format=config_format) if config_format == 'xml': config = tostring(reply.find('configuration')).strip() elif config_format == 'text': config = self.get_text(reply, 'configuration-text') elif config_format == 'json': config = str(reply.text).strip() elif config_format == 'set': config = self.get_text(reply, 'configuration-set') self.facts['config'] = config class Hardware(FactsBase): def populate(self): reply = self.rpc('get-system-memory-information') data = reply.find('.//system-memory-information/system-memory-summary-information') self.facts.update({ 'memfree_mb': int(self.get_text(data, 'system-memory-free')), 'memtotal_mb': int(self.get_text(data, 'system-memory-total')) }) reply = self.rpc('get-system-storage') data = reply.find('.//system-storage-information') filesystems = list() for obj in data: filesystems.append(self.get_text(obj, 'filesystem-name')) self.facts['filesystems'] = filesystems reply = self.rpc('get-route-engine-information') data = reply.find('.//route-engine-information') routing_engines = dict() for obj in data: slot = self.get_text(obj, 'slot') routing_engines.update({slot: {}}) routing_engines[slot].update({'slot': slot}) for child in obj: if child.text != "\n": routing_engines[slot].update({child.tag.replace("-", "_"): child.text}) self.facts['routing_engines'] = routing_engines if len(data) > 1: self.facts['has_2RE'] = True else: self.facts['has_2RE'] = False reply = self.rpc('get-chassis-inventory') data = reply.findall('.//chassis-module') modules = list() for obj in data: mod = dict() for child in obj: if child.text != "\n": mod.update({child.tag.replace("-", "_"): child.text}) modules.append(mod) self.facts['modules'] = modules class Interfaces(FactsBase): def populate(self): ele = 
Element('get-interface-information') SubElement(ele, 'detail') reply = send_request(self.module, ele) interfaces = {} for item in reply[0]: name = self.get_text(item, 'name') obj = { 'oper-status': self.get_text(item, 'oper-status'), 'admin-status': self.get_text(item, 'admin-status'), 'speed': self.get_text(item, 'speed'), 'macaddress': self.get_text(item, 'hardware-physical-address'), 'mtu': self.get_text(item, 'mtu'), 'type': self.get_text(item, 'if-type'), } interfaces[name] = obj self.facts['interfaces'] = interfaces class Facts(FactsBase): def _connect(self, module): host = get_param(module, 'host') kwargs = { 'port': get_param(module, 'port') or 830, 'user': get_param(module, 'username') } if get_param(module, 'password'): kwargs['passwd'] = get_param(module, 'password') if get_param(module, 'ssh_keyfile'): kwargs['ssh_private_key_file'] = get_param(module, 'ssh_keyfile') kwargs['gather_facts'] = False try: device = Device(host, **kwargs) device.open() device.timeout = get_param(module, 'timeout') or 10 except ConnectError: exc = get_exception() module.fail_json('unable to connect to %s: %s' % (host, str(exc))) return device def populate(self): device = self._connect(self.module) facts = dict(device.facts) if '2RE' in facts: facts['has_2RE'] = facts['2RE'] del facts['2RE'] facts['version_info'] = dict(facts['version_info']) if 'junos_info' in facts: for key, value in facts['junos_info'].items(): if 'object' in value: value['object'] = dict(value['object']) return facts FACT_SUBSETS = dict( default=Default, hardware=Hardware, config=Config, interfaces=Interfaces ) VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) def main(): """ Main entry point for AnsibleModule """ argument_spec = dict( gather_subset=dict(default=['!config'], type='list'), config_format=dict(default='text', choices=['xml', 'text', 'set', 'json']), ) argument_spec.update(junos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() 
check_args(module, warnings) gather_subset = module.params['gather_subset'] ofacts = False runable_subsets = set() exclude_subsets = set() for subset in gather_subset: if subset == 'all': runable_subsets.update(VALID_SUBSETS) ofacts = True continue if subset.startswith('!'): subset = subset[1:] if subset == 'all': exclude_subsets.update(VALID_SUBSETS) ofacts = False continue exclude = True else: exclude = False if subset not in VALID_SUBSETS: module.fail_json(msg='Subset must be one of [%s], got %s' % (', '.join(VALID_SUBSETS), subset)) if exclude: exclude_subsets.add(subset) else: runable_subsets.add(subset) if not runable_subsets: runable_subsets.update(VALID_SUBSETS) runable_subsets.difference_update(exclude_subsets) runable_subsets.add('default') facts = dict() facts['gather_subset'] = list(runable_subsets) instances = list() for key in runable_subsets: instances.append(FACT_SUBSETS[key](module)) for inst in instances: inst.populate() facts.update(inst.facts) ansible_facts = dict() for key, value in iteritems(facts): key = 'ansible_net_%s' % key ansible_facts[key] = value if ofacts: if HAS_PYEZ: ansible_facts.update(Facts(module).populate()) else: warnings += ['junos-eznc is required to gather old style facts but does not appear to be installed. ' 'It can be installed using `pip install junos-eznc`'] module.exit_json(ansible_facts=ansible_facts, warnings=warnings) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
import sys import getopt import re import string # # Originally written by Einar Lielmanis et al., # Conversion to python by Einar Lielmanis, einar@jsbeautifier.org, # MIT licence, enjoy. # # Python is not my native language, feel free to push things around. # # Use either from command line (script displays its usage when run # without any parameters), # # # or, alternatively, use it as a module: # # import jsbeautifier # res = jsbeautifier.beautify('your javascript string') # res = jsbeautifier.beautify_file('some_file.js') # # you may specify some options: # # opts = jsbeautifier.default_options() # opts.indent_size = 2 # res = jsbeautifier.beautify('some javascript', opts) # # # Here are the available options: (read source) class BeautifierOptions: def __init__(self): self.indent_size = 4 self.indent_char = ' ' self.indent_with_tabs = False self.preserve_newlines = True self.max_preserve_newlines = 10. self.jslint_happy = False self.brace_style = 'collapse' self.keep_array_indentation = False self.keep_function_indentation = False self.eval_code = False def __repr__(self): return \ """indent_size = %d indent_char = [%s] preserve_newlines = %s max_preserve_newlines = %d jslint_happy = %s indent_with_tabs = %s brace_style = %s keep_array_indentation = %s eval_code = %s """ % ( self.indent_size, self.indent_char, self.preserve_newlines, self.max_preserve_newlines, self.jslint_happy, self.indent_with_tabs, self.brace_style, self.keep_array_indentation, self.eval_code, ) class BeautifierFlags: def __init__(self, mode): self.previous_mode = 'BLOCK' self.mode = mode self.var_line = False self.var_line_tainted = False self.var_line_reindented = False self.in_html_comment = False self.if_line = False self.in_case = False self.eat_next_space = False self.indentation_baseline = -1 self.indentation_level = 0 self.ternary_depth = 0 def default_options(): return BeautifierOptions() def beautify(string, opts = default_options() ): b = Beautifier() return b.beautify(string, 
opts) def beautify_file(file_name, opts = default_options() ): if file_name == '-': # stdin f = sys.stdin else: try: f = open(file_name) except Exception as ex: return 'The file could not be opened' b = Beautifier() return b.beautify(''.join(f.readlines()), opts) def usage(): print("""Javascript beautifier (http://jsbeautifier.org/) Usage: jsbeautifier.py [options] <infile> <infile> can be "-", which means stdin. <outfile> defaults to stdout Input options: -i, --stdin read input from stdin Output options: -s, --indent-size=NUMBER indentation size. (default 4). -c, --indent-char=CHAR character to indent with. (default space). -t, --indent-with-tabs Indent with tabs, overrides -s and -c -d, --disable-preserve-newlines do not preserve existing line breaks. -j, --jslint-happy more jslint-compatible output -b, --brace-style=collapse brace style (collapse, expand, end-expand) -k, --keep-array-indentation keep array indentation. -o, --outfile=FILE specify a file to output to (default stdout) -f, --keep-function-indentation Do not re-indent function bodies defined in var lines. Rarely needed options: --eval-code evaluate code if a JS interpreter is installed. May be useful with some obfuscated script but poses a potential security issue. -l, --indent-level=NUMBER initial indentation level. (default 0). -h, --help, --usage prints this help statement. 
""") class Beautifier: def __init__(self, opts = default_options() ): self.opts = opts self.blank_state() def blank_state(self): # internal flags self.flags = BeautifierFlags('BLOCK') self.flag_store = [] self.wanted_newline = False self.just_added_newline = False self.do_block_just_closed = False if self.opts.indent_with_tabs: self.indent_string = "\t" else: self.indent_string = self.opts.indent_char * self.opts.indent_size self.preindent_string = '' self.last_word = '' # last TK_WORD seen self.last_type = 'TK_START_EXPR' # last token type self.last_text = '' # last token text self.last_last_text = '' # pre-last token text self.input = None self.output = [] # formatted javascript gets built here self.whitespace = ["\n", "\r", "\t", " "] self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$' self.digits = '0123456789' self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! !! , : ? ^ ^= |= ::' self.punct += ' <?= <? 
?> <%= <% %>' self.punct = self.punct.split(' ') # Words which always should start on a new line self.line_starters = 'continue,try,throw,return,var,if,switch,case,default,for,while,break,function'.split(',') self.set_mode('BLOCK') global parser_pos parser_pos = 0 def beautify(self, s, opts = None ): if opts != None: self.opts = opts if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']: raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".')) self.blank_state() while s and s[0] in [' ', '\t']: self.preindent_string += s[0] s = s[1:] #self.input = self.unpack(s, opts.eval_code) # CORTESI self.input = s parser_pos = 0 while True: token_text, token_type = self.get_next_token() #print (token_text, token_type, self.flags.mode) if token_type == 'TK_EOF': break handlers = { 'TK_START_EXPR': self.handle_start_expr, 'TK_END_EXPR': self.handle_end_expr, 'TK_START_BLOCK': self.handle_start_block, 'TK_END_BLOCK': self.handle_end_block, 'TK_WORD': self.handle_word, 'TK_SEMICOLON': self.handle_semicolon, 'TK_STRING': self.handle_string, 'TK_EQUALS': self.handle_equals, 'TK_OPERATOR': self.handle_operator, 'TK_BLOCK_COMMENT': self.handle_block_comment, 'TK_INLINE_COMMENT': self.handle_inline_comment, 'TK_COMMENT': self.handle_comment, 'TK_UNKNOWN': self.handle_unknown, } handlers[token_type](token_text) self.last_last_text = self.last_text self.last_type = token_type self.last_text = token_text sweet_code = self.preindent_string + re.sub('[\n ]+$', '', ''.join(self.output)) return sweet_code def unpack(self, source, evalcode=False): import jsbeautifier.unpackers as unpackers try: return unpackers.run(source, evalcode) except unpackers.UnpackingError as error: print('error:', error) return '' def trim_output(self, eat_newlines = False): while len(self.output) \ and ( self.output[-1] == ' '\ or self.output[-1] == self.indent_string \ or self.output[-1] == self.preindent_string \ or (eat_newlines and self.output[-1] in ['\n', '\r'])): 
self.output.pop() def is_special_word(self, s): return s in ['case', 'return', 'do', 'if', 'throw', 'else']; def is_array(self, mode): return mode in ['[EXPRESSION]', '[INDENDED-EXPRESSION]'] def is_expression(self, mode): return mode in ['[EXPRESSION]', '[INDENDED-EXPRESSION]', '(EXPRESSION)', '(FOR-EXPRESSION)', '(COND-EXPRESSION)'] def append_newline_forced(self): old_array_indentation = self.opts.keep_array_indentation self.opts.keep_array_indentation = False self.append_newline() self.opts.keep_array_indentation = old_array_indentation def append_newline(self, ignore_repeated = True): self.flags.eat_next_space = False if self.opts.keep_array_indentation and self.is_array(self.flags.mode): return self.flags.if_line = False self.trim_output() if len(self.output) == 0: # no newline on start of file return if self.output[-1] != '\n' or not ignore_repeated: self.just_added_newline = True self.output.append('\n') if self.preindent_string: self.output.append(self.preindent_string) for i in range(self.flags.indentation_level): self.output.append(self.indent_string) if self.flags.var_line and self.flags.var_line_reindented: self.output.append(self.indent_string) def append(self, s): if s == ' ': # do not add just a single space after the // comment, ever if self.last_type == 'TK_COMMENT': return self.append_newline() # make sure only single space gets drawn if self.flags.eat_next_space: self.flags.eat_next_space = False elif len(self.output) and self.output[-1] not in [' ', '\n', self.indent_string]: self.output.append(' ') else: self.just_added_newline = False self.flags.eat_next_space = False self.output.append(s) def indent(self): self.flags.indentation_level = self.flags.indentation_level + 1 def remove_indent(self): if len(self.output) and self.output[-1] in [self.indent_string, self.preindent_string]: self.output.pop() def set_mode(self, mode): prev = BeautifierFlags('BLOCK') if self.flags: self.flag_store.append(self.flags) prev = self.flags self.flags = 
BeautifierFlags(mode) if len(self.flag_store) == 1: self.flags.indentation_level = 0 else: self.flags.indentation_level = prev.indentation_level if prev.var_line and prev.var_line_reindented: self.flags.indentation_level = self.flags.indentation_level + 1 self.flags.previous_mode = prev.mode def restore_mode(self): self.do_block_just_closed = self.flags.mode == 'DO_BLOCK' if len(self.flag_store) > 0: mode = self.flags.mode self.flags = self.flag_store.pop() self.flags.previous_mode = mode def get_next_token(self): global parser_pos self.n_newlines = 0 if parser_pos >= len(self.input): return '', 'TK_EOF' self.wanted_newline = False c = self.input[parser_pos] parser_pos += 1 keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode) if keep_whitespace: # slight mess to allow nice preservation of array indentation and reindent that correctly # first time when we get to the arrays: # var a = [ # ....'something' # we make note of whitespace_count = 4 into flags.indentation_baseline # so we know that 4 whitespaces in original source match indent_level of reindented source # # and afterwards, when we get to # 'something, # .......'something else' # we know that this should be indented to indent_level + (7 - indentation_baseline) spaces whitespace_count = 0 while c in self.whitespace: if c == '\n': self.trim_output() self.output.append('\n') self.just_added_newline = True whitespace_count = 0 elif c == '\t': whitespace_count += 4 elif c == '\r': pass else: whitespace_count += 1 if parser_pos >= len(self.input): return '', 'TK_EOF' c = self.input[parser_pos] parser_pos += 1 if self.flags.indentation_baseline == -1: self.flags.indentation_baseline = whitespace_count if self.just_added_newline: for i in range(self.flags.indentation_level + 1): self.output.append(self.indent_string) if self.flags.indentation_baseline != -1: for i in range(whitespace_count - self.flags.indentation_baseline): self.output.append(' ') else: # not keep_whitespace while c 
in self.whitespace: if c == '\n': if self.opts.max_preserve_newlines == 0 or self.opts.max_preserve_newlines > self.n_newlines: self.n_newlines += 1 if parser_pos >= len(self.input): return '', 'TK_EOF' c = self.input[parser_pos] parser_pos += 1 if self.opts.preserve_newlines and self.n_newlines > 1: for i in range(self.n_newlines): self.append_newline(i == 0) self.just_added_newline = True self.wanted_newline = self.n_newlines > 0 if c in self.wordchar: if parser_pos < len(self.input): while self.input[parser_pos] in self.wordchar: c = c + self.input[parser_pos] parser_pos += 1 if parser_pos == len(self.input): break # small and surprisingly unugly hack for 1E-10 representation if parser_pos != len(self.input) and self.input[parser_pos] in '+-' \ and re.match('^[0-9]+[Ee]$', c): sign = self.input[parser_pos] parser_pos += 1 t = self.get_next_token() c += sign + t[0] return c, 'TK_WORD' if c == 'in': # in is an operator, need to hack return c, 'TK_OPERATOR' if self.wanted_newline and \ self.last_type != 'TK_OPERATOR' and\ self.last_type != 'TK_EQUALS' and\ not self.flags.if_line and \ (self.opts.preserve_newlines or self.last_text != 'var'): self.append_newline() return c, 'TK_WORD' if c in '([': return c, 'TK_START_EXPR' if c in ')]': return c, 'TK_END_EXPR' if c == '{': return c, 'TK_START_BLOCK' if c == '}': return c, 'TK_END_BLOCK' if c == ';': return c, 'TK_SEMICOLON' if c == '/': comment = '' inline_comment = True comment_mode = 'TK_INLINE_COMMENT' if self.input[parser_pos] == '*': # peek /* .. 
*/ comment parser_pos += 1 if parser_pos < len(self.input): while not (self.input[parser_pos] == '*' and \ parser_pos + 1 < len(self.input) and \ self.input[parser_pos + 1] == '/')\ and parser_pos < len(self.input): c = self.input[parser_pos] comment += c if c in '\r\n': comment_mode = 'TK_BLOCK_COMMENT' parser_pos += 1 if parser_pos >= len(self.input): break parser_pos += 2 return '/*' + comment + '*/', comment_mode if self.input[parser_pos] == '/': # peek // comment comment = c while self.input[parser_pos] not in '\r\n': comment += self.input[parser_pos] parser_pos += 1 if parser_pos >= len(self.input): break parser_pos += 1 if self.wanted_newline: self.append_newline() return comment, 'TK_COMMENT' if c == "'" or c == '"' or \ (c == '/' and ((self.last_type == 'TK_WORD' and self.is_special_word(self.last_text)) or \ (self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(FOR-EXPRESSION)', '(COND-EXPRESSION)']) or \ (self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR', 'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON']))): sep = c esc = False resulting_string = c in_char_class = False if parser_pos < len(self.input): if sep == '/': # handle regexp in_char_class = False while esc or in_char_class or self.input[parser_pos] != sep: resulting_string += self.input[parser_pos] if not esc: esc = self.input[parser_pos] == '\\' if self.input[parser_pos] == '[': in_char_class = True elif self.input[parser_pos] == ']': in_char_class = False else: esc = False parser_pos += 1 if parser_pos >= len(self.input): # incomplete regex when end-of-file reached # bail out with what has received so far return resulting_string, 'TK_STRING' else: # handle string while esc or self.input[parser_pos] != sep: resulting_string += self.input[parser_pos] if not esc: esc = self.input[parser_pos] == '\\' else: esc = False parser_pos += 1 if parser_pos >= len(self.input): # incomplete string when end-of-file reached # bail out with what has received so 
far return resulting_string, 'TK_STRING' parser_pos += 1 resulting_string += sep if sep == '/': # regexps may have modifiers /regexp/MOD, so fetch those too while parser_pos < len(self.input) and self.input[parser_pos] in self.wordchar: resulting_string += self.input[parser_pos] parser_pos += 1 return resulting_string, 'TK_STRING' if c == '#': # she-bang if len(self.output) == 0 and len(self.input) > 1 and self.input[parser_pos] == '!': resulting_string = c while parser_pos < len(self.input) and c != '\n': c = self.input[parser_pos] resulting_string += c parser_pos += 1 self.output.append(resulting_string.strip() + "\n") self.append_newline() return self.get_next_token() # Spidermonkey-specific sharp variables for circular references # https://developer.mozilla.org/En/Sharp_variables_in_JavaScript # http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935 sharp = '#' if parser_pos < len(self.input) and self.input[parser_pos] in self.digits: while True: c = self.input[parser_pos] sharp += c parser_pos += 1 if parser_pos >= len(self.input) or c == '#' or c == '=': break if c == '#' or parser_pos >= len(self.input): pass elif self.input[parser_pos] == '[' and self.input[parser_pos + 1] == ']': sharp += '[]' parser_pos += 2 elif self.input[parser_pos] == '{' and self.input[parser_pos + 1] == '}': sharp += '{}' parser_pos += 2 return sharp, 'TK_WORD' if c == '<' and self.input[parser_pos - 1 : parser_pos + 3] == '<!--': parser_pos += 3 c = '<!--' while parser_pos < len(self.input) and self.input[parser_pos] != '\n': c += self.input[parser_pos] parser_pos += 1 self.flags.in_html_comment = True return c, 'TK_COMMENT' if c == '-' and self.flags.in_html_comment and self.input[parser_pos - 1 : parser_pos + 2] == '-->': self.flags.in_html_comment = False parser_pos += 2 if self.wanted_newline: self.append_newline() return '-->', 'TK_COMMENT' if c in self.punct: while parser_pos < len(self.input) and c + self.input[parser_pos] in self.punct: c += 
self.input[parser_pos] parser_pos += 1 if parser_pos >= len(self.input): break if c == '=': return c, 'TK_EQUALS' else: return c, 'TK_OPERATOR' return c, 'TK_UNKNOWN' def handle_start_expr(self, token_text): if token_text == '[': if self.last_type == 'TK_WORD' or self.last_text == ')': if self.last_text in self.line_starters: self.append(' ') self.set_mode('(EXPRESSION)') self.append(token_text) return if self.flags.mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']: if self.last_last_text == ']' and self.last_text == ',': # ], [ goes to a new line if self.flags.mode == '[EXPRESSION]': self.flags.mode = '[INDENTED-EXPRESSION]' if not self.opts.keep_array_indentation: self.indent() self.set_mode('[EXPRESSION]') if not self.opts.keep_array_indentation: self.append_newline() elif self.last_text == '[': if self.flags.mode == '[EXPRESSION]': self.flags.mode = '[INDENTED-EXPRESSION]' if not self.opts.keep_array_indentation: self.indent() self.set_mode('[EXPRESSION]') if not self.opts.keep_array_indentation: self.append_newline() else: self.set_mode('[EXPRESSION]') else: self.set_mode('[EXPRESSION]') else: if self.last_text == 'for': self.set_mode('(FOR-EXPRESSION)') elif self.last_text in ['if', 'while']: self.set_mode('(COND-EXPRESSION)') else: self.set_mode('(EXPRESSION)') if self.last_text == ';' or self.last_type == 'TK_START_BLOCK': self.append_newline() elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.last_text == '.': # do nothing on (( and )( and ][ and ]( and .( if self.wanted_newline: self.append_newline(); elif self.last_type not in ['TK_WORD', 'TK_OPERATOR']: self.append(' ') elif self.last_word == 'function' or self.last_word == 'typeof': # function() vs function (), typeof() vs typeof () if self.opts.jslint_happy: self.append(' ') elif self.last_text in self.line_starters or self.last_text == 'catch': self.append(' ') self.append(token_text) def handle_end_expr(self, token_text): if token_text == ']': if 
self.opts.keep_array_indentation: if self.last_text == '}': self.remove_indent() self.append(token_text) self.restore_mode() return else: if self.flags.mode == '[INDENTED-EXPRESSION]': if self.last_text == ']': self.restore_mode() self.append_newline() self.append(token_text) return self.restore_mode() self.append(token_text) def handle_start_block(self, token_text): if self.last_word == 'do': self.set_mode('DO_BLOCK') else: self.set_mode('BLOCK') if self.opts.brace_style == 'expand': if self.last_type != 'TK_OPERATOR': if self.last_text == '=' or (self.is_special_word(self.last_text) and self.last_text != 'else'): self.append(' ') else: self.append_newline(True) self.append(token_text) self.indent() else: if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']: if self.last_type == 'TK_START_BLOCK': self.append_newline() else: self.append(' ') else: # if TK_OPERATOR or TK_START_EXPR if self.is_array(self.flags.previous_mode) and self.last_text == ',': if self.last_last_text == '}': self.append(' ') else: self.append_newline() self.indent() self.append(token_text) def handle_end_block(self, token_text): self.restore_mode() if self.opts.brace_style == 'expand': if self.last_text != '{': self.append_newline() else: if self.last_type == 'TK_START_BLOCK': if self.just_added_newline: self.remove_indent() else: # {} self.trim_output() else: if self.is_array(self.flags.mode) and self.opts.keep_array_indentation: self.opts.keep_array_indentation = False self.append_newline() self.opts.keep_array_indentation = True else: self.append_newline() self.append(token_text) def handle_word(self, token_text): if self.do_block_just_closed: self.append(' ') self.append(token_text) self.append(' ') self.do_block_just_closed = False return if token_text == 'function': if self.flags.var_line: self.flags.var_line_reindented = not self.opts.keep_function_indentation if (self.just_added_newline or self.last_text == ';') and self.last_text != '{': # make sure there is a nice clean space of 
at least one blank line # before a new function definition have_newlines = self.n_newlines if not self.just_added_newline: have_newlines = 0 if not self.opts.preserve_newlines: have_newlines = 1 for i in range(2 - have_newlines): self.append_newline(False) if token_text in ['case', 'default']: if self.last_text == ':': self.remove_indent() else: self.flags.indentation_level -= 1 self.append_newline() self.flags.indentation_level += 1 self.append(token_text) self.flags.in_case = True return prefix = 'NONE' if self.last_type == 'TK_END_BLOCK': if token_text not in ['else', 'catch', 'finally']: prefix = 'NEWLINE' else: if self.opts.brace_style in ['expand', 'end-expand']: prefix = 'NEWLINE' else: prefix = 'SPACE' self.append(' ') elif self.last_type == 'TK_SEMICOLON' and self.flags.mode in ['BLOCK', 'DO_BLOCK']: prefix = 'NEWLINE' elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode): prefix = 'SPACE' elif self.last_type == 'TK_STRING': prefix = 'NEWLINE' elif self.last_type == 'TK_WORD': if self.last_text == 'else': # eat newlines between ...else *** some_op... 
# won't preserve extra newlines in this place (if any), but don't care that much self.trim_output(True) prefix = 'SPACE' elif self.last_type == 'TK_START_BLOCK': prefix = 'NEWLINE' elif self.last_type == 'TK_END_EXPR': self.append(' ') prefix = 'NEWLINE' if self.flags.if_line and self.last_type == 'TK_END_EXPR': self.flags.if_line = False if token_text in self.line_starters: if self.last_text == 'else': prefix = 'SPACE' else: prefix = 'NEWLINE' if token_text == 'function' and self.last_text in ['get', 'set']: prefix = 'SPACE' if token_text in ['else', 'catch', 'finally']: if self.last_type != 'TK_END_BLOCK' \ or self.opts.brace_style == 'expand' \ or self.opts.brace_style == 'end-expand': self.append_newline() else: self.trim_output(True) self.append(' ') elif prefix == 'NEWLINE': if token_text == 'function' and (self.last_type == 'TK_START_EXPR' or self.last_text in '=,'): # no need to force newline on "function" - # (function... pass elif token_text == 'function' and self.last_text == 'new': self.append(' ') elif self.is_special_word(self.last_text): # no newline between return nnn self.append(' ') elif self.last_type != 'TK_END_EXPR': if (self.last_type != 'TK_START_EXPR' or token_text != 'var') and self.last_text != ':': # no need to force newline on VAR - # for (var x = 0... 
if token_text == 'if' and self.last_word == 'else' and self.last_text != '{': self.append(' ') else: self.flags.var_line = False self.flags.var_line_reindented = False self.append_newline() elif token_text in self.line_starters and self.last_text != ')': self.flags.var_line = False self.flags.var_line_reindented = False self.append_newline() elif self.is_array(self.flags.mode) and self.last_text == ',' and self.last_last_text == '}': self.append_newline() # }, in lists get a newline elif prefix == 'SPACE': self.append(' ') self.append(token_text) self.last_word = token_text if token_text == 'var': self.flags.var_line = True self.flags.var_line_reindented = False self.flags.var_line_tainted = False if token_text == 'if': self.flags.if_line = True if token_text == 'else': self.flags.if_line = False def handle_semicolon(self, token_text): self.append(token_text) self.flags.var_line = False self.flags.var_line_reindented = False if self.flags.mode == 'OBJECT': # OBJECT mode is weird and doesn't get reset too well. self.flags.mode = 'BLOCK' def handle_string(self, token_text): if self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(COND-EXPRESSION)', '(FOR-EXPRESSION)']: self.append(' ') if self.last_type in ['TK_STRING', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_SEMICOLON']: self.append_newline() elif self.last_type == 'TK_WORD': self.append(' ') # Try to replace readable \x-encoded characters with their equivalent, # if it is possible (e.g. '\x41\x42\x43\x01' becomes 'ABC\x01'). 
def unescape(match): block, code = match.group(0, 1) char = chr(int(code, 16)) if block.count('\\') == 1 and char in string.printable: return char return block token_text = re.sub(r'\\{1,2}x([a-fA-F0-9]{2})', unescape, token_text) self.append(token_text) def handle_equals(self, token_text): if self.flags.var_line: # just got an '=' in a var-line, different line breaking rules will apply self.flags.var_line_tainted = True self.append(' ') self.append(token_text) self.append(' ') def handle_operator(self, token_text): space_before = True space_after = True if self.flags.var_line and token_text == ',' and self.is_expression(self.flags.mode): # do not break on comma, for ( var a = 1, b = 2 self.flags.var_line_tainted = False if self.flags.var_line and token_text == ',': if self.flags.var_line_tainted: self.append(token_text) self.flags.var_line_reindented = True self.flags.var_line_tainted = False self.append_newline() return else: self.flags.var_line_tainted = False if self.is_special_word(self.last_text): # return had a special handling in TK_WORD self.append(' ') self.append(token_text) return if token_text == ':' and self.flags.in_case: self.append(token_text) self.append_newline() self.flags.in_case = False return if token_text == '::': # no spaces around the exotic namespacing syntax operator self.append(token_text) return if token_text == ',': if self.flags.var_line: if self.flags.var_line_tainted: # This never happens, as it's handled previously, right? 
self.append(token_text) self.append_newline() self.flags.var_line_tainted = False else: self.append(token_text) self.append(' ') elif self.last_type == 'TK_END_BLOCK' and self.flags.mode != '(EXPRESSION)': self.append(token_text) if self.flags.mode == 'OBJECT' and self.last_text == '}': self.append_newline() else: self.append(' ') else: if self.flags.mode == 'OBJECT': self.append(token_text) self.append_newline() else: # EXPR or DO_BLOCK self.append(token_text) self.append(' ') # comma handled return elif token_text in ['--', '++', '!'] \ or (token_text in ['+', '-'] \ and self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']) \ or self.last_text in self.line_starters: space_before = False space_after = False if self.last_text == ';' and self.is_expression(self.flags.mode): # for (;; ++i) # ^^ space_before = True if self.last_type == 'TK_WORD' and self.last_text in self.line_starters: space_before = True if self.flags.mode == 'BLOCK' and self.last_text in ['{', ';']: # { foo: --i } # foo(): --bar self.append_newline() elif token_text == '.': # decimal digits or object.property space_before = False elif token_text == ':': if self.flags.ternary_depth == 0: self.flags.mode = 'OBJECT' space_before = False else: self.flags.ternary_depth -= 1 elif token_text == '?': self.flags.ternary_depth += 1 if space_before: self.append(' ') self.append(token_text) if space_after: self.append(' ') def handle_block_comment(self, token_text): lines = token_text.replace('\x0d', '').split('\x0a') # all lines start with an asterisk? that's a proper box comment if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')): self.append_newline() self.append(lines[0]) for line in lines[1:]: self.append_newline() self.append(' ' + line.strip()) else: # simple block comment: leave intact if len(lines) > 1: # multiline comment starts on a new line self.append_newline() else: # single line /* ... 
*/ comment stays on the same line self.append(' ') for line in lines: self.append(line) self.append('\n') self.append_newline() def handle_inline_comment(self, token_text): self.append(' ') self.append(token_text) if self.is_expression(self.flags.mode): self.append(' ') else: self.append_newline_forced() def handle_comment(self, token_text): if self.wanted_newline: self.append_newline() else: self.append(' ') self.append(token_text) self.append_newline_forced() def handle_unknown(self, token_text): if self.last_text in ['return', 'throw']: self.append(' ') self.append(token_text) def main(): argv = sys.argv[1:] try: opts, args = getopt.getopt(argv, "s:c:o:djbkil:htf", ['indent-size=','indent-char=','outfile=', 'disable-preserve-newlines', 'jslint-happy', 'brace-style=', 'keep-array-indentation', 'indent-level=', 'help', 'usage', 'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation']) except getopt.GetoptError: return usage() js_options = default_options() file = None outfile = 'stdout' if len(args) == 1: file = args[0] for opt, arg in opts: if opt in ('--keep-array-indentation', '-k'): js_options.keep_array_indentation = True if opt in ('--keep-function-indentation','-f'): js_options.keep_function_indentation = True elif opt in ('--outfile', '-o'): outfile = arg elif opt in ('--indent-size', '-s'): js_options.indent_size = int(arg) elif opt in ('--indent-char', '-c'): js_options.indent_char = arg elif opt in ('--indent-with-tabs', '-t'): js_options.indent_with_tabs = True elif opt in ('--disable-preserve_newlines', '-d'): js_options.preserve_newlines = False elif opt in ('--jslint-happy', '-j'): js_options.jslint_happy = True elif opt in ('--eval-code'): js_options.eval_code = True elif opt in ('--brace-style', '-b'): js_options.brace_style = arg elif opt in ('--stdin', '-i'): file = '-' elif opt in ('--help', '--usage', '-h'): return usage() if not file: return usage() else: if outfile == 'stdout': print(beautify_file(file, js_options)) else: with 
open(outfile, 'w') as f: f.write(beautify_file(file, js_options) + '\n')
unknown
codeparrot/codeparrot-clean
# pylint: disable=missing-docstring,too-few-public-methods class AbstractFoo: def kwonly_1(self, first, *, second, third): "Normal positional with two positional only params." def kwonly_2(self, *, first, second): "Two positional only parameter." def kwonly_3(self, *, first, second): "Two positional only params." def kwonly_4(self, *, first, second=None): "One positional only and another with a default." def kwonly_5(self, *, first, **kwargs): "Keyword only and keyword variadics." class Foo(AbstractFoo): def kwonly_1(self, first, *, second): # [arguments-differ] "One positional and only one positional only param." def kwonly_2(self, first): # [arguments-differ] "Only one positional parameter instead of two positional only parameters." def kwonly_3(self, first, second): # [arguments-differ] "Two positional params." def kwonly_4(self, first, second): # [arguments-differ] "Two positional params." def kwonly_5(self, *, first): # [arguments-differ] "Keyword only, but no variadics."
unknown
codeparrot/codeparrot-clean
import sys import os import time base = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) sys.path.insert(0,base) import polyglotdb.io as pgio from polyglotdb.io.parsers import FilenameSpeakerParser from polyglotdb import CorpusContext path_to_gp = r'D:\Data\GP_aligned\TH' graph_db = {'host':'localhost', 'port': 7474, 'user': 'neo4j', 'password': 'test'} def call_back(*args): args = [x for x in args if isinstance(x, str)] if args: print(' '.join(args)) reset = True if __name__ == '__main__': if reset: print("Getting annotation types..") parser = pgio.inspect_textgrid(path_to_gp) parser.speaker_parser = FilenameSpeakerParser(5) parser.call_back = print print('Loading corpus...') with CorpusContext('gp_thai', **graph_db) as c: c.reset() beg = time.time() c.load(parser, path_to_gp) end = time.time() print('Time taken: {}'.format(end - beg)) with CorpusContext('gp_thai', **graph_db) as g: q = g.query_graph(g.phones).filter(g.phones.label == 'd') print(q.cypher()) print(q.count())
unknown
codeparrot/codeparrot-clean
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """An Example of a custom Estimator for the Iris dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import tensorflow as tf import iris_data parser = argparse.ArgumentParser() parser.add_argument('--batch_size', default=100, type=int, help='batch size') parser.add_argument('--train_steps', default=1000, type=int, help='number of training steps') def my_model(features, labels, mode, params): """DNN with three hidden layers and learning_rate=0.1.""" # Create three fully connected layers. net = tf.feature_column.input_layer(features, params['feature_columns']) for units in params['hidden_units']: net = tf.layers.dense(net, units=units, activation=tf.nn.relu) # Compute logits (1 per class). logits = tf.layers.dense(net, params['n_classes'], activation=None) # Compute predictions. predicted_classes = tf.argmax(logits, 1) if mode == tf.estimator.ModeKeys.PREDICT: predictions = { 'class_ids': predicted_classes[:, tf.newaxis], 'probabilities': tf.nn.softmax(logits), 'logits': logits, } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Compute loss. loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) # Compute evaluation metrics. 
accuracy = tf.metrics.accuracy(labels=labels, predictions=predicted_classes, name='acc_op') metrics = {'accuracy': accuracy} tf.summary.scalar('accuracy', accuracy[1]) if mode == tf.estimator.ModeKeys.EVAL: return tf.estimator.EstimatorSpec( mode, loss=loss, eval_metric_ops=metrics) # Create training op. assert mode == tf.estimator.ModeKeys.TRAIN optimizer = tf.train.AdagradOptimizer(learning_rate=0.1) train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step()) return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op) def main(argv): args = parser.parse_args(argv[1:]) # Fetch the data (train_x, train_y), (test_x, test_y) = iris_data.load_data() # Feature columns describe how to use the input. my_feature_columns = [] for key in train_x.keys(): my_feature_columns.append(tf.feature_column.numeric_column(key=key)) # Build 2 hidden layer DNN with 10, 10 units respectively. classifier = tf.estimator.Estimator( model_fn=my_model, params={ 'feature_columns': my_feature_columns, # Two hidden layers of 10 nodes each. 'hidden_units': [10, 10], # The model must choose between 3 classes. 'n_classes': 3, }) # Train the Model. classifier.train( input_fn=lambda:iris_data.train_input_fn(train_x, train_y, args.batch_size), steps=args.train_steps) # Evaluate the model. 
eval_result = classifier.evaluate( input_fn=lambda:iris_data.eval_input_fn(test_x, test_y, args.batch_size)) print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result)) # Generate predictions from the model expected = ['Setosa', 'Versicolor', 'Virginica'] predict_x = { 'SepalLength': [5.1, 5.9, 6.9], 'SepalWidth': [3.3, 3.0, 3.1], 'PetalLength': [1.7, 4.2, 5.4], 'PetalWidth': [0.5, 1.5, 2.1], } predictions = classifier.predict( input_fn=lambda:iris_data.eval_input_fn(predict_x, labels=None, batch_size=args.batch_size)) for pred_dict, expec in zip(predictions, expected): template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"') class_id = pred_dict['class_ids'][0] probability = pred_dict['probabilities'][class_id] print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec)) if __name__ == '__main__': tf.logging.set_verbosity(tf.logging.INFO) tf.app.run(main)
unknown
codeparrot/codeparrot-clean
# # An attempt at re-implementing LZJB compression in native Python. # # Created in May 2014 by Emil Brink <emil@obsession.se>. See LICENSE. # # --------------------------------------------------------------------- # # Copyright (c) 2014-2016, Emil Brink # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided # that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and # the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions # and the following disclaimer in the documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. BYTE_BITS = 8 MATCH_BITS = 6 MATCH_MIN = 3 MATCH_MAX = (1 << MATCH_BITS) + (MATCH_MIN - 1) MATCH_RANGE = range(MATCH_MIN, MATCH_MAX + 1) # Length 64, fine on 2.x. OFFSET_MASK = (1 << (16 - MATCH_BITS)) - 1 LEMPEL_SIZE = 1024 def size_encode(size, dst=None): """ Encodes the given size in little-endian variable-length encoding. The dst argument can be an existing bytearray to append the size. 
If it's omitted (or None), a new bytearray is created and used. Returns the destination bytearray. """ if dst is None: dst = bytearray() done = False while not done: dst.append(size & 0x7f) size >>= 7 done = size == 0 dst[-1] |= 0x80 return dst def size_decode(src): """ Decodes a size (encoded with size_encode()) from the start of src. Returns a tuple (size, len) where size is the size that was decoded, and len is the number of bytes from src that were consumed. """ dst_size = 0 pos = 0 # Extract prefixed encoded size, if present. val = 1 while True: c = src[pos] pos += 1 if c & 0x80: dst_size += val * (c & 0x7f) break dst_size += val * c val <<= 7 return dst_size, pos def lzjb_compress(src, dst=None): """ Compresses src, the source bytearray. If dst is not None, it's assumed to be the output bytearray and bytes are appended to it using dst.append(). If it is None, a new bytearray is created. The destination bytearray is returned. """ if dst is None: dst = bytearray() lempel = [0] * LEMPEL_SIZE copymap = 0 copymask = 1 << (BYTE_BITS - 1) pos = 0 # Current input offset. while pos < len(src): copymask <<= 1 if copymask == (1 << BYTE_BITS): copymask = 1 copymap = len(dst) dst.append(0) if pos > len(src) - MATCH_MAX: dst.append(src[pos]) pos += 1 continue hsh = (src[pos] << 16) + (src[pos + 1] << 8) + src[pos + 2] hsh += hsh >> 9 hsh += hsh >> 5 hsh &= LEMPEL_SIZE - 1 offset = (pos - lempel[hsh]) & OFFSET_MASK lempel[hsh] = pos cpy = pos - offset if cpy >= 0 and cpy != pos and src[pos:pos + 3] == src[cpy:cpy + 3]: dst[copymap] |= copymask for mlen in MATCH_RANGE: if src[pos + mlen] != src[cpy + mlen]: break dst.append(((mlen - MATCH_MIN) << (BYTE_BITS - MATCH_BITS)) | (offset >> BYTE_BITS)) dst.append(offset & 255) pos += mlen else: dst.append(src[pos]) pos += 1 return dst def lzjb_decompress(src, dlen, dst=None): """ Decompresses src, a bytearray of compressed data. The dst argument can be an optional bytearray which will have the output appended. 
If it's None, a new bytearray is created. The output bytearray is returned. """ if dst is None: dst = bytearray() pos = 0 dpos = 0 copymap = 0 copymask = 1 << (BYTE_BITS - 1) while pos < len(src): copymask <<= 1 if copymask == (1 << BYTE_BITS): copymask = 1 copymap = src[pos] pos += 1 if copymap & copymask: mlen = (src[pos] >> (BYTE_BITS - MATCH_BITS)) + MATCH_MIN offset = ((src[pos] << BYTE_BITS) | src[pos + 1]) & OFFSET_MASK pos += 2 cpy = dpos - offset if cpy < 0: return None while mlen > 0 and dpos < dlen: dst.append(dst[cpy]) dpos += 1 cpy += 1 mlen -= 1 elif dpos < dlen: dst.append(src[pos]) dpos += 1 pos += 1 return dst
unknown
codeparrot/codeparrot-clean
""" pyggle.camera This library (PYGGEL) is licensed under the LGPL by Matthew Roe and PYGGEL contributors. The camera module defines a Base camera class other cameras should inherit from, and two common cameras: LookFromCamera - which is basically a FPS camera, and the LookAtCamera - which is basically a third-person camera """ from .include import * import numpy from math import sqrt class Base(object): """camera.Base camera object all other inherit from...""" def __init__(self, pos=[0,0,0], rotation=[0,0,0]): """create the camera pos = position of the camera rotation = rotation of camera""" self.posx, self.posy, self.posz = pos self.rotx, self.roty, self.rotz = rotation def push(self): """Activate the camera - anything rendered after this uses the cameras transformations.""" glPushMatrix() def pop(self): """Deactivate the camera - must be called after push or will raise an OpenGL error""" glPopMatrix() def get_pos(self): """Return the position of the camera as a tuple""" return self.posx, self.posy, self.posz def set_pos(self, pos): """Set the position of the camera from a tuple""" self.posx, self.posy, self.posz = pos def get_rotation(self): """Return the rotation of the camera as a tuple""" return self.rotx, self.roty, self.rotz def set_facing_matrix(self): """Transforms the matrix so that all objects are facing camera - used in Image3D (billboard sprites)""" pass def set_skybox_data(self): """Transforms the view only for a skybox, ie only rotation is taken into account, not position""" pass class LookFromCamera(Base): """camera.LookFromCamera is a FPS camera""" def __init__(self, pos=(0,0,0), rotation=(0,0,0)): Base.__init__(self, pos, rotation) __init__.__doc__ = Base.__init__.__doc__ def push(self): glPushMatrix() glRotatef(self.rotx, 1, 0, 0) glRotatef(self.roty, 0, 1, 0) glRotatef(self.rotz, 0, 0, 1) glTranslatef(-self.posx, -self.posy, self.posz) push.__doc__ = Base.push.__doc__ def pop(self): glPopMatrix() pop.__doc__ = Base.pop.__doc__ def 
get_pos(self): return self.posx, self.posy, self.posz get_pos.doc = Base.get_pos.__doc__ def get_rotation(self): return self.rotx, self.roty, self.rotz get_rotation.__doc__ = Base.get_rotation.__doc__ def set_facing_matrix(self): glRotatef(-self.rotz, 0, 0, 1) glRotatef(-self.roty, 0, 1, 0) glRotatef(-self.rotx, 1, 0, 0) set_facing_matrix.__doc__ = Base.set_facing_matrix.__doc__ def set_skybox_data(self): glRotatef(self.rotx, 1, 0, 0) glRotatef(self.roty, 0, 1, 0) glRotatef(self.rotz, 0, 0, 1) set_skybox_data.__doc__ = Base.set_skybox_data.__doc__ class LookAtCamera(Base): """camera.LookAtCamera is a third-person camera""" def __init__(self, pos=[0,0,0], rotation=[0,0,0], distance=0): """create the camera pos is the position the camera is looking at rotation is how much we are rotated around the object distance is how far back from the object we are""" Base.__init__(self, pos, rotation) self.distance = distance def push(self): glPushMatrix() glTranslatef(0, 0, -self.distance) glRotatef(-self.rotx, 1, 0, 0) glRotatef(-self.roty, 0, 1, 0) glRotatef(self.rotz, 0, 0, 1) glTranslatef(-self.posx, -self.posy, self.posz) push.__doc__ = Base.push.__doc__ def set_facing_matrix(self): glRotatef(-self.rotz, 0, 0, 1) glRotatef(self.roty, 0, 1, 0) glRotatef(self.rotx, 1, 0, 0) set_facing_matrix.__doc__ = Base.set_facing_matrix.__doc__ def set_skybox_data(self): glRotatef(-self.rotx, 1, 0, 0) glRotatef(-self.roty, 0, 1, 0) glRotatef(self.rotz, 0, 0, 1) set_skybox_data.__doc__ = Base.set_skybox_data.__doc__
unknown
codeparrot/codeparrot-clean
//// [tests/cases/compiler/accessorWithRestParam.ts] //// //// [accessorWithRestParam.ts] class C { set X(...v) { } static set X(...v2) { } } //// [accessorWithRestParam.js] "use strict"; var C = /** @class */ (function () { function C() { } Object.defineProperty(C.prototype, "X", { set: function () { var v = []; for (var _i = 0; _i < arguments.length; _i++) { v[_i] = arguments[_i]; } }, enumerable: false, configurable: true }); Object.defineProperty(C, "X", { set: function () { var v2 = []; for (var _i = 0; _i < arguments.length; _i++) { v2[_i] = arguments[_i]; } }, enumerable: false, configurable: true }); return C; }());
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/accessorWithRestParam(target=es5).js
--- title: Contact Information --- Contact Information
html
github
https://github.com/jekyll/jekyll
test/source/contacts.html
from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property from .base import Database try: import pytz except ImportError: pytz = None class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () update_can_self_select = False allows_group_by_pk = True related_fields_match_type = True allow_sliced_subqueries = False has_bulk_insert = True has_select_for_update = True has_select_for_update_nowait = False supports_forward_references = False supports_regex_backreferencing = False supports_date_lookup_using_string = False can_introspect_autofield = True can_introspect_binary_field = False can_introspect_small_integer_field = True supports_timezones = False requires_explicit_null_ordering_when_grouping = True allows_auto_pk_0 = False uses_savepoints = True can_release_savepoints = True atomic_transactions = False supports_column_check_constraints = False can_clone_databases = True supports_temporal_subtraction = True @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" with self.connection.cursor() as cursor: cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'") result = cursor.fetchone() return result[0] @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != 'MyISAM' @cached_property def supports_microsecond_precision(self): # See https://github.com/farcepest/MySQLdb1/issues/24 for the reason # about requiring MySQLdb 1.2.5 return self.connection.mysql_version >= (5, 6, 4) and Database.version_info >= (1, 2, 5) @cached_property def has_zoneinfo_database(self): # MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects # abbreviations (eg. EAT). 
When pytz isn't installed and the current # time zone is LocalTimezone (the only sensible value in this # context), the current time zone name will be an abbreviation. As a # consequence, MySQL cannot perform time zone conversions reliably. if pytz is None: return False # Test if the time zone definitions are installed. with self.connection.cursor() as cursor: cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1") return cursor.fetchone() is not None def introspected_boolean_field_type(self, *args, **kwargs): return 'IntegerField' @cached_property def is_sql_auto_is_null_enabled(self): with self.connection.cursor() as cursor: cursor.execute('SELECT @@SQL_AUTO_IS_NULL') result = cursor.fetchone() return result and result[0] == 1
unknown
codeparrot/codeparrot-clean
""" Tests for L{eliot._action}. """ from __future__ import unicode_literals from unittest import TestCase from threading import Thread from warnings import catch_warnings, simplefilter from hypothesis import given from hypothesis.strategies import integers, lists from .._action import ( Action, _ExecutionContext, currentAction, startTask, startAction, TaskLevel) from .._output import MemoryLogger from .._validation import ActionType, Field, _ActionSerializers from ..testing import assertContainsFields from .. import _action, add_destination, remove_destination class ExecutionContextTests(TestCase): """ Tests for L{_ExecutionContext}. """ def test_nothingPushed(self): """ If no action has been pushed, L{_ExecutionContext.current} returns C{None}. """ ctx = _ExecutionContext() self.assertIs(ctx.current(), None) def test_pushSingle(self): """ L{_ExecutionContext.current} returns the action passed to L{_ExecutionContext.push} (assuming no pops). """ ctx = _ExecutionContext() a = object() ctx.push(a) self.assertIs(ctx.current(), a) def test_pushMultiple(self): """ L{_ExecutionContext.current} returns the action passed to the last call to L{_ExecutionContext.push} (assuming no pops). """ ctx = _ExecutionContext() a = object() b = object() ctx.push(a) ctx.push(b) self.assertIs(ctx.current(), b) def test_multipleCurrent(self): """ Multiple calls to L{_ExecutionContext.current} return the same result. """ ctx = _ExecutionContext() a = object() ctx.push(a) ctx.current() self.assertIs(ctx.current(), a) def test_popSingle(self): """ L{_ExecutionContext.pop} cancels a L{_ExecutionContext.push}, leading to an empty context. """ ctx = _ExecutionContext() a = object() ctx.push(a) ctx.pop() self.assertIs(ctx.current(), None) def test_popMultiple(self): """ L{_ExecutionContext.pop} cancels the last L{_ExecutionContext.push}, resulting in current context being whatever was pushed before that. 
""" ctx = _ExecutionContext() a = object() b = object() ctx.push(a) ctx.push(b) ctx.pop() self.assertIs(ctx.current(), a) def test_threadStart(self): """ Each thread starts with an empty execution context. """ ctx = _ExecutionContext() first = object() ctx.push(first) valuesInThread = [] def inthread(): valuesInThread.append(ctx.current()) thread = Thread(target=inthread) thread.start() thread.join() self.assertEqual(valuesInThread, [None]) def test_threadSafety(self): """ Each thread gets its own execution context. """ ctx = _ExecutionContext() first = object() ctx.push(first) second = object() valuesInThread = [] def inthread(): ctx.push(second) valuesInThread.append(ctx.current()) thread = Thread(target=inthread) thread.start() thread.join() # Neither thread was affected by the other: self.assertEqual(valuesInThread, [second]) self.assertIs(ctx.current(), first) def test_globalInstance(self): """ A global L{_ExecutionContext} is exposed in the L{eliot._action} module. """ self.assertIsInstance(_action._context, _ExecutionContext) self.assertEqual(_action.currentAction, _action._context.current) class ActionTests(TestCase): """ Tests for L{Action}. """ def test_start(self): """ L{Action._start} logs an C{action_status="started"} message. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") action._start({"key": "value"}) assertContainsFields(self, logger.messages[0], {"task_uuid": "unique", "task_level": [1], "action_type": "sys:thename", "action_status": "started", "key": "value"}) def test_startMessageSerialization(self): """ The start message logged by L{Action._start} is created with the appropriate start message L{eliot._validation._MessageSerializer}. 
""" serializers = ActionType("sys:thename", [Field("key", lambda x: x, "")], [], "")._serializers class Logger(list): def write(self, msg, serializer): self.append(serializer) logger = Logger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename", serializers) action._start({"key": "value"}) self.assertIs(logger[0], serializers.start) def test_child(self): """ L{Action.child} returns a new L{Action} with the given logger, system and name, and a task_uuid taken from the parent L{Action}. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") logger2 = MemoryLogger() child = action.child(logger2, "newsystem:newname") self.assertEqual([child._logger, child._identification, child._task_level], [logger2, {"task_uuid": "unique", "action_type": "newsystem:newname"}, TaskLevel(level=[1])]) def test_childLevel(self): """ Each call to L{Action.child} increments the new sub-level set on the child. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") child1 = action.child(logger, "newsystem:newname") child2 = action.child(logger, "newsystem:newname") child1_1 = child1.child(logger, "newsystem:other") self.assertEqual(child1._task_level, TaskLevel(level=[1])) self.assertEqual(child2._task_level, TaskLevel(level=[2])) self.assertEqual(child1_1._task_level, TaskLevel(level=[1, 1])) def test_childSerializers(self): """ L{Action.child} returns a new L{Action} with the serializers passed to it, rather than the parent's. """ logger = MemoryLogger() serializers = object() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename", serializers) childSerializers = object() child = action.child(logger, "newsystem:newname", childSerializers) self.assertIs(child._serializers, childSerializers) def test_run(self): """ L{Action.run} runs the given function with given arguments, returning its result. 
""" action = Action(None, "", TaskLevel(level=[]), "") def f(*args, **kwargs): return args, kwargs result = action.run(f, 1, 2, x=3) self.assertEqual(result, ((1, 2), {"x": 3})) def test_runContext(self): """ L{Action.run} runs the given function with the action set as the current action. """ result = [] action = Action(None, "", TaskLevel(level=[]), "") action.run(lambda: result.append(currentAction())) self.assertEqual(result, [action]) def test_runContextUnsetOnReturn(self): """ L{Action.run} unsets the action once the given function returns. """ action = Action(None, "", TaskLevel(level=[]), "") action.run(lambda: None) self.assertIs(currentAction(), None) def test_runContextUnsetOnRaise(self): """ L{Action.run} unsets the action once the given function raises an exception. """ action = Action(None, "", TaskLevel(level=[]), "") self.assertRaises(ZeroDivisionError, action.run, lambda: 1/0) self.assertIs(currentAction(), None) def test_withSetsContext(self): """ L{Action.__enter__} sets the action as the current action. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action: self.assertIs(currentAction(), action) def test_withUnsetOnReturn(self): """ L{Action.__exit__} unsets the action on successful block finish. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action: pass self.assertIs(currentAction(), None) def test_withUnsetOnRaise(self): """ L{Action.__exit__} unsets the action if the block raises an exception. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") try: with action: 1/0 except ZeroDivisionError: pass else: self.fail("no exception") self.assertIs(currentAction(), None) def test_withContextSetsContext(self): """ L{Action.context().__enter__} sets the action as the current action. 
""" action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action.context(): self.assertIs(currentAction(), action) def test_withContextUnsetOnReturn(self): """ L{Action.context().__exit__} unsets the action on successful block finish. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") with action.context(): pass self.assertIs(currentAction(), None) def test_withContextNoLogging(self): """ L{Action.context().__exit__} does not log any messages. """ logger = MemoryLogger() action = Action(logger, "", TaskLevel(level=[]), "") with action.context(): pass self.assertFalse(logger.messages) def test_withContextUnsetOnRaise(self): """ L{Action.conext().__exit__} unsets the action if the block raises an exception. """ action = Action(MemoryLogger(), "", TaskLevel(level=[]), "") try: with action.context(): 1/0 except ZeroDivisionError: pass else: self.fail("no exception") self.assertIs(currentAction(), None) def test_finish(self): """ L{Action.finish} with no exception logs an C{action_status="succeeded"} message. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") action.finish() assertContainsFields(self, logger.messages[0], {"task_uuid": "unique", "task_level": [1], "action_type": "sys:thename", "action_status": "succeeded"}) def test_successfulFinishSerializer(self): """ L{Action.finish} with no exception passes the success L{eliot._validation._MessageSerializer} to the message it creates. """ serializers = ActionType("sys:thename", [], [Field("key", lambda x: x, "")], "")._serializers class Logger(list): def write(self, msg, serializer): self.append(serializer) logger = Logger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename", serializers) action.finish() self.assertIs(logger[0], serializers.success) def test_failureFinishSerializer(self): """ L{Action.finish} with an exception passes the failure L{eliot._validation._MessageSerializer} to the message it creates. 
""" serializers = ActionType("sys:thename", [], [Field("key", lambda x: x, "")], "")._serializers class Logger(list): def write(self, msg, serializer): self.append(serializer) logger = Logger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename", serializers) action.finish(Exception()) self.assertIs(logger[0], serializers.failure) def test_startFieldsNotInFinish(self): """ L{Action.finish} logs a message without the fields from L{Action._start}. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") action._start({"key": "value"}) action.finish() self.assertNotIn("key", logger.messages[1]) def test_finishWithBadException(self): """ L{Action.finish} still logs a message if the given exception raises another exception when called with C{unicode()}. """ logger = MemoryLogger() action = Action(logger, "unique", TaskLevel(level=[]), "sys:thename") class BadException(Exception): def __str__(self): raise TypeError() action.finish(BadException()) self.assertEqual(logger.messages[0]["reason"], "eliot: unknown, unicode() raised exception") def test_withLogsSuccessfulFinishMessage(self): """ L{Action.__exit__} logs an action finish message on a successful block finish. """ logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action: pass # Start message is only created if we use the action()/task() utility # functions, the intended public APIs. self.assertEqual(len(logger.messages), 1) assertContainsFields(self, logger.messages[0], {"task_uuid": "uuid", "task_level": [1, 1], "action_type": "sys:me", "action_status": "succeeded"}) def test_withLogsExceptionMessage(self): """ L{Action.__exit__} logs an action finish message on an exception raised from the block. 
""" logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") exception = RuntimeError("because") try: with action: raise exception except RuntimeError: pass else: self.fail("no exception") self.assertEqual(len(logger.messages), 1) assertContainsFields(self, logger.messages[0], {"task_uuid": "uuid", "task_level": [1, 1], "action_type": "sys:me", "action_status": "failed", "reason": "because", "exception": "%s.RuntimeError" % ( RuntimeError.__module__,)}) def test_withReturnValue(self): """ L{Action.__enter__} returns the action itself. """ logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action as act: self.assertIs(action, act) def test_addSuccessFields(self): """ On a successful finish, L{Action.__exit__} adds fields from L{Action.addSuccessFields} to the result message. """ logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action as act: act.addSuccessFields(x=1, y=2) act.addSuccessFields(z=3) assertContainsFields(self, logger.messages[0], {"x": 1, "y": 2, "z": 3}) def test_nextTaskLevel(self): """ Each call to L{Action._nextTaskLevel()} increments a counter. """ action = Action(MemoryLogger(), "uuid", TaskLevel(level=[1]), "sys:me") self.assertEqual([action._nextTaskLevel() for i in range(5)], [TaskLevel(level=level) for level in ([1, 1], [1, 2], [1, 3], [1, 4], [1, 5])]) def test_multipleFinishCalls(self): """ If L{Action.finish} is called, subsequent calls to L{Action.finish} have no effect. """ logger = MemoryLogger() action = Action(logger, "uuid", TaskLevel(level=[1]), "sys:me") with action as act: act.finish() act.finish(Exception()) act.finish() # Only initial finish message is logged: self.assertEqual(len(logger.messages), 1) def test_stringActionCompatibility(self): """ L{Action} can be initialized with a string version of a L{TaskLevel}, for backwards compatibility. 
""" logger = MemoryLogger() action = Action(logger, "uuid", "/1/2/", "sys:me") self.assertEqual(action._task_level, TaskLevel(level=[1, 2])) def test_stringActionCompatibilityWarning(self): """ Calling L{Action} with a string results in a L{DeprecationWarning} """ logger = MemoryLogger() with catch_warnings(record=True) as warnings: simplefilter("always") # Catch all warnings Action(logger, "uuid", "/1/2/", "sys:me") self.assertEqual(warnings[-1].category, DeprecationWarning) class StartActionAndTaskTests(TestCase): """ Tests for L{startAction} and L{startTask}. """ def test_startTaskNewAction(self): """ L{startTask} creates a new top-level L{Action}. """ logger = MemoryLogger() action = startTask(logger, "sys:do") self.assertIsInstance(action, Action) self.assertEqual(action._task_level, TaskLevel(level=[])) def test_startTaskSerializers(self): """ If serializers are passed to L{startTask} they are attached to the resulting L{Action}. """ logger = MemoryLogger() serializers = _ActionSerializers(None, None, None) action = startTask(logger, "sys:do", serializers) self.assertIs(action._serializers, serializers) def test_startActionSerializers(self): """ If serializers are passed to L{startAction} they are attached to the resulting L{Action}. """ logger = MemoryLogger() serializers = _ActionSerializers(None, None, None) action = startAction(logger, "sys:do", serializers) self.assertIs(action._serializers, serializers) def test_startTaskNewUUID(self): """ L{startTask} creates an L{Action} with its own C{task_uuid}. """ logger = MemoryLogger() action = startTask(logger, "sys:do") action2 = startTask(logger, "sys:do") self.assertNotEqual(action._identification["task_uuid"], action2._identification["task_uuid"]) def test_startTaskLogsStart(self): """ L{startTask} logs a start message for the newly created L{Action}. 
""" logger = MemoryLogger() action = startTask(logger, "sys:do", key="value") assertContainsFields(self, logger.messages[0], {"task_uuid": action._identification["task_uuid"], "task_level": [1], "action_type": "sys:do", "action_status": "started", "key": "value"}) def test_startActionNoParent(self): """ L{startAction} when C{currentAction()} is C{None} creates a top-level L{Action}. """ logger = MemoryLogger() action = startAction(logger, "sys:do") self.assertIsInstance(action, Action) self.assertEqual(action._task_level, TaskLevel(level=[])) def test_startActionNoParentLogStart(self): """ L{startAction} when C{currentAction()} is C{None} logs a start message. """ logger = MemoryLogger() action = startAction(logger, "sys:do", key="value") assertContainsFields(self, logger.messages[0], {"task_uuid": action._identification["task_uuid"], "task_level": [1], "action_type": "sys:do", "action_status": "started", "key": "value"}) def test_startActionWithParent(self): """ L{startAction} uses the C{currentAction()} as parent for a new L{Action}. """ logger = MemoryLogger() parent = Action(logger, "uuid", TaskLevel(level=[2]), "other:thing") with parent: action = startAction(logger, "sys:do") self.assertIsInstance(action, Action) self.assertEqual(action._identification["task_uuid"], "uuid") self.assertEqual(action._task_level, TaskLevel(level=[2, 1])) def test_startActionWithParentLogStart(self): """ L{startAction} when C{currentAction()} is an L{Action} logs a start message. """ logger = MemoryLogger() parent = Action(logger, "uuid", TaskLevel(level=[]), "other:thing") with parent: startAction(logger, "sys:do", key="value") assertContainsFields(self, logger.messages[0], {"task_uuid": "uuid", "task_level": [1, 1], "action_type": "sys:do", "action_status": "started", "key": "value"}) def test_startTaskNoLogger(self): """ When no logger is given L{startTask} logs to the default ``Logger``. 
""" messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) action = startTask(action_type="sys:do", key="value") assertContainsFields(self, messages[0], {"task_uuid": action._identification["task_uuid"], "task_level": [1], "action_type": "sys:do", "action_status": "started", "key": "value"}) def test_startActionNoLogger(self): """ When no logger is given L{startAction} logs to the default ``Logger``. """ messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) action = startAction(action_type="sys:do", key="value") assertContainsFields(self, messages[0], {"task_uuid": action._identification["task_uuid"], "task_level": [1], "action_type": "sys:do", "action_status": "started", "key": "value"}) class PEP8Tests(TestCase): """ Tests for PEP 8 method compatibility. """ def test_add_success_fields(self): """ L{Action.addSuccessFields} is the same as L{Action.add_success_fields}. """ self.assertEqual(Action.addSuccessFields, Action.add_success_fields) def test_serialize_task_id(self): """ L{Action.serialize_task_id} is the same as L{Action.serializeTaskId}. """ self.assertEqual(Action.serialize_task_id, Action.serializeTaskId) def test_continue_task(self): """ L{Action.continue_task} is the same as L{Action.continueTask}. """ self.assertEqual(Action.continue_task, Action.continueTask) class SerializationTests(TestCase): """ Tests for L{Action} serialization and deserialization. """ def test_serializeTaskId(self): """ L{Action.serializeTaskId} result is composed of the task UUID and an incremented task level. 
""" action = Action(None, "uniq123", TaskLevel(level=[1, 2]), "mytype") self.assertEqual([action._nextTaskLevel(), action.serializeTaskId(), action._nextTaskLevel()], [TaskLevel(level=[1, 2, 1]), b"uniq123@/1/2/2", TaskLevel(level=[1, 2, 3])]) def test_continueTaskReturnsAction(self): """ L{Action.continueTask} returns an L{Action} whose C{task_level} and C{task_uuid} are derived from those in the given serialized task identifier. """ originalAction = Action(None, "uniq456", TaskLevel(level=[3, 4]), "mytype") taskId = originalAction.serializeTaskId() newAction = Action.continueTask(MemoryLogger(), taskId) self.assertEqual([newAction.__class__, newAction._identification, newAction._task_level], [Action, {"task_uuid": "uniq456", "action_type": "eliot:remote_task"}, TaskLevel(level=[3, 4, 1])]) def test_continueTaskStartsAction(self): """ L{Action.continueTask} starts the L{Action} it creates. """ originalAction = Action(None, "uniq456", TaskLevel(level=[3, 4]), "mytype") taskId = originalAction.serializeTaskId() logger = MemoryLogger() Action.continueTask(logger, taskId) assertContainsFields(self, logger.messages[0], {"task_uuid": "uniq456", "task_level": [3, 4, 1, 1], "action_type": "eliot:remote_task", "action_status": "started"}) def test_continueTaskNoLogger(self): """ L{Action.continueTask} can be called without a logger. """ originalAction = Action(None, "uniq456", TaskLevel(level=[3, 4]), "mytype") taskId = originalAction.serializeTaskId() messages = [] add_destination(messages.append) self.addCleanup(remove_destination, messages.append) Action.continueTask(task_id=taskId) assertContainsFields(self, messages[0], {"task_uuid": "uniq456", "task_level": [3, 4, 1, 1], "action_type": "eliot:remote_task", "action_status": "started"}) def test_continueTaskRequiredTaskId(self): """ L{Action.continue_task} requires a C{task_id} to be passed in. 
""" self.assertRaises(RuntimeError, Action.continueTask) TASK_LEVELS = integers(min_value=1) class TaskLevelTests(TestCase): """ Tests for L{TaskLevel}. """ def assert_fully_less_than(self, x, y): """ Assert that x < y according to all the comparison operators. """ self.assertTrue(all([ # lt x < y, not y < x, # le x <= y, not y <= x, # gt y > x, not x > y, # ge y >= x, not x >= y, # eq not x == y, not y == x, # ne x != y, y != x, ])) @given(lists(TASK_LEVELS)) def test_parent_of_child(self, base_task_level): """ L{TaskLevel.child} returns the first child of the task. """ base_task = TaskLevel(level=base_task_level) child_task = base_task.child() self.assertEqual(base_task, child_task.parent()) @given(lists(TASK_LEVELS, min_size=1)) def test_child_greater_than_parent(self, task_level): """ L{TaskLevel.child} returns a child that is greater than its parent. """ task = TaskLevel(level=task_level) self.assert_fully_less_than(task, task.child()) @given(lists(TASK_LEVELS, min_size=1)) def test_next_sibling_greater(self, task_level): """ L{TaskLevel.next_sibling} returns a greater task level. """ task = TaskLevel(level=task_level) self.assert_fully_less_than(task, task.next_sibling()) @given(lists(TASK_LEVELS, min_size=1)) def test_next_sibling(self, task_level): """ L{TaskLevel.next_sibling} returns the next sibling of a task. """ task = TaskLevel(level=task_level) sibling = task.next_sibling() self.assertEqual( sibling, TaskLevel(level=task_level[:-1] + [task_level[-1] + 1])) def test_parent_of_root(self): """ L{TaskLevel.parent} of the root task level is C{None}. """ self.assertIs(TaskLevel(level=[]).parent(), None) def test_toString(self): """ L{TaskLevel.toString} serializes the object to a Unicode string. """ root = TaskLevel(level=[]) child2_1 = root.child().next_sibling().child() self.assertEqual([root.toString(), child2_1.toString()], ["/", "/2/1"]) def test_fromString(self): """ L{TaskLevel.fromString} deserializes the output of L{TaskLevel.toString}. 
""" self.assertEqual([TaskLevel.fromString("/"), TaskLevel.fromString("/2/1")], [TaskLevel(level=[]), TaskLevel(level=[2, 1])]) def test_from_string(self): """ L{TaskLevel.from_string} is the same as as L{TaskLevel.fromString}. """ self.assertEqual(TaskLevel.from_string, TaskLevel.fromString) def test_to_string(self): """ L{TaskLevel.to_string} is the same as as L{TaskLevel.toString}. """ self.assertEqual(TaskLevel.to_string, TaskLevel.toString)
unknown
codeparrot/codeparrot-clean
/* * Copyright (C) 2007 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package com.google.common.collect; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.collect.CollectPreconditions.checkNonnegative; import static com.google.common.collect.CollectPreconditions.checkRemove; import static java.util.Objects.requireNonNull; import com.google.common.annotations.GwtCompatible; import com.google.common.annotations.GwtIncompatible; import com.google.common.annotations.J2ktIncompatible; import com.google.common.primitives.Ints; import com.google.errorprone.annotations.CanIgnoreReturnValue; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import java.util.Arrays; import java.util.Iterator; import java.util.NoSuchElementException; import org.jspecify.annotations.Nullable; /** * Multiset implementation specialized for enum elements, supporting all single-element operations * in O(1). * * <p>See the Guava User Guide article on <a href= * "https://github.com/google/guava/wiki/NewCollectionTypesExplained#multiset">{@code Multiset}</a>. * * @author Jared Levy * @since 2.0 */ @GwtCompatible @J2ktIncompatible @SuppressWarnings("EnumOrdinal") // This is one of the low-level utilities where it's suitable. 
public final class EnumMultiset<E extends Enum<E>> extends AbstractMultiset<E> implements Serializable { /** Creates an empty {@code EnumMultiset}. */ public static <E extends Enum<E>> EnumMultiset<E> create(Class<E> type) { return new EnumMultiset<>(type); } /** * Creates a new {@code EnumMultiset} containing the specified elements. * * <p>This implementation is highly efficient when {@code elements} is itself a {@link Multiset}. * * @param elements the elements that the multiset should contain * @throws IllegalArgumentException if {@code elements} is empty */ public static <E extends Enum<E>> EnumMultiset<E> create(Iterable<E> elements) { Iterator<E> iterator = elements.iterator(); checkArgument(iterator.hasNext(), "EnumMultiset constructor passed empty Iterable"); EnumMultiset<E> multiset = new EnumMultiset<>(iterator.next().getDeclaringClass()); Iterables.addAll(multiset, elements); return multiset; } /** * Returns a new {@code EnumMultiset} instance containing the given elements. Unlike {@link * EnumMultiset#create(Iterable)}, this method does not produce an exception on an empty iterable. * * @since 14.0 */ public static <E extends Enum<E>> EnumMultiset<E> create(Iterable<E> elements, Class<E> type) { EnumMultiset<E> result = create(type); Iterables.addAll(result, elements); return result; } private transient Class<E> type; private transient E[] enumConstants; private transient int[] counts; private transient int distinctElements; private transient long size; /** Creates an empty {@code EnumMultiset}. 
*/ private EnumMultiset(Class<E> type) { this.type = type; checkArgument(type.isEnum()); this.enumConstants = type.getEnumConstants(); this.counts = new int[enumConstants.length]; } private boolean isActuallyE(@Nullable Object o) { if (o instanceof Enum) { Enum<?> e = (Enum<?>) o; int index = e.ordinal(); return index < enumConstants.length && enumConstants[index] == e; } return false; } /** * Returns {@code element} cast to {@code E}, if it actually is a nonnull E. Otherwise, throws * either a NullPointerException or a ClassCastException as appropriate. */ private void checkIsE(Object element) { checkNotNull(element); if (!isActuallyE(element)) { throw new ClassCastException("Expected an " + type + " but got " + element); } } @Override int distinctElements() { return distinctElements; } @Override public int size() { return Ints.saturatedCast(size); } @Override public int count(@Nullable Object element) { // isActuallyE checks for null, but we check explicitly to help nullness checkers. if (element == null || !isActuallyE(element)) { return 0; } Enum<?> e = (Enum<?>) element; return counts[e.ordinal()]; } // Modification Operations @CanIgnoreReturnValue @Override public int add(E element, int occurrences) { checkIsE(element); checkNonnegative(occurrences, "occurrences"); if (occurrences == 0) { return count(element); } int index = element.ordinal(); int oldCount = counts[index]; long newCount = (long) oldCount + occurrences; checkArgument(newCount <= Integer.MAX_VALUE, "too many occurrences: %s", newCount); counts[index] = (int) newCount; if (oldCount == 0) { distinctElements++; } size += occurrences; return oldCount; } // Modification Operations @CanIgnoreReturnValue @Override public int remove(@Nullable Object element, int occurrences) { // isActuallyE checks for null, but we check explicitly to help nullness checkers. 
if (element == null || !isActuallyE(element)) { return 0; } Enum<?> e = (Enum<?>) element; checkNonnegative(occurrences, "occurrences"); if (occurrences == 0) { return count(element); } int index = e.ordinal(); int oldCount = counts[index]; if (oldCount == 0) { return 0; } else if (oldCount <= occurrences) { counts[index] = 0; distinctElements--; size -= oldCount; } else { counts[index] = oldCount - occurrences; size -= occurrences; } return oldCount; } // Modification Operations @CanIgnoreReturnValue @Override public int setCount(E element, int count) { checkIsE(element); checkNonnegative(count, "count"); int index = element.ordinal(); int oldCount = counts[index]; counts[index] = count; size += count - oldCount; if (oldCount == 0 && count > 0) { distinctElements++; } else if (oldCount > 0 && count == 0) { distinctElements--; } return oldCount; } @Override public void clear() { Arrays.fill(counts, 0); size = 0; distinctElements = 0; } abstract class Itr<T> implements Iterator<T> { int index = 0; int toRemove = -1; abstract T output(int index); @Override public boolean hasNext() { for (; index < enumConstants.length; index++) { if (counts[index] > 0) { return true; } } return false; } @Override public T next() { if (!hasNext()) { throw new NoSuchElementException(); } T result = output(index); toRemove = index; index++; return result; } @Override public void remove() { checkRemove(toRemove >= 0); if (counts[toRemove] > 0) { distinctElements--; size -= counts[toRemove]; counts[toRemove] = 0; } toRemove = -1; } } @Override Iterator<E> elementIterator() { return new Itr<E>() { @Override E output(int index) { return enumConstants[index]; } }; } @Override Iterator<Entry<E>> entryIterator() { return new Itr<Entry<E>>() { @Override Entry<E> output(int index) { return new Multisets.AbstractEntry<E>() { @Override public E getElement() { return enumConstants[index]; } @Override public int getCount() { return counts[index]; } }; } }; } @Override public Iterator<E> iterator() { 
return Multisets.iteratorImpl(this); } @GwtIncompatible // java.io.ObjectOutputStream private void writeObject(ObjectOutputStream stream) throws IOException { stream.defaultWriteObject(); stream.writeObject(type); Serialization.writeMultiset(this, stream); } /** * @serialData the {@code Class<E>} for the enum type, the number of distinct elements, the first * element, its count, the second element, its count, and so on */ @GwtIncompatible // java.io.ObjectInputStream private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { stream.defaultReadObject(); @SuppressWarnings("unchecked") // reading data stored by writeObject Class<E> localType = (Class<E>) requireNonNull(stream.readObject()); type = localType; enumConstants = type.getEnumConstants(); counts = new int[enumConstants.length]; Serialization.populateMultiset(this, stream); } @GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0; }
java
github
https://github.com/google/guava
android/guava/src/com/google/common/collect/EnumMultiset.java
# -*- coding: utf-8 -*- """ *************************************************************************** ExecuteSQL.py -- use virtual layers to execute SQL on any sources --------------------- Date : Jan 2016 Copyright : (C) 2016 by Hugo Mercier Email : hugo dot mercier at oslandia dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Hugo Mercier' __date__ = 'January 2016' __copyright__ = '(C) 2016, Hugo Mercier' from qgis.core import (QgsVirtualLayerDefinition, QgsVectorLayer, QgsWkbTypes, QgsProcessingAlgorithm, QgsProcessingParameterMultipleLayers, QgsProcessingParameterDefinition, QgsExpression, QgsProcessingUtils, QgsProcessingParameterString, QgsProcessingParameterEnum, QgsProcessingParameterCrs, QgsProcessingParameterFeatureSink, QgsFeatureSink, QgsProcessingException, QgsVectorFileWriter, QgsProject) from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm class ParameterExecuteSql(QgsProcessingParameterDefinition): def __init__(self, name='', description=''): super().__init__(name, description) self.setMetadata({ 'widget_wrapper': 'processing.algs.qgis.ui.ExecuteSQLWidget.ExecuteSQLWidgetWrapper' }) def type(self): return 'execute_sql' def clone(self): return ParameterExecuteSql(self.name(), self.description()) class ExecuteSQL(QgisAlgorithm): """ This algorithm allows executing an SQL query on a set of input vector layers thanks to the virtual layer provider """ INPUT_DATASOURCES = 'INPUT_DATASOURCES' INPUT_QUERY = 'INPUT_QUERY' INPUT_UID_FIELD = 'INPUT_UID_FIELD' INPUT_GEOMETRY_FIELD = 'INPUT_GEOMETRY_FIELD' INPUT_GEOMETRY_TYPE = 'INPUT_GEOMETRY_TYPE' INPUT_GEOMETRY_CRS 
= 'INPUT_GEOMETRY_CRS' OUTPUT = 'OUTPUT' def group(self): return self.tr('Vector general') def groupId(self): return 'vectorgeneral' def __init__(self): super().__init__() def flags(self): return super().flags() | QgsProcessingAlgorithm.FlagNoThreading def initAlgorithm(self, config=None): self.addParameter(QgsProcessingParameterMultipleLayers(name=self.INPUT_DATASOURCES, description=self.tr('Additional input datasources (called input1, .., inputN in the query)'), optional=True)) self.addParameter(ParameterExecuteSql(name=self.INPUT_QUERY, description=self.tr('SQL query'))) self.addParameter(QgsProcessingParameterString(name=self.INPUT_UID_FIELD, description=self.tr('Unique identifier field'), optional=True)) self.addParameter(QgsProcessingParameterString(name=self.INPUT_GEOMETRY_FIELD, description=self.tr('Geometry field'), optional=True)) self.geometryTypes = [ self.tr('Autodetect'), self.tr('No geometry'), 'Point', 'LineString', 'Polygon', 'MultiPoint', 'MultiLineString', 'MultiPolygon'] self.addParameter(QgsProcessingParameterEnum(self.INPUT_GEOMETRY_TYPE, self.tr('Geometry type'), options=self.geometryTypes, optional=True)) self.addParameter(QgsProcessingParameterCrs(self.INPUT_GEOMETRY_CRS, self.tr('CRS'), optional=True)) self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('SQL Output'))) def name(self): return 'executesql' def displayName(self): return self.tr('Execute SQL') def processAlgorithm(self, parameters, context, feedback): layers = self.parameterAsLayerList(parameters, self.INPUT_DATASOURCES, context) query = self.parameterAsString(parameters, self.INPUT_QUERY, context) uid_field = self.parameterAsString(parameters, self.INPUT_UID_FIELD, context) geometry_field = self.parameterAsString(parameters, self.INPUT_GEOMETRY_FIELD, context) geometry_type = self.parameterAsEnum(parameters, self.INPUT_GEOMETRY_TYPE, context) geometry_crs = self.parameterAsCrs(parameters, self.INPUT_GEOMETRY_CRS, context) df = QgsVirtualLayerDefinition() 
for layerIdx, layer in enumerate(layers): # Issue https://github.com/qgis/QGIS/issues/24041 # When using this algorithm from the graphic modeler, it may try to # access (thanks the QgsVirtualLayerProvider) to memory layer that # belongs to temporary QgsMapLayerStore, not project. # So, we write them to disk is this is the case. if context.project() and not context.project().mapLayer(layer.id()): basename = "memorylayer." + QgsVectorFileWriter.supportedFormatExtensions()[0] tmp_path = QgsProcessingUtils.generateTempFilename(basename) QgsVectorFileWriter.writeAsVectorFormat( layer, tmp_path, layer.dataProvider().encoding()) df.addSource('input{}'.format(layerIdx + 1), tmp_path, "ogr") else: df.addSource('input{}'.format(layerIdx + 1), layer.id()) if query == '': raise QgsProcessingException( self.tr('Empty SQL. Please enter valid SQL expression and try again.')) localContext = self.createExpressionContext(parameters, context) expandedQuery = QgsExpression.replaceExpressionText(query, localContext) df.setQuery(expandedQuery) if uid_field: df.setUid(uid_field) if geometry_type == 1: # no geometry df.setGeometryWkbType(QgsWkbTypes.NoGeometry) else: if geometry_field: df.setGeometryField(geometry_field) if geometry_type > 1: df.setGeometryWkbType(geometry_type - 1) if geometry_crs.isValid(): df.setGeometrySrid(geometry_crs.postgisSrid()) vLayer = QgsVectorLayer(df.toString(), "temp_vlayer", "virtual") if not vLayer.isValid(): raise QgsProcessingException(vLayer.dataProvider().error().message()) if vLayer.wkbType() == QgsWkbTypes.Unknown: raise QgsProcessingException(self.tr("Cannot find geometry field")) (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context, vLayer.fields(), vLayer.wkbType() if geometry_type != 1 else 1, vLayer.crs()) if sink is None: raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT)) features = vLayer.getFeatures() total = 100.0 / vLayer.featureCount() if vLayer.featureCount() else 0 for current, inFeat in 
enumerate(features): if feedback.isCanceled(): break sink.addFeature(inFeat, QgsFeatureSink.FastInsert) feedback.setProgress(int(current * total)) return {self.OUTPUT: dest_id}
unknown
codeparrot/codeparrot-clean
"""brainfuck interpreter adapted from (public domain) code at http://brainfuck.sourceforge.net/brain.py""" import re import asyncio import random from cloudbot import hook BUFFER_SIZE = 5000 MAX_STEPS = 1000000 @asyncio.coroutine @hook.command("brainfuck", "bf") def bf(text): """<prog> - executes <prog> as Brainfuck code :type text: str """ program = re.sub('[^][<>+-.,]', '', text) # create a dict of brackets pairs, for speed later on brackets = {} open_brackets = [] for pos in range(len(program)): if program[pos] == '[': open_brackets.append(pos) elif program[pos] == ']': if len(open_brackets) > 0: brackets[pos] = open_brackets[-1] brackets[open_brackets[-1]] = pos open_brackets.pop() else: return "Unbalanced brackets" if len(open_brackets) != 0: return "Unbalanced brackets" # now we can start interpreting ip = 0 # instruction pointer mp = 0 # memory pointer steps = 0 memory = [0] * BUFFER_SIZE # initial memory area rightmost = 0 output = "" # we'll save the output here # the main program loop: while ip < len(program): c = program[ip] if c == '+': memory[mp] = (memory[mp] + 1) % 256 elif c == '-': memory[mp] = (memory[mp] - 1) % 256 elif c == '>': mp += 1 if mp > rightmost: rightmost = mp if mp >= len(memory): # no restriction on memory growth! memory.extend([0] * BUFFER_SIZE) elif c == '<': mp -= 1 % len(memory) elif c == '.': output += chr(memory[mp]) if len(output) > 500: break elif c == ',': memory[mp] = random.randint(1, 255) elif c == '[': if memory[mp] == 0: ip = brackets[ip] elif c == ']': if memory[mp] != 0: ip = brackets[ip] ip += 1 steps += 1 if steps > MAX_STEPS: if not output: output = "(no output)" output += "(exceeded {} iterations)".format(MAX_STEPS) break stripped_output = re.sub(r'[\x00-\x1F]', '', output) if not stripped_output: if output: return "No printable output" return "No output" return stripped_output[:430]
unknown
codeparrot/codeparrot-clean
TIS_620_TO_UCS_TBL = [ ["A1",0xE01], ["A2",0xE02], ["A3",0xE03], ["A4",0xE04], ["A5",0xE05], ["A6",0xE06], ["A7",0xE07], ["A8",0xE08], ["A9",0xE09], ["AA",0xE0A], ["AB",0xE0B], ["AC",0xE0C], ["AD",0xE0D], ["AE",0xE0E], ["AF",0xE0F], ["B0",0xE10], ["B1",0xE11], ["B2",0xE12], ["B3",0xE13], ["B4",0xE14], ["B5",0xE15], ["B6",0xE16], ["B7",0xE17], ["B8",0xE18], ["B9",0xE19], ["BA",0xE1A], ["BB",0xE1B], ["BC",0xE1C], ["BD",0xE1D], ["BE",0xE1E], ["BF",0xE1F], ["C0",0xE20], ["C1",0xE21], ["C2",0xE22], ["C3",0xE23], ["C4",0xE24], ["C5",0xE25], ["C6",0xE26], ["C7",0xE27], ["C8",0xE28], ["C9",0xE29], ["CA",0xE2A], ["CB",0xE2B], ["CC",0xE2C], ["CD",0xE2D], ["CE",0xE2E], ["CF",0xE2F], ["D0",0xE30], ["D1",0xE31], ["D2",0xE32], ["D3",0xE33], ["D4",0xE34], ["D5",0xE35], ["D6",0xE36], ["D7",0xE37], ["D8",0xE38], ["D9",0xE39], ["DA",0xE3A], ["DF",0xE3F], ["E0",0xE40], ["E1",0xE41], ["E2",0xE42], ["E3",0xE43], ["E4",0xE44], ["E5",0xE45], ["E6",0xE46], ["E7",0xE47], ["E8",0xE48], ["E9",0xE49], ["EA",0xE4A], ["EB",0xE4B], ["EC",0xE4C], ["ED",0xE4D], ["EE",0xE4E], ["EF",0xE4F], ["F0",0xE50], ["F1",0xE51], ["F2",0xE52], ["F3",0xE53], ["F4",0xE54], ["F5",0xE55], ["F6",0xE56], ["F7",0xE57], ["F8",0xE58], ["F9",0xE59], ["FA",0xE5A], ["FB",0xE5B], ]
ruby
github
https://github.com/ruby/ruby
enc/trans/tis-620-tbl.rb
/*
 * Coroutine context-switch support for the PPC64 (ELFv2) ABI.
 * Declares the per-coroutine saved state and the inline setup/teardown
 * helpers; the actual register save/restore lives in the matching
 * assembly implementation of coroutine_transfer().
 */
#ifndef COROUTINE_PPC64_CONTEXT_H
#define COROUTINE_PPC64_CONTEXT_H 1

#pragma once

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Entry points never return; control leaves only via coroutine_transfer(). */
#define COROUTINE __attribute__((noreturn)) void

enum {
    COROUTINE_REGISTERS =
        20 /* 19 general purpose registers (r13-r31) and 1 return address */
        + 4 /* space for fiber_entry() to store the link register */
};

struct coroutine_context
{
    /* Saved stack pointer while this coroutine is suspended; NULL for the
     * main context, which runs on the thread's own stack. */
    void **stack_pointer;
    void *argument;
};

/* Signature of a coroutine entry point: receives the context being left
 * and the coroutine's own context. */
typedef COROUTINE(* coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);

/* The main context needs no prepared stack frame — only a NULL marker. */
static inline void coroutine_initialize_main(struct coroutine_context * context) {
    context->stack_pointer = NULL;
}

/*
 * Prepare a fresh coroutine so the first coroutine_transfer() into it
 * "returns" into `start`.  `stack` is the base of a caller-owned buffer of
 * `size` bytes (at least 1 KiB).
 */
static inline void coroutine_initialize(
    struct coroutine_context *context,
    coroutine_start start,
    void *stack,
    size_t size
) {
    assert(start && stack && size >= 1024);

    // Stack grows down. Force 16-byte alignment.
    char * top = (char*)stack + size;
    context->stack_pointer = (void**)((uintptr_t)top & ~0xF);

    /* Reserve and zero the register save area that coroutine_transfer()
     * will pop on first entry. */
    context->stack_pointer -= COROUTINE_REGISTERS;
    memset(context->stack_pointer, 0, sizeof(void*) * COROUTINE_REGISTERS);

    /* Skip a global prologue that sets the TOC register */
    /* Slot 19 is the saved return address restored into the link register;
     * +8 jumps past the function's global-entry TOC setup (ELFv2 dual
     * entry points). */
    context->stack_pointer[19] = ((char*)start) + 8;
}

/* Implemented in assembly: saves `current`, resumes `target`. */
struct coroutine_context * coroutine_transfer(struct coroutine_context * current, struct coroutine_context * target);

/* Mark the context unused; the stack buffer itself is owned by the caller. */
static inline void coroutine_destroy(struct coroutine_context * context) {
    context->stack_pointer = NULL;
}

#endif /* COROUTINE_PPC64_CONTEXT_H */
c
github
https://github.com/ruby/ruby
coroutine/ppc64/Context.h
# -*- coding: utf-8 -*- from ast import literal_eval import datetime from functools import partial import logging import werkzeug.urls import urllib2 from openerp import release, SUPERUSER_ID from openerp.models import AbstractModel from openerp.osv import osv from openerp.tools.translate import _ from openerp.tools.config import config from openerp.tools import misc _logger = logging.getLogger(__name__) class publisher_warranty_contract(AbstractModel): _name = "publisher_warranty.contract" def _get_message(self, cr, uid): Users = self.pool['res.users'] user_count = partial(Users.search_count, cr, uid) get_param = partial(self.pool['ir.config_parameter'].get_param, cr, SUPERUSER_ID) dbuuid = get_param('database.uuid') db_create_date = get_param('database.create_date') limit_date = datetime.datetime.now() limit_date = limit_date - datetime.timedelta(15) limit_date_str = limit_date.strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT) nbr_users = user_count([]) nbr_active_users = user_count([("login_date", ">=", limit_date_str)]) nbr_share_users = 0 nbr_active_share_users = 0 if "share" in Users._fields: nbr_share_users = user_count([("share", "=", True)]) nbr_active_share_users = user_count([("share", "=", True), ("login_date", ">=", limit_date_str)]) user = Users.browse(cr, uid, uid) domain = [('application', '=', True), ('state', 'in', ['installed', 'to upgrade', 'to remove'])] apps = self.pool['ir.module.module'].search_read(cr, uid, domain, ['name']) web_base_url = get_param('web.base.url') msg = { "dbuuid": dbuuid, "nbr_users": nbr_users, "nbr_active_users": nbr_active_users, "nbr_share_users": nbr_share_users, "nbr_active_share_users": nbr_active_share_users, "dbname": cr.dbname, "db_create_date": db_create_date, "version": release.version, "language": user.lang, "web_base_url": web_base_url, "apps": [app['name'] for app in apps], } if user.partner_id.company_id: company_id = user.partner_id.company_id.id msg.update(self.pool.get("res.company").read(cr, uid, 
[company_id], ["name", "email", "phone"])[0]) return msg def _get_sys_logs(self, cr, uid): """ Utility method to send a publisher warranty get logs messages. """ msg = self._get_message(cr, uid) arguments = {'arg0': msg, "action": "update"} arguments_raw = werkzeug.urls.url_encode(arguments) url = config.get("publisher_warranty_url") uo = urllib2.urlopen(url, arguments_raw, timeout=30) try: submit_result = uo.read() return literal_eval(submit_result) finally: uo.close() def update_notification(self, cr, uid, ids, cron_mode=True, context=None): """ Send a message to OpenERP's publisher warranty server to check the validity of the contracts, get notifications, etc... @param cron_mode: If true, catch all exceptions (appropriate for usage in a cron). @type cron_mode: boolean """ try: try: result = self._get_sys_logs(cr, uid) except Exception: if cron_mode: # we don't want to see any stack trace in cron return False _logger.debug("Exception while sending a get logs messages", exc_info=1) raise osv.except_osv(_("Error"), _("Error during communication with the publisher warranty server.")) # old behavior based on res.log; now on mail.message, that is not necessarily installed IMD = self.pool['ir.model.data'] user = self.pool['res.users'].browse(cr, SUPERUSER_ID, SUPERUSER_ID) poster = IMD.xmlid_to_object(cr, SUPERUSER_ID, 'mail.group_all_employees', context=context) if not (poster and poster.exists()): if not user.exists(): return True poster = user for message in result["messages"]: try: poster.message_post(body=message, subtype='mt_comment', partner_ids=[user.partner_id.id]) except Exception: _logger.warning('Cannot send ping message', exc_info=True) except Exception: if cron_mode: return False # we don't want to see any stack trace in cron else: raise return True # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
#####################################################################
# -*- coding: iso-8859-1 -*-                                        #
#                                                                   #
# Frets on Fire                                                     #
# Copyright (C) 2006 Sami Kyöstilä                                  #
#                                                                   #
# This program is free software; you can redistribute it and/or     #
# modify it under the terms of the GNU General Public License       #
# as published by the Free Software Foundation; either version 2    #
# of the License, or (at your option) any later version.            #
#                                                                   #
# This program is distributed in the hope that it will be useful,   #
# but WITHOUT ANY WARRANTY; without even the implied warranty of    #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the     #
# GNU General Public License for more details.                      #
#                                                                   #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software       #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,        #
# MA  02110-1301, USA.                                              #
#####################################################################

import unittest

from Audio import Audio


class AudioTest(unittest.TestCase):
    """Smoke test for the Audio subsystem."""

    def testOpen(self):
        # The audio device must open successfully and close cleanly.
        audio = Audio()
        assert audio.open()
        audio.close()


if __name__ == "__main__":
    unittest.main()
unknown
codeparrot/codeparrot-clean
'use strict'; const common = require('../common.js'); const assert = require('assert'); const bench = common.createBenchmark(main, { n: [1e6], direction: ['start', 'end'], }); function main({ direction, n }) { const timersList = []; bench.start(); if (direction === 'start') { for (let i = 1; i <= n; i++) { timersList.push(setTimeout(cb, i)); } } else { for (let i = n; i > 0; i--) { timersList.push(setTimeout(cb, i)); } } bench.end(n); for (let j = 0; j < n; j++) { clearTimeout(timersList[j]); } } function cb() { assert.fail(`Timer ${this._idleTimeout} should not call callback`); }
javascript
github
https://github.com/nodejs/node
benchmark/timers/timers-insert-unpooled.js
from __future__ import print_function from LogAnalyzer import Test,TestResult import DataflashLog from math import sqrt class TestIMUMatch(Test): '''test for empty or near-empty logs''' def __init__(self): Test.__init__(self) self.name = "IMU Mismatch" def run(self, logdata, verbose): #tuning parameters: warn_threshold = .75 fail_threshold = 1.5 filter_tc = 5.0 self.result = TestResult() self.result.status = TestResult.StatusType.GOOD if ("IMU" in logdata.channels) and (not "IMU2" in logdata.channels): self.result.status = TestResult.StatusType.NA self.result.statusMessage = "No IMU2" return if (not "IMU" in logdata.channels) or (not "IMU2" in logdata.channels): self.result.status = TestResult.StatusType.UNKNOWN self.result.statusMessage = "No IMU log data" return imu1 = logdata.channels["IMU"] imu2 = logdata.channels["IMU2"] timeLabel = None for i in 'TimeMS','TimeUS','Time': if i in logdata.channels["GPS"]: timeLabel = i break imu1_timems = imu1[timeLabel].listData imu1_accx = imu1["AccX"].listData imu1_accy = imu1["AccY"].listData imu1_accz = imu1["AccZ"].listData imu2_timems = imu2[timeLabel].listData imu2_accx = imu2["AccX"].listData imu2_accy = imu2["AccY"].listData imu2_accz = imu2["AccZ"].listData imu_multiplier = 1.0E-3 if timeLabel == 'TimeUS': imu_multiplier = 1.0E-6 imu1 = [] imu2 = [] for i in range(len(imu1_timems)): imu1.append({ 't': imu1_timems[i][1]*imu_multiplier, 'x': imu1_accx[i][1], 'y': imu1_accy[i][1], 'z': imu1_accz[i][1]}) for i in range(len(imu2_timems)): imu2.append({ 't': imu2_timems[i][1]*imu_multiplier, 'x': imu2_accx[i][1], 'y': imu2_accy[i][1], 'z': imu2_accz[i][1]}) imu1.sort(key=lambda x: x['t']) imu2.sort(key=lambda x: x['t']) imu2_index = 0 last_t = None xdiff_filtered = 0 ydiff_filtered = 0 zdiff_filtered = 0 max_diff_filtered = 0 for i in range(len(imu1)): #find closest imu2 value t = imu1[i]['t'] dt = 0 if last_t is None else t-last_t dt=min(dt,.1) next_imu2 = None for i in range(imu2_index,len(imu2)): next_imu2 = imu2[i] 
imu2_index=i if next_imu2['t'] >= t: break prev_imu2 = imu2[imu2_index-1] closest_imu2 = next_imu2 if abs(next_imu2['t']-t)<abs(prev_imu2['t']-t) else prev_imu2 xdiff = imu1[i]['x']-closest_imu2['x'] ydiff = imu1[i]['y']-closest_imu2['y'] zdiff = imu1[i]['z']-closest_imu2['z'] xdiff_filtered += (xdiff-xdiff_filtered)*dt/filter_tc ydiff_filtered += (ydiff-ydiff_filtered)*dt/filter_tc zdiff_filtered += (zdiff-zdiff_filtered)*dt/filter_tc diff_filtered = sqrt(xdiff_filtered**2+ydiff_filtered**2+zdiff_filtered**2) max_diff_filtered = max(max_diff_filtered,diff_filtered) #print(max_diff_filtered) last_t = t if max_diff_filtered > fail_threshold: self.result.statusMessage = "Check vibration or accelerometer calibration. (Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold,fail_threshold) self.result.status = TestResult.StatusType.FAIL elif max_diff_filtered > warn_threshold: self.result.statusMessage = "Check vibration or accelerometer calibration. (Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold,fail_threshold) self.result.status = TestResult.StatusType.WARN else: self.result.statusMessage = "(Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold, fail_threshold)
unknown
codeparrot/codeparrot-clean
//// [tests/cases/conformance/classes/propertyMemberDeclarations/autoAccessor6.ts] //// //// [autoAccessor6.ts] class C1 { accessor a: any; } class C2 extends C1 { a = 1; } class C3 extends C1 { get a() { return super.a; } } //// [autoAccessor6.js] "use strict"; var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) { if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter"); if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it"); return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver); }; var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) { if (kind === "m") throw new TypeError("Private method is not writable"); if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter"); if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it"); return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value; }; var _C1_a_accessor_storage; class C1 { constructor() { _C1_a_accessor_storage.set(this, void 0); } get a() { return __classPrivateFieldGet(this, _C1_a_accessor_storage, "f"); } set a(value) { __classPrivateFieldSet(this, _C1_a_accessor_storage, value, "f"); } } _C1_a_accessor_storage = new WeakMap(); class C2 extends C1 { constructor() { super(...arguments); this.a = 1; } } class C3 extends C1 { get a() { return super.a; } }
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/autoAccessor6(target=es2015,usedefineforclassfields=false).js
#!/usr/bin/env python
"""
SoftLayer external inventory script.

The SoftLayer Python API client is required. Use `pip install softlayer` to install it.
You have a few different options for configuring your username and api_key. You can pass
environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to
~/.softlayer or /etc/softlayer.conf. For more information see the SL API at:
- https://softlayer-python.readthedocs.org/en/latest/config_file.html

The SoftLayer Python client has a built in command for saving this configuration file
via the command `sl config setup`.
"""

# Copyright (C) 2014 AJ Bourg <aj@ajbourg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# I found the structure of the ec2.py script very helpful as an example
# as I put this together. Thanks to whoever wrote that script!
#

import SoftLayer
import re
import argparse
import itertools
try:
    import json
except ImportError:
    # BUG FIX: was a bare `except:`, which would also hide unrelated errors.
    import simplejson as json


class SoftLayerInventory(object):
    """Builds an Ansible dynamic-inventory JSON document from the
    SoftLayer API (virtual and hardware servers)."""

    # Object-mask fields requested for every server type.
    common_items = [
        'id',
        'globalIdentifier',
        'hostname',
        'domain',
        'fullyQualifiedDomainName',
        'primaryBackendIpAddress',
        'primaryIpAddress',
        'datacenter',
        'tagReferences.tag.name',
        'userData.value',
    ]

    # Extra fields for virtual servers only.
    vs_items = [
        'lastKnownPowerState.name',
        'powerState',
        'maxCpu',
        'maxMemory',
        'activeTransaction.transactionStatus[friendlyName,name]',
        'status',
    ]

    # Extra fields for bare-metal hardware only.
    hw_items = [
        'hardwareStatusId',
        'processorPhysicalCoreAmount',
        'memoryCapacity',
    ]

    def _empty_inventory(self):
        """Return the minimal inventory skeleton Ansible expects."""
        return {"_meta": {"hostvars": {}}}

    def __init__(self):
        '''Main path'''

        self.inventory = self._empty_inventory()

        self.parse_options()

        if self.args.list:
            self.get_all_servers()
            print(self.json_format_dict(self.inventory, True))
        elif self.args.host:
            # BUG FIX: the --host path previously called get_virtual_servers()
            # without ever creating self.client (get_all_servers() was the only
            # place it was set), raising AttributeError.  Create the client
            # here as well.
            self.client = SoftLayer.Client()
            self.get_virtual_servers()
            print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))

    def to_safe(self, word):
        '''Converts 'bad' characters in a string to underscores so they
        can be used as Ansible groups'''

        # Raw string: `\-` and `\.` are invalid escape sequences in a
        # plain string literal (DeprecationWarning on modern Pythons).
        return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)

    def push(self, my_dict, key, element):
        '''Push an element onto an array that may not have been defined in
        the dict'''

        if key in my_dict:
            my_dict[key].append(element)
        else:
            my_dict[key] = [element]

    def parse_options(self):
        '''Parse all the arguments from the CLI'''

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
        parser.add_argument('--list', action='store_true', default=False,
                            help='List instances (default: False)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        self.args = parser.parse_args()

    def json_format_dict(self, data, pretty=False):
        '''Converts a dict to a JSON object and dumps it as a formatted string'''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)

    def process_instance(self, instance, instance_type="virtual"):
        '''Populate the inventory dictionary with any instance information'''

        # only want active instances
        if 'status' in instance and instance['status']['name'] != 'Active':
            return

        # and powered on instances
        if 'powerState' in instance and instance['powerState']['name'] != 'Running':
            return

        # 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid
        if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5:
            return

        # if there's no IP address, we can't reach it
        if 'primaryIpAddress' not in instance:
            return

        instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else ''

        # Hosts are keyed by their public IP address.
        dest = instance['primaryIpAddress']

        self.inventory["_meta"]["hostvars"][dest] = instance

        # Inventory: group by memory
        if 'maxMemory' in instance:
            self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest)
        elif 'memoryCapacity' in instance:
            self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest)

        # Inventory: group by cpu count
        if 'maxCpu' in instance:
            self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest)
        elif 'processorPhysicalCoreAmount' in instance:
            self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest)

        # Inventory: group by datacenter
        self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest)

        # Inventory: group by hostname
        self.push(self.inventory, self.to_safe(instance['hostname']), dest)

        # Inventory: group by FQDN
        self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest)

        # Inventory: group by domain
        self.push(self.inventory, self.to_safe(instance['domain']), dest)

        # Inventory: group by type (hardware/virtual)
        self.push(self.inventory, instance_type, dest)

        # Inventory: group by tag
        for tag in instance['tagReferences']:
            self.push(self.inventory, tag['tag']['name'], dest)

    def get_virtual_servers(self):
        '''Get all the CCI instances'''
        vs = SoftLayer.VSManager(self.client)
        mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
        instances = vs.list_instances(mask=mask)

        for instance in instances:
            self.process_instance(instance)

    def get_physical_servers(self):
        '''Get all the hardware instances'''
        hw = SoftLayer.HardwareManager(self.client)
        mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
        instances = hw.list_hardware(mask=mask)

        for instance in instances:
            self.process_instance(instance, 'hardware')

    def get_all_servers(self):
        '''Fetch both virtual and hardware servers into the inventory.'''
        self.client = SoftLayer.Client()
        self.get_virtual_servers()
        self.get_physical_servers()


SoftLayerInventory()
unknown
codeparrot/codeparrot-clean
"""Flexible enumeration of C types.""" from __future__ import division, print_function from Enumeration import * # TODO: # - struct improvements (flexible arrays, packed & # unpacked, alignment) # - objective-c qualified id # - anonymous / transparent unions # - VLAs # - block types # - K&R functions # - pass arguments of different types (test extension, transparent union) # - varargs ### # Actual type types class Type(object): def isBitField(self): return False def isPaddingBitField(self): return False def getTypeName(self, printer): name = 'T%d' % len(printer.types) typedef = self.getTypedefDef(name, printer) printer.addDeclaration(typedef) return name class BuiltinType(Type): def __init__(self, name, size, bitFieldSize=None): self.name = name self.size = size self.bitFieldSize = bitFieldSize def isBitField(self): return self.bitFieldSize is not None def isPaddingBitField(self): return self.bitFieldSize is 0 def getBitFieldSize(self): assert self.isBitField() return self.bitFieldSize def getTypeName(self, printer): return self.name def sizeof(self): return self.size def __str__(self): return self.name class EnumType(Type): unique_id = 0 def __init__(self, index, enumerators): self.index = index self.enumerators = enumerators self.unique_id = self.__class__.unique_id self.__class__.unique_id += 1 def getEnumerators(self): result = '' for i, init in enumerate(self.enumerators): if i > 0: result = result + ', ' result = result + 'enum%dval%d_%d' % (self.index, i, self.unique_id) if init: result = result + ' = %s' % (init) return result def __str__(self): return 'enum { %s }' % (self.getEnumerators()) def getTypedefDef(self, name, printer): return 'typedef enum %s { %s } %s;'%(name, self.getEnumerators(), name) class RecordType(Type): def __init__(self, index, isUnion, fields): self.index = index self.isUnion = isUnion self.fields = fields self.name = None def __str__(self): def getField(t): if t.isBitField(): return "%s : %d;" % (t, t.getBitFieldSize()) else: return 
"%s;" % t return '%s { %s }'%(('struct','union')[self.isUnion], ' '.join(map(getField, self.fields))) def getTypedefDef(self, name, printer): def getField(it): i, t = it if t.isBitField(): if t.isPaddingBitField(): return '%s : 0;'%(printer.getTypeName(t),) else: return '%s field%d : %d;'%(printer.getTypeName(t),i, t.getBitFieldSize()) else: return '%s field%d;'%(printer.getTypeName(t),i) fields = [getField(f) for f in enumerate(self.fields)] # Name the struct for more readable LLVM IR. return 'typedef %s %s { %s } %s;'%(('struct','union')[self.isUnion], name, ' '.join(fields), name) class ArrayType(Type): def __init__(self, index, isVector, elementType, size): if isVector: # Note that for vectors, this is the size in bytes. assert size > 0 else: assert size is None or size >= 0 self.index = index self.isVector = isVector self.elementType = elementType self.size = size if isVector: eltSize = self.elementType.sizeof() assert not (self.size % eltSize) self.numElements = self.size // eltSize else: self.numElements = self.size def __str__(self): if self.isVector: return 'vector (%s)[%d]'%(self.elementType,self.size) elif self.size is not None: return '(%s)[%d]'%(self.elementType,self.size) else: return '(%s)[]'%(self.elementType,) def getTypedefDef(self, name, printer): elementName = printer.getTypeName(self.elementType) if self.isVector: return 'typedef %s %s __attribute__ ((vector_size (%d)));'%(elementName, name, self.size) else: if self.size is None: sizeStr = '' else: sizeStr = str(self.size) return 'typedef %s %s[%s];'%(elementName, name, sizeStr) class ComplexType(Type): def __init__(self, index, elementType): self.index = index self.elementType = elementType def __str__(self): return '_Complex (%s)'%(self.elementType) def getTypedefDef(self, name, printer): return 'typedef _Complex %s %s;'%(printer.getTypeName(self.elementType), name) class FunctionType(Type): def __init__(self, index, returnType, argTypes): self.index = index self.returnType = returnType 
self.argTypes = argTypes def __str__(self): if self.returnType is None: rt = 'void' else: rt = str(self.returnType) if not self.argTypes: at = 'void' else: at = ', '.join(map(str, self.argTypes)) return '%s (*)(%s)'%(rt, at) def getTypedefDef(self, name, printer): if self.returnType is None: rt = 'void' else: rt = str(self.returnType) if not self.argTypes: at = 'void' else: at = ', '.join(map(str, self.argTypes)) return 'typedef %s (*%s)(%s);'%(rt, name, at) ### # Type enumerators class TypeGenerator(object): def __init__(self): self.cache = {} def setCardinality(self): abstract def get(self, N): T = self.cache.get(N) if T is None: assert 0 <= N < self.cardinality T = self.cache[N] = self.generateType(N) return T def generateType(self, N): abstract class FixedTypeGenerator(TypeGenerator): def __init__(self, types): TypeGenerator.__init__(self) self.types = types self.setCardinality() def setCardinality(self): self.cardinality = len(self.types) def generateType(self, N): return self.types[N] # Factorial def fact(n): result = 1 while n > 0: result = result * n n = n - 1 return result # Compute the number of combinations (n choose k) def num_combinations(n, k): return fact(n) // (fact(k) * fact(n - k)) # Enumerate the combinations choosing k elements from the list of values def combinations(values, k): # From ActiveState Recipe 190465: Generator for permutations, # combinations, selections of a sequence if k==0: yield [] else: for i in range(len(values)-k+1): for cc in combinations(values[i+1:],k-1): yield [values[i]]+cc class EnumTypeGenerator(TypeGenerator): def __init__(self, values, minEnumerators, maxEnumerators): TypeGenerator.__init__(self) self.values = values self.minEnumerators = minEnumerators self.maxEnumerators = maxEnumerators self.setCardinality() def setCardinality(self): self.cardinality = 0 for num in range(self.minEnumerators, self.maxEnumerators + 1): self.cardinality += num_combinations(len(self.values), num) def generateType(self, n): # Figure 
out the number of enumerators in this type numEnumerators = self.minEnumerators valuesCovered = 0 while numEnumerators < self.maxEnumerators: comb = num_combinations(len(self.values), numEnumerators) if valuesCovered + comb > n: break numEnumerators = numEnumerators + 1 valuesCovered += comb # Find the requested combination of enumerators and build a # type from it. i = 0 for enumerators in combinations(self.values, numEnumerators): if i == n - valuesCovered: return EnumType(n, enumerators) i = i + 1 assert False class ComplexTypeGenerator(TypeGenerator): def __init__(self, typeGen): TypeGenerator.__init__(self) self.typeGen = typeGen self.setCardinality() def setCardinality(self): self.cardinality = self.typeGen.cardinality def generateType(self, N): return ComplexType(N, self.typeGen.get(N)) class VectorTypeGenerator(TypeGenerator): def __init__(self, typeGen, sizes): TypeGenerator.__init__(self) self.typeGen = typeGen self.sizes = tuple(map(int,sizes)) self.setCardinality() def setCardinality(self): self.cardinality = len(self.sizes)*self.typeGen.cardinality def generateType(self, N): S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality) return ArrayType(N, True, self.typeGen.get(T), self.sizes[S]) class FixedArrayTypeGenerator(TypeGenerator): def __init__(self, typeGen, sizes): TypeGenerator.__init__(self) self.typeGen = typeGen self.sizes = tuple(size) self.setCardinality() def setCardinality(self): self.cardinality = len(self.sizes)*self.typeGen.cardinality def generateType(self, N): S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality) return ArrayType(N, false, self.typeGen.get(T), self.sizes[S]) class ArrayTypeGenerator(TypeGenerator): def __init__(self, typeGen, maxSize, useIncomplete=False, useZero=False): TypeGenerator.__init__(self) self.typeGen = typeGen self.useIncomplete = useIncomplete self.useZero = useZero self.maxSize = int(maxSize) self.W = useIncomplete + useZero + self.maxSize self.setCardinality() def 
setCardinality(self): self.cardinality = self.W * self.typeGen.cardinality def generateType(self, N): S,T = getNthPairBounded(N, self.W, self.typeGen.cardinality) if self.useIncomplete: if S==0: size = None S = None else: S = S - 1 if S is not None: if self.useZero: size = S else: size = S + 1 return ArrayType(N, False, self.typeGen.get(T), size) class RecordTypeGenerator(TypeGenerator): def __init__(self, typeGen, useUnion, maxSize): TypeGenerator.__init__(self) self.typeGen = typeGen self.useUnion = bool(useUnion) self.maxSize = int(maxSize) self.setCardinality() def setCardinality(self): M = 1 + self.useUnion if self.maxSize is aleph0: S = aleph0 * self.typeGen.cardinality else: S = 0 for i in range(self.maxSize+1): S += M * (self.typeGen.cardinality ** i) self.cardinality = S def generateType(self, N): isUnion,I = False,N if self.useUnion: isUnion,I = (I&1),I>>1 fields = [self.typeGen.get(f) for f in getNthTuple(I,self.maxSize,self.typeGen.cardinality)] return RecordType(N, isUnion, fields) class FunctionTypeGenerator(TypeGenerator): def __init__(self, typeGen, useReturn, maxSize): TypeGenerator.__init__(self) self.typeGen = typeGen self.useReturn = useReturn self.maxSize = maxSize self.setCardinality() def setCardinality(self): if self.maxSize is aleph0: S = aleph0 * self.typeGen.cardinality() elif self.useReturn: S = 0 for i in range(1,self.maxSize+1+1): S += self.typeGen.cardinality ** i else: S = 0 for i in range(self.maxSize+1): S += self.typeGen.cardinality ** i self.cardinality = S def generateType(self, N): if self.useReturn: # Skip the empty tuple argIndices = getNthTuple(N+1, self.maxSize+1, self.typeGen.cardinality) retIndex,argIndices = argIndices[0],argIndices[1:] retTy = self.typeGen.get(retIndex) else: retTy = None argIndices = getNthTuple(N, self.maxSize, self.typeGen.cardinality) args = [self.typeGen.get(i) for i in argIndices] return FunctionType(N, retTy, args) class AnyTypeGenerator(TypeGenerator): def __init__(self): 
TypeGenerator.__init__(self) self.generators = [] self.bounds = [] self.setCardinality() self._cardinality = None def getCardinality(self): if self._cardinality is None: return aleph0 else: return self._cardinality def setCardinality(self): self.bounds = [g.cardinality for g in self.generators] self._cardinality = sum(self.bounds) cardinality = property(getCardinality, None) def addGenerator(self, g): self.generators.append(g) for i in range(100): prev = self._cardinality self._cardinality = None for g in self.generators: g.setCardinality() self.setCardinality() if (self._cardinality is aleph0) or prev==self._cardinality: break else: raise RuntimeError("Infinite loop in setting cardinality") def generateType(self, N): index,M = getNthPairVariableBounds(N, self.bounds) return self.generators[index].get(M) def test(): fbtg = FixedTypeGenerator([BuiltinType('char', 4), BuiltinType('char', 4, 0), BuiltinType('int', 4, 5)]) fields1 = AnyTypeGenerator() fields1.addGenerator( fbtg ) fields0 = AnyTypeGenerator() fields0.addGenerator( fbtg ) # fields0.addGenerator( RecordTypeGenerator(fields1, False, 4) ) btg = FixedTypeGenerator([BuiltinType('char', 4), BuiltinType('int', 4)]) etg = EnumTypeGenerator([None, '-1', '1', '1u'], 0, 3) atg = AnyTypeGenerator() atg.addGenerator( btg ) atg.addGenerator( RecordTypeGenerator(fields0, False, 4) ) atg.addGenerator( etg ) print('Cardinality:',atg.cardinality) for i in range(100): if i == atg.cardinality: try: atg.get(i) raise RuntimeError("Cardinality was wrong") except AssertionError: break print('%4d: %s'%(i, atg.get(i))) if __name__ == '__main__': test()
unknown
codeparrot/codeparrot-clean
""" Support for RFXtrx components. For more details about this component, please refer to the documentation at https://home-assistant.io/components/rfxtrx/ """ import asyncio import logging from collections import OrderedDict import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.util import slugify from homeassistant.const import ( EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, ATTR_ENTITY_ID, TEMP_CELSIUS, CONF_DEVICE_CLASS, CONF_COMMAND_ON, CONF_COMMAND_OFF ) from homeassistant.helpers.entity import Entity REQUIREMENTS = ['pyRFXtrx==0.20.1'] DOMAIN = 'rfxtrx' DEFAULT_SIGNAL_REPETITIONS = 1 ATTR_AUTOMATIC_ADD = 'automatic_add' ATTR_DEVICE = 'device' ATTR_DEBUG = 'debug' ATTR_STATE = 'state' ATTR_NAME = 'name' ATTR_FIREEVENT = 'fire_event' ATTR_DATA_TYPE = 'data_type' ATTR_DATA_BITS = 'data_bits' ATTR_DUMMY = 'dummy' ATTR_OFF_DELAY = 'off_delay' CONF_SIGNAL_REPETITIONS = 'signal_repetitions' CONF_DEVICES = 'devices' EVENT_BUTTON_PRESSED = 'button_pressed' DATA_TYPES = OrderedDict([ ('Temperature', TEMP_CELSIUS), ('Temperature2', TEMP_CELSIUS), ('Humidity', '%'), ('Barometer', ''), ('Wind direction', ''), ('Rain rate', ''), ('Energy usage', 'W'), ('Total usage', 'W'), ('Sound', ''), ('Sensor Status', ''), ('Counter value', ''), ('UV', 'uv')]) RECEIVED_EVT_SUBSCRIBERS = [] RFX_DEVICES = {} _LOGGER = logging.getLogger(__name__) RFXOBJECT = 'rfxobject' def _valid_device(value, device_type): """Validate a dictionary of devices definitions.""" config = OrderedDict() for key, device in value.items(): # Still accept old configuration if 'packetid' in device.keys(): msg = 'You are using an outdated configuration of the rfxtrx ' +\ 'device, {}.'.format(key) +\ ' Your new config should be:\n {}: \n name: {}'\ .format(device.get('packetid'), device.get(ATTR_NAME, 'deivce_name')) _LOGGER.warning(msg) key = device.get('packetid') device.pop('packetid') key = str(key) if not len(key) % 2 == 0: key = '0' + key if device_type == 
'sensor': config[key] = DEVICE_SCHEMA_SENSOR(device) elif device_type == 'binary_sensor': config[key] = DEVICE_SCHEMA_BINARYSENSOR(device) elif device_type == 'light_switch': config[key] = DEVICE_SCHEMA(device) else: raise vol.Invalid('Rfxtrx device is invalid') if not config[key][ATTR_NAME]: config[key][ATTR_NAME] = key return config def valid_sensor(value): """Validate sensor configuration.""" return _valid_device(value, "sensor") def valid_binary_sensor(value): """Validate binary sensor configuration.""" return _valid_device(value, "binary_sensor") def _valid_light_switch(value): return _valid_device(value, "light_switch") DEVICE_SCHEMA = vol.Schema({ vol.Required(ATTR_NAME): cv.string, vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean, }) DEVICE_SCHEMA_SENSOR = vol.Schema({ vol.Optional(ATTR_NAME, default=None): cv.string, vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean, vol.Optional(ATTR_DATA_TYPE, default=[]): vol.All(cv.ensure_list, [vol.In(DATA_TYPES.keys())]), }) DEVICE_SCHEMA_BINARYSENSOR = vol.Schema({ vol.Optional(ATTR_NAME, default=None): cv.string, vol.Optional(CONF_DEVICE_CLASS, default=None): cv.string, vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean, vol.Optional(ATTR_OFF_DELAY, default=None): vol.Any(cv.time_period, cv.positive_timedelta), vol.Optional(ATTR_DATA_BITS, default=None): cv.positive_int, vol.Optional(CONF_COMMAND_ON, default=None): cv.byte, vol.Optional(CONF_COMMAND_OFF, default=None): cv.byte }) DEFAULT_SCHEMA = vol.Schema({ vol.Required("platform"): DOMAIN, vol.Optional(CONF_DEVICES, default={}): vol.All(dict, _valid_light_switch), vol.Optional(ATTR_AUTOMATIC_ADD, default=False): cv.boolean, vol.Optional(CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS): vol.Coerce(int), }) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(ATTR_DEVICE): cv.string, vol.Optional(ATTR_DEBUG, default=False): cv.boolean, vol.Optional(ATTR_DUMMY, default=False): cv.boolean, }), }, extra=vol.ALLOW_EXTRA) def 
setup(hass, config): """Set up the RFXtrx component.""" # Declare the Handle event def handle_receive(event): """Handle revieved messgaes from RFXtrx gateway.""" # Log RFXCOM event if not event.device.id_string: return _LOGGER.debug("Receive RFXCOM event from " "(Device_id: %s Class: %s Sub: %s, Pkt_id: %s)", slugify(event.device.id_string.lower()), event.device.__class__.__name__, event.device.subtype, "".join("{0:02x}".format(x) for x in event.data)) # Callback to HA registered components. for subscriber in RECEIVED_EVT_SUBSCRIBERS: subscriber(event) # Try to load the RFXtrx module. import RFXtrx as rfxtrxmod device = config[DOMAIN][ATTR_DEVICE] debug = config[DOMAIN][ATTR_DEBUG] dummy_connection = config[DOMAIN][ATTR_DUMMY] if dummy_connection: hass.data[RFXOBJECT] =\ rfxtrxmod.Connect(device, None, debug=debug, transport_protocol=rfxtrxmod.DummyTransport2) else: hass.data[RFXOBJECT] = rfxtrxmod.Connect(device, None, debug=debug) def _start_rfxtrx(event): hass.data[RFXOBJECT].event_callback = handle_receive hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_rfxtrx) def _shutdown_rfxtrx(event): """Close connection with RFXtrx.""" hass.data[RFXOBJECT].close_connection() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown_rfxtrx) return True def get_rfx_object(packetid): """Return the RFXObject with the packetid.""" import RFXtrx as rfxtrxmod try: binarypacket = bytearray.fromhex(packetid) except ValueError: return None pkt = rfxtrxmod.lowlevel.parse(binarypacket) if pkt is None: return None if isinstance(pkt, rfxtrxmod.lowlevel.SensorPacket): obj = rfxtrxmod.SensorEvent(pkt) elif isinstance(pkt, rfxtrxmod.lowlevel.Status): obj = rfxtrxmod.StatusEvent(pkt) else: obj = rfxtrxmod.ControlEvent(pkt) return obj def get_pt2262_deviceid(device_id, nb_data_bits): """Extract and return the address bits from a Lighting4/PT2262 packet.""" import binascii try: data = bytearray.fromhex(device_id) except ValueError: return None mask = 0xFF & ~((1 << nb_data_bits) - 1) 
data[len(data)-1] &= mask return binascii.hexlify(data) def get_pt2262_cmd(device_id, data_bits): """Extract and return the data bits from a Lighting4/PT2262 packet.""" try: data = bytearray.fromhex(device_id) except ValueError: return None mask = 0xFF & ((1 << data_bits) - 1) return hex(data[-1] & mask) # pylint: disable=unused-variable def get_pt2262_device(device_id): """Look for the device which id matches the given device_id parameter.""" for dev_id, device in RFX_DEVICES.items(): if (hasattr(device, 'is_lighting4') and device.masked_id == get_pt2262_deviceid(device_id, device.data_bits)): _LOGGER.info("rfxtrx: found matching device %s for %s", device_id, device.masked_id) return device return None # pylint: disable=unused-variable def find_possible_pt2262_device(device_id): """Look for the device which id matches the given device_id parameter.""" for dev_id, device in RFX_DEVICES.items(): if hasattr(device, 'is_lighting4') and len(dev_id) == len(device_id): size = None for i in range(0, len(dev_id)): if dev_id[i] != device_id[i]: break size = i if size is not None: size = len(dev_id) - size - 1 _LOGGER.info("rfxtrx: found possible device %s for %s " "with the following configuration:\n" "data_bits=%d\n" "command_on=0x%s\n" "command_off=0x%s\n", device_id, dev_id, size * 4, dev_id[-size:], device_id[-size:]) return device return None def get_devices_from_config(config, device): """Read rfxtrx configuration.""" signal_repetitions = config[CONF_SIGNAL_REPETITIONS] devices = [] for packet_id, entity_info in config[CONF_DEVICES].items(): event = get_rfx_object(packet_id) if event is None: _LOGGER.error("Invalid device: %s", packet_id) continue device_id = slugify(event.device.id_string.lower()) if device_id in RFX_DEVICES: continue _LOGGER.info("Add %s rfxtrx", entity_info[ATTR_NAME]) # Check if i must fire event fire_event = entity_info[ATTR_FIREEVENT] datas = {ATTR_STATE: False, ATTR_FIREEVENT: fire_event} new_device = device(entity_info[ATTR_NAME], event, 
datas, signal_repetitions) RFX_DEVICES[device_id] = new_device devices.append(new_device) return devices def get_new_device(event, config, device): """Add entity if not exist and the automatic_add is True.""" device_id = slugify(event.device.id_string.lower()) if device_id in RFX_DEVICES: return if not config[ATTR_AUTOMATIC_ADD]: return pkt_id = "".join("{0:02x}".format(x) for x in event.data) _LOGGER.info( "Automatic add %s rfxtrx device (Class: %s Sub: %s Packet_id: %s)", device_id, event.device.__class__.__name__, event.device.subtype, pkt_id ) datas = {ATTR_STATE: False, ATTR_FIREEVENT: False} signal_repetitions = config[CONF_SIGNAL_REPETITIONS] new_device = device(pkt_id, event, datas, signal_repetitions) RFX_DEVICES[device_id] = new_device return new_device def apply_received_command(event): """Apply command from rfxtrx.""" device_id = slugify(event.device.id_string.lower()) # Check if entity exists or previously added automatically if device_id not in RFX_DEVICES: return _LOGGER.debug( "Device_id: %s device_update. 
Command: %s", device_id, event.values['Command'] ) if event.values['Command'] == 'On'\ or event.values['Command'] == 'Off': # Update the rfxtrx device state is_on = event.values['Command'] == 'On' RFX_DEVICES[device_id].update_state(is_on) elif hasattr(RFX_DEVICES[device_id], 'brightness')\ and event.values['Command'] == 'Set level': _brightness = (event.values['Dim level'] * 255 // 100) # Update the rfxtrx device state is_on = _brightness > 0 RFX_DEVICES[device_id].update_state(is_on, _brightness) # Fire event if RFX_DEVICES[device_id].should_fire_event: RFX_DEVICES[device_id].hass.bus.fire( EVENT_BUTTON_PRESSED, { ATTR_ENTITY_ID: RFX_DEVICES[device_id].entity_id, ATTR_STATE: event.values['Command'].lower() } ) _LOGGER.info( "Rfxtrx fired event: (event_type: %s, %s: %s, %s: %s)", EVENT_BUTTON_PRESSED, ATTR_ENTITY_ID, RFX_DEVICES[device_id].entity_id, ATTR_STATE, event.values['Command'].lower() ) class RfxtrxDevice(Entity): """Represents a Rfxtrx device. Contains the common logic for Rfxtrx lights and switches. 
""" def __init__(self, name, event, datas, signal_repetitions): """Initialize the device.""" self.signal_repetitions = signal_repetitions self._name = name self._event = event self._state = datas[ATTR_STATE] self._should_fire_event = datas[ATTR_FIREEVENT] self._brightness = 0 self.added_to_hass = False @asyncio.coroutine def async_added_to_hass(self): """Subscribe RFXtrx events.""" self.added_to_hass = True @property def should_poll(self): """No polling needed for a RFXtrx switch.""" return False @property def name(self): """Return the name of the device if any.""" return self._name @property def should_fire_event(self): """Return is the device must fire event.""" return self._should_fire_event @property def is_on(self): """Return true if device is on.""" return self._state @property def assumed_state(self): """Return true if unable to access real state of entity.""" return True def turn_off(self, **kwargs): """Turn the device off.""" self._send_command("turn_off") def update_state(self, state, brightness=0): """Update det state of the device.""" self._state = state self._brightness = brightness if self.added_to_hass: self.schedule_update_ha_state() def _send_command(self, command, brightness=0): if not self._event: return if command == "turn_on": for _ in range(self.signal_repetitions): self._event.device.send_on(self.hass.data[RFXOBJECT] .transport) self._state = True elif command == "dim": for _ in range(self.signal_repetitions): self._event.device.send_dim(self.hass.data[RFXOBJECT] .transport, brightness) self._state = True elif command == 'turn_off': for _ in range(self.signal_repetitions): self._event.device.send_off(self.hass.data[RFXOBJECT] .transport) self._state = False self._brightness = 0 elif command == "roll_up": for _ in range(self.signal_repetitions): self._event.device.send_open(self.hass.data[RFXOBJECT] .transport) elif command == "roll_down": for _ in range(self.signal_repetitions): self._event.device.send_close(self.hass.data[RFXOBJECT] 
.transport) elif command == "stop_roll": for _ in range(self.signal_repetitions): self._event.device.send_stop(self.hass.data[RFXOBJECT] .transport) if self.added_to_hass: self.schedule_update_ha_state()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module is used for version 2 of the Google Data APIs. __author__ = 'j.s@google.com (Jeff Scudder)' import unittest import all_tests import gdata.test_config as conf conf.options.set_value('runlive', 'true') conf.options.set_value('savecache', 'true') conf.options.set_value('clearcache', 'false') def suite(): return unittest.TestSuite((atom_tests.core_test.suite(),)) if __name__ == '__main__': unittest.TextTestRunner().run(all_tests.suite())
unknown
codeparrot/codeparrot-clean
# vim:sw=8:ts=8:et:nowrap import sys import urllib import urlparse import re import os.path import xml.sax import time import string import tempfile import mx.DateTime import traceback import miscfuncs toppath = miscfuncs.toppath pwcmdirs = miscfuncs.pwcmdirs tempfilename = miscfuncs.tempfilename from miscfuncs import NextAlphaString, AlphaStringToOrder from pullgluepages import ReplicatePatchToNewScrapedVersion, CmIndexFromNewPage # Pulls in all the debates, written answers, etc, glues them together, removes comments, # and stores them on the disk # we should put lordspages into cmpages as another directory, and move # all patch files into a set of directories parallel to the html and xml containing directories # index file which is created pwlordsindex = os.path.join(toppath, "lordindex.xml") # output directories (everything of one day in one file). pwlordspages = os.path.join(pwcmdirs, "lordspages") # this does the main loading and gluing of the initial day debate # files from which everything else feeds forward # gets the index file which we use to go through the pages class LoadLordsIndex(xml.sax.handler.ContentHandler): def __init__(self, lpwcmindex): self.res = [] if not os.path.isfile(lpwcmindex): return parser = xml.sax.make_parser() parser.setContentHandler(self) parser.parse(lpwcmindex) def startElement(self, name, attr): if name == "lordsdaydeb": ddr = (attr["date"], attr["url"], int(attr["type"])) if self.res and self.res[-1][0] == ddr[0]: if self.res[-1][2] > ddr[2]: return self.res.pop() self.res.append(ddr) # extract the table of contents from an index page def ExtractIndexContents(urlx, sdate): urx = urllib.urlopen(urlx) lktex = urx.read() urx.close() lktex = re.sub('^.*?<a name="contents">\s*</a>\s*(?s)', '', lktex) lktex = re.sub('^(.*?)<hr(?: ?/)?>.*$(?s)', r'\1', lktex) lktex = re.sub('<!--.*?-->(?s)', '', lktex) # 2012 summer recess Lords written answer handling #dmy = sdate[8:10]+sdate[5:7]+sdate[0:4] #m = re.search('<a 
href="(/pa/ld/ldtoday/writtens/' + dmy + '\.htm)">', lktex) #if m: # return ( ( m.group(1), 'Written Answers and Statements' ), ) # get the links res = re.findall('<h[23] align="?center"?><a href="([^"]*?\.htm)#[^"]*"(?: shape="rect")?>([^<]*)</a>\s*</h[23]>(?is)', lktex) if not res: res = re.findall('<p><a href\s*=\s*"([^"]*?\.htm)#[^"]*"><h3><center>((?:<!|[^<])*)(?:</center>|</h3>)+\s*</a></p>(?i)', lktex) if not res: print "no links found from day index page", urlx raise Exception, "no links" return res def GlueByNext(fout, urla, urlx, sdate): # put out the indexlink for comparison with the hansardindex file lt = time.gmtime() fout.write('<pagex url="%s" scrapedate="%s" scrapetime="%s"/>\n' % \ (urlx, time.strftime('%Y-%m-%d', lt), time.strftime('%X', lt))) if urla[0] == 'http://www.publications.parliament.uk/pa/ld200607/ldhansrd/text/61130-0001.htm': urla = [urla[0]] if urla[0] == 'http://www.publications.parliament.uk/pa/ld200607/ldhansrd/text/70125-0001.htm': urla = urla[2:] if urla[0] == 'http://www.publications.parliament.uk/pa/ld200506/ldhansrd/vo050517/text/50517-02.htm': urla.insert(0, 'http://www.publications.parliament.uk/pa/ld200506/ldhansrd/vo050517/text/50517-01.htm') if urla[0] == 'http://www.publications.parliament.uk/pa/ld200405/ldhansrd/vo041123/text/41123-02.htm': urla.insert(0, 'http://www.publications.parliament.uk/pa/ld200405/ldhansrd/vo041123/text/41123-01.htm') if urla[0] == 'http://www.publications.parliament.uk/pa/ld200708/ldhansrd/text/80722-0001.htm': urla = [urla[0]] if urla[0] == 'http://www.publications.parliament.uk/pa/ld200708/ldhansrd/text/81104-0001.htm': urla = [urla[0]] if urla[0] == 'http://www.publications.parliament.uk/pa/ld201011/ldhansrd/text/110119-0001.htm': urla = [urla[0]] # Missing header/footer, need to be able to find 2nd HTML page if urla[0] == 'http://www.publications.parliament.uk/pa/ld201213/ldhansrd/text/130327-0001.htm': urla.insert(1, 
'http://www.publications.parliament.uk/pa/ld201213/ldhansrd/text/130327-0002.htm') # loop which scrapes through all the pages following the nextlinks # knocking off the known links as we go in case a "next page" is missing. while urla: url = urla[0] ur = urllib.urlopen(url) sr = ur.read() ur.close(); # write the marker telling us which page this comes from fout.write('<page url="' + url + '"/>\n') # To cope with post 2006-07-03, turn <body> into <hr> sr = re.sub('<body><notus', '<body><hr><notus', sr) #sr = re.sub('<body><br>', '<body><hr><br>', sr) sr = re.sub('<body><h3 align="center"', '<body><hr><h3 align="center"', sr) sr = re.sub('<body><p>', '<body><hr><p>', sr) # post 2006-09 sr = re.sub("</?mekonParaReplace[^>]*>", "", sr) sr = re.sub("</?mekonHrefReplace[^>]*>", "", sr) sr = re.sub("<meta[^>]*>", "", sr) sr = re.sub('<a name="([^"]*)" />', r'<a name="\1"></a>', sr) # Should be WriteCleanText like for Commons? sr = re.sub('(<a href="[^"]*&amp)(">.*?)(</a>)(;.*?)([ .,<])', r'\1\4\2\4\3\5', sr) sr = re.sub('<div id="maincontent1">\s+<notus', '<hr> <notus', sr) sr = re.sub('<div id="maincontent1">\s*<link[^>]*>\s*<notus', '<hr> <notus', sr) # New 2008-10... sr = re.sub('<div id="maincontent1">\s*<link[^>]*>\s*<h1', '<hr> <h1', sr) # New 2011-01... sr = re.sub('<div id="maincontent">(?:\s*<table.*?</table>)?(?s)', '', sr) if url in ('http://www.publications.parliament.uk/pa/ld200607/ldhansrd/text/71001w0001.htm', 'http://www.publications.parliament.uk/pa/ld201011/ldhansrd/text/110118-0001.htm', ): sr = re.sub('Daily Hansard</span></div>', 'Daily Hansard</span></div> <hr>', sr) # To deal with missing header/footer on this day. Might need removing if they come back? 
if url == 'http://www.publications.parliament.uk/pa/ld201213/ldhansrd/text/121105-wms0001.htm' or re.match('http://www.publications.parliament.uk/pa/ld201213/ldhansrd/text/130327', url): sr = re.sub('<body>', '<body> <hr>', sr) # For 2013-02-26, 2013-05-08, so far sr = re.sub('<div id="content-small"><!--end', '<div id="content-small"> <hr><!--end', sr) # 2012 summer recess hack #if re.match('http://www.publications.parliament.uk/pa/ld/ldtoday/writtens/..0[78]2012\.htm$', url): # sr = sr.replace('<div class="hansardContent">', '<hr><a name="column_WA0">').replace('<hr/>', '<hr>') # post 2008-03, stupid duplication of <b>s sr = re.sub('<b>((?:<a name="[^"]*"></a>)*)<b>', '\\1<b>', sr) sr = re.sub('</b><!--[^>]*--></b>', '</b>', sr) # split by sections hrsections = re.split('<hr[^>]*>(?i)', sr) # this is the case for debates on 2003-03-13 page 30 # http://www.publications.parliament.uk/pa/cm200203/cmhansrd/vo030313/debtext/30313-32.htm if len(hrsections) == 1: # special case for the Grand committee proceedings on 2011-03-23 if url == 'http://www.publications.parliament.uk/pa/ld201011/ldhansrd/text/110323-gc0001.htm': miscfuncs.WriteCleanText(fout, sr, False) else: # print len(hrsections), 'page missing', url # fout.write('<UL><UL><UL></UL></UL></UL>\n') print "Bridging the empty page at %s" % url urla = urla[1:] continue # Lords Written Statements on 2006-07-05, for example, sadly if len(hrsections) == 2: miscfuncs.WriteCleanText(fout, hrsections[1], False) # write the body of the text for i in range(1, len(hrsections) - 1): miscfuncs.WriteCleanText(fout, hrsections[i], False) # find the lead on with the footer footer = hrsections[-1] # the files are sectioned by the <hr> tag into header, body and footer. nextsectionlink = re.findall('<\s*a\s+href\s*=\s*"?(.*?)"?\s*>next section</a>(?i)', footer) if len(nextsectionlink) > 1: raise Exception, "More than one Next Section!!!" 
if not nextsectionlink: urla = urla[1:] if urla: print "Bridging the missing next section link at %s" % url else: url = urlparse.urljoin(url, nextsectionlink[0]) # Specific case on 2011-02-23 if url == 'http://www.publications.parliament.uk/pa/ld201011/ldhansrd/text/110323-wms0001.htm': url = 'http://www.publications.parliament.uk/pa/ld201011/ldhansrd/text/110323-gc0001.htm' # this link is known if (len(urla) > 1) and (urla[1] == url): urla = urla[1:] # unknown link, either there's a gap in the urla's or a mistake. else: for uo in urla: if uo == url: print string.join(urla, "\n") print "\n\n" print url print "\n\n" raise Exception, "Next Section misses out the urla list" urla[0] = url pass #endwhile urla ############### # main function ############### def LordsPullGluePages(datefrom, dateto, bforcescrape): # make the output firectory if not os.path.isdir(pwlordspages): os.mkdir(pwlordspages) # load the index file previously made by createhansardindex clordsindex = LoadLordsIndex(pwlordsindex) # scan through the directory and make a mapping of all the copies for each lddaymap = { } for ldfile in os.listdir(pwlordspages): mnums = re.match("daylord(\d{4}-\d\d-\d\d)([a-z]*)\.html$", ldfile) if mnums: lddaymap.setdefault(mnums.group(1), []).append((AlphaStringToOrder(mnums.group(2)), mnums.group(2), ldfile)) elif os.path.isfile(os.path.join(pwlordspages, ldfile)): print "not recognized file:", ldfile, " in ", pwlordspages scrape = [] # Post 2010 election scraping done directly, not via index if dateto >= '2010-05-18': if datefrom > '2010-05-18': date = mx.DateTime.DateTimeFrom(datefrom) else: date = mx.DateTime.DateTimeFrom('2010-05-18') while date.date <= dateto and date < mx.DateTime.today(): for recordType, link in CmIndexFromNewPage(date, 'lords'): scrape.append((date.date, link, recordType)) date += mx.DateTime.DateTimeDelta(1) # loop through the index of each lord line. 
for dnu in clordsindex.res: # implement date range if dnu[0] < datefrom or dnu[0] > dateto: continue scrape.append(dnu) for dnu in scrape: # make the filename dgflatestalpha, dgflatest = "", None if dnu[0] in lddaymap: ldgf = max(lddaymap[dnu[0]]) dgflatestalpha = ldgf[1] dgflatest = os.path.join(pwlordspages, ldgf[2]) dgfnextalpha = NextAlphaString(dgflatestalpha) ldgfnext = 'daylord%s%s.html' % (dnu[0], dgfnextalpha) dgfnext = os.path.join(pwlordspages, ldgfnext) assert not dgflatest or os.path.isfile(dgflatest) assert not os.path.isfile(dgfnext) dgfnextstem = "%s%s" % (dnu[0], dgfnextalpha) dgflateststem = "%s%s" % (dnu[0], dgflatestalpha) # hansard index page urlx = dnu[1] # if not force scrape then we may choose to scrape it anyway # where the header doesn't match if not bforcescrape and dgflatest: fpgx = open(dgflatest, "r") pgx = fpgx.readline() fpgx.close() if pgx: pgx = re.findall('<pagex url="([^"]*)"[^/]*/>', pgx) if pgx: if pgx[0] == urlx: continue # make the message print dnu[0], (dgflatest and 'RE-scraping' or 'scraping'), re.sub(".*?ldhansrd/", "", urlx) # The different sections are often all run together # with the title of written answers in the middle of a page. icont = ExtractIndexContents(urlx, dnu[0]) # this gets the first link (the second [0][1] would be it's title.) urla = [ ] for iconti in icont: uo = urlparse.urljoin(urlx, iconti[0]) if (not urla) or (urla[-1] != uo): urla.append(uo) # now we take out the local pointer and start the gluing # we could check that all our links above get cleared. try: dtemp = open(tempfilename, "w") GlueByNext(dtemp, urla, urlx, dnu[0]) dtemp.close() except Exception, e: print e traceback.print_exc() print "Problem with %s, moving on" % dnu[0] continue # now we have to decide whether it's actually new and should be copied onto dgfnext. 
if dgflatest: # the removal of \r makes testing sizes unreliable -- : and os.path.getsize(tempfilename) == os.path.getsize(dgflatest): # load in as strings and check matching fdgflatest = open(dgflatest) sdgflatest = fdgflatest.readlines() fdgflatest.close() fdgfnext = open(tempfilename) sdgfnext = fdgfnext.readlines() fdgfnext.close() # first line contains the scrape date if sdgflatest[1:] == sdgfnext[1:]: print " matched with:", dgflatest continue ReplicatePatchToNewScrapedVersion('lordspages', dgflateststem, dgflatest, dgfnext, dgfnextstem) print dnu[0], (dgflatest and 'RE-scraped' or 'scraped'), re.sub(".*?cmpages/", "", dgfnext) os.rename(tempfilename, dgfnext)
unknown
codeparrot/codeparrot-clean
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.unbatch()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test from tensorflow.python.util import compat @test_util.run_all_in_graph_and_eager_modes class UnbatchTest(test_base.DatasetTestBase, parameterized.TestCase): def testUnbatchWithUnknownRankInput(self): dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]).apply(batching.unbatch()) self.assertDatasetProduces(dataset, range(4)) def testUnbatchScalarDataset(self): data = tuple([math_ops.range(10) for _ in range(3)]) data = dataset_ops.Dataset.from_tensor_slices(data) expected_types = (dtypes.int32,) 
* 3 data = data.batch(2) self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data)) data = data.apply(batching.unbatch()) self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data)) self.assertDatasetProduces(data, [(i,) * 3 for i in range(10)]) def testUnbatchDatasetWithStrings(self): data = tuple([math_ops.range(10) for _ in range(3)]) data = dataset_ops.Dataset.from_tensor_slices(data) data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z)) expected_types = (dtypes.int32, dtypes.string, dtypes.int32) data = data.batch(2) self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data)) data = data.apply(batching.unbatch()) self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data)) self.assertDatasetProduces( data, [(i, compat.as_bytes(str(i)), i) for i in range(10)]) def testUnbatchDatasetWithSparseTensor(self): st = sparse_tensor.SparseTensorValue( indices=[[i, i] for i in range(10)], values=list(range(10)), dense_shape=[10, 10]) data = dataset_ops.Dataset.from_tensors(st) data = data.apply(batching.unbatch()) data = data.batch(5) data = data.apply(batching.unbatch()) expected_output = [ sparse_tensor.SparseTensorValue([[i]], [i], [10]) for i in range(10) ] self.assertDatasetProduces(data, expected_output=expected_output) def testUnbatchDatasetWithDenseAndSparseTensor(self): st = sparse_tensor.SparseTensorValue( indices=[[i, i] for i in range(10)], values=list(range(10)), dense_shape=[10, 10]) data = dataset_ops.Dataset.from_tensors((list(range(10)), st)) data = data.apply(batching.unbatch()) data = data.batch(5) data = data.apply(batching.unbatch()) expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10])) for i in range(10)] self.assertDatasetProduces(data, expected_output=expected_output) def testUnbatchSingleElementTupleDataset(self): data = tuple([(math_ops.range(10),) for _ in range(3)]) data = dataset_ops.Dataset.from_tensor_slices(data) expected_types = 
((dtypes.int32,),) * 3 data = data.batch(2) self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data)) data = data.apply(batching.unbatch()) self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data)) self.assertDatasetProduces(data, [((i,),) * 3 for i in range(10)]) def testUnbatchMultiElementTupleDataset(self): data = tuple([(math_ops.range(10 * i, 10 * i + 10), array_ops.fill([10], "hi")) for i in range(3)]) data = dataset_ops.Dataset.from_tensor_slices(data) expected_types = ((dtypes.int32, dtypes.string),) * 3 data = data.batch(2) self.assertAllEqual(expected_types, dataset_ops.get_legacy_output_types(data)) data = data.apply(batching.unbatch()) self.assertAllEqual(expected_types, dataset_ops.get_legacy_output_types(data)) self.assertDatasetProduces( data, [((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")) for i in range(10)]) def testUnbatchEmpty(self): data = dataset_ops.Dataset.from_tensors( (constant_op.constant([]), constant_op.constant([], shape=[0, 4]), constant_op.constant([], shape=[0, 4, 0]))) data = data.apply(batching.unbatch()) self.assertDatasetProduces(data, []) def testUnbatchStaticShapeMismatch(self): data = dataset_ops.Dataset.from_tensors((np.arange(7), np.arange(8), np.arange(9))) with self.assertRaises(ValueError): data.apply(batching.unbatch()) # Note: dynamic shape mismatch is graph specific test. @test_util.run_deprecated_v1 def testSkipEagerUnbatchDynamicShapeMismatch(self): ph1 = array_ops.placeholder(dtypes.int32, shape=[None]) ph2 = array_ops.placeholder(dtypes.int32, shape=None) data = dataset_ops.Dataset.from_tensors((ph1, ph2)) data = data.apply(batching.unbatch()) iterator = dataset_ops.make_initializable_iterator(data) next_element = iterator.get_next() with self.cached_session() as sess: # Mismatch in the 0th dimension. 
sess.run( iterator.initializer, feed_dict={ ph1: np.arange(7).astype(np.int32), ph2: np.arange(8).astype(np.int32) }) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(next_element) # No 0th dimension (i.e. scalar value) for one component. sess.run( iterator.initializer, feed_dict={ ph1: np.arange(7).astype(np.int32), ph2: 7 }) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(next_element) if __name__ == "__main__": test.main()
unknown
codeparrot/codeparrot-clean
"""A prompt is the input to the model. Prompt is often constructed from multiple components and prompt values. Prompt classes and functions make constructing and working with prompts easy. """ from typing import TYPE_CHECKING from langchain_core._import_utils import import_attr if TYPE_CHECKING: from langchain_core.prompts.base import ( BasePromptTemplate, aformat_document, format_document, ) from langchain_core.prompts.chat import ( AIMessagePromptTemplate, BaseChatPromptTemplate, ChatMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, ) from langchain_core.prompts.dict import DictPromptTemplate from langchain_core.prompts.few_shot import ( FewShotChatMessagePromptTemplate, FewShotPromptTemplate, ) from langchain_core.prompts.few_shot_with_templates import ( FewShotPromptWithTemplates, ) from langchain_core.prompts.loading import load_prompt from langchain_core.prompts.prompt import PromptTemplate from langchain_core.prompts.string import ( StringPromptTemplate, check_valid_template, get_template_variables, jinja2_formatter, validate_jinja2, ) __all__ = ( "AIMessagePromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "ChatMessagePromptTemplate", "ChatPromptTemplate", "DictPromptTemplate", "FewShotChatMessagePromptTemplate", "FewShotPromptTemplate", "FewShotPromptWithTemplates", "HumanMessagePromptTemplate", "MessagesPlaceholder", "PromptTemplate", "StringPromptTemplate", "SystemMessagePromptTemplate", "aformat_document", "check_valid_template", "format_document", "get_template_variables", "jinja2_formatter", "load_prompt", "validate_jinja2", ) _dynamic_imports = { "BasePromptTemplate": "base", "format_document": "base", "aformat_document": "base", "AIMessagePromptTemplate": "chat", "BaseChatPromptTemplate": "chat", "ChatMessagePromptTemplate": "chat", "ChatPromptTemplate": "chat", "DictPromptTemplate": "dict", "HumanMessagePromptTemplate": "chat", "MessagesPlaceholder": "chat", 
"SystemMessagePromptTemplate": "chat", "FewShotChatMessagePromptTemplate": "few_shot", "FewShotPromptTemplate": "few_shot", "FewShotPromptWithTemplates": "few_shot_with_templates", "load_prompt": "loading", "PromptTemplate": "prompt", "StringPromptTemplate": "string", "check_valid_template": "string", "get_template_variables": "string", "jinja2_formatter": "string", "validate_jinja2": "string", } def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) result = import_attr(attr_name, module_name, __spec__.parent) globals()[attr_name] = result return result def __dir__() -> list[str]: return list(__all__)
python
github
https://github.com/langchain-ai/langchain
libs/core/langchain_core/prompts/__init__.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = """
---
module: vertica_schema
version_added: '2.0'
short_description: Adds or removes Vertica database schema and roles.
description:
  - Adds or removes Vertica database schema and, optionally, roles
    with schema access privileges.
  - A schema will not be removed until all the objects have been dropped.
  - In such a situation, if the module tries to remove the schema it
    will fail and only remove roles created for the schema if they have
    no dependencies.
options:
  name:
    description:
      - Name of the schema to add or remove.
    required: true
  usage_roles:
    description:
      - Comma separated list of roles to create and grant usage access to the schema.
    aliases: ['usage_role']
    required: false
    default: null
  create_roles:
    description:
      - Comma separated list of roles to create and grant usage and create access to the schema.
    aliases: ['create_role']
    required: false
    default: null
  owner:
    description:
      - Name of the user to set as owner of the schema.
    required: false
    default: null
  state:
    description:
      - Whether to create C(present), or drop C(absent) a schema.
    required: false
    default: present
    choices: ['present', 'absent']
  db:
    description:
      - Name of the Vertica database.
    required: false
    default: null
  cluster:
    description:
      - Name of the Vertica cluster.
    required: false
    default: localhost
  port:
    description:
      - Vertica cluster port to connect to.
    required: false
    default: 5433
  login_user:
    description:
      - The username used to authenticate with.
    required: false
    default: dbadmin
  login_password:
    description:
      - The password used to authenticate with.
    required: false
    default: null
notes:
  - The default authentication assumes that you are either logging in as or sudo'ing
    to the C(dbadmin) account on the host.
  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
    that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
  - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
    to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
    and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
    to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""

EXAMPLES = """
- name: creating a new vertica schema
  vertica_schema: name=schema_name db=db_name state=present

- name: creating a new schema with specific schema owner
  vertica_schema: name=schema_name owner=dbowner db=db_name state=present

- name: creating a new schema with roles
  vertica_schema:
    name=schema_name
    create_roles=schema_name_all
    usage_roles=schema_name_ro,schema_name_rw
    db=db_name
    state=present
"""

try:
    import pyodbc
except ImportError:
    pyodbc_found = False
else:
    pyodbc_found = True


class NotSupportedError(Exception):
    """Raised for requested changes this module cannot perform (e.g. chown)."""
    pass


class CannotDropError(Exception):
    """Raised when dropping a schema fails because objects still depend on it."""
    pass

# module specific functions


def get_schema_facts(cursor, schema=''):
    """Return a dict of facts about non-system schemas, keyed by lowercased name.

    Each entry carries the schema's name, owner, creation time and the roles
    that currently have usage and/or create privileges on it.  An empty
    ``schema`` argument matches every schema (see the ``? = ''`` guard in the
    queries below).
    """
    facts = {}
    cursor.execute("""
        select schema_name, schema_owner, create_time
        from schemata
        where not is_system_schema and schema_name not in ('public', 'TxtIndex')
        and (? = '' or schema_name ilike ?)
    """, schema, schema)
    while True:
        rows = cursor.fetchmany(100)
        if not rows:
            break
        for row in rows:
            facts[row.schema_name.lower()] = {
                'name': row.schema_name,
                'owner': row.schema_owner,
                'create_time': str(row.create_time),
                'usage_roles': [],
                'create_roles': []}
    # Second pass: attach the roles that were granted USAGE (and possibly
    # CREATE) on each schema found above.
    cursor.execute("""
        select g.object_name as schema_name, r.name as role_name,
        lower(g.privileges_description) privileges_description
        from roles r join grants g
        on g.grantee_id = r.role_id and g.object_type='SCHEMA'
        and g.privileges_description like '%USAGE%'
        and g.grantee not in ('public', 'dbadmin')
        and (? = '' or g.object_name ilike ?)
    """, schema, schema)
    while True:
        rows = cursor.fetchmany(100)
        if not rows:
            break
        for row in rows:
            schema_key = row.schema_name.lower()
            if 'create' in row.privileges_description:
                facts[schema_key]['create_roles'].append(row.role_name)
            else:
                facts[schema_key]['usage_roles'].append(row.role_name)
    return facts


def update_roles(schema_facts, cursor, schema,
                 existing, required,
                 create_existing, create_required):
    """Reconcile usage/create roles on ``schema`` toward the required sets.

    Drops roles that are no longer wanted at all, revokes create from roles
    demoted to usage-only, creates/grants any newly required roles.

    NOTE(review): role and schema names are interpolated directly into SQL.
    They come from the playbook (trusted input in Ansible's model), but they
    are not quoted/escaped -- names needing quoting will break or be unsafe.
    """
    for role in set(existing + create_existing) - set(required + create_required):
        cursor.execute("drop role {0} cascade".format(role))
    for role in set(create_existing) - set(create_required):
        cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
    for role in set(required + create_required) - set(existing + create_existing):
        cursor.execute("create role {0}".format(role))
        cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
    for role in set(create_required) - set(create_existing):
        cursor.execute("grant create on schema {0} to {1}".format(schema, role))


def check(schema_facts, schema, usage_roles, create_roles, owner):
    """Return True when the schema already matches the requested configuration.

    Used by check mode: a False return means a change would be made.
    """
    schema_key = schema.lower()
    if schema_key not in schema_facts:
        return False
    # Bug fix: the original used ``==`` here, reporting "changed" precisely
    # when the owner already matched (and "unchanged" when it differed).
    if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
        return False
    # Direct list comparison replaces the Python-2-only cmp() builtin;
    # behavior is identical (equal sorted lists <=> cmp(...) == 0).
    if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
        return False
    if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
        return False
    return True


def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
    """Ensure the schema exists with the requested owner and roles.

    Returns True when anything was changed.  Raises NotSupportedError if the
    schema exists but with a different owner (Vertica cannot reassign it here).
    """
    schema_key = schema.lower()
    if schema_key not in schema_facts:
        query_fragments = ["create schema {0}".format(schema)]
        if owner:
            query_fragments.append("authorization {0}".format(owner))
        cursor.execute(' '.join(query_fragments))
        update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
        schema_facts.update(get_schema_facts(cursor, schema))
        return True
    else:
        changed = False
        if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
            raise NotSupportedError((
                "Changing schema owner is not supported. "
                "Current owner: {0}."
            ).format(schema_facts[schema_key]['owner']))
        if (sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or
                sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles'])):
            update_roles(schema_facts, cursor, schema,
                         schema_facts[schema_key]['usage_roles'], usage_roles,
                         schema_facts[schema_key]['create_roles'], create_roles)
            changed = True
        if changed:
            schema_facts.update(get_schema_facts(cursor, schema))
        return changed


def absent(schema_facts, cursor, schema, usage_roles, create_roles):
    """Drop the schema (and its module-managed roles) if it exists.

    Returns True when the schema was dropped, False when it was already
    absent.  Raises CannotDropError when objects still depend on the schema.
    """
    schema_key = schema.lower()
    if schema_key in schema_facts:
        update_roles(schema_facts, cursor, schema,
                     schema_facts[schema_key]['usage_roles'], [],
                     schema_facts[schema_key]['create_roles'], [])
        try:
            cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
        except pyodbc.Error:
            raise CannotDropError("Dropping schema failed due to dependencies.")
        del schema_facts[schema_key]
        return True
    else:
        return False

# module logic


def main():
    """Ansible entry point: parse params, connect over ODBC, apply state."""
    module = AnsibleModule(
        argument_spec=dict(
            schema=dict(required=True, aliases=['name']),
            usage_roles=dict(default=None, aliases=['usage_role']),
            create_roles=dict(default=None, aliases=['create_role']),
            owner=dict(default=None),
            state=dict(default='present', choices=['absent', 'present']),
            db=dict(default=None),
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode=True)

    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")

    schema = module.params['schema']
    # List comprehensions instead of filter(None, ...): under Python 3 a
    # filter object is an iterator and would be silently exhausted by the
    # first sorted() call.
    usage_roles = []
    if module.params['usage_roles']:
        usage_roles = [r for r in module.params['usage_roles'].split(',') if r]
    create_roles = []
    if module.params['create_roles']:
        create_roles = [r for r in module.params['create_roles'].split(',') if r]
    owner = module.params['owner']
    state = module.params['state']
    db = ''
    if module.params['db']:
        db = module.params['db']

    changed = False
    try:
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
        ).format(module.params['cluster'], module.params['port'], db,
                 module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
        module.fail_json(msg="Unable to connect to database: {0}.".format(e))

    try:
        schema_facts = get_schema_facts(cursor)
        if module.check_mode:
            changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
        elif state == 'absent':
            try:
                changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
            except pyodbc.Error as e:
                module.fail_json(msg=str(e))
        elif state == 'present':
            try:
                changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
            except pyodbc.Error as e:
                module.fail_json(msg=str(e))
    except NotSupportedError as e:
        module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts})
    except CannotDropError as e:
        module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts})
    except SystemExit:
        # fail_json/exit_json raise SystemExit; never swallow it below.
        raise
    except Exception as e:
        # Pass a string, not the exception object, so fail_json serializes it.
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, schema=schema,
                     ansible_facts={'vertica_schemas': schema_facts})

# import ansible utilities
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
# Test models exercising Django ManyToManyField(through=...) in its many
# variants: explicit intermediate models, custom related names, self-referential
# m2m, through_fields disambiguation, and to_field-based FKs.

from datetime import datetime

from django.db import models
from django.utils.encoding import python_2_unicode_compatible


# M2M described on one of the models
@python_2_unicode_compatible
class Person(models.Model):
    name = models.CharField(max_length=128)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Group(models.Model):
    # Three m2m relations to Person, each through a different intermediate
    # model, to test default vs custom through behavior side by side.
    name = models.CharField(max_length=128)
    members = models.ManyToManyField(Person, through='Membership')
    custom_members = models.ManyToManyField(Person, through='CustomMembership',
                                            related_name="custom")
    nodefaultsnonulls = models.ManyToManyField(Person,
                                               through='TestNoDefaultsOrNulls',
                                               related_name="testnodefaultsnonulls")

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Membership(models.Model):
    # Plain intermediate model carrying extra data about the relation.
    person = models.ForeignKey(Person, models.CASCADE)
    group = models.ForeignKey(Group, models.CASCADE)
    date_joined = models.DateTimeField(default=datetime.now)
    invite_reason = models.CharField(max_length=64, null=True)

    class Meta:
        ordering = ('date_joined', 'invite_reason', 'group')

    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)


@python_2_unicode_compatible
class CustomMembership(models.Model):
    # Intermediate model with a custom column name, custom related_name, an
    # extra FK to another through model, and a custom db_table.
    person = models.ForeignKey(
        Person,
        models.CASCADE,
        db_column="custom_person_column",
        related_name="custom_person_related_name",
    )
    group = models.ForeignKey(Group, models.CASCADE)
    weird_fk = models.ForeignKey(Membership, models.SET_NULL, null=True)
    date_joined = models.DateTimeField(default=datetime.now)

    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)

    class Meta:
        db_table = "test_table"


class TestNoDefaultsOrNulls(models.Model):
    # Intermediate model with a required extra field (no default, no null).
    person = models.ForeignKey(Person, models.CASCADE)
    group = models.ForeignKey(Group, models.CASCADE)
    nodefaultnonull = models.CharField(max_length=5)


@python_2_unicode_compatible
class PersonSelfRefM2M(models.Model):
    # Non-symmetrical self-referential m2m through an explicit model.
    name = models.CharField(max_length=5)
    friends = models.ManyToManyField('self', through="Friendship",
                                     symmetrical=False)

    def __str__(self):
        return self.name


class Friendship(models.Model):
    first = models.ForeignKey(PersonSelfRefM2M, models.CASCADE,
                              related_name="rel_from_set")
    second = models.ForeignKey(PersonSelfRefM2M, models.CASCADE,
                               related_name="rel_to_set")
    date_friended = models.DateTimeField()


# Custom through link fields
@python_2_unicode_compatible
class Event(models.Model):
    title = models.CharField(max_length=50)
    invitees = models.ManyToManyField(Person, through='Invitation',
                                      through_fields=('event', 'invitee'),
                                      related_name='events_invited')

    def __str__(self):
        return self.title


class Invitation(models.Model):
    event = models.ForeignKey(Event, models.CASCADE, related_name='invitations')
    # field order is deliberately inverted. the target field is "invitee".
    inviter = models.ForeignKey(Person, models.CASCADE,
                                related_name='invitations_sent')
    invitee = models.ForeignKey(Person, models.CASCADE,
                                related_name='invitations')


@python_2_unicode_compatible
class Employee(models.Model):
    # Self-referential m2m needing through_fields because the through model
    # has more than two FKs back to Employee.
    name = models.CharField(max_length=5)
    subordinates = models.ManyToManyField('self', through="Relationship",
                                          through_fields=('source', 'target'),
                                          symmetrical=False)

    class Meta:
        ordering = ('pk',)

    def __str__(self):
        return self.name


class Relationship(models.Model):
    # field order is deliberately inverted.
    another = models.ForeignKey(Employee, models.SET_NULL,
                                related_name="rel_another_set", null=True)
    target = models.ForeignKey(Employee, models.CASCADE,
                               related_name="rel_target_set")
    source = models.ForeignKey(Employee, models.CASCADE,
                               related_name="rel_source_set")


class Ingredient(models.Model):
    iname = models.CharField(max_length=20, unique=True)

    class Meta:
        ordering = ('iname',)


class Recipe(models.Model):
    # m2m through a model whose FKs point at non-pk unique fields (to_field).
    rname = models.CharField(max_length=20, unique=True)
    ingredients = models.ManyToManyField(
        Ingredient, through='RecipeIngredient', related_name='recipes',
    )

    class Meta:
        ordering = ('rname',)


class RecipeIngredient(models.Model):
    ingredient = models.ForeignKey(Ingredient, models.CASCADE, to_field='iname')
    recipe = models.ForeignKey(Recipe, models.CASCADE, to_field='rname')
unknown
codeparrot/codeparrot-clean
/*
 * Copyright 2025-present the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.build.autoconfigure;

import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

import org.gradle.api.file.DirectoryProperty;
import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.Classpath;
import org.gradle.api.tasks.OutputDirectory;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.VerificationException;
import org.gradle.language.base.plugins.LifecycleBasePlugin;

/**
 * Task to check the contents of a project's
 * {@code META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports}
 * file. Verifies that every listed class exists on the configured classpath, is
 * annotated with {@code @AutoConfiguration}, and that entries are sorted
 * alphabetically; failures are reported in a file under the output directory.
 *
 * @author Andy Wilkinson
 */
public abstract class CheckAutoConfigurationImports extends AutoConfigurationImportsTask {

	private FileCollection classpath = getProject().getObjects().fileCollection();

	public CheckAutoConfigurationImports() {
		getOutputDirectory().convention(getProject().getLayout().getBuildDirectory().dir(getName()));
		setGroup(LifecycleBasePlugin.VERIFICATION_GROUP);
	}

	/**
	 * The classpath roots searched for the {@code .class} files of the imported
	 * auto-configuration classes.
	 */
	@Classpath
	public FileCollection getClasspath() {
		return this.classpath;
	}

	public void setClasspath(Object classpath) {
		this.classpath = getProject().getObjects().fileCollection().from(classpath);
	}

	/**
	 * Directory to which the failure report (and, when entries are unsorted, a
	 * correctly sorted copy of the imports file) is written.
	 */
	@OutputDirectory
	public abstract DirectoryProperty getOutputDirectory();

	@TaskAction
	void execute() {
		// getSource() must resolve to exactly one imports file.
		File importsFile = getSource().getSingleFile();
		check(importsFile);
	}

	// Collects all problems before failing so the report lists everything at once.
	private void check(File importsFile) {
		List<String> imports = loadImports();
		List<String> problems = new ArrayList<>();
		for (String imported : imports) {
			File classFile = find(imported);
			if (classFile == null) {
				problems.add("'%s' was not found".formatted(imported));
			}
			else if (!correctlyAnnotated(classFile)) {
				problems.add("'%s' is not annotated with @AutoConfiguration".formatted(imported));
			}
		}
		List<String> sortedValues = new ArrayList<>(imports);
		Collections.sort(sortedValues);
		if (!sortedValues.equals(imports)) {
			// Write the expected (sorted) content so the user can copy it over.
			File sortedOutputFile = getOutputDirectory().file("sorted-" + importsFile.getName()).get().getAsFile();
			writeString(sortedOutputFile, sortedValues.stream().collect(Collectors.joining(System.lineSeparator()))
					+ System.lineSeparator());
			problems.add("Entries should be sorted alphabetically (expect content written to "
					+ sortedOutputFile.getAbsolutePath() + ")");
		}
		File outputFile = getOutputDirectory().file("failure-report.txt").get().getAsFile();
		writeReport(importsFile, problems, outputFile);
		if (!problems.isEmpty()) {
			throw new VerificationException("%s check failed. See '%s' for details"
				.formatted(AutoConfigurationImportsTask.IMPORTS_FILE, outputFile));
		}
	}

	// Returns the .class file for className under any classpath root, or null.
	private File find(String className) {
		for (File root : this.classpath.getFiles()) {
			String classFilePath = className.replace(".", "/") + ".class";
			File classFile = new File(root, classFilePath);
			if (classFile.isFile()) {
				return classFile;
			}
		}
		return null;
	}

	private boolean correctlyAnnotated(File classFile) {
		return AutoConfigurationClass.of(classFile) != null;
	}

	// Always writes the report file (possibly empty) so the task output is stable.
	private void writeReport(File importsFile, List<String> problems, File outputFile) {
		outputFile.getParentFile().mkdirs();
		StringBuilder report = new StringBuilder();
		if (!problems.isEmpty()) {
			report.append("Found problems in '%s':%n".formatted(importsFile));
			problems.forEach((problem) -> report.append("  - %s%n".formatted(problem)));
		}
		writeString(outputFile, report.toString());
	}

	private void writeString(File file, String content) {
		try {
			Files.writeString(file.toPath(), content);
		}
		catch (IOException ex) {
			throw new UncheckedIOException(ex);
		}
	}

}
java
github
https://github.com/spring-projects/spring-boot
buildSrc/src/main/java/org/springframework/boot/build/autoconfigure/CheckAutoConfigurationImports.java
# schema: opcodes[byte] = [mnemonic, ins, outs, gas]
#   mnemonic - opcode name
#   ins      - number of stack items consumed
#   outs     - number of stack items pushed
#   gas      - base gas cost
#
# NOTE(review): the original header here described a five-field schema
# including a "memuses" array of (start, len) pairs (values below zero taken
# as stackarg indices, zero or greater as literals), but every entry below
# has only four fields -- the memuses column does not exist in this table.
# NOTE(review): the outs values for RETURN (0xf3) and SUICIDE (0xff) look
# suspect (neither pushes a result) -- confirm against the yellow paper
# before relying on them.
opcodes = {
    0x00: ['STOP', 0, 0, 0],
    0x01: ['ADD', 2, 1, 1],
    0x02: ['MUL', 2, 1, 1],
    0x03: ['SUB', 2, 1, 1],
    0x04: ['DIV', 2, 1, 1],
    0x05: ['SDIV', 2, 1, 1],
    0x06: ['MOD', 2, 1, 1],
    0x07: ['SMOD', 2, 1, 1],
    0x08: ['ADDMOD', 3, 1, 1],
    0x09: ['MULMOD', 3, 1, 1],
    0x0a: ['EXP', 2, 1, 1],
    0x0b: ['SIGNEXTEND', 2, 1, 1],
    0x10: ['LT', 2, 1, 1],
    0x11: ['GT', 2, 1, 1],
    0x12: ['SLT', 2, 1, 1],
    0x13: ['SGT', 2, 1, 1],
    0x14: ['EQ', 2, 1, 1],
    0x15: ['ISZERO', 1, 1, 1],
    0x16: ['AND', 2, 1, 1],
    0x17: ['OR', 2, 1, 1],
    0x18: ['XOR', 2, 1, 1],
    0x19: ['NOT', 1, 1, 1],
    0x1a: ['BYTE', 2, 1, 1],
    0x20: ['SHA3', 2, 1, 10],
    0x30: ['ADDRESS', 0, 1, 1],
    0x31: ['BALANCE', 1, 1, 20],
    0x32: ['ORIGIN', 0, 1, 1],
    0x33: ['CALLER', 0, 1, 1],
    0x34: ['CALLVALUE', 0, 1, 1],
    0x35: ['CALLDATALOAD', 1, 1, 1],
    0x36: ['CALLDATASIZE', 0, 1, 1],
    0x37: ['CALLDATACOPY', 3, 0, 1],
    0x38: ['CODESIZE', 0, 1, 1],
    0x39: ['CODECOPY', 3, 0, 1],
    0x3a: ['GASPRICE', 0, 1, 1],
    0x3b: ['EXTCODESIZE', 1, 1, 1],
    0x3c: ['EXTCODECOPY', 4, 0, 1],
    0x40: ['BLOCKHASH', 1, 1, 1],
    0x41: ['COINBASE', 0, 1, 1],
    0x42: ['TIMESTAMP', 0, 1, 1],
    0x43: ['NUMBER', 0, 1, 1],
    0x44: ['DIFFICULTY', 0, 1, 1],
    0x45: ['GASLIMIT', 0, 1, 1],
    0x50: ['POP', 1, 0, 1],
    0x51: ['MLOAD', 1, 1, 1],
    0x52: ['MSTORE', 2, 0, 1],
    0x53: ['MSTORE8', 2, 0, 1],
    0x54: ['SLOAD', 1, 1, 20],
    0x55: ['SSTORE', 2, 0, 0],
    0x56: ['JUMP', 1, 0, 1],
    0x57: ['JUMPI', 2, 0, 1],
    0x58: ['PC', 0, 1, 1],
    0x59: ['MSIZE', 0, 1, 1],
    0x5a: ['GAS', 0, 1, 1],
    0x5b: ['JUMPDEST', 0, 0, 1],
    0xa0: ['LOG0', 2, 0, 32],
    0xa1: ['LOG1', 3, 0, 64],
    0xa2: ['LOG2', 4, 0, 96],
    0xa3: ['LOG3', 5, 0, 128],
    0xa4: ['LOG4', 6, 0, 160],
    0xf0: ['CREATE', 3, 1, 100],
    0xf1: ['CALL', 7, 1, 20],
    0xf2: ['CALLCODE', 7, 1, 20],
    0xf3: ['RETURN', 2, 1, 1],
    0xff: ['SUICIDE', 1, 1, 0],
}

# PUSH1..PUSH32 occupy 0x60..0x7f.
for i in range(1, 33):
    opcodes[0x5f + i] = ['PUSH' + str(i), 0, 1, 1]

# DUP1..DUP16 occupy 0x80..0x8f, SWAP1..SWAP16 occupy 0x90..0x9f.
for i in range(1, 17):
    opcodes[0x7f + i] = ['DUP' + str(i), i, i + 1, 1]
    opcodes[0x8f + i] = ['SWAP' + str(i), i + 1, i + 1, 1]

# Build the reverse lookup (mnemonic -> byte) and, via vars(), inject each
# mnemonic as a module-level name bound to its schema row (e.g. ADD, MLOAD).
reverse_opcodes = {}
for o in opcodes:
    vars()[opcodes[o][0]] = opcodes[o]
    reverse_opcodes[opcodes[o][0]] = o

# Non-opcode gas prices
GDEFAULT = 1
GMEMORY = 1
GSTORAGEKILL = -100
GSTORAGEMOD = 100
GSTORAGEADD = 300
GEXPONENTBYTE = 1    # cost of EXP exponent per byte
GCOPY = 1            # cost to copy one 32 byte word
GCONTRACTBYTE = 5    # one byte of code in contract creation
GTXCOST = 500        # TX BASE GAS COST
GTXDATAZERO = 1      # TX DATA ZERO BYTE GAS COST
GTXDATANONZERO = 5   # TX DATA NON ZERO BYTE GAS COST
GSHA3WORD = 10       # Cost of SHA3 per word
GSHA256WORD = 50     # Cost of SHA256 per word
GRIPEMD160WORD = 50  # Cost of RIPEMD160 per word
unknown
codeparrot/codeparrot-clean
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_GPU_PRIM_HELPERS_H_
#define TENSORFLOW_CORE_KERNELS_GPU_PRIM_HELPERS_H_

#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM

#define EIGEN_USE_GPU

#include "xla/stream_executor/stream.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/gpu_prim.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/util/gpu_kernel_helper.h"

namespace tensorflow {

namespace detail {

// Fills out[i] = start + i * delta for i in [0, size).
template <typename T>
__global__ void RangeInitKernel(const T start, const T delta, const T size,
                                T* out) {
  GPU_1D_KERNEL_LOOP(i, size) { out[i] = start + i * delta; }
}

// Initialize out with range start, start + delta, start + 2 * delta, ...
template <typename T>
Status RangeInit(const Eigen::GpuDevice& d, const T start, const T delta,
                 const T size, T* out) {
  if (size == 0) return OkStatus();
  GpuLaunchConfig config = GetGpuLaunchConfig(size, d);
  return GpuLaunchKernel(RangeInitKernel<T>, config.block_count,
                         config.thread_per_block, 0, d.stream(), start, delta,
                         size, out);
}

// Computes keys_out = sorted(keys_in), and indices_out = argsort(keys_in).
// If keys_out is not required, it can be set to nullptr.
// If indices_in is nullptr, the range of input indices [0, size) will be used.
template <bool Descending, typename Tkey, typename Tindex>
Status GpuRadixSortImpl(OpKernelContext* context, int size, const Tkey* keys_in,
                        Tkey* keys_out,           // Optional
                        const Tindex* indices_in,  // Optional
                        Tindex* indices_out,
                        int num_bits = sizeof(Tkey) * 8) {
  if (size == 0) return OkStatus();
  if (num_bits == 0) {
    // Workaround for CUB failing when begin_bit = end_bit = 0 (e.g., when all
    // keys are 0, so no sorting is needed).
    se::Stream* stream = context->op_device_context()->stream();
    if (keys_out) {
      // Copy keys_in to keys_out.
      size_t num_bytes = size * sizeof(Tkey);
      se::DeviceMemoryBase src(const_cast<Tkey*>(keys_in), num_bytes);
      se::DeviceMemoryBase dst(keys_out, num_bytes);
      TF_RETURN_IF_ERROR(stream->Memcpy(&dst, src, num_bytes));
    }
    if (indices_in) {
      // Copy indices_in to indices_out.
      size_t num_bytes = size * sizeof(Tindex);
      se::DeviceMemoryBase src(const_cast<Tindex*>(indices_in), num_bytes);
      se::DeviceMemoryBase dst(indices_out, num_bytes);
      TF_RETURN_IF_ERROR(stream->Memcpy(&dst, src, num_bytes));
    } else {
      // Set output indices to range.
      const Eigen::GpuDevice& device =
          context->eigen_device<Eigen::GpuDevice>();
      TF_RETURN_IF_ERROR(detail::RangeInit(device, Tindex(0), Tindex(1),
                                           Tindex(size), indices_out));
    }
    return OkStatus();
  }
  // Allocate temporary inputs/outputs if necessary.
  Tensor tmp_indices_in;
  if (!indices_in) {
    TF_RETURN_IF_ERROR(context->allocate_temp(
        DataTypeToEnum<Tindex>::value, TensorShape({size}), &tmp_indices_in));
    Tindex* mutable_indices_in = tmp_indices_in.flat<Tindex>().data();
    indices_in = mutable_indices_in;
    const Eigen::GpuDevice& device = context->eigen_device<Eigen::GpuDevice>();
    // Initialize indices_in to the input index range.
    TF_RETURN_IF_ERROR(detail::RangeInit(device, Tindex(0), Tindex(1),
                                         Tindex(size), mutable_indices_in));
  }
  Tensor tmp_keys_out;
  if (!keys_out) {
    TF_RETURN_IF_ERROR(context->allocate_temp(
        DataTypeToEnum<Tkey>::value, TensorShape({size}), &tmp_keys_out));
    keys_out = tmp_keys_out.flat<Tkey>().data();
  }
  // Determine temporary device storage requirements.
  // (CUB convention: a first call with a null temp pointer only computes the
  // required temp_storage_bytes.)
  Tensor temp_storage;
  size_t temp_storage_bytes = 0;
  const auto& cu_stream = GetGpuStream(context);
  gpuError_t err;
  if constexpr (Descending) {
    err = gpuprim::DeviceRadixSort::SortPairsDescending(
        nullptr, temp_storage_bytes, keys_in, keys_out, indices_in, indices_out,
        size, /*begin_bit=*/0, /*end_bit=*/num_bits, cu_stream);
  } else {
    err = gpuprim::DeviceRadixSort::SortPairs(
        nullptr, temp_storage_bytes, keys_in, keys_out, indices_in, indices_out,
        size, /*begin_bit=*/0, /*end_bit=*/num_bits, cu_stream);
  }
  if (err != 0) {
    return errors::Internal(
        "Failed to launch gpuprim::DeviceRadixSort::SortPairs to calculate "
        "temp_storage_bytes, status: ",
        cudaGetErrorString(err));
  }
  // Allocate temporary storage.
  TF_RETURN_IF_ERROR(context->allocate_temp(
      DT_INT8, TensorShape({static_cast<int64_t>(temp_storage_bytes)}),
      &temp_storage));
  // Sort indices by keys.
  if constexpr (Descending) {
    err = gpuprim::DeviceRadixSort::SortPairsDescending(
        temp_storage.flat<int8>().data(), temp_storage_bytes, keys_in, keys_out,
        indices_in, indices_out, size,
        /*begin_bit=*/0, /*end_bit=*/num_bits, cu_stream);
  } else {
    err = gpuprim::DeviceRadixSort::SortPairs(
        temp_storage.flat<int8>().data(), temp_storage_bytes, keys_in, keys_out,
        indices_in, indices_out, size,
        /*begin_bit=*/0, /*end_bit=*/num_bits, cu_stream);
  }
  if (err != 0) {
    return errors::Internal(
        "Failed to launch gpuprim::DeviceRadixSort::SortPairs, "
        "temp_storage_bytes: ",
        temp_storage_bytes, "status: ", cudaGetErrorString(err));
  }
  return OkStatus();
}

}  // namespace detail

// Ascending key/index sort; see detail::GpuRadixSortImpl for parameter
// semantics (keys_out and indices_in are optional).
template <typename Tkey, typename Tindex>
Status GpuRadixSort(OpKernelContext* context, int size, const Tkey* keys_in,
                    Tkey* keys_out,            // Optional
                    const Tindex* indices_in,  // Optional
                    Tindex* indices_out, int num_bits = sizeof(Tkey) * 8) {
  return detail::GpuRadixSortImpl</*Descending=*/false>(
      context, size, keys_in, keys_out, indices_in, indices_out, num_bits);
}

// Descending variant of GpuRadixSort.
template <typename Tkey, typename Tindex>
Status GpuRadixSortDescending(OpKernelContext* context, int size,
                              const Tkey* keys_in,
                              Tkey* keys_out,            // Optional
                              const Tindex* indices_in,  // Optional
                              Tindex* indices_out,
                              int num_bits = sizeof(Tkey) * 8) {
  return detail::GpuRadixSortImpl</*Descending=*/true>(
      context, size, keys_in, keys_out, indices_in, indices_out, num_bits);
}

// Computes an inclusive prefix sum of `input` into `output` on the kernel's
// GPU stream, allocating CUB temp storage via the OpKernelContext.
template <typename InputIteratorT, typename OutputIteratorT>
Status GpuInclusivePrefixSum(OpKernelContext* context, int size,
                             InputIteratorT input, OutputIteratorT output) {
  static_assert(
      !std::is_same<typename std::remove_reference<decltype(*input)>::type,
                    bool>::value,
      "GpuInclusivePrefixSum does not work correct with booleans, please use "
      "TransformInputIterator to explicitly cast to an integer.");
  if (size == 0) return OkStatus();
  const auto& cu_stream = GetGpuStream(context);
  // First call computes the required temp storage size only.
  size_t temp_storage_bytes;
  auto err = gpuprim::DeviceScan::InclusiveSum(nullptr, temp_storage_bytes,
                                               input, output, size, cu_stream);
  if (err != 0) {
    return errors::Internal(
        "Failed to launch gpuprim::DeviceScan::InclusiveSum to calculate "
        "temp_storage_bytes, status: ",
        cudaGetErrorString(err));
  }
  Tensor temp_storage;
  TF_RETURN_IF_ERROR(context->allocate_temp(
      DT_INT8, TensorShape({static_cast<int64_t>(temp_storage_bytes)}),
      &temp_storage));
  err = gpuprim::DeviceScan::InclusiveSum(temp_storage.flat<int8>().data(),
                                          temp_storage_bytes, input, output,
                                          size, cu_stream);
  if (err != 0) {
    return errors::Internal(
        "Failed to launch gpuprim::DeviceScan::InclusiveSum, "
        "temp_storage_bytes: ",
        temp_storage_bytes, ", status: ", cudaGetErrorString(err));
  }
  return OkStatus();
}

// Note that this behaves deterministically for repeat calls on the same
// device.
// Reduces each segment of `input` (bounded by consecutive entries of
// `segment_offsets`) with `reduce_op`, writing one value per segment.
template <typename InputIteratorT, typename OutputIteratorT,
          typename OffsetIteratorT, typename ReduceOp, typename T>
Status GpuSegmentedReduce(
    OpKernelContext* context, int num_segments, ReduceOp reduce_op,
    const T& initial_value,
    InputIteratorT input,             // [any]
    OffsetIteratorT segment_offsets,  // [num_segments + 1]
    OutputIteratorT output) {         // [num_segments]
  if (num_segments == 0) return OkStatus();
  const auto& cu_stream = GetGpuStream(context);
  // First call computes the required temp storage size only.
  size_t temp_storage_bytes;
  auto err = gpuprim::DeviceSegmentedReduce::Reduce(
      nullptr, temp_storage_bytes, input, output, num_segments, segment_offsets,
      segment_offsets + 1, reduce_op, initial_value, cu_stream);
  if (err != 0) {
    return errors::Internal(
        "Failed to launch gpuprim::DeviceSegmentedReduce::Reduce to calculate "
        "temp_storage_bytes, status: ",
        cudaGetErrorString(err));
  }
  Tensor temp_storage;
  TF_RETURN_IF_ERROR(context->allocate_temp(
      DT_INT8, TensorShape({static_cast<int64_t>(temp_storage_bytes)}),
      &temp_storage));
  err = gpuprim::DeviceSegmentedReduce::Reduce(
      temp_storage.flat<int8>().data(), temp_storage_bytes, input, output,
      num_segments, segment_offsets, segment_offsets + 1, reduce_op,
      initial_value, cu_stream);
  if (err != 0) {
    return errors::Internal(
        "Failed to launch gpuprim::DeviceSegmentedReduce::Reduce"
        ", temp_storage_bytes: ",
        temp_storage_bytes, ", status: ", cudaGetErrorString(err));
  }
  return OkStatus();
}

// Compacts `input` into `output`, keeping elements whose corresponding flag
// is nonzero.  The number of selected elements is written to
// *out_num_selected (device memory); a scratch scalar is allocated when the
// caller passes nullptr.
template <typename InputIteratorT, typename FlagIteratorT,
          typename OutputIteratorT, typename NumSelectedT = int>
Status GpuSelectFlagged(OpKernelContext* context, int size,
                        InputIteratorT input, FlagIteratorT flags,
                        OutputIteratorT output,
                        NumSelectedT* out_num_selected = nullptr) {
  const auto& cu_stream = GetGpuStream(context);
  Tensor out_num_selected_t;
  if (!out_num_selected) {
    TF_RETURN_IF_ERROR(
        context->allocate_temp(DataTypeToEnum<NumSelectedT>::value,
                               TensorShape({}), &out_num_selected_t));
    out_num_selected = out_num_selected_t.scalar<NumSelectedT>().data();
  }
  // First call computes the required temp storage size only.
  size_t temp_storage_bytes;
  auto err =
      gpuprim::DeviceSelect::Flagged(nullptr, temp_storage_bytes, input, flags,
                                     output, out_num_selected, size, cu_stream);
  if (err != 0) {
    return errors::Internal(
        "Failed to launch gpuprim::DeviceSelect::Flagged to calculate "
        "temp_storage_bytes, status: ",
        cudaGetErrorString(err));
  }
  Tensor temp_storage;
  TF_RETURN_IF_ERROR(context->allocate_temp(
      DT_INT8, TensorShape({static_cast<int64_t>(temp_storage_bytes)}),
      &temp_storage));
  err = gpuprim::DeviceSelect::Flagged(temp_storage.flat<int8>().data(),
                                       temp_storage_bytes, input, flags, output,
                                       out_num_selected, size, cu_stream);
  if (err != 0) {
    return errors::Internal(
        "Failed to launch gpuprim::DeviceSelect::Flagged, temp_storage_bytes: ",
        temp_storage_bytes, ", status: ", cudaGetErrorString(err));
  }
  return OkStatus();
}

}  // namespace tensorflow

#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM

#endif  // TENSORFLOW_CORE_KERNELS_GPU_PRIM_HELPERS_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/kernels/gpu_prim_helpers.h
use crate::process::imp::orphan::{OrphanQueue, Wait}; use crate::process::kill::Kill; use crate::signal::unix::InternalStream; use std::future::Future; use std::io; use std::ops::Deref; use std::pin::Pin; use std::process::ExitStatus; use std::task::Context; use std::task::Poll; /// Orchestrates between registering interest for receiving signals when a /// child process has exited, and attempting to poll for process completion. #[derive(Debug)] pub(crate) struct Reaper<W, Q, S> where W: Wait, Q: OrphanQueue<W>, { inner: Option<W>, orphan_queue: Q, signal: S, } impl<W, Q, S> Deref for Reaper<W, Q, S> where W: Wait, Q: OrphanQueue<W>, { type Target = W; fn deref(&self) -> &Self::Target { self.inner() } } impl<W, Q, S> Reaper<W, Q, S> where W: Wait, Q: OrphanQueue<W>, { pub(crate) fn new(inner: W, orphan_queue: Q, signal: S) -> Self { Self { inner: Some(inner), orphan_queue, signal, } } fn inner(&self) -> &W { self.inner.as_ref().expect("inner has gone away") } pub(crate) fn inner_mut(&mut self) -> &mut W { self.inner.as_mut().expect("inner has gone away") } } impl<W, Q, S> Future for Reaper<W, Q, S> where W: Wait + Unpin, Q: OrphanQueue<W> + Unpin, S: InternalStream + Unpin, { type Output = io::Result<ExitStatus>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { loop { // If the child hasn't exited yet, then it's our responsibility to // ensure the current task gets notified when it might be able to // make progress. We can use the delivery of a SIGCHLD signal as a // sign that we can potentially make progress. // // However, we will register for a notification on the next signal // BEFORE we poll the child. Otherwise it is possible that the child // can exit and the signal can arrive after we last polled the child, // but before we've registered for a notification on the next signal // (this can cause a deadlock if there are no more spawned children // which can generate a different signal for us). 
A side effect of // pre-registering for signal notifications is that when the child // exits, we will have already registered for an additional // notification we don't need to consume. If another signal arrives, // this future's task will be notified/woken up again. Since the // futures model allows for spurious wake ups this extra wakeup // should not cause significant issues with parent futures. let registered_interest = self.signal.poll_recv(cx).is_pending(); if let Some(status) = self.inner_mut().try_wait()? { return Poll::Ready(Ok(status)); } // If our attempt to poll for the next signal was not ready, then // we've arranged for our task to get notified and we can bail out. if registered_interest { return Poll::Pending; } else { // Otherwise, if the signal stream delivered a signal to us, we // won't get notified at the next signal, so we'll loop and try // again. continue; } } } } impl<W, Q, S> Kill for Reaper<W, Q, S> where W: Kill + Wait, Q: OrphanQueue<W>, { fn kill(&mut self) -> io::Result<()> { self.inner_mut().kill() } } impl<W, Q, S> Drop for Reaper<W, Q, S> where W: Wait, Q: OrphanQueue<W>, { fn drop(&mut self) { if let Ok(Some(_)) = self.inner_mut().try_wait() { return; } let orphan = self.inner.take().unwrap(); self.orphan_queue.push_orphan(orphan); } } #[cfg(all(test, not(loom)))] mod test { use super::*; use crate::process::unix::orphan::test::MockQueue; use futures::future::FutureExt; use std::os::unix::process::ExitStatusExt; use std::process::ExitStatus; use std::task::Context; use std::task::Poll; #[derive(Debug)] struct MockWait { total_kills: usize, total_waits: usize, num_wait_until_status: usize, status: ExitStatus, } impl MockWait { fn new(status: ExitStatus, num_wait_until_status: usize) -> Self { Self { total_kills: 0, total_waits: 0, num_wait_until_status, status, } } } impl Wait for MockWait { fn id(&self) -> u32 { 0 } fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { let ret = if self.num_wait_until_status == 
self.total_waits { Some(self.status) } else { None }; self.total_waits += 1; Ok(ret) } } impl Kill for MockWait { fn kill(&mut self) -> io::Result<()> { self.total_kills += 1; Ok(()) } } struct MockStream { total_polls: usize, values: Vec<Option<()>>, } impl MockStream { fn new(values: Vec<Option<()>>) -> Self { Self { total_polls: 0, values, } } } impl InternalStream for MockStream { fn poll_recv(&mut self, _cx: &mut Context<'_>) -> Poll<Option<()>> { self.total_polls += 1; match self.values.remove(0) { Some(()) => Poll::Ready(Some(())), None => Poll::Pending, } } } #[test] fn reaper() { let exit = ExitStatus::from_raw(0); let mock = MockWait::new(exit, 3); let mut grim = Reaper::new( mock, MockQueue::new(), MockStream::new(vec![None, Some(()), None, None, None]), ); let waker = futures::task::noop_waker(); let mut context = Context::from_waker(&waker); // Not yet exited, interest registered assert!(grim.poll_unpin(&mut context).is_pending()); assert_eq!(1, grim.signal.total_polls); assert_eq!(1, grim.total_waits); assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); // Not yet exited, couldn't register interest the first time // but managed to register interest the second time around assert!(grim.poll_unpin(&mut context).is_pending()); assert_eq!(3, grim.signal.total_polls); assert_eq!(3, grim.total_waits); assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); // Exited if let Poll::Ready(r) = grim.poll_unpin(&mut context) { assert!(r.is_ok()); let exit_code = r.unwrap(); assert_eq!(exit_code, exit); } else { unreachable!(); } assert_eq!(4, grim.signal.total_polls); assert_eq!(4, grim.total_waits); assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); } #[test] fn kill() { let exit = ExitStatus::from_raw(0); let mut grim = Reaper::new( MockWait::new(exit, 0), MockQueue::new(), MockStream::new(vec![None]), ); grim.kill().unwrap(); assert_eq!(1, grim.total_kills); assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); } #[test] fn 
drop_reaps_if_possible() { let exit = ExitStatus::from_raw(0); let mut mock = MockWait::new(exit, 0); { let queue = MockQueue::new(); let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![])); drop(grim); assert!(queue.all_enqueued.borrow().is_empty()); } assert_eq!(1, mock.total_waits); assert_eq!(0, mock.total_kills); } #[test] fn drop_enqueues_orphan_if_wait_fails() { let exit = ExitStatus::from_raw(0); let mut mock = MockWait::new(exit, 2); { let queue = MockQueue::<&mut MockWait>::new(); let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![])); drop(grim); assert_eq!(1, queue.all_enqueued.borrow().len()); } assert_eq!(1, mock.total_waits); assert_eq!(0, mock.total_kills); } }
rust
github
https://github.com/tokio-rs/tokio
tokio/src/process/unix/reap.rs
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "FoldInitTypeCheck.h" #include "clang/AST/ASTContext.h" #include "clang/ASTMatchers/ASTMatchFinder.h" using namespace clang::ast_matchers; namespace clang::tidy::bugprone { void FoldInitTypeCheck::registerMatchers(MatchFinder *Finder) { // We match functions of interest and bind the iterator and init value types. // Note: Right now we check only builtin types. const auto BuiltinTypeWithId = [](const char *ID) { return hasCanonicalType(builtinType().bind(ID)); }; const auto IteratorWithValueType = [&BuiltinTypeWithId](const char *ID) { return anyOf( // Pointer types. pointsTo(BuiltinTypeWithId(ID)), // Iterator types have an `operator*` whose return type is the type we // care about. // Notes: // - `operator*` can be in one of the bases of the iterator class. // - this does not handle cases when the `operator*` is defined // outside the iterator class. recordType( hasDeclaration(cxxRecordDecl(isSameOrDerivedFrom(has(functionDecl( hasOverloadedOperatorName("*"), returns(qualType(hasCanonicalType(anyOf( // `value_type& operator*();` references(BuiltinTypeWithId(ID)), // `value_type operator*();` BuiltinTypeWithId(ID), // `auto operator*();`, `decltype(auto) operator*();` autoType(hasDeducedType(BuiltinTypeWithId(ID))) // ))))))))))); }; const auto IteratorParam = parmVarDecl( hasType(hasCanonicalType(IteratorWithValueType("IterValueType")))); const auto Iterator2Param = parmVarDecl( hasType(hasCanonicalType(IteratorWithValueType("Iter2ValueType")))); const auto InitParam = parmVarDecl(hasType(BuiltinTypeWithId("InitType"))); // std::accumulate, std::reduce. 
Finder->addMatcher( callExpr(callee(functionDecl( hasAnyName("::std::accumulate", "::std::reduce"), hasParameter(0, IteratorParam), hasParameter(2, InitParam))), argumentCountIs(3)) .bind("Call"), this); // std::inner_product. Finder->addMatcher( callExpr(callee(functionDecl(hasName("::std::inner_product"), hasParameter(0, IteratorParam), hasParameter(2, Iterator2Param), hasParameter(3, InitParam))), argumentCountIs(4)) .bind("Call"), this); // std::reduce with a policy. Finder->addMatcher( callExpr(callee(functionDecl(hasName("::std::reduce"), hasParameter(1, IteratorParam), hasParameter(3, InitParam))), argumentCountIs(4)) .bind("Call"), this); // std::inner_product with a policy. Finder->addMatcher( callExpr(callee(functionDecl(hasName("::std::inner_product"), hasParameter(1, IteratorParam), hasParameter(3, Iterator2Param), hasParameter(4, InitParam))), argumentCountIs(5)) .bind("Call"), this); } /// Returns true if ValueType is allowed to fold into InitType, i.e. if: /// static_cast<InitType>(ValueType{some_value}) /// does not result in trucation. static bool isValidBuiltinFold(const BuiltinType &ValueType, const BuiltinType &InitType, const ASTContext &Context) { const auto ValueTypeSize = Context.getTypeSize(&ValueType); const auto InitTypeSize = Context.getTypeSize(&InitType); // It's OK to fold a float into a float of bigger or equal size, but not OK to // fold into an int. if (ValueType.isFloatingPoint()) return InitType.isFloatingPoint() && InitTypeSize >= ValueTypeSize; // It's OK to fold an int into: // - an int of the same size and signedness. // - a bigger int, regardless of signedness. // - FIXME: should it be a warning to fold into floating point? 
if (ValueType.isInteger()) { if (InitType.isInteger()) { if (InitType.isSignedInteger() == ValueType.isSignedInteger()) return InitTypeSize >= ValueTypeSize; return InitTypeSize > ValueTypeSize; } if (InitType.isFloatingPoint()) return InitTypeSize >= ValueTypeSize; } return false; } /// Prints a diagnostic if IterValueType doe snot fold into IterValueType (see // isValidBuiltinFold for details). void FoldInitTypeCheck::doCheck(const BuiltinType &IterValueType, const BuiltinType &InitType, const ASTContext &Context, const CallExpr &CallNode) { if (!isValidBuiltinFold(IterValueType, InitType, Context)) { diag(CallNode.getExprLoc(), "folding type %0 into type %1 might result in " "loss of precision") << IterValueType.desugar() << InitType.desugar(); } } void FoldInitTypeCheck::check(const MatchFinder::MatchResult &Result) { // Given the iterator and init value type retrieved by the matchers, // we check that the ::value_type of the iterator is compatible with // the init value type. const auto *InitType = Result.Nodes.getNodeAs<BuiltinType>("InitType"); const auto *IterValueType = Result.Nodes.getNodeAs<BuiltinType>("IterValueType"); assert(InitType != nullptr); assert(IterValueType != nullptr); const auto *CallNode = Result.Nodes.getNodeAs<CallExpr>("Call"); assert(CallNode != nullptr); doCheck(*IterValueType, *InitType, *Result.Context, *CallNode); if (const auto *Iter2ValueType = Result.Nodes.getNodeAs<BuiltinType>("Iter2ValueType")) doCheck(*Iter2ValueType, *InitType, *Result.Context, *CallNode); } } // namespace clang::tidy::bugprone
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/clang-tidy/bugprone/FoldInitTypeCheck.cpp
""" This module contains the 'base' GEOSGeometry object -- all GEOS Geometries inherit from this object. """ from __future__ import unicode_literals import json from ctypes import addressof, byref, c_double from django.contrib.gis import gdal from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex from django.contrib.gis.geos import prototypes as capi from django.contrib.gis.geos.base import GEOSBase from django.contrib.gis.geos.coordseq import GEOSCoordSeq from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.libgeos import GEOM_PTR from django.contrib.gis.geos.mutable_list import ListMixin from django.contrib.gis.geos.prepared import PreparedGeometry from django.contrib.gis.geos.prototypes.io import ( ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w, ) from django.utils import six from django.utils.encoding import force_bytes, force_text class GEOSGeometry(GEOSBase, ListMixin): "A class that, generally, encapsulates a GEOS geometry." _GEOS_CLASSES = None ptr_type = GEOM_PTR has_cs = False # Only Point, LineString, LinearRing have coordinate sequences def __init__(self, geo_input, srid=None): """ The base constructor for GEOS geometry objects, and may take the following inputs: * strings: - WKT - HEXEWKB (a PostGIS-specific canonical form) - GeoJSON (requires GDAL) * buffer: - WKB The `srid` keyword is used to specify the Source Reference Identifier (SRID) number for this Geometry. If not set, the SRID will be None. """ if isinstance(geo_input, bytes): geo_input = force_text(geo_input) if isinstance(geo_input, six.string_types): wkt_m = wkt_regex.match(geo_input) if wkt_m: # Handling WKT input. if wkt_m.group('srid'): srid = int(wkt_m.group('srid')) g = wkt_r().read(force_bytes(wkt_m.group('wkt'))) elif hex_regex.match(geo_input): # Handling HEXEWKB input. g = wkb_r().read(force_bytes(geo_input)) elif json_regex.match(geo_input): # Handling GeoJSON input. 
if not gdal.HAS_GDAL: raise ValueError('Initializing geometry from JSON input requires GDAL.') g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb) else: raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.') elif isinstance(geo_input, GEOM_PTR): # When the input is a pointer to a geometry (GEOM_PTR). g = geo_input elif isinstance(geo_input, six.memoryview): # When the input is a buffer (WKB). g = wkb_r().read(geo_input) elif isinstance(geo_input, GEOSGeometry): g = capi.geom_clone(geo_input.ptr) else: # Invalid geometry type. raise TypeError('Improper geometry input type: %s' % str(type(geo_input))) if g: # Setting the pointer object with a valid pointer. self.ptr = g else: raise GEOSException('Could not initialize GEOS Geometry with given input.') # Post-initialization setup. self._post_init(srid) def _post_init(self, srid): "Helper routine for performing post-initialization setup." # Setting the SRID, if given. if srid and isinstance(srid, int): self.srid = srid # Setting the class type (e.g., Point, Polygon, etc.) if GEOSGeometry._GEOS_CLASSES is None: # Lazy-loaded variable to avoid import conflicts with GEOSGeometry. from .linestring import LineString, LinearRing from .point import Point from .polygon import Polygon from .collections import ( GeometryCollection, MultiPoint, MultiLineString, MultiPolygon) GEOSGeometry._GEOS_CLASSES = { 0: Point, 1: LineString, 2: LinearRing, 3: Polygon, 4: MultiPoint, 5: MultiLineString, 6: MultiPolygon, 7: GeometryCollection, } self.__class__ = GEOSGeometry._GEOS_CLASSES[self.geom_typeid] # Setting the coordinate sequence for the geometry (will be None on # geometries that do not have coordinate sequences) self._set_cs() def __del__(self): """ Destroys this Geometry; in other words, frees the memory used by the GEOS C++ object. 
""" if self._ptr and capi: capi.destroy_geom(self._ptr) def __copy__(self): """ Returns a clone because the copy of a GEOSGeometry may contain an invalid pointer location if the original is garbage collected. """ return self.clone() def __deepcopy__(self, memodict): """ The `deepcopy` routine is used by the `Node` class of django.utils.tree; thus, the protocol routine needs to be implemented to return correct copies (clones) of these GEOS objects, which use C pointers. """ return self.clone() def __str__(self): "EWKT is used for the string representation." return self.ewkt def __repr__(self): "Short-hand representation because WKT may be very large." return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr))) # Pickling support def __getstate__(self): # The pickled state is simply a tuple of the WKB (in string form) # and the SRID. return bytes(self.wkb), self.srid def __setstate__(self, state): # Instantiating from the tuple state that was pickled. wkb, srid = state ptr = wkb_r().read(six.memoryview(wkb)) if not ptr: raise GEOSException('Invalid Geometry loaded from pickled state.') self.ptr = ptr self._post_init(srid) # Comparison operators def __eq__(self, other): """ Equivalence testing, a Geometry may be compared with another Geometry or a WKT representation. """ if isinstance(other, six.string_types): return self.wkt == other elif isinstance(other, GEOSGeometry): return self.equals_exact(other) else: return False def __ne__(self, other): "The not equals operator." return not (self == other) # ### Geometry set-like operations ### # Thanks to Sean Gillies for inspiration: # http://lists.gispython.org/pipermail/community/2007-July/001034.html # g = g1 | g2 def __or__(self, other): "Returns the union of this Geometry and the other." return self.union(other) # g = g1 & g2 def __and__(self, other): "Returns the intersection of this Geometry and the other." 
return self.intersection(other) # g = g1 - g2 def __sub__(self, other): "Return the difference this Geometry and the other." return self.difference(other) # g = g1 ^ g2 def __xor__(self, other): "Return the symmetric difference of this Geometry and the other." return self.sym_difference(other) # #### Coordinate Sequence Routines #### def _set_cs(self): "Sets the coordinate sequence for this Geometry." if self.has_cs: self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz) else: self._cs = None @property def coord_seq(self): "Returns a clone of the coordinate sequence for this Geometry." if self.has_cs: return self._cs.clone() # #### Geometry Info #### @property def geom_type(self): "Returns a string representing the Geometry type, e.g. 'Polygon'" return capi.geos_type(self.ptr).decode() @property def geom_typeid(self): "Returns an integer representing the Geometry type." return capi.geos_typeid(self.ptr) @property def num_geom(self): "Returns the number of geometries in the Geometry." return capi.get_num_geoms(self.ptr) @property def num_coords(self): "Returns the number of coordinates in the Geometry." return capi.get_num_coords(self.ptr) @property def num_points(self): "Returns the number points, or coordinates, in the Geometry." return self.num_coords @property def dims(self): "Returns the dimension of this Geometry (0=point, 1=line, 2=surface)." return capi.get_dims(self.ptr) def normalize(self): "Converts this Geometry to normal form (or canonical form)." return capi.geos_normalize(self.ptr) # #### Unary predicates #### @property def empty(self): """ Returns a boolean indicating whether the set of points in this Geometry are empty. """ return capi.geos_isempty(self.ptr) @property def hasz(self): "Returns whether the geometry has a 3D dimension." return capi.geos_hasz(self.ptr) @property def ring(self): "Returns whether or not the geometry is a ring." return capi.geos_isring(self.ptr) @property def simple(self): "Returns false if the Geometry not simple." 
return capi.geos_issimple(self.ptr) @property def valid(self): "This property tests the validity of this Geometry." return capi.geos_isvalid(self.ptr) @property def valid_reason(self): """ Returns a string containing the reason for any invalidity. """ return capi.geos_isvalidreason(self.ptr).decode() # #### Binary predicates. #### def contains(self, other): "Returns true if other.within(this) returns true." return capi.geos_contains(self.ptr, other.ptr) def crosses(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is T*T****** (for a point and a curve,a point and an area or a line and an area) 0******** (for two curves). """ return capi.geos_crosses(self.ptr, other.ptr) def disjoint(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is FF*FF****. """ return capi.geos_disjoint(self.ptr, other.ptr) def equals(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is T*F**FFF*. """ return capi.geos_equals(self.ptr, other.ptr) def equals_exact(self, other, tolerance=0): """ Returns true if the two Geometries are exactly equal, up to a specified tolerance. """ return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance)) def intersects(self, other): "Returns true if disjoint returns false." return capi.geos_intersects(self.ptr, other.ptr) def overlaps(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves). """ return capi.geos_overlaps(self.ptr, other.ptr) def relate_pattern(self, other, pattern): """ Returns true if the elements in the DE-9IM intersection matrix for the two Geometries match the elements in pattern. 
""" if not isinstance(pattern, six.string_types) or len(pattern) > 9: raise GEOSException('invalid intersection matrix pattern') return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern)) def touches(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is FT*******, F**T***** or F***T****. """ return capi.geos_touches(self.ptr, other.ptr) def within(self, other): """ Returns true if the DE-9IM intersection matrix for the two Geometries is T*F**F***. """ return capi.geos_within(self.ptr, other.ptr) # #### SRID Routines #### def get_srid(self): "Gets the SRID for the geometry, returns None if no SRID is set." s = capi.geos_get_srid(self.ptr) if s == 0: return None else: return s def set_srid(self, srid): "Sets the SRID for the geometry." capi.geos_set_srid(self.ptr, 0 if srid is None else srid) srid = property(get_srid, set_srid) # #### Output Routines #### @property def ewkt(self): """ Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values are only included in this representation if GEOS >= 3.3.0. """ if self.get_srid(): return 'SRID=%s;%s' % (self.srid, self.wkt) else: return self.wkt @property def wkt(self): "Returns the WKT (Well-Known Text) representation of this Geometry." return wkt_w(3 if self.hasz else 2).write(self).decode() @property def hex(self): """ Returns the WKB of this Geometry in hexadecimal form. Please note that the SRID is not included in this representation because it is not a part of the OGC specification (use the `hexewkb` property instead). """ # A possible faster, all-python, implementation: # str(self.wkb).encode('hex') return wkb_w(3 if self.hasz else 2).write_hex(self) @property def hexewkb(self): """ Returns the EWKB of this Geometry in hexadecimal form. This is an extension of the WKB specification that includes SRID value that are a part of this geometry. 
""" return ewkb_w(3 if self.hasz else 2).write_hex(self) @property def json(self): """ Returns GeoJSON representation of this Geometry. """ return json.dumps({'type': self.__class__.__name__, 'coordinates': self.coords}) geojson = json @property def wkb(self): """ Returns the WKB (Well-Known Binary) representation of this Geometry as a Python buffer. SRID and Z values are not included, use the `ewkb` property instead. """ return wkb_w(3 if self.hasz else 2).write(self) @property def ewkb(self): """ Return the EWKB representation of this Geometry as a Python buffer. This is an extension of the WKB specification that includes any SRID value that are a part of this geometry. """ return ewkb_w(3 if self.hasz else 2).write(self) @property def kml(self): "Returns the KML representation of this Geometry." gtype = self.geom_type return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype) @property def prepared(self): """ Returns a PreparedGeometry corresponding to this geometry -- it is optimized for the contains, intersects, and covers operations. """ return PreparedGeometry(self) # #### GDAL-specific output routines #### @property def ogr(self): "Returns the OGR Geometry for this Geometry." if not gdal.HAS_GDAL: raise GEOSException('GDAL required to convert to an OGRGeometry.') if self.srid: try: return gdal.OGRGeometry(self.wkb, self.srid) except gdal.SRSException: pass return gdal.OGRGeometry(self.wkb) @property def srs(self): "Returns the OSR SpatialReference for SRID of this Geometry." if not gdal.HAS_GDAL: raise GEOSException('GDAL required to return a SpatialReference object.') if self.srid: try: return gdal.SpatialReference(self.srid) except gdal.SRSException: pass return None @property def crs(self): "Alias for `srs` property." return self.srs def transform(self, ct, clone=False): """ Requires GDAL. Transforms the geometry according to the given transformation object, which may be an integer SRID, and WKT or PROJ.4 string. 
By default, the geometry is transformed in-place and nothing is returned. However if the `clone` keyword is set, then this geometry will not be modified and a transformed clone will be returned instead. """ srid = self.srid if ct == srid: # short-circuit where source & dest SRIDs match if clone: return self.clone() else: return if (srid is None) or (srid < 0): raise GEOSException("Calling transform() with no SRID set is not supported") if not gdal.HAS_GDAL: raise GEOSException("GDAL library is not available to transform() geometry.") # Creating an OGR Geometry, which is then transformed. g = self.ogr g.transform(ct) # Getting a new GEOS pointer ptr = wkb_r().read(g.wkb) if clone: # User wants a cloned transformed geometry returned. return GEOSGeometry(ptr, srid=g.srid) if ptr: # Reassigning pointer, and performing post-initialization setup # again due to the reassignment. capi.destroy_geom(self.ptr) self.ptr = ptr self._post_init(g.srid) else: raise GEOSException('Transformed WKB was invalid.') # #### Topology Routines #### def _topology(self, gptr): "Helper routine to return Geometry from the given pointer." return GEOSGeometry(gptr, srid=self.srid) @property def boundary(self): "Returns the boundary as a newly allocated Geometry object." return self._topology(capi.geos_boundary(self.ptr)) def buffer(self, width, quadsegs=8): """ Returns a geometry that represents all points whose distance from this Geometry is less than or equal to distance. Calculations are in the Spatial Reference System of this Geometry. The optional third parameter sets the number of segment used to approximate a quarter circle (defaults to 8). (Text from PostGIS documentation at ch. 6.1.3) """ return self._topology(capi.geos_buffer(self.ptr, width, quadsegs)) @property def centroid(self): """ The centroid is equal to the centroid of the set of component Geometries of highest dimension (since the lower-dimension geometries contribute zero "weight" to the centroid). 
""" return self._topology(capi.geos_centroid(self.ptr)) @property def convex_hull(self): """ Returns the smallest convex Polygon that contains all the points in the Geometry. """ return self._topology(capi.geos_convexhull(self.ptr)) def difference(self, other): """ Returns a Geometry representing the points making up this Geometry that do not make up other. """ return self._topology(capi.geos_difference(self.ptr, other.ptr)) @property def envelope(self): "Return the envelope for this geometry (a polygon)." return self._topology(capi.geos_envelope(self.ptr)) def intersection(self, other): "Returns a Geometry representing the points shared by this Geometry and other." return self._topology(capi.geos_intersection(self.ptr, other.ptr)) @property def point_on_surface(self): "Computes an interior point of this Geometry." return self._topology(capi.geos_pointonsurface(self.ptr)) def relate(self, other): "Returns the DE-9IM intersection matrix for this Geometry and the other." return capi.geos_relate(self.ptr, other.ptr).decode() def simplify(self, tolerance=0.0, preserve_topology=False): """ Returns the Geometry, simplified using the Douglas-Peucker algorithm to the specified tolerance (higher tolerance => less points). If no tolerance provided, defaults to 0. By default, this function does not preserve topology - e.g. polygons can be split, collapse to lines or disappear holes can be created or disappear, and lines can cross. By specifying preserve_topology=True, the result will have the same dimension and number of components as the input. This is significantly slower. """ if preserve_topology: return self._topology(capi.geos_preservesimplify(self.ptr, tolerance)) else: return self._topology(capi.geos_simplify(self.ptr, tolerance)) def sym_difference(self, other): """ Returns a set combining the points in this Geometry not in other, and the points in other not in this Geometry. 
""" return self._topology(capi.geos_symdifference(self.ptr, other.ptr)) def union(self, other): "Returns a Geometry representing all the points in this Geometry and other." return self._topology(capi.geos_union(self.ptr, other.ptr)) # #### Other Routines #### @property def area(self): "Returns the area of the Geometry." return capi.geos_area(self.ptr, byref(c_double())) def distance(self, other): """ Returns the distance between the closest points on this Geometry and the other. Units will be in those of the coordinate system of the Geometry. """ if not isinstance(other, GEOSGeometry): raise TypeError('distance() works only on other GEOS Geometries.') return capi.geos_distance(self.ptr, other.ptr, byref(c_double())) @property def extent(self): """ Returns the extent of this geometry as a 4-tuple, consisting of (xmin, ymin, xmax, ymax). """ from .point import Point env = self.envelope if isinstance(env, Point): xmin, ymin = env.tuple xmax, ymax = xmin, ymin else: xmin, ymin = env[0][0] xmax, ymax = env[0][2] return (xmin, ymin, xmax, ymax) @property def length(self): """ Returns the length of this Geometry (e.g., 0 for point, or the circumference of a Polygon). """ return capi.geos_length(self.ptr, byref(c_double())) def clone(self): "Clones this Geometry." return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid) class ProjectInterpolateMixin(object): """ Used for LineString and MultiLineString. 
""" def interpolate(self, distance): return self._topology(capi.geos_interpolate(self.ptr, distance)) def interpolate_normalized(self, distance): return self._topology(capi.geos_interpolate_normalized(self.ptr, distance)) def project(self, point): from .point import Point if not isinstance(point, Point): raise TypeError('locate_point argument must be a Point') return capi.geos_project(self.ptr, point.ptr) def project_normalized(self, point): from .point import Point if not isinstance(point, Point): raise TypeError('locate_point argument must be a Point') return capi.geos_project_normalized(self.ptr, point.ptr)
unknown
codeparrot/codeparrot-clean
/** * AOT support for application contexts. */ @NullMarked package org.springframework.context.aot; import org.jspecify.annotations.NullMarked;
java
github
https://github.com/spring-projects/spring-framework
spring-context/src/main/java/org/springframework/context/aot/package-info.java
# -*- coding: utf-8 -*-
"""
This script starts or resumes the training of an autoencoder or an encoder
that is defined in model_definitions.

Due to the large amount of input arguments, this should be run from a shell
script. A documentation for the possible inputs is given in submit.sh.
"""
from keras.models import load_model
from keras import backend as K
import numpy as np
import os
import argparse

from util.run_cnn import (
    train_and_test_model, load_zero_center_data, h5_get_number_of_rows,
    setup_learning_rate, get_autoencoder_loss, look_up_latest_epoch,
    get_supervised_loss_and_metric, setup_autoencoder_model,
    setup_encoder_dense_model, setup_optimizer, setup_successive_training,
    switch_encoder_weights, make_encoder_stateful, unfreeze_conv_layers,
    lr_schedule)
from get_dataset_info import get_dataset_info


def unpack_parsed_args():
    """
    Read out input arguments and return them as a dict for the training.

    All parsed values are printed before returning.

    Returns:
        params (dict): Dict with all input parameters of the parser.
    """
    parser = argparse.ArgumentParser(description='The main function for training \
autoencoder-based networks. See submit.sh for detailed explanations of all parameters.')
    parser.add_argument('modeltag', type=str, help='e.g vgg_3-sgd; -XXX indicates version number and \
is ommited when looking up model by modeltag')
    parser.add_argument('runs', help="How many new epochs should be trained by executing this script..", type=int)
    parser.add_argument("autoencoder_stage", help="Stage of autoencoder training: 0 for AE, 1 for enc,\
2 for unfrozen, 3 for parallel", type=int)
    parser.add_argument("autoencoder_epoch", help="Epoch of AE network to be used", type=int)
    parser.add_argument("encoder_epoch", help="Epoch of encoder network to be used", type=int)
    parser.add_argument("class_type_bins", type=int)
    parser.add_argument("class_type_name", help="Name of target", type=str)
    parser.add_argument("zero_center", type=int)
    parser.add_argument("verbose", type=int)
    parser.add_argument("dataset", type=str, help="Name of test/training dataset to be used, \
eg xzt. n_bins is automatically selected")
    parser.add_argument("learning_rate", type=float)
    parser.add_argument("learning_rate_decay", help="LR decay per epoch \
(multipl.), or name of lr schedule")
    parser.add_argument("epsilon", type=int, help="Exponent of the \
epsilon used for adam.")  #adam default: 1e-08
    parser.add_argument("lambda_comp", type=int)
    parser.add_argument("optimizer", type=str)
    parser.add_argument("options", type=str)
    parser.add_argument("encoder_version", default="", nargs="?", type=str,
                        help="e.g. -LeLu; Str added to the supervised file names\
to allow multiple runs on the same model.")
    parser.add_argument("--ae_loss_name", default="mse", nargs="?", type=str,
                        help="Loss that is used during AE training. Default is mse.")
    parser.add_argument("--supervised_loss", default="auto", nargs="?", type=str,
                        help="Loss that is used during supervised training.\
Default is 'auto', which is based on the number of output neurons.")
    parser.add_argument("--init_model", default=None, nargs="?",
                        help="Path to a model that is used for initializing.")

    args = parser.parse_args()
    params = vars(args)
    # Echo every parsed argument so the batch-job logfile records the setup.
    print("\nArguments handed to parser:")
    for keyword in params:
        print(keyword, ":\t", params[keyword])
    print("\n")
    return params


def extra_autoencoder_stages_setup(autoencoder_stage, ae_loss_name, supervised_loss):
    """
    Setup for special trainings that don't fit into regular categories.

    They still use the same network architectures as the standard
    categories, but have some additional properties during training.

    Returns:
        autoencoder_stage (int): Defines which architecture is loaded.
        ae_loss_name (str): Loss of autoencoder training.
        supervised_loss (str or None): Loss of supervised training.
        unfreeze_layer_training (bool): Parts of the network will be
            gradually unfrozen during training.
        is_AE_adevers_training (int): Defines the stage of adversarial
            autoencoder training.
    """
    # Stage 4 reuses the stage-1 architecture but gradually unfreezes layers.
    if autoencoder_stage==4:
        print("Autoencoder stage 4: Unfreeze Training. Setting up network like in AE stage 1...")
        autoencoder_stage=1
        unfreeze_layer_training = True
    else:
        unfreeze_layer_training = False

    #If autoencoder stage 5,6 or 7 is selected: Adversarial autoencoders
    #AAE training: similar to stage 0, but with other loss and training setup
    #is_AE_adevers_training=0 no AAE training
    #1 or 2: train critic and generator alternating
    #3: Train only critic
    if autoencoder_stage==5:
        print("Starting AE training in adversarial setup (stage 5). Loss will\
 be cat cross entropy and labels will eb fixed! Otherwise like stage 0.")
        #for adversarial AE training, setup like normal autoencoder
        autoencoder_stage=0
        ae_loss_name = "categorical_crossentropy"
        supervised_loss = "cat_cross_inv"
        is_AE_adevers_training=1
    elif autoencoder_stage==6:
        #preperation for AAE training: train only the critic
        autoencoder_stage=0
        ae_loss_name = "categorical_crossentropy"
        supervised_loss = None
        is_AE_adevers_training=3
    elif autoencoder_stage==7:
        #preperation for AAE training: train only the critic
        autoencoder_stage=0
        ae_loss_name = "categorical_crossentropy"
        supervised_loss = None
        is_AE_adevers_training=4
    else:
        is_AE_adevers_training=False
        # NOTE(review): this branch also runs for stage 4 (which was mapped
        # to stage 1 above) and resets unfreeze_layer_training to False,
        # which would disable the stage-4 unfreeze schedule -- confirm
        # whether this reset is intentional.
        unfreeze_layer_training=False

    return (autoencoder_stage, ae_loss_name, supervised_loss,
            unfreeze_layer_training, is_AE_adevers_training)


def build_model(autoencoder_stage, modeltag, epoch, optimizer, ae_loss,
                options, custom_objects, model_folder, class_type,
                encoder_version, encoder_epoch, supervised_loss,
                supervised_metrics, init_model_path):
    """
    Construct and compile the model. Also returns info for successive training.

    The basic architectures are:
    Stage 0: Autoencoder
    Stage 1: Encoder+dense w/ frozen encoder, initialized to give AE
    Variants of the above are:
    Stage 2: Encoder+dense completely unfrozen, random initialized.
    Stage 3: Like stage 1, but trained successively.

    Returns:
        model : The keras model to train.
        modelname (str) : Name of that model as a string.
        autoencoder_model (str) : For encoder+dense networks: Path of the
            autoencoder model file that the encoder part is taken from.
        is_autoencoder (bool) : Whether the model is an autoencoder or not.
        last_encoder_layer_index_override (int or None) : Manual definition
            of the layer that is the bottleneck.
        switch_autoencoder_model (list or None) : Successive training, when
            to switch the encoder weights.
        succ_autoencoder_epoch (int or None) : Successive training, The
            weights of which autoencoder epoch are being used right now.
    """
    #Setup network:
    #If epoch is 0, a new model is created. Otherwise, the
    #existing model of the given epoch is loaded unchanged.
    number_of_output_neurons = int(class_type[0])
    #Only relevant for stage 3: successive training:
    (last_encoder_layer_index_override, switch_autoencoder_model,
     succ_autoencoder_epoch) = None, None, None

    #Autoencoder self-supervised training. Epoch is the autoencoder epoch,
    #enc_epoch not relevant for this stage
    if autoencoder_stage==0:
        is_autoencoder=True
        modelname = modeltag + "_autoencoder"
        autoencoder_model = modelname
        print("\n\nAutoencoder stage 0")
        model = setup_autoencoder_model(modeltag, epoch, optimizer, ae_loss,
                                        options, custom_objects, model_folder,
                                        modelname)

    #Encoder+dense supervised training:
    #Load the encoder part of an autoencoder, import weights from
    #trained model, freeze it and add dense layers
    elif autoencoder_stage==1:
        print("\n\nAutoencoder stage 1")
        is_autoencoder=False
        #name of the autoencoder model file that the encoder part is taken from:
        autoencoder_model = model_folder + "trained_" + modeltag \
            + "_autoencoder_epoch" + str(epoch) + '.h5'
        #name of the supervised model:
        modelname = modeltag + "_autoencoder_epoch" + str(epoch) \
            + "_supervised_" + class_type[1] + encoder_version
        model = setup_encoder_dense_model(
            modeltag, encoder_epoch, modelname, autoencoder_stage,
            number_of_output_neurons, supervised_loss, supervised_metrics,
            optimizer, options, model_folder, custom_objects,
            autoencoder_model)

    #Unfrozen Encoder supervised training with completely unfrozen model.
    #No weights of autoencoders will be loaded in.
    elif autoencoder_stage==2:
        print("\n\nAutoencoder stage 2")
        is_autoencoder=False
        autoencoder_model=None
        #name of the supervised model:
        modelname = modeltag + "_supervised_" + class_type[1] + encoder_version
        model = setup_encoder_dense_model(
            modeltag, encoder_epoch, modelname, autoencoder_stage,
            number_of_output_neurons, supervised_loss, supervised_metrics,
            optimizer, options, model_folder, custom_objects,
            autoencoder_model)

    #Training of the supervised network on several different autoencoder epochs
    #epoch is calculated automatically and not used from user input
    #encoder epoch as usual
    elif autoencoder_stage==3:
        print("\n\nAutoencoder stage 3")
        is_autoencoder=False
        (switch_autoencoder_model, succ_autoencoder_epoch, make_stateful,
         last_encoder_layer_index_override) = setup_successive_training(
            modeltag, encoder_epoch)
        #name of the autoencoder model file that the encoder part is taken from:
        autoencoder_model = model_folder + "trained_" + modeltag \
            + "_autoencoder_epoch" + str(succ_autoencoder_epoch) + '.h5'
        #name of the supervised model:
        modelname = modeltag + "_autoencoder_supervised_parallel_" + class_type[1] + encoder_version
        #Setup encoder+dense and load encoder weights (like autoencoder_stage=1)
        model = setup_encoder_dense_model(
            modeltag, encoder_epoch, modelname, 1,
            number_of_output_neurons, supervised_loss, supervised_metrics,
            optimizer, options, model_folder, custom_objects,
            autoencoder_model)
        if make_stateful==True:
            model = make_encoder_stateful(model)

    #Initialize the model with the weights of a saved one
    if init_model_path is not None and init_model_path != "None":
        print("Initializing model weights to", init_model_path)
        init_model = load_model(init_model_path, custom_objects=custom_objects)
        # Copy weights layer by layer; assumes both models have identical
        # layer ordering -- TODO confirm for mismatched architectures.
        for i,layer in enumerate(model.layers):
            layer.set_weights(init_model.layers[i].get_weights())

    return (model, modelname, autoencoder_model, is_autoencoder,
            last_encoder_layer_index_override, switch_autoencoder_model,
            succ_autoencoder_epoch, )


def train_model(model, dataset, zero_center, modelname, autoencoder_model,
                lr_schedule_number, runs, learning_rate, model_folder,
                last_encoder_layer_index_override, switch_autoencoder_model,
                autoencoder_stage, succ_autoencoder_epoch, modeltag,
                unfreeze_layer_training, custom_objects, class_type, lr,
                lr_decay, verbose, is_AE_adevers_training, is_autoencoder,
                epoch, encoder_epoch):
    """ Train, test and save the model and logfiles. """
    #Get infos about the dataset
    dataset_info_dict = get_dataset_info(dataset)
    train_file=dataset_info_dict["train_file"]
    test_file=dataset_info_dict["test_file"]
    n_bins=dataset_info_dict["n_bins"]
    broken_simulations_mode=dataset_info_dict["broken_simulations_mode"] #def 0
    filesize_factor=dataset_info_dict["filesize_factor"]
    filesize_factor_test=dataset_info_dict["filesize_factor_test"]
    batchsize=dataset_info_dict["batchsize"] #def 32

    #The files to train and test on, together with the nummber of events in them
    train_tuple=[[train_file, int(h5_get_number_of_rows(train_file)*filesize_factor)]]
    test_tuple=[[test_file, int(h5_get_number_of_rows(test_file)*filesize_factor_test)]]

    #Zero-Center for the data. if zero center image does not exist, a new one
    #is calculated and saved
    if zero_center == True:
        xs_mean = load_zero_center_data(train_files=train_tuple,
                                        batchsize=batchsize, n_bins=n_bins,
                                        n_gpu=1)
        print("Using zero centering.")
    else:
        xs_mean = None
        print("Not using zero centering.")

    #Which epochs are the ones relevant for current stage
    if is_autoencoder==True:
        running_epoch=epoch          #Stage 0
    else:
        running_epoch=encoder_epoch  #Stage 1,2,3

    #Set LR of loaded model to new lr
    if lr_schedule_number != None:
        lr=lr_schedule(running_epoch+1, lr_schedule_number, learning_rate)
        K.set_value(model.optimizer.lr, lr)

    #Print info about the model and training
    model.summary()
    print("\n\nModel: ", modelname)
    print("Current State of optimizer: \n", model.optimizer.get_config())
    filesize_hint="Filesize factor="+str(filesize_factor) if filesize_factor!=1 else ""
    filesize_hint_test="Filesize factor test="+str(filesize_factor_test) if filesize_factor_test!=1 else ""
    print("Train files:", train_tuple, filesize_hint)
    print("Test files:", test_tuple, filesize_hint_test)
    print("Using metrics:", model.metrics_names)
    if autoencoder_model is not None:
        print("Using autoencoder model:", autoencoder_model)

    #Main loop: Execute Training
    for current_epoch in range(running_epoch,running_epoch+runs):
        #This is before epoch current_epoch+1
        print("\n")
        #Does the model we are about to save exist already?
        proposed_filename = model_folder + "trained_" + modelname + '_epoch' + str(current_epoch+1) + '.h5'
        if(os.path.isfile(proposed_filename)):
            # NOTE(review): NameError with two args renders as a tuple and
            # the concatenation lacks a space before "exists"; a
            # FileExistsError with a single formatted message would be the
            # conventional choice -- confirm no caller catches NameError.
            raise NameError("Warning:", proposed_filename+ "exists already!")
        if lr_schedule_number != None:
            lr=lr_schedule(current_epoch+1, lr_schedule_number, learning_rate )
            K.set_value(model.optimizer.lr, lr)

        #For autoencoder stage 3 (Successive training):
        #Load in weights of new encoders periodically
        #succ_autoencoder_epoch is the epoch of the autoencoder from which
        #the weights are loaded in
        if autoencoder_stage==3:
            if current_epoch in switch_autoencoder_model:
                succ_autoencoder_epoch+=1
                autoencoder_model = model_folder + "trained_" + modeltag \
                    + "_autoencoder_epoch" + str(succ_autoencoder_epoch) + '.h5'
                print("Changing weights before epoch ",current_epoch+1," to ",autoencoder_model)
                switch_encoder_weights(model,
                                       load_model(autoencoder_model, custom_objects=custom_objects),
                                       last_encoder_layer_index_override)

        #For autoencoder stage 4 (Layer unfreeze training):
        if unfreeze_layer_training==True:
            #Unfreeze C layers of the model according to schedule
            #An additional C block is set trainable before these epochs
            unfreeze_a_c_block_at = np.array([5,10,15,20,25,30,35,40,])
            how_many = np.where(unfreeze_a_c_block_at==current_epoch)[0]
            if len(how_many)>0:
                how_many=how_many[0]+1
                model = unfreeze_conv_layers(model, how_many)

        #Train network, write logfile, save network, evaluate network, save evaluation to file
        lr = train_and_test_model(model=model, modelname=modelname,
                                  train_files=train_tuple,
                                  test_files=test_tuple,
                                  batchsize=batchsize, n_bins=n_bins,
                                  class_type=class_type, xs_mean=xs_mean,
                                  epoch=current_epoch, shuffle=False, lr=lr,
                                  lr_decay=lr_decay, tb_logger=False,
                                  swap_4d_channels=None,
                                  save_path=model_folder,
                                  is_autoencoder=is_autoencoder,
                                  verbose=verbose,
                                  broken_simulations_mode=broken_simulations_mode,
                                  dataset_info_dict=dataset_info_dict,
                                  is_AE_adevers_training=is_AE_adevers_training)


def execute_network_training():
    """ Main function for training autoencoder-based networks. """
    params = unpack_parsed_args()
    # see submit.sh for a documentation of these variables
    modeltag = params["modeltag"]
    runs=params["runs"]
    autoencoder_stage=params["autoencoder_stage"]
    epoch=params["autoencoder_epoch"]
    encoder_epoch=params["encoder_epoch"]
    class_type = (params["class_type_bins"], params["class_type_name"])
    zero_center = params["zero_center"]
    verbose=params["verbose"]
    dataset = params["dataset"]
    learning_rate = params["learning_rate"]
    learning_rate_decay = params["learning_rate_decay"]
    epsilon = params["epsilon"]
    #lambda_comp = params["lambda_comp"] not supported anymore
    use_opti = params["optimizer"]
    options = params["options"]
    encoder_version = params["encoder_version"]
    ae_loss_name=params["ae_loss_name"]
    supervised_loss=params["supervised_loss"]
    init_model_path=params["init_model"]

    #Every model architecture is identified by its modeltag
    #Each will be given its own directory in the main_folder automatically
    #e.g. if the modeltag is "vgg_3", all saved models will
    #be stored in: main_folder+"/vgg_3/"
    main_folder = "/home/woody/capn/mppi013h/Km3-Autoencoder/models"

    #Create the folder for the model if it does not exist already
    model_folder = main_folder + "/" + modeltag + "/"
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
        print("Created model folder", model_folder)

    #For AE stages other than 0,1,2,3:
    (autoencoder_stage, ae_loss_name, supervised_loss, unfreeze_layer_training,
     is_AE_adevers_training) = extra_autoencoder_stages_setup(
        autoencoder_stage, ae_loss_name, supervised_loss)

    #Number of output neurons of the supervised networks (AE ignore this)
    number_of_output_neurons = int(class_type[0])
    if number_of_output_neurons<1:
        raise ValueError("number_of_output_neurons have to be >= 1")

    #define the loss function to use for a new AE
    #(saved autoencoders will continue to use their original one)
    ae_loss, custom_objects = get_autoencoder_loss(ae_loss_name)
    print("Using autoencoder loss:", ae_loss_name)

    #Define the loss function and additional metrics to use for a new
    #Encoder+dense network (saved nets will continue to use their original one)
    supervised_loss, supervised_metrics = get_supervised_loss_and_metric(
        supervised_loss, number_of_output_neurons)
    print("Using supervised loss:", supervised_loss)

    #automatically look for the epoch of the most recent saved model
    #of the current architecture if epoch=-1 was given:
    epoch, encoder_epoch = look_up_latest_epoch(autoencoder_stage, epoch,
                                                encoder_epoch, model_folder,
                                                modeltag, class_type,
                                                encoder_version)

    #Setup learning rate for the start of the training
    #learning_rate and learning_rate_decay is the input from the parser
    lr, lr_decay, lr_schedule_number = setup_learning_rate(learning_rate,
                                                           learning_rate_decay,
                                                           autoencoder_stage,
                                                           epoch, encoder_epoch)
    if lr_schedule_number!= None:
        print("Using learning rate schedule", lr_schedule_number)

    #Optimizer for training:
    optimizer = setup_optimizer(use_opti, lr, epsilon)

    #Construct, initialize and compile model:
    ( model, modelname, autoencoder_model, is_autoencoder,
      last_encoder_layer_index_override, switch_autoencoder_model,
      succ_autoencoder_epoch, ) = build_model(
        autoencoder_stage, modeltag, epoch, optimizer, ae_loss, options,
        custom_objects, model_folder, class_type, encoder_version,
        encoder_epoch, supervised_loss, supervised_metrics, init_model_path)

    #Train, test, save the model:
    train_model(model, dataset, zero_center, modelname, autoencoder_model,
                lr_schedule_number, runs, learning_rate, model_folder,
                last_encoder_layer_index_override, switch_autoencoder_model,
                autoencoder_stage, succ_autoencoder_epoch, modeltag,
                unfreeze_layer_training, custom_objects, class_type, lr,
                lr_decay, verbose, is_AE_adevers_training, is_autoencoder,
                epoch, encoder_epoch)


if __name__ == "__main__":
    execute_network_training()
unknown
codeparrot/codeparrot-clean
use crate::io::util::chain::{chain, Chain}; use crate::io::util::read::{read, Read}; use crate::io::util::read_buf::{read_buf, ReadBuf}; use crate::io::util::read_exact::{read_exact, ReadExact}; use crate::io::util::read_int::{ReadF32, ReadF32Le, ReadF64, ReadF64Le}; use crate::io::util::read_int::{ ReadI128, ReadI128Le, ReadI16, ReadI16Le, ReadI32, ReadI32Le, ReadI64, ReadI64Le, ReadI8, }; use crate::io::util::read_int::{ ReadU128, ReadU128Le, ReadU16, ReadU16Le, ReadU32, ReadU32Le, ReadU64, ReadU64Le, ReadU8, }; use crate::io::util::read_to_end::{read_to_end, ReadToEnd}; use crate::io::util::read_to_string::{read_to_string, ReadToString}; use crate::io::util::take::{take, Take}; use crate::io::AsyncRead; use bytes::BufMut; cfg_io_util! { /// Defines numeric reader macro_rules! read_impl { ( $( $(#[$outer:meta])* fn $name:ident(&mut self) -> $($fut:ident)*; )* ) => { $( $(#[$outer])* fn $name(&mut self) -> $($fut)*<&mut Self> where Self: Unpin { $($fut)*::new(self) } )* } } /// Reads bytes from a source. /// /// Implemented as an extension trait, adding utility methods to all /// [`AsyncRead`] types. Callers will tend to import this trait instead of /// [`AsyncRead`]. /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let mut buffer = [0; 10]; /// /// // The `read` method is defined by this trait. /// let n = f.read(&mut buffer[..]).await?; /// /// Ok(()) /// } /// # } /// ``` /// /// See [module][crate::io] documentation for more details. /// /// [`AsyncRead`]: AsyncRead pub trait AsyncReadExt: AsyncRead { /// Creates a new `AsyncRead` instance that chains this stream with /// `next`. /// /// The returned `AsyncRead` instance will first read all bytes from this object /// until EOF is encountered. Afterwards the output is equivalent to the /// output of `next`. 
/// /// # Examples /// /// [`File`][crate::fs::File]s implement `AsyncRead`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let f1 = File::open("foo.txt").await?; /// let f2 = File::open("bar.txt").await?; /// /// let mut handle = f1.chain(f2); /// let mut buffer = String::new(); /// /// // read the value into a String. We could use any AsyncRead /// // method here, this is just one example. /// handle.read_to_string(&mut buffer).await?; /// Ok(()) /// } /// # } /// ``` fn chain<R>(self, next: R) -> Chain<Self, R> where Self: Sized, R: AsyncRead, { chain(self, next) } /// Pulls some bytes from this source into the specified buffer, /// returning how many bytes were read. /// /// Equivalent to: /// /// ```ignore /// async fn read(&mut self, buf: &mut [u8]) -> io::Result<usize>; /// ``` /// /// This method does not provide any guarantees about whether it /// completes immediately or asynchronously. /// /// # Return /// /// If the return value of this method is `Ok(n)`, then it must be /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates /// that the buffer `buf` has been filled in with `n` bytes of data from /// this source. If `n` is `0`, then it can indicate one of two /// scenarios: /// /// 1. This reader has reached its "end of file" and will likely no longer /// be able to produce bytes. Note that this does not mean that the /// reader will *always* no longer be able to produce bytes. /// 2. The buffer specified was 0 bytes in length. /// /// No guarantees are provided about the contents of `buf` when this /// function is called, implementations cannot rely on any property of the /// contents of `buf` being true. It is recommended that *implementations* /// only write data to `buf` instead of reading its contents. 
/// /// Correspondingly, however, *callers* of this method may not assume /// any guarantees about how the implementation uses `buf`. It is /// possible that the code that's supposed to write to the buffer might /// also read from it. It is your responsibility to make sure that `buf` /// is initialized before calling `read`. /// /// # Errors /// /// If this function encounters any form of I/O or other error, an error /// variant will be returned. If an error is returned then it must be /// guaranteed that no bytes were read. /// /// # Cancel safety /// /// This method is cancel safe. If you use it as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that no data was read. /// /// # Examples /// /// [`File`][crate::fs::File]s implement `Read`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let mut buffer = [0; 10]; /// /// // read up to 10 bytes /// let n = f.read(&mut buffer[..]).await?; /// /// println!("The bytes: {:?}", &buffer[..n]); /// Ok(()) /// } /// # } /// ``` fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> Read<'a, Self> where Self: Unpin, { read(self, buf) } /// Pulls some bytes from this source into the specified buffer, /// advancing the buffer's internal cursor. /// /// Equivalent to: /// /// ```ignore /// async fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> io::Result<usize>; /// ``` /// /// Usually, only a single `read` syscall is issued, even if there is /// more space in the supplied buffer. /// /// This method does not provide any guarantees about whether it /// completes immediately or asynchronously. /// /// # Return /// /// A nonzero `n` value indicates that the buffer `buf` has been filled /// in with `n` bytes of data from this source. 
If `n` is `0`, then it /// can indicate one of two scenarios: /// /// 1. This reader has reached its "end of file" and will likely no longer /// be able to produce bytes. Note that this does not mean that the /// reader will *always* no longer be able to produce bytes. /// 2. The buffer specified had a remaining capacity of zero. /// /// # Errors /// /// If this function encounters any form of I/O or other error, an error /// variant will be returned. If an error is returned then it must be /// guaranteed that no bytes were read. /// /// # Cancel safety /// /// This method is cancel safe. If you use it as the event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, then it is guaranteed that no data was read. /// /// # Examples /// /// [`File`] implements `Read` and [`BytesMut`] implements [`BufMut`]: /// /// [`File`]: crate::fs::File /// [`BytesMut`]: bytes::BytesMut /// [`BufMut`]: bytes::BufMut /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// use bytes::BytesMut; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let mut buffer = BytesMut::with_capacity(10); /// /// assert!(buffer.is_empty()); /// assert!(buffer.capacity() >= 10); /// /// // note that the return value is not needed to access the data /// // that was read as `buffer`'s internal cursor is updated. /// // /// // this might read more than 10 bytes if the capacity of `buffer` /// // is larger than 10. /// f.read_buf(&mut buffer).await?; /// /// println!("The bytes: {:?}", &buffer[..]); /// Ok(()) /// } /// # } /// ``` fn read_buf<'a, B>(&'a mut self, buf: &'a mut B) -> ReadBuf<'a, Self, B> where Self: Unpin, B: BufMut + ?Sized, { read_buf(self, buf) } /// Reads the exact number of bytes required to fill `buf`. 
/// /// Equivalent to: /// /// ```ignore /// async fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<usize>; /// ``` /// /// This function reads as many bytes as necessary to completely fill /// the specified buffer `buf`. /// /// # Errors /// /// If the operation encounters an "end of file" before completely /// filling the buffer, it returns an error of the kind /// [`ErrorKind::UnexpectedEof`]. The contents of `buf` are unspecified /// in this case. /// /// If any other read error is encountered then the operation /// immediately returns. The contents of `buf` are unspecified in this /// case. /// /// If this operation returns an error, it is unspecified how many bytes /// it has read, but it will never read more than would be necessary to /// completely fill the buffer. /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may already have been /// read into `buf`. /// /// # Examples /// /// [`File`][crate::fs::File]s implement `Read`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::fs::File; /// use tokio::io::{self, AsyncReadExt}; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let len = 10; /// let mut buffer = vec![0; len]; /// /// // read exactly 10 bytes /// f.read_exact(&mut buffer).await?; /// Ok(()) /// } /// # } /// ``` /// /// [`ErrorKind::UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ReadExact<'a, Self> where Self: Unpin, { read_exact(self, buf) } read_impl! { /// Reads an unsigned 8 bit integer from the underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u8(&mut self) -> io::Result<u8>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. 
/// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is cancel safe. If this method is used as an event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, it is guaranteed that no data were read. /// /// # Examples /// /// Read unsigned 8 bit integers from an `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![2, 5]); /// /// assert_eq!(2, reader.read_u8().await?); /// assert_eq!(5, reader.read_u8().await?); /// /// Ok(()) /// # } /// ``` fn read_u8(&mut self) -> ReadU8; /// Reads a signed 8 bit integer from the underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i8(&mut self) -> io::Result<i8>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is cancel safe. If this method is used as an event in a /// [`tokio::select!`](crate::select) statement and some other branch /// completes first, it is guaranteed that no data were read. 
/// /// # Examples /// /// Read unsigned 8 bit integers from an `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x02, 0xfb]); /// /// assert_eq!(2, reader.read_i8().await?); /// assert_eq!(-5, reader.read_i8().await?); /// /// Ok(()) /// # } /// ``` fn read_i8(&mut self) -> ReadI8; /// Reads an unsigned 16-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u16(&mut self) -> io::Result<u16>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read unsigned 16 bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![2, 5, 3, 0]); /// /// assert_eq!(517, reader.read_u16().await?); /// assert_eq!(768, reader.read_u16().await?); /// Ok(()) /// # } /// ``` fn read_u16(&mut self) -> ReadU16; /// Reads a signed 16-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i16(&mut self) -> io::Result<i16>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
/// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 16 bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); /// /// assert_eq!(193, reader.read_i16().await?); /// assert_eq!(-132, reader.read_i16().await?); /// Ok(()) /// # } /// ``` fn read_i16(&mut self) -> ReadI16; /// Reads an unsigned 32-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u32(&mut self) -> io::Result<u32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read unsigned 32-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); /// /// assert_eq!(267, reader.read_u32().await?); /// Ok(()) /// # } /// ``` fn read_u32(&mut self) -> ReadU32; /// Reads a signed 32-bit integer in big-endian order from the /// underlying reader. 
/// /// /// Equivalent to: /// /// ```ignore /// async fn read_i32(&mut self) -> io::Result<i32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 32-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); /// /// assert_eq!(-34253, reader.read_i32().await?); /// Ok(()) /// # } /// ``` fn read_i32(&mut self) -> ReadI32; /// Reads an unsigned 64-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u64(&mut self) -> io::Result<u64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. 
/// /// # Examples /// /// Read unsigned 64-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 /// ]); /// /// assert_eq!(918733457491587, reader.read_u64().await?); /// Ok(()) /// # } /// ``` fn read_u64(&mut self) -> ReadU64; /// Reads an signed 64-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i64(&mut self) -> io::Result<i64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 64-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); /// /// assert_eq!(i64::MIN, reader.read_i64().await?); /// Ok(()) /// # } /// ``` fn read_i64(&mut self) -> ReadI64; /// Reads an unsigned 128-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u128(&mut self) -> io::Result<u128>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
/// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read unsigned 128-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 /// ]); /// /// assert_eq!(16947640962301618749969007319746179, reader.read_u128().await?); /// Ok(()) /// # } /// ``` fn read_u128(&mut self) -> ReadU128; /// Reads an signed 128-bit integer in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i128(&mut self) -> io::Result<i128>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. 
/// /// # Examples /// /// Read signed 128-bit big-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x80, 0, 0, 0, 0, 0, 0, 0, /// 0, 0, 0, 0, 0, 0, 0, 0 /// ]); /// /// assert_eq!(i128::MIN, reader.read_i128().await?); /// Ok(()) /// # } /// ``` fn read_i128(&mut self) -> ReadI128; /// Reads an 32-bit floating point type in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_f32(&mut self) -> io::Result<f32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read 32-bit floating point type from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0xff, 0x7f, 0xff, 0xff]); /// /// assert_eq!(f32::MIN, reader.read_f32().await?); /// Ok(()) /// # } /// ``` fn read_f32(&mut self) -> ReadF32; /// Reads an 64-bit floating point type in big-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_f64(&mut self) -> io::Result<f64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
/// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read 64-bit floating point type from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff /// ]); /// /// assert_eq!(f64::MIN, reader.read_f64().await?); /// Ok(()) /// # } /// ``` fn read_f64(&mut self) -> ReadF64; /// Reads an unsigned 16-bit integer in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u16_le(&mut self) -> io::Result<u16>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. 
/// /// # Examples /// /// Read unsigned 16 bit little-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![2, 5, 3, 0]); /// /// assert_eq!(1282, reader.read_u16_le().await?); /// assert_eq!(3, reader.read_u16_le().await?); /// Ok(()) /// # } /// ``` fn read_u16_le(&mut self) -> ReadU16Le; /// Reads a signed 16-bit integer in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i16_le(&mut self) -> io::Result<i16>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 16 bit little-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); /// /// assert_eq!(-16128, reader.read_i16_le().await?); /// assert_eq!(31999, reader.read_i16_le().await?); /// Ok(()) /// # } /// ``` fn read_i16_le(&mut self) -> ReadI16Le; /// Reads an unsigned 32-bit integer in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u32_le(&mut self) -> io::Result<u32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. 
/// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read unsigned 32-bit little-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); /// /// assert_eq!(184614912, reader.read_u32_le().await?); /// Ok(()) /// # } /// ``` fn read_u32_le(&mut self) -> ReadU32Le; /// Reads a signed 32-bit integer in little-endian order from the /// underlying reader. /// /// /// Equivalent to: /// /// ```ignore /// async fn read_i32_le(&mut self) -> io::Result<i32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. 
/// /// # Examples /// /// Read signed 32-bit little-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); /// /// assert_eq!(863698943, reader.read_i32_le().await?); /// Ok(()) /// # } /// ``` fn read_i32_le(&mut self) -> ReadI32Le; /// Reads an unsigned 64-bit integer in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u64_le(&mut self) -> io::Result<u64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read unsigned 64-bit little-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 /// ]); /// /// assert_eq!(9477368352180732672, reader.read_u64_le().await?); /// Ok(()) /// # } /// ``` fn read_u64_le(&mut self) -> ReadU64Le; /// Reads an signed 64-bit integer in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i64_le(&mut self) -> io::Result<i64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. 
/// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 64-bit little-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); /// /// assert_eq!(128, reader.read_i64_le().await?); /// Ok(()) /// # } /// ``` fn read_i64_le(&mut self) -> ReadI64Le; /// Reads an unsigned 128-bit integer in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_u128_le(&mut self) -> io::Result<u128>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. 
/// /// # Examples /// /// Read unsigned 128-bit little-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 /// ]); /// /// assert_eq!(174826588484952389081207917399662330624, reader.read_u128_le().await?); /// Ok(()) /// # } /// ``` fn read_u128_le(&mut self) -> ReadU128Le; /// Reads an signed 128-bit integer in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_i128_le(&mut self) -> io::Result<i128>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read signed 128-bit little-endian integers from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0x80, 0, 0, 0, 0, 0, 0, 0, /// 0, 0, 0, 0, 0, 0, 0, 0 /// ]); /// /// assert_eq!(128, reader.read_i128_le().await?); /// Ok(()) /// # } /// ``` fn read_i128_le(&mut self) -> ReadI128Le; /// Reads an 32-bit floating point type in little-endian order from the /// underlying reader. 
/// /// Equivalent to: /// /// ```ignore /// async fn read_f32_le(&mut self) -> io::Result<f32>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. /// /// # Examples /// /// Read 32-bit floating point type from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7f, 0xff]); /// /// assert_eq!(f32::MIN, reader.read_f32_le().await?); /// Ok(()) /// # } /// ``` fn read_f32_le(&mut self) -> ReadF32Le; /// Reads an 64-bit floating point type in little-endian order from the /// underlying reader. /// /// Equivalent to: /// /// ```ignore /// async fn read_f64_le(&mut self) -> io::Result<f64>; /// ``` /// /// It is recommended to use a buffered reader to avoid excessive /// syscalls. /// /// # Errors /// /// This method returns the same errors as [`AsyncReadExt::read_exact`]. /// /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact /// /// # Cancel safety /// /// This method is not cancellation safe. If the method is used as the /// event in a [`tokio::select!`](crate::select) statement and some /// other branch completes first, then some data may be lost. 
/// /// # Examples /// /// Read 64-bit floating point type from a `AsyncRead`: /// /// ```rust /// use tokio::io::{self, AsyncReadExt}; /// /// use std::io::Cursor; /// /// # #[tokio::main(flavor = "current_thread")] /// # async fn main() -> io::Result<()> { /// let mut reader = Cursor::new(vec![ /// 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xff /// ]); /// /// assert_eq!(f64::MIN, reader.read_f64_le().await?); /// Ok(()) /// # } /// ``` fn read_f64_le(&mut self) -> ReadF64Le; } /// Reads all bytes until EOF in this source, placing them into `buf`. /// /// Equivalent to: /// /// ```ignore /// async fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize>; /// ``` /// /// All bytes read from this source will be appended to the specified /// buffer `buf`. This function will continuously call [`read()`] to /// append more data to `buf` until [`read()`] returns `Ok(0)`. /// /// If successful, the total number of bytes read is returned. /// /// [`read()`]: AsyncReadExt::read /// /// # Errors /// /// If a read error is encountered then the `read_to_end` operation /// immediately completes. Any bytes which have already been read will /// be appended to `buf`. /// /// # Examples /// /// [`File`][crate::fs::File]s implement `Read`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncReadExt}; /// use tokio::fs::File; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let mut buffer = Vec::new(); /// /// // read the whole file /// f.read_to_end(&mut buffer).await?; /// Ok(()) /// } /// # } /// ``` /// /// (See also the [`tokio::fs::read`] convenience function for reading from a /// file.) /// /// [`tokio::fs::read`]: fn@crate::fs::read fn read_to_end<'a>(&'a mut self, buf: &'a mut Vec<u8>) -> ReadToEnd<'a, Self> where Self: Unpin, { read_to_end(self, buf) } /// Reads all bytes until EOF in this source, appending them to `buf`. 
/// /// Equivalent to: /// /// ```ignore /// async fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize>; /// ``` /// /// If successful, the number of bytes which were read and appended to /// `buf` is returned. /// /// # Errors /// /// If the data in this stream is *not* valid UTF-8 then an error is /// returned and `buf` is unchanged. /// /// See [`read_to_end`][AsyncReadExt::read_to_end] for other error semantics. /// /// # Examples /// /// [`File`][crate::fs::File]s implement `Read`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncReadExt}; /// use tokio::fs::File; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let mut f = File::open("foo.txt").await?; /// let mut buffer = String::new(); /// /// f.read_to_string(&mut buffer).await?; /// Ok(()) /// } /// # } /// ``` /// /// (See also the [`crate::fs::read_to_string`] convenience function for /// reading from a file.) /// /// [`crate::fs::read_to_string`]: fn@crate::fs::read_to_string fn read_to_string<'a>(&'a mut self, dst: &'a mut String) -> ReadToString<'a, Self> where Self: Unpin, { read_to_string(self, dst) } /// Creates an adaptor which reads at most `limit` bytes from it. /// /// This function returns a new instance of `AsyncRead` which will read /// at most `limit` bytes, after which it will always return EOF /// (`Ok(0)`). Any read errors will not count towards the number of /// bytes read and future calls to [`read()`] may succeed. 
/// /// [`read()`]: fn@crate::io::AsyncReadExt::read /// /// [read]: AsyncReadExt::read /// /// # Examples /// /// [`File`][crate::fs::File]s implement `Read`: /// /// ```no_run /// # #[cfg(not(target_family = "wasm"))] /// # { /// use tokio::io::{self, AsyncReadExt}; /// use tokio::fs::File; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let f = File::open("foo.txt").await?; /// let mut buffer = [0; 5]; /// /// // read at most five bytes /// let mut handle = f.take(5); /// /// handle.read(&mut buffer).await?; /// Ok(()) /// } /// # } /// ``` fn take(self, limit: u64) -> Take<Self> where Self: Sized, { take(self, limit) } } } impl<R: AsyncRead + ?Sized> AsyncReadExt for R {}
rust
github
https://github.com/tokio-rs/tokio
tokio/src/io/util/async_read_ext.rs
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

from __future__ import print_function

import logging
import textwrap

import portage
from portage import os
from portage.emaint.modules.logs.logs import CleanLogs
from portage.news import count_unread_news, display_news_notifications
from portage.output import colorize
from portage.util._dyn_libs.display_preserved_libs import \
	display_preserved_libs
from portage.util._info_files import chk_updated_info_files

from .chk_updated_cfg_files import chk_updated_cfg_files
from .emergelog import emergelog
from ._flush_elog_mod_echo import _flush_elog_mod_echo

def clean_logs(settings):
	"""
	Run the emaint 'logs' module to clean old emerge logs, but only when
	the 'clean-logs' FEATURES flag is enabled and PORT_LOGDIR points at
	an existing directory.

	@param settings: portage configuration
	"""
	if "clean-logs" not in settings.features:
		return

	logdir = settings.get("PORT_LOGDIR")
	if logdir is None or not os.path.isdir(logdir):
		return

	cleanlogs = CleanLogs()
	returncode, msgs = cleanlogs.clean(settings=settings)
	if not returncode:
		# NOTE(review): a falsy returncode is treated as failure and the
		# returned messages are printed as errors -- confirm this polarity
		# against CleanLogs.clean().
		out = portage.output.EOutput()
		for msg in msgs:
			out.eerror(msg)

def display_news_notification(root_config, myopts):
	"""
	Display a notification when there are unread GLEP 42 news items,
	provided the 'news' FEATURES flag is enabled.

	@param root_config: RootConfig instance for the relevant root
	@param myopts: emerge options
	@type myopts: dict
	@rtype: bool
	@return: True if a notification was displayed, False otherwise
	"""
	if "news" not in root_config.settings.features:
		return False
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	news_counts = count_unread_news(portdb, vardb)
	if all(v == 0 for v in news_counts.values()):
		return False
	display_news_notifications(news_counts)
	return True

def show_depclean_suggestion():
	"""
	Print a word-wrapped warning suggesting `emerge --depclean` after
	world updates.
	"""
	out = portage.output.EOutput()
	msg = "After world updates, it is important to remove " + \
		"obsolete packages with emerge --depclean. Refer " + \
		"to `man emerge` for more information."
	for line in textwrap.wrap(msg, 72):
		out.ewarn(line)

def post_emerge(myaction, myopts, myfiles,
	target_root, trees, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Info Files
	Update Config Files
	Update News Items
	Commit mtimeDB
	Display preserved libs warnings

	@param myaction: The action returned from parse_opts()
	@type myaction: String
	@param myopts: emerge options
	@type myopts: dict
	@param myfiles: emerge arguments
	@type myfiles: list
	@param target_root: The target EROOT for myaction
	@type target_root: String
	@param trees: A dictionary mapping each ROOT to its package databases
	@type trees: dict
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	@type retval: Int
	"""

	root_config = trees[target_root]["root_config"]
	vardbapi = trees[target_root]['vartree'].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.unlock()
	settings.reload()
	settings.regenerate()
	settings.lock()

	config_protect = portage.util.shlex_split(
		settings.get("CONFIG_PROTECT", ""))
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	os.chdir("/")

	# Log the session's exit status; the "notitles" FEATURES flag is
	# passed through to emergelog as its first argument.
	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
	else:
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	if not vardbapi._pkgs_changed:
		# GLEP 42 says to display news *after* an emerge --pretend
		if "--pretend" in myopts:
			display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.
		return

	vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	vdb_lock = None
	# Only take the vdb lock when it is writable and this is not a
	# pretend run; info-file updates and the mtimedb commit happen
	# under that lock.
	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
		vardbapi.lock()
		vdb_lock = True

	if vdb_lock:
		try:
			if "noinfo" not in settings.features:
				chk_updated_info_files(target_root,
					infodirs, info_mtimes)
			mtimedb.commit()
		finally:
			if vdb_lock:
				vardbapi.unlock()

	# Explicitly load and prune the PreservedLibsRegistry in order
	# to ensure that we do not display stale data.
	vardbapi._plib_registry.load()

	if vardbapi._plib_registry.hasEntries():
		if "--quiet" in myopts:
			print()
			print(colorize("WARN", "!!!") + " existing preserved libs found")
		else:
			print()
			print(colorize("WARN", "!!!") + " existing preserved libs:")
			display_preserved_libs(vardbapi)
			print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
				" to rebuild packages using these libraries")

	chk_updated_cfg_files(settings['EROOT'], config_protect)

	display_news_notification(root_config, myopts)

	# Run the user's post_emerge hook script, if present and executable.
	postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
		portage.USER_CONFIG_PATH, "bin", "post_emerge")
	if os.access(postemerge, os.X_OK):
		hook_retval = portage.process.spawn(
			[postemerge], env=settings.environ())
		if hook_retval != os.EX_OK:
			portage.util.writemsg_level(
				" %s spawn failed of %s\n" % (colorize("BAD", "*"),
				postemerge,),
				level=logging.ERROR, noiselevel=-1)

	clean_logs(settings)

	# Suggest --depclean only after a plain `emerge @world` run.
	if "--quiet" not in myopts and \
		myaction is None and "@world" in myfiles:
		show_depclean_suggestion()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # # Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # This script concatenates in place JS files in the order specified # using <script> tags in a given 'order.html' file. 
from HTMLParser import HTMLParser
from cStringIO import StringIO
import jsmin
import os.path
import sys


class OrderedJSFilesExtractor(HTMLParser):
    """Parses an 'order.html' file and records, in document order, the
    src attribute of every <script type="text/javascript" src="..."> tag."""

    def __init__(self, order_html_name):
        HTMLParser.__init__(self)
        self.ordered_js_files = []
        # Use a context manager so the order file is closed promptly;
        # the original leaked the handle until interpreter exit.
        with open(order_html_name, 'r') as order_html:
            self.feed(order_html.read())

    def handle_starttag(self, tag, attrs):
        if tag == 'script':
            attrs_dict = dict(attrs)
            if ('type' in attrs_dict and
                    attrs_dict['type'] == 'text/javascript' and
                    'src' in attrs_dict):
                self.ordered_js_files.append(attrs_dict['src'])


class PathExpander:
    """Resolves a relative file name against a list of source directories,
    raising if the name is found in more than one of them."""

    def __init__(self, paths):
        self.paths = paths

    def expand(self, filename):
        """Return the unique path under self.paths containing filename,
        or None if it is not found in any of them.

        Raises Exception when filename exists in two or more paths."""
        last_path = None
        expanded_name = None
        for path in self.paths:
            fname = "%s/%s" % (path, filename)
            if os.access(fname, os.F_OK):
                if last_path is not None:
                    raise Exception('Ambiguous file %s: found in %s and %s' %
                                    (filename, last_path, path))
                expanded_name = fname
                last_path = path
        return expanded_name


def main(argv):
    """Concatenate the JS files listed in order.html (plus DevTools.js and
    Tests.js), minify the result with jsmin, and write it to the output
    file given as the last argument.  Returns 1 on bad usage."""
    if len(argv) < 3:
        print('usage: %s order.html input_source_dir_1 input_source_dir_2 ... '
              'output_file' % argv[0])
        return 1

    output_file_name = argv.pop()

    input_order_file_name = argv[1]
    extractor = OrderedJSFilesExtractor(input_order_file_name)
    extractor.ordered_js_files.append('DevTools.js')
    extractor.ordered_js_files.append('Tests.js')
    expander = PathExpander(argv[2:])
    output = StringIO()

    for input_file_name in extractor.ordered_js_files:
        full_path = expander.expand(input_file_name)
        if full_path is None:
            raise Exception('File %s referenced in %s not found on any source paths, '
                            'check source tree for consistency' %
                            (input_file_name, input_order_file_name))
        output.write('/* %s */\n\n' % input_file_name)
        # Context managers guarantee the handle is closed even if a
        # read or write fails part way through.
        with open(full_path, 'r') as input_file:
            output.write(input_file.read())
        output.write('\n')

    with open(output_file_name, 'w') as output_file:
        output_file.write(jsmin.jsmin(output.getvalue()))
    output.close()

    # Touch output file directory to make sure that Xcode will copy
    # modified resource files.
    if sys.platform == 'darwin':
        output_dir_name = os.path.dirname(output_file_name)
        os.utime(output_dir_name, None)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
unknown
codeparrot/codeparrot-clean
import pytest from cfme import test_requirements from cfme.cloud.provider import CloudProvider from cfme.common.provider import BaseProvider from cfme.infrastructure.provider.rhevm import RHEVMProvider from cfme.infrastructure.provider.scvmm import SCVMMProvider from cfme.markers.env_markers.provider import providers from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.generators import random_vm_name from cfme.utils.log import logger from cfme.utils.providers import ProviderFilter pytestmark = [ pytest.mark.usefixtures('uses_infra_providers', 'uses_cloud_providers', 'provider'), pytest.mark.tier(2), pytest.mark.provider( gen_func=providers, filters=[ProviderFilter(classes=[BaseProvider]), ProviderFilter(classes=[SCVMMProvider, RHEVMProvider], inverted=True)], scope='module'), ] @pytest.fixture(scope="function") def vm_crud(provider, small_template): collection = provider.appliance.provider_based_collection(provider) return collection.instantiate(random_vm_name(context='genealogy'), provider, template_name=small_template.name) # uncollected above in pytest_generate_tests @pytest.mark.meta(blockers=["GH#ManageIQ/manageiq:473"]) @pytest.mark.parametrize("from_edit", [True, False], ids=["via_edit", "via_summary"]) @test_requirements.genealogy @pytest.mark.uncollectif(lambda provider, from_edit: provider.one_of(CloudProvider) and not from_edit, reason='Cloud provider genealogy only shown on edit') def test_vm_genealogy_detected( request, setup_provider, provider, small_template, soft_assert, from_edit, vm_crud): """Tests vm genealogy from what CFME can detect. Prerequisities: * A provider that is set up and having suitable templates for provisioning. Steps: * Provision the VM * Then, depending on whether you want to check it via ``Genealogy`` or edit page: * Open the edit page of the VM and you can see the parent template in the dropdown. Assert that it corresponds with the template the VM was deployed from. 
* Open VM Genealogy via details page and see the the template being an ancestor of the VM. Note: The cloud providers appear to not have Genealogy option available in the details view. So the only possibility available is to do the check via edit form. Metadata: test_flag: genealogy, provision Polarion: assignee: spusater casecomponent: Infra caseimportance: medium initialEstimate: 1/4h """ vm_crud.create_on_provider(find_in_cfme=True, allow_skip="default") request.addfinalizer(lambda: vm_crud.cleanup_on_provider()) vm_crud.mgmt.wait_for_steady_state() if from_edit: vm_crud.open_edit() view = navigate_to(vm_crud, 'Edit') opt = view.form.parent_vm.all_selected_options[0] parent = opt.strip() assert parent.startswith(small_template.name), "The parent template not detected!" else: try: vm_crud_ancestors = vm_crud.genealogy.ancestors except NameError: logger.exception("The parent template not detected!") raise pytest.fail("The parent template not detected!") assert small_template.name in vm_crud_ancestors, \ "{} is not in {}'s ancestors".format(small_template.name, vm_crud.name) @pytest.mark.manual @pytest.mark.tier(1) @test_requirements.genealogy def test_compare_button_enabled(): """ Test that compare button is enabled Polarion: assignee: spusater casecomponent: Infra caseimportance: medium initialEstimate: 1/6h startsin: 5.10.4 setup: 1. Have a provider with some VMs added testSteps: 1. Set the parent-child relationship for at least two VMs 2. Open one of the VM's genealogy screen from its summary 3. Check at least two checkboxes in the genealogy tree expectedResults: 1. Genealogy set 2. Genealogy screen displayed 3. Compare button enabled Bugzilla: 1694712 """ pass @pytest.mark.manual @test_requirements.genealogy @pytest.mark.tier(2) def test_cloud_infra_genealogy(): """ Edit infra vm and cloud instance When editing cloud instance, genealogy should be present on the edit page. 
When you have two providers - one infra and one cloud - added, there should be no cloud vms displayed when setting genealogy for infra vm and vice-versa. Polarion: assignee: spusater casecomponent: Infra caseimportance: medium initialEstimate: 1/6h setup: Have a cloud instance and an infra vm testSteps: 1. Navigate to instance/vm details, choose Genealogy 2. Verify that for cloud instance no infra vms are displayed 3. Verify that for infra vm no cloud instances are displayed expectedResults: 1. Genealogy displayed 2. No infra vms displayed 3. No cloud instances displayed Bugzilla: 1399141 1399144 """ pass
unknown
codeparrot/codeparrot-clean
#include <c10/core/Scalar.h> #include <c10/core/ScalarType.h> #include <c10/util/Exception.h> #include <c10/util/SmallVector.h> #include <c10/util/typeid.h> #include <cstdint> #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/BlasBackend.h> #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/OpMathType.h> #include <ATen/TensorUtils.h> #include <ATen/core/NamedTensor.h> #include <ATen/core/Tensor.h> #include <ATen/native/GroupedMMUtils.h> #include <ATen/native/Resize.h> #include <c10/util/MaybeOwned.h> #include <ATen/ceil_div.h> #include <ATen/xpu/XPUScaledBlas.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_addmm_activation_native.h> #include <ATen/ops/_efficientzerotensor.h> #include <ATen/ops/_scaled_mm_native.h> #include <ATen/ops/_unsafe_view_native.h> #include <ATen/ops/abs.h> #include <ATen/ops/addmm_native.h> #include <ATen/ops/addmv_native.h> #include <ATen/ops/baddbmm_native.h> #include <ATen/ops/bmm_native.h> #include <ATen/ops/copy_native.h> #include <ATen/ops/dot_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/empty_strided.h> #include <ATen/ops/gelu.h> #include <ATen/ops/max.h> #include <ATen/ops/mm_native.h> #include <ATen/ops/mul.h> #include <ATen/ops/ones.h> #include <ATen/ops/relu.h> #include <ATen/ops/scalar_tensor_native.h> #include <ATen/ops/vdot_native.h> #endif using at::blas::ScalingType; namespace at::native::onednn::scaled { /** * Both inputs must be fp8, * Each needs a single scale, {Tensorwise (float)} */ bool check_tensorwise_recipe( c10::ScalarType type_a, std::vector<ScalingType>& recipe_a, ArrayRef<Tensor>& scales_a, c10::ScalarType type_b, std::vector<ScalingType>& recipe_b, ArrayRef<Tensor>& scales_b) { // both types must be fp8 if (!isFloat8Type(type_a) || !isFloat8Type(type_b)) { return false; } // 1 scale each, {Tensorwise, float} if (scales_a.size() != 1 || recipe_a.size() != 1 || scales_b.size() != 1 || 
recipe_b.size() != 1) { return false; } // Need {Blockwise_1x32, e8m0} for A & B if (recipe_a[0] != ScalingType::TensorWise) return false; if (scales_a[0].scalar_type() != ScalarType::Float) return false; if (recipe_b[0] != ScalingType::TensorWise) return false; if (scales_b[0].scalar_type() != ScalarType::Float) return false; return true; } /** * Both inputs must be fp8, * Each needs scales, {Rowwise (float)} */ bool check_rowwise_recipe( c10::ScalarType type_a, std::vector<ScalingType>& recipe_a, ArrayRef<Tensor>& scales_a, c10::ScalarType type_b, std::vector<ScalingType>& recipe_b, ArrayRef<Tensor>& scales_b) { // both types must be fp8 if (!isFloat8Type(type_a) || !isFloat8Type(type_b)) { return false; } // 1 scale each, {Tensorwise, float} if (scales_a.size() != 1 || recipe_a.size() != 1 || scales_b.size() != 1 || recipe_b.size() != 1) { return false; } // Need {RowWise, dp32} for A & B if (recipe_a[0] != ScalingType::RowWise) return false; if (scales_a[0].scalar_type() != ScalarType::Float) return false; if (recipe_b[0] != ScalingType::RowWise) return false; if (scales_b[0].scalar_type() != ScalarType::Float) return false; return true; } } // namespace at::native::onednn::scaled
cpp
github
https://github.com/pytorch/pytorch
aten/src/ATen/xpu/XPUScaledBlas.cpp
""" Copyright (c) 2013, SMART Technologies ULC All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Copyright holder (SMART Technologies ULC) nor the names of its contributors (Joshua Henn) may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER (SMART Technologies ULC) "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from region.transform import Transform, RegionRight transforms = { Transform.CONTEXT_PREVIOUS: [], \ Transform.CONTEXT_CURRENT: [], \ Transform.CONTEXT_NEXT: [ \ RegionRight(), # The next search region should be to the right of the current match ], \ Transform.CONTEXT_MATCH: [], \ Transform.CONTEXT_FINAL: [], \ Transform.CONTEXT_ENTITY: [] }
unknown
codeparrot/codeparrot-clean
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package os import ( "internal/poll" "io" "syscall" ) var ( pollCopyFileRange = poll.CopyFileRange pollSplice = poll.Splice ) func (f *File) writeTo(w io.Writer) (written int64, handled bool, err error) { pfd, network := getPollFDAndNetwork(w) // TODO(panjf2000): same as File.spliceToFile. if pfd == nil || !pfd.IsStream || !isUnixOrTCP(string(network)) { return } sc, err := f.SyscallConn() if err != nil { return } rerr := sc.Read(func(fd uintptr) (done bool) { written, err, handled = poll.SendFile(pfd, fd, 0) return true }) if err == nil { err = rerr } return written, handled, wrapSyscallError("sendfile", err) } func (f *File) readFrom(r io.Reader) (written int64, handled bool, err error) { // Neither copy_file_range(2) nor splice(2) supports destinations opened with // O_APPEND, so don't bother to try zero-copy with these system calls. // // Visit https://man7.org/linux/man-pages/man2/copy_file_range.2.html#ERRORS and // https://man7.org/linux/man-pages/man2/splice.2.html#ERRORS for details. if f.appendMode { return 0, false, nil } written, handled, err = f.copyFileRange(r) if handled { return } return f.spliceToFile(r) } func (f *File) spliceToFile(r io.Reader) (written int64, handled bool, err error) { var ( remain int64 lr *io.LimitedReader ) if lr, r, remain = tryLimitedReader(r); remain <= 0 { return 0, true, nil } pfd, _ := getPollFDAndNetwork(r) // TODO(panjf2000): run some tests to see if we should unlock the non-streams for splice. // Streams benefit the most from the splice(2), non-streams are not even supported in old kernels // where splice(2) will just return EINVAL; newer kernels support non-streams like UDP, but I really // doubt that splice(2) could help non-streams, cuz they usually send small frames respectively // and one splice call would result in one frame. 
// splice(2) is suitable for large data but the generation of fragments defeats its edge here. // Therefore, don't bother to try splice if the r is not a streaming descriptor. if pfd == nil || !pfd.IsStream { return } written, handled, err = pollSplice(&f.pfd, pfd, remain) if lr != nil { lr.N = remain - written } return written, handled, wrapSyscallError("splice", err) } func (f *File) copyFileRange(r io.Reader) (written int64, handled bool, err error) { var ( remain int64 lr *io.LimitedReader ) if lr, r, remain = tryLimitedReader(r); remain <= 0 { return 0, true, nil } var src *File switch v := r.(type) { case *File: src = v case fileWithoutWriteTo: src = v.File default: return 0, false, nil } if src.checkValid("ReadFrom") != nil { // Avoid returning the error as we report handled as false, // leave further error handling as the responsibility of the caller. return 0, false, nil } written, handled, err = pollCopyFileRange(&f.pfd, &src.pfd, remain) if lr != nil { lr.N -= written } return written, handled, wrapSyscallError("copy_file_range", err) } // getPollFDAndNetwork tries to get the poll.FD and network type from the given interface // by expecting the underlying type of i to be the implementation of syscall.Conn // that contains a *net.rawConn. func getPollFDAndNetwork(i any) (*poll.FD, poll.String) { sc, ok := i.(syscall.Conn) if !ok { return nil, "" } rc, err := sc.SyscallConn() if err != nil { return nil, "" } irc, ok := rc.(interface { PollFD() *poll.FD Network() poll.String }) if !ok { return nil, "" } return irc.PollFD(), irc.Network() } func isUnixOrTCP(network string) bool { switch network { case "tcp", "tcp4", "tcp6", "unix": return true default: return false } }
go
github
https://github.com/golang/go
src/os/zero_copy_linux.go
#![deny(unused_qualifications)] use tests_build::tokio; pub use tokio::runtime; #[tokio::main] async fn main() { if true {} }
rust
github
https://github.com/tokio-rs/tokio
tests-build/tests/pass/use_builder_outer.rs
# # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # (c) 2017 Red Hat Inc. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os import socket import struct import traceback import uuid from functools import partial from ansible.module_utils._text import to_bytes, to_native, to_text def send_data(s, data): packed_len = struct.pack('!Q', len(data)) return s.sendall(packed_len + data) def recv_data(s): header_len = 8 # size of a packed unsigned long long data = to_bytes("") while len(data) < header_len: d = s.recv(header_len - len(data)) if not d: return None data += d data_len = struct.unpack('!Q', data[:header_len])[0] data = data[header_len:] while len(data) < data_len: d = s.recv(data_len - len(data)) if not d: return None data += d return data def exec_command(module, command): try: sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sf.connect(module._socket_path) data = "EXEC: %s" % command send_data(sf, to_bytes(data.strip())) rc = int(recv_data(sf), 10) stdout = recv_data(sf) stderr = recv_data(sf) except socket.error as e: sf.close() module.fail_json(msg='unable to connect to socket', err=to_native(e), exception=traceback.format_exc()) sf.close() return rc, to_native(stdout, errors='surrogate_or_strict'), to_native(stderr, errors='surrogate_or_strict') def request_builder(method, *args, **kwargs): reqid = str(uuid.uuid4()) req = {'jsonrpc': '2.0', 'method': method, 'id': reqid} params = list(args) or kwargs or None if params: req['params'] = params return req class Connection: def __init__(self, module): self._module = module def __getattr__(self, name): try: return self.__dict__[name] except KeyError: if name.startswith('_'): raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) return partial(self.__rpc__, name) def __rpc__(self, name, *args, **kwargs): """Executes the json-rpc and returns the output received from remote device. 
:name: rpc method to be executed over connection plugin that implements jsonrpc 2.0 :args: Ordered list of params passed as arguments to rpc method :kwargs: Dict of valid key, value pairs passed as arguments to rpc method For usage refer the respective connection plugin docs. """ req = request_builder(name, *args, **kwargs) reqid = req['id'] if not self._module._socket_path: self._module.fail_json(msg='provider support not available for this host') if not os.path.exists(self._module._socket_path): self._module.fail_json(msg='provider socket does not exist, is the provider running?') try: data = self._module.jsonify(req) rc, out, err = exec_command(self._module, data) except socket.error as e: self._module.fail_json(msg='unable to connect to socket', err=to_native(e), exception=traceback.format_exc()) try: response = self._module.from_json(to_text(out, errors='surrogate_then_replace')) except ValueError as exc: self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace')) if response['id'] != reqid: self._module.fail_json(msg='invalid id received') if 'error' in response: msg = response['error'].get('data') or response['error']['message'] self._module.fail_json(msg=to_text(msg, errors='surrogate_then_replace')) return response['result']
unknown
codeparrot/codeparrot-clean
import WebIDL def WebIDLTest(parser, harness): parser.parse(""" callback Function = any(any... arguments); interface TestTreatNonCallableAsNull1 { [TreatNonCallableAsNull] attribute Function? onfoo; attribute Function? onbar; }; """) results = parser.finish() iface = results[1] attr = iface.members[0] harness.check(attr.type.treatNonCallableAsNull(), True, "Got the expected value") attr = iface.members[1] harness.check(attr.type.treatNonCallableAsNull(), False, "Got the expected value") parser = parser.reset() threw = False try: parser.parse(""" callback Function = any(any... arguments); interface TestTreatNonCallableAsNull2 { [TreatNonCallableAsNull] attribute Function onfoo; }; """) results = parser.finish() except: threw = True harness.ok(threw, "Should have thrown.") parser = parser.reset() threw = False try: parser.parse(""" callback Function = any(any... arguments); [TreatNonCallableAsNull] interface TestTreatNonCallableAsNull3 { attribute Function onfoo; }; """) results = parser.finish() except: threw = True harness.ok(threw, "Should have thrown.")
unknown
codeparrot/codeparrot-clean
import type { NextConfig } from "next"; const nextConfig: NextConfig = { cacheComponents: true, }; export default nextConfig;
typescript
github
https://github.com/vercel/next.js
examples/with-supabase/next.config.ts
# Updating OpenAPI spec Change the desired types, then run these commands, with the linked Enterprise repo: ``` go test --tags "pro" -timeout 30s -run ^TestIntegrationOpenAPIs$ github.com/grafana/grafana/pkg/extensions/apiserver/tests -count=1 ``` ``` ./hack/update-codegen.sh scope ``` This should generate a diff in the Enterprise repo. Make sure to open a PR there too.
unknown
github
https://github.com/grafana/grafana
apps/scope/README.md
#!/usr/bin/python import time, webbrowser from operator import add from SimpleCV import Color, ColorCurve, Kinect, Image, pg, np from SimpleCV.Display import Display d = Display(flags = pg.FULLSCREEN) #create video streams cam = Kinect() #initialize the camera compositeframe = Image((640, 480)) #populate the compositeframe offtime = 5.0 laststroke = time.time() while not d.isDone(): img = cam.getImage() imgscene = img.copy() depth = cam.getDepth() mindepth = np.min(depth.getNumpy()) if mindepth < 180: depthbin = depth.binarize(np.min(depth.getNumpy()) + np.std(depth.getNumpy()) / 4).erode(3) #take the front 1/4 stdev of the depth map img = img.crop(0,25, 605, 455).scale(640,480) #img.dl().blit(img.crop(100, 25, 515, 455), (125,0)) #this is a bit of a hack to compensate for the offset between cam and depth sensor #img = img.applyLayers() img = img - depthbin.invert() #img.save(d) meanred, meangrn, meanblue = img.meanColor() if meanred > meanblue and meanred > meangrn: depthbin, junk, junk = depthbin.splitChannels(grayscale = False) if meanblue > meanred and meanblue > meangrn: junk, junk, depthbin = depthbin.splitChannels(grayscale = False) if meangrn > meanred and meangrn > meanblue: junk, depthbin, junk = depthbin.splitChannels(grayscale = False) laststroke = time.time() compositeframe = compositeframe + depthbin #we're painting -- keep adding to the composite frame else: if (time.time() - laststroke > offtime): #if we're not painting for a certain amount of time, reset compositeframe = Image(cam.getImage().getEmpty()) frame = ((imgscene - compositeframe.binarize(10).invert()) + compositeframe).flipHorizontal() #subtract our composite frame from our camera image, then add it back in in red. False = Show red channel as red, [0] = first (red) channel frame.save(d) #show in browser if d.mouseLeft: d.done = True pg.quit() time.sleep(0.01) #yield to the webserver
unknown
codeparrot/codeparrot-clean
""" Views related to content libraries. A content library is a structure containing XBlocks which can be re-used in the multiple courses. """ from __future__ import absolute_import import json import logging from contentstore.views.item import create_xblock_info from contentstore.utils import reverse_library_url, add_instructor from django.http import HttpResponseNotAllowed, Http404 from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.conf import settings from django.utils.translation import ugettext as _ from django.views.decorators.http import require_http_methods from django.views.decorators.csrf import ensure_csrf_cookie from edxmako.shortcuts import render_to_response from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locator import LibraryLocator, LibraryUsageLocator from xmodule.modulestore.exceptions import DuplicateCourseError from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.django import modulestore from .user import user_with_role from .component import get_component_templates, CONTAINER_TEMPLATES from student.auth import ( STUDIO_VIEW_USERS, STUDIO_EDIT_ROLES, get_user_permissions, has_studio_read_access, has_studio_write_access ) from student.roles import CourseInstructorRole, CourseStaffRole, LibraryUserRole from util.json_request import expect_json, JsonResponse, JsonResponseBadRequest __all__ = ['library_handler', 'manage_library_users'] log = logging.getLogger(__name__) LIBRARIES_ENABLED = settings.FEATURES.get('ENABLE_CONTENT_LIBRARIES', False) @login_required @ensure_csrf_cookie @require_http_methods(('GET', 'POST')) def library_handler(request, library_key_string=None): """ RESTful interface to most content library related functionality. 
""" if not LIBRARIES_ENABLED: log.exception("Attempted to use the content library API when the libraries feature is disabled.") raise Http404 # Should never happen because we test the feature in urls.py also if library_key_string is not None and request.method == 'POST': return HttpResponseNotAllowed(("POST",)) if request.method == 'POST': return _create_library(request) # request method is get, since only GET and POST are allowed by @require_http_methods(('GET', 'POST')) if library_key_string: return _display_library(library_key_string, request) return _list_libraries(request) def _display_library(library_key_string, request): """ Displays single library """ library_key = CourseKey.from_string(library_key_string) if not isinstance(library_key, LibraryLocator): log.exception("Non-library key passed to content libraries API.") # Should never happen due to url regex raise Http404 # This is not a library if not has_studio_read_access(request.user, library_key): log.exception( u"User %s tried to access library %s without permission", request.user.username, unicode(library_key) ) raise PermissionDenied() library = modulestore().get_library(library_key) if library is None: log.exception(u"Library %s not found", unicode(library_key)) raise Http404 response_format = 'html' if ( request.REQUEST.get('format', 'html') == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'text/html') ): response_format = 'json' return library_blocks_view(library, request.user, response_format) def _list_libraries(request): """ List all accessible libraries """ lib_info = [ { "display_name": lib.display_name, "library_key": unicode(lib.location.library_key), } for lib in modulestore().get_libraries() if has_studio_read_access(request.user, lib.location.library_key) ] return JsonResponse(lib_info) @expect_json def _create_library(request): """ Helper method for creating a new library. 
""" display_name = None try: display_name = request.json['display_name'] org = request.json['org'] library = request.json.get('number', None) if library is None: library = request.json['library'] store = modulestore() with store.default_store(ModuleStoreEnum.Type.split): new_lib = store.create_library( org=org, library=library, user_id=request.user.id, fields={"display_name": display_name}, ) # Give the user admin ("Instructor") role for this library: add_instructor(new_lib.location.library_key, request.user, request.user) except KeyError as error: log.exception("Unable to create library - missing required JSON key.") return JsonResponseBadRequest({ "ErrMsg": _("Unable to create library - missing required field '{field}'").format(field=error.message) }) except InvalidKeyError as error: log.exception("Unable to create library - invalid key.") return JsonResponseBadRequest({ "ErrMsg": _("Unable to create library '{name}'.\n\n{err}").format(name=display_name, err=error.message) }) except DuplicateCourseError: log.exception("Unable to create library - one already exists with the same key.") return JsonResponseBadRequest({ 'ErrMsg': _( 'There is already a library defined with the same ' 'organization and library code. Please ' 'change your library code so that it is unique within your organization.' ) }) lib_key_str = unicode(new_lib.location.library_key) return JsonResponse({ 'url': reverse_library_url('library_handler', lib_key_str), 'library_key': lib_key_str, }) def library_blocks_view(library, user, response_format): """ The main view of a course's content library. Shows all the XBlocks in the library, and allows adding/editing/deleting them. Can be called with response_format="json" to get a JSON-formatted list of the XBlocks in the library along with library metadata. Assumes that read permissions have been checked before calling this. 
""" assert isinstance(library.location.library_key, LibraryLocator) assert isinstance(library.location, LibraryUsageLocator) children = library.children if response_format == "json": # The JSON response for this request is short and sweet: prev_version = library.runtime.course_entry.structure['previous_version'] return JsonResponse({ "display_name": library.display_name, "library_id": unicode(library.location.library_key), "version": unicode(library.runtime.course_entry.course_key.version), "previous_version": unicode(prev_version) if prev_version else None, "blocks": [unicode(x) for x in children], }) can_edit = has_studio_write_access(user, library.location.library_key) xblock_info = create_xblock_info(library, include_ancestor_info=False, graders=[]) component_templates = get_component_templates(library, library=True) if can_edit else [] return render_to_response('library.html', { 'can_edit': can_edit, 'context_library': library, 'component_templates': component_templates, 'xblock_info': xblock_info, 'templates': CONTAINER_TEMPLATES, }) def manage_library_users(request, library_key_string): """ Studio UI for editing the users within a library. Uses the /course_team/:library_key/:user_email/ REST API to make changes. 
""" library_key = CourseKey.from_string(library_key_string) if not isinstance(library_key, LibraryLocator): raise Http404 # This is not a library user_perms = get_user_permissions(request.user, library_key) if not user_perms & STUDIO_VIEW_USERS: raise PermissionDenied() library = modulestore().get_library(library_key) if library is None: raise Http404 # Segment all the users explicitly associated with this library, ensuring each user only has one role listed: instructors = set(CourseInstructorRole(library_key).users_with_role()) staff = set(CourseStaffRole(library_key).users_with_role()) - instructors users = set(LibraryUserRole(library_key).users_with_role()) - instructors - staff formatted_users = [] for user in instructors: formatted_users.append(user_with_role(user, 'instructor')) for user in staff: formatted_users.append(user_with_role(user, 'staff')) for user in users: formatted_users.append(user_with_role(user, 'library_user')) return render_to_response('manage_users_lib.html', { 'context_library': library, 'users': formatted_users, 'allow_actions': bool(user_perms & STUDIO_EDIT_ROLES), 'library_key': unicode(library_key), 'lib_users_url': reverse_library_url('manage_library_users', library_key_string), 'show_children_previews': library.show_children_previews })
unknown
codeparrot/codeparrot-clean
import glob import subprocess import unittest from buildscripts.resmokelib.parser import set_run_options from buildscripts.resmokelib.suitesconfig import get_suite class TestFindSuites(unittest.TestCase): def test_find_suites(self): jstests = glob.glob("jstests/core/testing/*.js") resmoke_process = subprocess.run( ["python3", "buildscripts/resmoke.py", "find-suites", jstests[0]], stdout=subprocess.PIPE, text=True, check=False, ) self.assertEqual( 0, resmoke_process.returncode, # Give a very verbose failure message - this can be read by users well # outside of resmoke-areas in case of failures on malformatted yaml configs msg=f"find-suites subcommand did not execute successfully:\n\n{resmoke_process.stdout}", ) self.assertTrue(resmoke_process.stdout, msg="find-suites output must not be empty") set_run_options() # check that find-suites output is a list of suites, one per line. for line in resmoke_process.stdout.splitlines(): suite = get_suite(line) self.assertTrue( suite, msg=f"find-suites output line does not match suite name format: '{line}'", )
python
github
https://github.com/mongodb/mongo
buildscripts/tests/resmoke_validation/test_find_suites.py
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: nxos_system extends_documentation_fragment: nxos version_added: "2.3" author: "Peter Sprygada (@privateip)" short_description: Manage the system attributes on Cisco NXOS devices description: - This module provides declarative management of node system attributes on Cisco NXOS devices. It provides an option to configure host system parameters or remove those parameters from the device active configuration. options: hostname: description: - Configure the device hostname parameter. This option takes an ASCII string value. domain_name: description: - Configures the default domain name suffix to be used when referencing this node by its FQDN. This argument accepts either a list of domain names or a list of dicts that configure the domain name and VRF name. See examples. domain_lookup: description: - Enables or disables the DNS lookup feature in Cisco NXOS. This argument accepts boolean values. When enabled, the system will try to resolve hostnames using DNS and when disabled, hostnames will not be resolved. domain_search: description: - Configures a list of domain name suffixes to search when performing DNS name resolution. 
This argument accepts either a list of domain names or a list of dicts that configure the domain name and VRF name. See examples. name_servers: description: - List of DNS name servers by IP address to use to perform name resolution lookups. This argument accepts either a list of DNS servers or a list of hashes that configure the name server and VRF name. See examples. system_mtu: description: - Specifies the mtu, must be an integer. state: description: - State of the configuration values in the device's current active configuration. When set to I(present), the values should be configured in the device active configuration and when set to I(absent) the values should not be in the device active configuration default: present choices: ['present', 'absent'] """ EXAMPLES = """ - name: configure hostname and domain-name nxos_system: hostname: nxos01 domain_name: test.example.com - name: remove configuration nxos_system: state: absent - name: configure name servers nxos_system: name_servers: - 8.8.8.8 - 8.8.4.4 - name: configure name servers with VRF support nxos_system: name_servers: - { server: 8.8.8.8, vrf: mgmt } - { server: 8.8.4.4, vrf: mgmt } """ RETURN = """ commands: description: The list of configuration mode commands to send to the device returned: always type: list sample: - hostname nxos01 - ip domain-name test.example.com """ import re from ansible.module_utils.network.nxos.nxos import get_config, load_config from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems from ansible.module_utils.network.common.config import NetworkConfig from ansible.module_utils.network.common.utils import ComplexList _CONFIGURED_VRFS = None def has_vrf(module, vrf): global _CONFIGURED_VRFS if _CONFIGURED_VRFS is not None: return vrf in _CONFIGURED_VRFS config = get_config(module) _CONFIGURED_VRFS = re.findall(r'vrf context (\S+)', config) return vrf in 
_CONFIGURED_VRFS def map_obj_to_commands(want, have, module): commands = list() state = module.params['state'] def needs_update(x): return want.get(x) and (want.get(x) != have.get(x)) def difference(x, y, z): return [item for item in x[z] if item not in y[z]] def remove(cmd, commands, vrf=None): if vrf: commands.append('vrf context %s' % vrf) commands.append(cmd) if vrf: commands.append('exit') def add(cmd, commands, vrf=None): if vrf: if not has_vrf(module, vrf): module.fail_json(msg='invalid vrf name %s' % vrf) return remove(cmd, commands, vrf) if state == 'absent': if have['hostname']: commands.append('no hostname') for item in have['domain_name']: cmd = 'no ip domain-name %s' % item['name'] remove(cmd, commands, item['vrf']) for item in have['domain_search']: cmd = 'no ip domain-list %s' % item['name'] remove(cmd, commands, item['vrf']) for item in have['name_servers']: cmd = 'no ip name-server %s' % item['server'] remove(cmd, commands, item['vrf']) if have['system_mtu']: commands.append('no system jumbomtu') if state == 'present': if needs_update('hostname'): commands.append('hostname %s' % want['hostname']) if needs_update('domain_lookup'): cmd = 'ip domain-lookup' if want['domain_lookup'] is False: cmd = 'no %s' % cmd commands.append(cmd) if want['domain_name']: for item in difference(have, want, 'domain_name'): cmd = 'no ip domain-name %s' % item['name'] remove(cmd, commands, item['vrf']) for item in difference(want, have, 'domain_name'): cmd = 'ip domain-name %s' % item['name'] add(cmd, commands, item['vrf']) if want['domain_search']: for item in difference(have, want, 'domain_search'): cmd = 'no ip domain-list %s' % item['name'] remove(cmd, commands, item['vrf']) for item in difference(want, have, 'domain_search'): cmd = 'ip domain-list %s' % item['name'] add(cmd, commands, item['vrf']) if want['name_servers']: for item in difference(have, want, 'name_servers'): cmd = 'no ip name-server %s' % item['server'] remove(cmd, commands, item['vrf']) for item in 
difference(want, have, 'name_servers'): cmd = 'ip name-server %s' % item['server'] add(cmd, commands, item['vrf']) if needs_update('system_mtu'): commands.append('system jumbomtu %s' % want['system_mtu']) return commands def parse_hostname(config): match = re.search(r'^hostname (\S+)', config, re.M) if match: return match.group(1) def parse_domain_name(config, vrf_config): objects = list() regex = re.compile(r'ip domain-name (\S+)') match = regex.search(config, re.M) if match: objects.append({'name': match.group(1), 'vrf': None}) for vrf, cfg in iteritems(vrf_config): match = regex.search(cfg, re.M) if match: objects.append({'name': match.group(1), 'vrf': vrf}) return objects def parse_domain_search(config, vrf_config): objects = list() for item in re.findall(r'^ip domain-list (\S+)', config, re.M): objects.append({'name': item, 'vrf': None}) for vrf, cfg in iteritems(vrf_config): for item in re.findall(r'ip domain-list (\S+)', cfg, re.M): objects.append({'name': item, 'vrf': vrf}) return objects def parse_name_servers(config, vrf_config, vrfs): objects = list() match = re.search('^ip name-server (.+)$', config, re.M) if match: for addr in match.group(1).split(' '): if addr == 'use-vrf' or addr in vrfs: continue objects.append({'server': addr, 'vrf': None}) for vrf, cfg in iteritems(vrf_config): vrf_match = re.search('ip name-server (.+)', cfg, re.M) if vrf_match: for addr in vrf_match.group(1).split(' '): objects.append({'server': addr, 'vrf': vrf}) return objects def parse_system_mtu(config): match = re.search(r'^system jumbomtu (\d+)', config, re.M) if match: return int(match.group(1)) def map_config_to_obj(module): config = get_config(module) configobj = NetworkConfig(indent=2, contents=config) vrf_config = {} vrfs = re.findall(r'^vrf context (\S+)$', config, re.M) for vrf in vrfs: config_data = configobj.get_block_config(path=['vrf context %s' % vrf]) vrf_config[vrf] = config_data return { 'hostname': parse_hostname(config), 'domain_lookup': 'no ip 
domain-lookup' not in config, 'domain_name': parse_domain_name(config, vrf_config), 'domain_search': parse_domain_search(config, vrf_config), 'name_servers': parse_name_servers(config, vrf_config, vrfs), 'system_mtu': parse_system_mtu(config) } def validate_system_mtu(value, module): if not 1500 <= value <= 9216: module.fail_json(msg='system_mtu must be between 1500 and 9216') def map_params_to_obj(module): obj = { 'hostname': module.params['hostname'], 'domain_lookup': module.params['domain_lookup'], 'system_mtu': module.params['system_mtu'] } domain_name = ComplexList(dict( name=dict(key=True), vrf=dict() ), module) domain_search = ComplexList(dict( name=dict(key=True), vrf=dict() ), module) name_servers = ComplexList(dict( server=dict(key=True), vrf=dict() ), module) for arg, cast in [('domain_name', domain_name), ('domain_search', domain_search), ('name_servers', name_servers)]: if module.params[arg] is not None: obj[arg] = cast(module.params[arg]) else: obj[arg] = None return obj def main(): """ main entry point for module execution """ argument_spec = dict( hostname=dict(), domain_lookup=dict(type='bool'), # { name: <str>, vrf: <str> } domain_name=dict(type='list'), # {name: <str>, vrf: <str> } domain_search=dict(type='list'), # { server: <str>; vrf: <str> } name_servers=dict(type='list'), system_mtu=dict(type='int'), state=dict(default='present', choices=['present', 'absent']) ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) result = {'changed': False} if warnings: result['warnings'] = warnings want = map_params_to_obj(module) have = map_config_to_obj(module) commands = map_obj_to_commands(want, have, module) result['commands'] = commands if commands: if not module.check_mode: load_config(module, commands) result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # Author: OpenDrive Ltda # Copyright (c) 2014 Opendrive Ltda # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsibility of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # guarantees and support are strongly advised to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # ############################################################################## import account_invoice
unknown
codeparrot/codeparrot-clean
# sqlite/base.py # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php r""" .. dialect:: sqlite :name: SQLite .. _sqlite_datetime: Date and Time Types ------------------- SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide out of the box functionality for translating values between Python `datetime` objects and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime` and related types provide date formatting and parsing functionality when SQlite is used. The implementation classes are :class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`. These types represent dates and times as ISO formatted strings, which also nicely support ordering. There's no reliance on typical "libc" internals for these functions so historical dates are fully supported. Ensuring Text affinity ^^^^^^^^^^^^^^^^^^^^^^ The DDL rendered for these types is the standard ``DATE``, ``TIME`` and ``DATETIME`` indicators. However, custom storage formats can also be applied to these types. When the storage format is detected as containing no alpha characters, the DDL for these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, so that the column continues to have textual affinity. .. seealso:: `Type Affinity <http://www.sqlite.org/datatype3.html#affinity>`_ - in the SQLite documentation .. _sqlite_autoincrement: SQLite Auto Incrementing Behavior ---------------------------------- Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html Key concepts: * SQLite has an implicit "auto increment" feature that takes place for any non-composite primary-key column that is specifically created using "INTEGER PRIMARY KEY" for the type + primary key. 
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not** equivalent to the implicit autoincrement feature; this keyword is not recommended for general use. SQLAlchemy does not render this keyword unless a special SQLite-specific directive is used (see below). However, it still requires that the column's type is named "INTEGER". Using the AUTOINCREMENT Keyword ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To specifically render the AUTOINCREMENT keyword on the primary key column when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table construct:: Table('sometable', metadata, Column('id', Integer, primary_key=True), sqlite_autoincrement=True) Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ SQLite's typing model is based on naming conventions. Among other things, this means that any type name which contains the substring ``"INT"`` will be determined to be of "integer affinity". A type named ``"BIGINT"``, ``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be of "integer" affinity. However, **the SQLite autoincrement feature, whether implicitly or explicitly enabled, requires that the name of the column's type is exactly the string "INTEGER"**. Therefore, if an application uses a type like :class:`.BigInteger` for a primary key, on SQLite this type will need to be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE TABLE`` statement in order for the autoincrement behavior to be available. 
One approach to achieve this is to use :class:`.Integer` on SQLite only using :meth:`.TypeEngine.with_variant`:: table = Table( "my_table", metadata, Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True) ) Another is to use a subclass of :class:`.BigInteger` that overrides its DDL name to be ``INTEGER`` when compiled against SQLite:: from sqlalchemy import BigInteger from sqlalchemy.ext.compiler import compiles class SLBigInteger(BigInteger): pass @compiles(SLBigInteger, 'sqlite') def bi_c(element, compiler, **kw): return "INTEGER" @compiles(SLBigInteger) def bi_c(element, compiler, **kw): return compiler.visit_BIGINT(element, **kw) table = Table( "my_table", metadata, Column("id", SLBigInteger(), primary_key=True) ) .. seealso:: :meth:`.TypeEngine.with_variant` :ref:`sqlalchemy.ext.compiler_toplevel` `Datatypes In SQLite Version 3 <http://sqlite.org/datatype3.html>`_ .. _sqlite_concurrency: Database Locking Behavior / Concurrency --------------------------------------- SQLite is not designed for a high level of write concurrency. The database itself, being a file, is locked completely during write operations within transactions, meaning exactly one "connection" (in reality a file handle) has exclusive access to the database during this period - all other "connections" will be blocked during this time. The Python DBAPI specification also calls for a connection model that is always in a transaction; there is no ``connection.begin()`` method, only ``connection.commit()`` and ``connection.rollback()``, upon which a new transaction is to be begun immediately. This may seem to imply that the SQLite driver would in theory allow only a single filehandle on a particular database file at any time; however, there are several factors both within SQlite itself as well as within the pysqlite driver which loosen this restriction significantly. 
However, no matter what locking modes are used, SQLite will still always lock the database file once a transaction is started and DML (e.g. INSERT, UPDATE, DELETE) has at least been emitted, and this will block other transactions at least at the point that they also attempt to emit DML. By default, the length of time on this block is very short before it times out with an error. This behavior becomes more critical when used in conjunction with the SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs within a transaction, and with its autoflush model, may emit DML preceding any SELECT statement. This may lead to a SQLite database that locks more quickly than is expected. The locking mode of SQLite and the pysqlite driver can be manipulated to some degree, however it should be noted that achieving a high degree of write-concurrency with SQLite is a losing battle. For more information on SQLite's lack of write concurrency by design, please see `Situations Where Another RDBMS May Work Better - High Concurrency <http://www.sqlite.org/whentouse.html>`_ near the bottom of the page. The following subsections introduce areas that are impacted by SQLite's file-based architecture and additionally will usually require workarounds to work when using the pysqlite driver. .. _sqlite_isolation_level: Transaction Isolation Level ---------------------------- SQLite supports "transaction isolation" in a non-standard way, along two axes. One is that of the `PRAGMA read_uncommitted <http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_ instruction. This setting can essentially switch SQLite between its default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation mode normally referred to as ``READ UNCOMMITTED``. SQLAlchemy ties into this PRAGMA statement using the :paramref:`.create_engine.isolation_level` parameter of :func:`.create_engine`. 
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"`` and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively. SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by the pysqlite driver's default behavior. The other axis along which SQLite's transactional locking is impacted is via the nature of the ``BEGIN`` statement used. The three varieties are "deferred", "immediate", and "exclusive", as described at `BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_. A straight ``BEGIN`` statement uses the "deferred" mode, where the database file is not locked until the first read or write operation, and read access remains open to other transactions until the first write operation. But again, it is critical to note that the pysqlite driver interferes with this behavior by *not even emitting BEGIN* until the first write operation. .. warning:: SQLite's transactional scope is impacted by unresolved issues in the pysqlite driver, which defers BEGIN statements to a greater degree than is often feasible. See the section :ref:`pysqlite_serializable` for techniques to work around this behavior. SAVEPOINT Support ---------------------------- SQLite supports SAVEPOINTs, which only function once a transaction is begun. SQLAlchemy's SAVEPOINT support is available using the :meth:`.Connection.begin_nested` method at the Core level, and :meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs won't work at all with pysqlite unless workarounds are taken. .. warning:: SQLite's SAVEPOINT feature is impacted by unresolved issues in the pysqlite driver, which defers BEGIN statements to a greater degree than is often feasible. See the section :ref:`pysqlite_serializable` for techniques to work around this behavior. Transactional DDL ---------------------------- The SQLite database supports transactional :term:`DDL` as well. 
In this case, the pysqlite driver is not only failing to start transactions, it also is ending any existing transaction when DDL is detected, so again, workarounds are required. .. warning:: SQLite's transactional DDL is impacted by unresolved issues in the pysqlite driver, which fails to emit BEGIN and additionally forces a COMMIT to cancel any transaction when DDL is encountered. See the section :ref:`pysqlite_serializable` for techniques to work around this behavior. .. _sqlite_foreign_keys: Foreign Key Support ------------------- SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables, however by default these constraints have no effect on the operation of the table. Constraint checking on SQLite has three prerequisites: * At least version 3.6.19 of SQLite must be in use * The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY or SQLITE_OMIT_TRIGGER symbols enabled. * The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections before use. SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for new connections through the usage of events:: from sqlalchemy.engine import Engine from sqlalchemy import event @event.listens_for(Engine, "connect") def set_sqlite_pragma(dbapi_connection, connection_record): cursor = dbapi_connection.cursor() cursor.execute("PRAGMA foreign_keys=ON") cursor.close() .. warning:: When SQLite foreign keys are enabled, it is **not possible** to emit CREATE or DROP statements for tables that contain mutually-dependent foreign key constraints; to emit the DDL for these tables requires that ALTER TABLE be used to create or drop these constraints separately, for which SQLite has no support. .. seealso:: `SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_ - on the SQLite web site. :ref:`event_toplevel` - SQLAlchemy event API. :ref:`use_alter` - more information on SQLAlchemy's facilities for handling mutually-dependent foreign key constraints. .. 
_sqlite_type_reflection: Type Reflection --------------- SQLite types are unlike those of most other database backends, in that the string name of the type usually does not correspond to a "type" in a one-to-one fashion. Instead, SQLite links per-column typing behavior to one of five so-called "type affinities" based on a string matching pattern for the type. SQLAlchemy's reflection process, when inspecting types, uses a simple lookup table to link the keywords returned to provided SQLAlchemy types. This lookup table is present within the SQLite dialect as it is for all other dialects. However, the SQLite dialect has a different "fallback" routine for when a particular type name is not located in the lookup map; it instead implements the SQLite "type affinity" scheme located at http://www.sqlite.org/datatype3.html section 2.1. The provided typemap will make direct associations from an exact string name match for the following types: :class:`~.types.BIGINT`, :class:`~.types.BLOB`, :class:`~.types.BOOLEAN`, :class:`~.types.BOOLEAN`, :class:`~.types.CHAR`, :class:`~.types.DATE`, :class:`~.types.DATETIME`, :class:`~.types.FLOAT`, :class:`~.types.DECIMAL`, :class:`~.types.FLOAT`, :class:`~.types.INTEGER`, :class:`~.types.INTEGER`, :class:`~.types.NUMERIC`, :class:`~.types.REAL`, :class:`~.types.SMALLINT`, :class:`~.types.TEXT`, :class:`~.types.TIME`, :class:`~.types.TIMESTAMP`, :class:`~.types.VARCHAR`, :class:`~.types.NVARCHAR`, :class:`~.types.NCHAR` When a type name does not match one of the above types, the "type affinity" lookup is used instead: * :class:`~.types.INTEGER` is returned if the type name includes the string ``INT`` * :class:`~.types.TEXT` is returned if the type name includes the string ``CHAR``, ``CLOB`` or ``TEXT`` * :class:`~.types.NullType` is returned if the type name includes the string ``BLOB`` * :class:`~.types.REAL` is returned if the type name includes the string ``REAL``, ``FLOA`` or ``DOUB``. 
* Otherwise, the :class:`~.types.NUMERIC` type is used. .. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting columns. .. _sqlite_partial_index: Partial Indexes --------------- A partial index, e.g. one which uses a WHERE clause, can be specified with the DDL system using the argument ``sqlite_where``:: tbl = Table('testtbl', m, Column('data', Integer)) idx = Index('test_idx1', tbl.c.data, sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10)) The index will be rendered at create time as:: CREATE INDEX test_idx1 ON testtbl (data) WHERE data > 5 AND data < 10 .. versionadded:: 0.9.9 .. _sqlite_dotted_column_names: Dotted Column Names ------------------- Using table or column names that explicitly have periods in them is **not recommended**. While this is generally a bad idea for relational databases in general, as the dot is a syntactically significant character, the SQLite driver up until version **3.10.0** of SQLite has a bug which requires that SQLAlchemy filter out these dots in result sets. .. versionchanged:: 1.1 The following SQLite issue has been resolved as of version 3.10.0 of SQLite. SQLAlchemy as of **1.1** automatically disables its internal workarounds based on detection of this version. 
The bug, entirely outside of SQLAlchemy, can be illustrated thusly:: import sqlite3 assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version" conn = sqlite3.connect(":memory:") cursor = conn.cursor() cursor.execute("create table x (a integer, b integer)") cursor.execute("insert into x (a, b) values (1, 1)") cursor.execute("insert into x (a, b) values (2, 2)") cursor.execute("select x.a, x.b from x") assert [c[0] for c in cursor.description] == ['a', 'b'] cursor.execute(''' select x.a, x.b from x where a=1 union select x.a, x.b from x where a=2 ''') assert [c[0] for c in cursor.description] == ['a', 'b'], \ [c[0] for c in cursor.description] The second assertion fails:: Traceback (most recent call last): File "test.py", line 19, in <module> [c[0] for c in cursor.description] AssertionError: ['x.a', 'x.b'] Where above, the driver incorrectly reports the names of the columns including the name of the table, which is entirely inconsistent vs. when the UNION is not present. SQLAlchemy relies upon column names being predictable in how they match to the original statement, so the SQLAlchemy dialect has no choice but to filter these out:: from sqlalchemy import create_engine eng = create_engine("sqlite://") conn = eng.connect() conn.execute("create table x (a integer, b integer)") conn.execute("insert into x (a, b) values (1, 1)") conn.execute("insert into x (a, b) values (2, 2)") result = conn.execute("select x.a, x.b from x") assert result.keys() == ["a", "b"] result = conn.execute(''' select x.a, x.b from x where a=1 union select x.a, x.b from x where a=2 ''') assert result.keys() == ["a", "b"] Note that above, even though SQLAlchemy filters out the dots, *both names are still addressable*:: >>> row = result.first() >>> row["a"] 1 >>> row["x.a"] 1 >>> row["b"] 1 >>> row["x.b"] 1 Therefore, the workaround applied by SQLAlchemy only impacts :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API. 
In the very specific case where an application is forced to use column names that contain dots, and the functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` is required to return these dotted names unmodified, the ``sqlite_raw_colnames`` execution option may be provided, either on a per-:class:`.Connection` basis:: result = conn.execution_options(sqlite_raw_colnames=True).execute(''' select x.a, x.b from x where a=1 union select x.a, x.b from x where a=2 ''') assert result.keys() == ["x.a", "x.b"] or on a per-:class:`.Engine` basis:: engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True}) When using the per-:class:`.Engine` execution option, note that **Core and ORM queries that use UNION may not function properly**. """ import datetime import re from ... import processors from ... import sql, exc from ... import types as sqltypes, schema as sa_schema from ... import util from ...engine import default, reflection from ...sql import compiler from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT, INTEGER, REAL, NUMERIC, SMALLINT, TEXT, TIMESTAMP, VARCHAR) class _DateTimeMixin(object): _reg = None _storage_format = None def __init__(self, storage_format=None, regexp=None, **kw): super(_DateTimeMixin, self).__init__(**kw) if regexp is not None: self._reg = re.compile(regexp) if storage_format is not None: self._storage_format = storage_format @property def format_is_text_affinity(self): """return True if the storage format will automatically imply a TEXT affinity. If the storage format contains no non-numeric characters, it will imply a NUMERIC storage format on SQLite; in this case, the type will generate its DDL as DATE_CHAR, DATETIME_CHAR, TIME_CHAR. .. 
versionadded:: 1.0.0 """ spec = self._storage_format % { "year": 0, "month": 0, "day": 0, "hour": 0, "minute": 0, "second": 0, "microsecond": 0 } return bool(re.search(r'[^0-9]', spec)) def adapt(self, cls, **kw): if issubclass(cls, _DateTimeMixin): if self._storage_format: kw["storage_format"] = self._storage_format if self._reg: kw["regexp"] = self._reg return super(_DateTimeMixin, self).adapt(cls, **kw) def literal_processor(self, dialect): bp = self.bind_processor(dialect) def process(value): return "'%s'" % bp(value) return process class DATETIME(_DateTimeMixin, sqltypes.DateTime): r"""Represent a Python datetime object in SQLite using a string. The default string storage format is:: "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:\ %(second)02d.%(microsecond)06d" e.g.:: 2011-03-15 12:05:57.10558 The storage format can be customized to some degree using the ``storage_format`` and ``regexp`` parameters, such as:: import re from sqlalchemy.dialects.sqlite import DATETIME dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d " "%(hour)02d:%(min)02d:%(second)02d", regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)" ) :param storage_format: format string which will be applied to the dict with keys year, month, day, hour, minute, second, and microsecond. :param regexp: regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python datetime() constructor as keyword arguments. Otherwise, if positional groups are used, the datetime() constructor is called with positional arguments via ``*map(int, match_obj.groups(0))``. 
""" _storage_format = ( "%(year)04d-%(month)02d-%(day)02d " "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" ) def __init__(self, *args, **kwargs): truncate_microseconds = kwargs.pop('truncate_microseconds', False) super(DATETIME, self).__init__(*args, **kwargs) if truncate_microseconds: assert 'storage_format' not in kwargs, "You can specify only "\ "one of truncate_microseconds or storage_format." assert 'regexp' not in kwargs, "You can specify only one of "\ "truncate_microseconds or regexp." self._storage_format = ( "%(year)04d-%(month)02d-%(day)02d " "%(hour)02d:%(minute)02d:%(second)02d" ) def bind_processor(self, dialect): datetime_datetime = datetime.datetime datetime_date = datetime.date format = self._storage_format def process(value): if value is None: return None elif isinstance(value, datetime_datetime): return format % { 'year': value.year, 'month': value.month, 'day': value.day, 'hour': value.hour, 'minute': value.minute, 'second': value.second, 'microsecond': value.microsecond, } elif isinstance(value, datetime_date): return format % { 'year': value.year, 'month': value.month, 'day': value.day, 'hour': 0, 'minute': 0, 'second': 0, 'microsecond': 0, } else: raise TypeError("SQLite DateTime type only accepts Python " "datetime and date objects as input.") return process def result_processor(self, dialect, coltype): if self._reg: return processors.str_to_datetime_processor_factory( self._reg, datetime.datetime) else: return processors.str_to_datetime class DATE(_DateTimeMixin, sqltypes.Date): r"""Represent a Python date object in SQLite using a string. 
The default string storage format is:: "%(year)04d-%(month)02d-%(day)02d" e.g.:: 2011-03-15 The storage format can be customized to some degree using the ``storage_format`` and ``regexp`` parameters, such as:: import re from sqlalchemy.dialects.sqlite import DATE d = DATE( storage_format="%(month)02d/%(day)02d/%(year)04d", regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)") ) :param storage_format: format string which will be applied to the dict with keys year, month, and day. :param regexp: regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python date() constructor as keyword arguments. Otherwise, if positional groups are used, the date() constructor is called with positional arguments via ``*map(int, match_obj.groups(0))``. """ _storage_format = "%(year)04d-%(month)02d-%(day)02d" def bind_processor(self, dialect): datetime_date = datetime.date format = self._storage_format def process(value): if value is None: return None elif isinstance(value, datetime_date): return format % { 'year': value.year, 'month': value.month, 'day': value.day, } else: raise TypeError("SQLite Date type only accepts Python " "date objects as input.") return process def result_processor(self, dialect, coltype): if self._reg: return processors.str_to_datetime_processor_factory( self._reg, datetime.date) else: return processors.str_to_date class TIME(_DateTimeMixin, sqltypes.Time): r"""Represent a Python time object in SQLite using a string. 
The default string storage format is:: "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" e.g.:: 12:05:57.10558 The storage format can be customized to some degree using the ``storage_format`` and ``regexp`` parameters, such as:: import re from sqlalchemy.dialects.sqlite import TIME t = TIME(storage_format="%(hour)02d-%(minute)02d-" "%(second)02d-%(microsecond)06d", regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?") ) :param storage_format: format string which will be applied to the dict with keys hour, minute, second, and microsecond. :param regexp: regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python time() constructor as keyword arguments. Otherwise, if positional groups are used, the time() constructor is called with positional arguments via ``*map(int, match_obj.groups(0))``. """ _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" def __init__(self, *args, **kwargs): truncate_microseconds = kwargs.pop('truncate_microseconds', False) super(TIME, self).__init__(*args, **kwargs) if truncate_microseconds: assert 'storage_format' not in kwargs, "You can specify only "\ "one of truncate_microseconds or storage_format." assert 'regexp' not in kwargs, "You can specify only one of "\ "truncate_microseconds or regexp." 
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d" def bind_processor(self, dialect): datetime_time = datetime.time format = self._storage_format def process(value): if value is None: return None elif isinstance(value, datetime_time): return format % { 'hour': value.hour, 'minute': value.minute, 'second': value.second, 'microsecond': value.microsecond, } else: raise TypeError("SQLite Time type only accepts Python " "time objects as input.") return process def result_processor(self, dialect, coltype): if self._reg: return processors.str_to_datetime_processor_factory( self._reg, datetime.time) else: return processors.str_to_time colspecs = { sqltypes.Date: DATE, sqltypes.DateTime: DATETIME, sqltypes.Time: TIME, } ischema_names = { 'BIGINT': sqltypes.BIGINT, 'BLOB': sqltypes.BLOB, 'BOOL': sqltypes.BOOLEAN, 'BOOLEAN': sqltypes.BOOLEAN, 'CHAR': sqltypes.CHAR, 'DATE': sqltypes.DATE, 'DATE_CHAR': sqltypes.DATE, 'DATETIME': sqltypes.DATETIME, 'DATETIME_CHAR': sqltypes.DATETIME, 'DOUBLE': sqltypes.FLOAT, 'DECIMAL': sqltypes.DECIMAL, 'FLOAT': sqltypes.FLOAT, 'INT': sqltypes.INTEGER, 'INTEGER': sqltypes.INTEGER, 'NUMERIC': sqltypes.NUMERIC, 'REAL': sqltypes.REAL, 'SMALLINT': sqltypes.SMALLINT, 'TEXT': sqltypes.TEXT, 'TIME': sqltypes.TIME, 'TIME_CHAR': sqltypes.TIME, 'TIMESTAMP': sqltypes.TIMESTAMP, 'VARCHAR': sqltypes.VARCHAR, 'NVARCHAR': sqltypes.NVARCHAR, 'NCHAR': sqltypes.NCHAR, } class SQLiteCompiler(compiler.SQLCompiler): extract_map = util.update_copy( compiler.SQLCompiler.extract_map, { 'month': '%m', 'day': '%d', 'year': '%Y', 'second': '%S', 'hour': '%H', 'doy': '%j', 'minute': '%M', 'epoch': '%s', 'dow': '%w', 'week': '%W', }) def visit_now_func(self, fn, **kw): return "CURRENT_TIMESTAMP" def visit_localtimestamp_func(self, func, **kw): return 'DATETIME(CURRENT_TIMESTAMP, "localtime")' def visit_true(self, expr, **kw): return '1' def visit_false(self, expr, **kw): return '0' def visit_char_length_func(self, fn, **kw): return "length%s" % 
self.function_argspec(fn) def visit_cast(self, cast, **kwargs): if self.dialect.supports_cast: return super(SQLiteCompiler, self).visit_cast(cast, **kwargs) else: return self.process(cast.clause, **kwargs) def visit_extract(self, extract, **kw): try: return "CAST(STRFTIME('%s', %s) AS INTEGER)" % ( self.extract_map[extract.field], self.process(extract.expr, **kw) ) except KeyError: raise exc.CompileError( "%s is not a valid extract argument." % extract.field) def limit_clause(self, select, **kw): text = "" if select._limit_clause is not None: text += "\n LIMIT " + self.process(select._limit_clause, **kw) if select._offset_clause is not None: if select._limit_clause is None: text += "\n LIMIT " + self.process(sql.literal(-1)) text += " OFFSET " + self.process(select._offset_clause, **kw) else: text += " OFFSET " + self.process(sql.literal(0), **kw) return text def for_update_clause(self, select, **kw): # sqlite has no "FOR UPDATE" AFAICT return '' def visit_is_distinct_from_binary(self, binary, operator, **kw): return "%s IS NOT %s" % (self.process(binary.left), self.process(binary.right)) def visit_isnot_distinct_from_binary(self, binary, operator, **kw): return "%s IS %s" % (self.process(binary.left), self.process(binary.right)) class SQLiteDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): coltype = self.dialect.type_compiler.process( column.type, type_expression=column) colspec = self.preparer.format_column(column) + " " + coltype default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable: colspec += " NOT NULL" if column.primary_key: if ( column.autoincrement is True and len(column.table.primary_key.columns) != 1 ): raise exc.CompileError( "SQLite does not support autoincrement for " "composite primary keys") if (column.table.dialect_options['sqlite']['autoincrement'] and len(column.table.primary_key.columns) == 1 and 
issubclass(column.type._type_affinity, sqltypes.Integer) and not column.foreign_keys): colspec += " PRIMARY KEY AUTOINCREMENT" return colspec def visit_primary_key_constraint(self, constraint): # for columns with sqlite_autoincrement=True, # the PRIMARY KEY constraint can only be inline # with the column itself. if len(constraint.columns) == 1: c = list(constraint)[0] if (c.primary_key and c.table.dialect_options['sqlite']['autoincrement'] and issubclass(c.type._type_affinity, sqltypes.Integer) and not c.foreign_keys): return None return super(SQLiteDDLCompiler, self).visit_primary_key_constraint( constraint) def visit_foreign_key_constraint(self, constraint): local_table = constraint.elements[0].parent.table remote_table = constraint.elements[0].column.table if local_table.schema != remote_table.schema: return None else: return super( SQLiteDDLCompiler, self).visit_foreign_key_constraint(constraint) def define_constraint_remote_table(self, constraint, table, preparer): """Format the remote table clause of a CREATE CONSTRAINT clause.""" return preparer.format_table(table, use_schema=False) def visit_create_index(self, create, include_schema=False, include_table_schema=True): index = create.element self._verify_index_table(index) preparer = self.preparer text = "CREATE " if index.unique: text += "UNIQUE " text += "INDEX %s ON %s (%s)" \ % ( self._prepared_index_name(index, include_schema=True), preparer.format_table(index.table, use_schema=False), ', '.join( self.sql_compiler.process( expr, include_table=False, literal_binds=True) for expr in index.expressions) ) whereclause = index.dialect_options["sqlite"]["where"] if whereclause is not None: where_compiled = self.sql_compiler.process( whereclause, include_table=False, literal_binds=True) text += " WHERE " + where_compiled return text class SQLiteTypeCompiler(compiler.GenericTypeCompiler): def visit_large_binary(self, type_, **kw): return self.visit_BLOB(type_) def visit_DATETIME(self, type_, **kw): if not 
isinstance(type_, _DateTimeMixin) or \ type_.format_is_text_affinity: return super(SQLiteTypeCompiler, self).visit_DATETIME(type_) else: return "DATETIME_CHAR" def visit_DATE(self, type_, **kw): if not isinstance(type_, _DateTimeMixin) or \ type_.format_is_text_affinity: return super(SQLiteTypeCompiler, self).visit_DATE(type_) else: return "DATE_CHAR" def visit_TIME(self, type_, **kw): if not isinstance(type_, _DateTimeMixin) or \ type_.format_is_text_affinity: return super(SQLiteTypeCompiler, self).visit_TIME(type_) else: return "TIME_CHAR" class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = set([ 'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc', 'attach', 'autoincrement', 'before', 'begin', 'between', 'by', 'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit', 'conflict', 'constraint', 'create', 'cross', 'current_date', 'current_time', 'current_timestamp', 'database', 'default', 'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct', 'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive', 'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob', 'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index', 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', 'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit', 'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on', 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query', 'raise', 'references', 'reindex', 'rename', 'replace', 'restrict', 'right', 'rollback', 'row', 'select', 'set', 'table', 'temp', 'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union', 'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual', 'when', 'where', ]) def format_index(self, index, use_schema=True, name=None): """Prepare a quoted index and schema name.""" if name is None: name = index.name result = self.quote(name, index.quote) if (not self.omit_schema and use_schema and 
getattr(index.table, "schema", None)): result = self.quote_schema( index.table.schema, index.table.quote_schema) + "." + result return result class SQLiteExecutionContext(default.DefaultExecutionContext): @util.memoized_property def _preserve_raw_colnames(self): return not self.dialect._broken_dotted_colnames or \ self.execution_options.get("sqlite_raw_colnames", False) def _translate_colname(self, colname): # TODO: detect SQLite version 3.10.0 or greater; # see [ticket:3633] # adjust for dotted column names. SQLite # in the case of UNION may store col names as # "tablename.colname", or if using an attached database, # "database.tablename.colname", in cursor.description if not self._preserve_raw_colnames and "." in colname: return colname.split(".")[-1], colname else: return colname, None class SQLiteDialect(default.DefaultDialect): name = 'sqlite' supports_alter = False supports_unicode_statements = True supports_unicode_binds = True supports_default_values = True supports_empty_insert = False supports_cast = True supports_multivalues_insert = True default_paramstyle = 'qmark' execution_ctx_cls = SQLiteExecutionContext statement_compiler = SQLiteCompiler ddl_compiler = SQLiteDDLCompiler type_compiler = SQLiteTypeCompiler preparer = SQLiteIdentifierPreparer ischema_names = ischema_names colspecs = colspecs isolation_level = None supports_cast = True supports_default_values = True construct_arguments = [ (sa_schema.Table, { "autoincrement": False }), (sa_schema.Index, { "where": None, }), ] _broken_fk_pragma_quotes = False _broken_dotted_colnames = False def __init__(self, isolation_level=None, native_datetime=False, **kwargs): default.DefaultDialect.__init__(self, **kwargs) self.isolation_level = isolation_level # this flag used by pysqlite dialect, and perhaps others in the # future, to indicate the driver is handling date/timestamp # conversions (and perhaps datetime/time as well on some hypothetical # driver ?) 
self.native_datetime = native_datetime if self.dbapi is not None: self.supports_right_nested_joins = ( self.dbapi.sqlite_version_info >= (3, 7, 16)) self._broken_dotted_colnames = ( self.dbapi.sqlite_version_info < (3, 10, 0) ) self.supports_default_values = ( self.dbapi.sqlite_version_info >= (3, 3, 8)) self.supports_cast = ( self.dbapi.sqlite_version_info >= (3, 2, 3)) self.supports_multivalues_insert = ( # http://www.sqlite.org/releaselog/3_7_11.html self.dbapi.sqlite_version_info >= (3, 7, 11)) # see http://www.sqlalchemy.org/trac/ticket/2568 # as well as http://www.sqlite.org/src/info/600482d161 self._broken_fk_pragma_quotes = ( self.dbapi.sqlite_version_info < (3, 6, 14)) _isolation_lookup = { 'READ UNCOMMITTED': 1, 'SERIALIZABLE': 0, } def set_isolation_level(self, connection, level): try: isolation_level = self._isolation_lookup[level.replace('_', ' ')] except KeyError: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. " "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ) cursor = connection.cursor() cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level) cursor.close() def get_isolation_level(self, connection): cursor = connection.cursor() cursor.execute('PRAGMA read_uncommitted') res = cursor.fetchone() if res: value = res[0] else: # http://www.sqlite.org/changes.html#version_3_3_3 # "Optional READ UNCOMMITTED isolation (instead of the # default isolation level of SERIALIZABLE) and # table level locking when database connections # share a common cache."" # pre-SQLite 3.3.0 default to 0 value = 0 cursor.close() if value == 0: return "SERIALIZABLE" elif value == 1: return "READ UNCOMMITTED" else: assert False, "Unknown isolation level %s" % value def on_connect(self): if self.isolation_level is not None: def connect(conn): self.set_isolation_level(conn, self.isolation_level) return connect else: return None @reflection.cache def get_schema_names(self, connection, **kw): s = "PRAGMA 
database_list" dl = connection.execute(s) return [db[1] for db in dl if db[1] != "temp"] @reflection.cache def get_table_names(self, connection, schema=None, **kw): if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema else: master = "sqlite_master" s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (master,) rs = connection.execute(s) return [row[0] for row in rs] @reflection.cache def get_temp_table_names(self, connection, **kw): s = "SELECT name FROM sqlite_temp_master "\ "WHERE type='table' ORDER BY name " rs = connection.execute(s) return [row[0] for row in rs] @reflection.cache def get_temp_view_names(self, connection, **kw): s = "SELECT name FROM sqlite_temp_master "\ "WHERE type='view' ORDER BY name " rs = connection.execute(s) return [row[0] for row in rs] def has_table(self, connection, table_name, schema=None): info = self._get_table_pragma( connection, "table_info", table_name, schema=schema) return bool(info) @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema else: master = "sqlite_master" s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (master,) rs = connection.execute(s) return [row[0] for row in rs] @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema s = ("SELECT sql FROM %s WHERE name = '%s'" "AND type='view'") % (master, view_name) rs = connection.execute(s) else: try: s = ("SELECT sql FROM " " (SELECT * FROM sqlite_master UNION ALL " " SELECT * FROM sqlite_temp_master) " "WHERE name = '%s' " "AND type='view'") % view_name rs = connection.execute(s) except exc.DBAPIError: s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " "AND type='view'") % 
view_name rs = connection.execute(s) result = rs.fetchall() if result: return result[0].sql @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): info = self._get_table_pragma( connection, "table_info", table_name, schema=schema) columns = [] for row in info: (name, type_, nullable, default, primary_key) = ( row[1], row[2].upper(), not row[3], row[4], row[5]) columns.append(self._get_column_info(name, type_, nullable, default, primary_key)) return columns def _get_column_info(self, name, type_, nullable, default, primary_key): coltype = self._resolve_type_affinity(type_) if default is not None: default = util.text_type(default) return { 'name': name, 'type': coltype, 'nullable': nullable, 'default': default, 'autoincrement': 'auto', 'primary_key': primary_key, } def _resolve_type_affinity(self, type_): """Return a data type from a reflected column, using affinity tules. SQLite's goal for universal compatibility introduces some complexity during reflection, as a column's defined type might not actually be a type that SQLite understands - or indeed, my not be defined *at all*. Internally, SQLite handles this with a 'data type affinity' for each column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER', 'REAL', or 'NONE' (raw bits). The algorithm that determines this is listed in http://www.sqlite.org/datatype3.html section 2.1. This method allows SQLAlchemy to support that algorithm, while still providing access to smarter reflection utilities by regcognizing column definitions that SQLite only supports through affinity (like DATE and DOUBLE). 
""" match = re.match(r'([\w ]+)(\(.*?\))?', type_) if match: coltype = match.group(1) args = match.group(2) else: coltype = '' args = '' if coltype in self.ischema_names: coltype = self.ischema_names[coltype] elif 'INT' in coltype: coltype = sqltypes.INTEGER elif 'CHAR' in coltype or 'CLOB' in coltype or 'TEXT' in coltype: coltype = sqltypes.TEXT elif 'BLOB' in coltype or not coltype: coltype = sqltypes.NullType elif 'REAL' in coltype or 'FLOA' in coltype or 'DOUB' in coltype: coltype = sqltypes.REAL else: coltype = sqltypes.NUMERIC if args is not None: args = re.findall(r'(\d+)', args) try: coltype = coltype(*[int(a) for a in args]) except TypeError: util.warn( "Could not instantiate type %s with " "reflected arguments %s; using no arguments." % (coltype, args)) coltype = coltype() else: coltype = coltype() return coltype @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): constraint_name = None table_data = self._get_table_sql(connection, table_name, schema=schema) if table_data: PK_PATTERN = r'CONSTRAINT (\w+) PRIMARY KEY' result = re.search(PK_PATTERN, table_data, re.I) constraint_name = result.group(1) if result else None cols = self.get_columns(connection, table_name, schema, **kw) pkeys = [] for col in cols: if col['primary_key']: pkeys.append(col['name']) return {'constrained_columns': pkeys, 'name': constraint_name} @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): # sqlite makes this *extremely difficult*. # First, use the pragma to get the actual FKs. 
pragma_fks = self._get_table_pragma( connection, "foreign_key_list", table_name, schema=schema ) fks = {} for row in pragma_fks: (numerical_id, rtbl, lcol, rcol) = ( row[0], row[2], row[3], row[4]) if rcol is None: rcol = lcol if self._broken_fk_pragma_quotes: rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) if numerical_id in fks: fk = fks[numerical_id] else: fk = fks[numerical_id] = { 'name': None, 'constrained_columns': [], 'referred_schema': schema, 'referred_table': rtbl, 'referred_columns': [], 'options': {} } fks[numerical_id] = fk fk['constrained_columns'].append(lcol) fk['referred_columns'].append(rcol) def fk_sig(constrained_columns, referred_table, referred_columns): return tuple(constrained_columns) + (referred_table,) + \ tuple(referred_columns) # then, parse the actual SQL and attempt to find DDL that matches # the names as well. SQLite saves the DDL in whatever format # it was typed in as, so need to be liberal here. keys_by_signature = dict( ( fk_sig( fk['constrained_columns'], fk['referred_table'], fk['referred_columns']), fk ) for fk in fks.values() ) table_data = self._get_table_sql(connection, table_name, schema=schema) if table_data is None: # system tables, etc. return [] def parse_fks(): FK_PATTERN = ( r'(?:CONSTRAINT (\w+) +)?' r'FOREIGN KEY *\( *(.+?) 
*\) +' r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *' r'((?:ON (?:DELETE|UPDATE) ' r'(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)' ) for match in re.finditer(FK_PATTERN, table_data, re.I): ( constraint_name, constrained_columns, referred_quoted_name, referred_name, referred_columns, onupdatedelete) = \ match.group(1, 2, 3, 4, 5, 6) constrained_columns = list( self._find_cols_in_sig(constrained_columns)) if not referred_columns: referred_columns = constrained_columns else: referred_columns = list( self._find_cols_in_sig(referred_columns)) referred_name = referred_quoted_name or referred_name options = {} for token in re.split(r" *\bON\b *", onupdatedelete.upper()): if token.startswith("DELETE"): options['ondelete'] = token[6:].strip() elif token.startswith("UPDATE"): options["onupdate"] = token[6:].strip() yield ( constraint_name, constrained_columns, referred_name, referred_columns, options) fkeys = [] for ( constraint_name, constrained_columns, referred_name, referred_columns, options) in parse_fks(): sig = fk_sig( constrained_columns, referred_name, referred_columns) if sig not in keys_by_signature: util.warn( "WARNING: SQL-parsed foreign key constraint " "'%s' could not be located in PRAGMA " "foreign_keys for table %s" % ( sig, table_name )) continue key = keys_by_signature.pop(sig) key['name'] = constraint_name key['options'] = options fkeys.append(key) # assume the remainders are the unnamed, inline constraints, just # use them as is as it's extremely difficult to parse inline # constraints fkeys.extend(keys_by_signature.values()) return fkeys def _find_cols_in_sig(self, sig): for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I): yield match.group(1) or match.group(2) @reflection.cache def get_unique_constraints(self, connection, table_name, schema=None, **kw): auto_index_by_sig = {} for idx in self.get_indexes( connection, table_name, schema=schema, include_auto_indexes=True, **kw): if not 
idx['name'].startswith("sqlite_autoindex"): continue sig = tuple(idx['column_names']) auto_index_by_sig[sig] = idx table_data = self._get_table_sql( connection, table_name, schema=schema, **kw) if not table_data: return [] unique_constraints = [] def parse_uqs(): UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)' INLINE_UNIQUE_PATTERN = ( r'(?:(".+?")|([a-z0-9]+)) ' r'+[a-z0-9_ ]+? +UNIQUE') for match in re.finditer(UNIQUE_PATTERN, table_data, re.I): name, cols = match.group(1, 2) yield name, list(self._find_cols_in_sig(cols)) # we need to match inlines as well, as we seek to differentiate # a UNIQUE constraint from a UNIQUE INDEX, even though these # are kind of the same thing :) for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I): cols = list( self._find_cols_in_sig(match.group(1) or match.group(2))) yield None, cols for name, cols in parse_uqs(): sig = tuple(cols) if sig in auto_index_by_sig: auto_index_by_sig.pop(sig) parsed_constraint = { 'name': name, 'column_names': cols } unique_constraints.append(parsed_constraint) # NOTE: auto_index_by_sig might not be empty here, # the PRIMARY KEY may have an entry. return unique_constraints @reflection.cache def get_check_constraints(self, connection, table_name, schema=None, **kw): table_data = self._get_table_sql( connection, table_name, schema=schema, **kw) if not table_data: return [] CHECK_PATTERN = ( r'(?:CONSTRAINT (\w+) +)?' r'CHECK *\( *(.+) *\),? *' ) check_constraints = [] # NOTE: we aren't using re.S here because we actually are # taking advantage of each CHECK constraint being all on one # line in the table definition in order to delineate. This # necessarily makes assumptions as to how the CREATE TABLE # was emitted. 
for match in re.finditer(CHECK_PATTERN, table_data, re.I): check_constraints.append({ 'sqltext': match.group(2), 'name': match.group(1) }) return check_constraints @reflection.cache def get_indexes(self, connection, table_name, schema=None, **kw): pragma_indexes = self._get_table_pragma( connection, "index_list", table_name, schema=schema) indexes = [] include_auto_indexes = kw.pop('include_auto_indexes', False) for row in pragma_indexes: # ignore implicit primary key index. # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html if (not include_auto_indexes and row[1].startswith('sqlite_autoindex')): continue indexes.append(dict(name=row[1], column_names=[], unique=row[2])) # loop thru unique indexes to get the column names. for idx in indexes: pragma_index = self._get_table_pragma( connection, "index_info", idx['name']) for row in pragma_index: idx['column_names'].append(row[2]) return indexes @reflection.cache def _get_table_sql(self, connection, table_name, schema=None, **kw): try: s = ("SELECT sql FROM " " (SELECT * FROM sqlite_master UNION ALL " " SELECT * FROM sqlite_temp_master) " "WHERE name = '%s' " "AND type = 'table'") % table_name rs = connection.execute(s) except exc.DBAPIError: s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " "AND type = 'table'") % table_name rs = connection.execute(s) return rs.scalar() def _get_table_pragma(self, connection, pragma, table_name, schema=None): quote = self.identifier_preparer.quote_identifier if schema is not None: statement = "PRAGMA %s." % quote(schema) else: statement = "PRAGMA " qtable = quote(table_name) statement = "%s%s(%s)" % (statement, pragma, qtable) cursor = connection.execute(statement) if not cursor._soft_closed: # work around SQLite issue whereby cursor.description # is blank when PRAGMA returns no rows: # http://www.sqlite.org/cvstrac/tktview?tn=1884 result = cursor.fetchall() else: result = [] return result
unknown
codeparrot/codeparrot-clean
use actix_web::{Responder, HttpResponse, App}; use actix_web_codegen::*; #[get("/config")] async fn config() -> impl Responder { HttpResponse::Ok() } #[actix_web::main] async fn main() { let srv = actix_test::start(|| App::new().service(config)); let request = srv.get("/config"); let response = request.send().await.unwrap(); assert!(response.status().is_success()); }
rust
github
https://github.com/actix/actix-web
actix-web-codegen/tests/trybuild/simple.rs
import os import time import pytest from ..base import BaseTopazTest class TestKernel(BaseTopazTest): def test_puts_nil(self, space, capfd): space.execute("puts nil") out, err = capfd.readouterr() assert out == "\n" def test_print(self, space, capfd): space.execute("print 1, 3") out, err = capfd.readouterr() assert out == "13" def test_p(self, space, capfd): space.execute("p 1,2,3") out, err = capfd.readouterr() assert out == "1\n2\n3\n" def test_lambda(self, space): w_res = space.execute(""" l = lambda { |x| 3 } return [l.class, l.lambda?] """) w_cls, w_lambda = space.listview(w_res) assert w_cls is space.w_proc assert w_lambda is space.w_true def test_proc(self, space): w_res = space.execute(""" l = proc { |x| 3 } return [l.class, l.lambda?] """) w_cls, w_lambda = space.listview(w_res) assert w_cls is space.w_proc assert w_lambda is space.w_false def test_singleton_methods(self, space): w_res = space.execute(""" class X end return X.new.singleton_methods """) assert self.unwrap(space, w_res) == [] w_res = space.execute(""" def X.foo end return X.singleton_methods """) assert self.unwrap(space, w_res) == ["foo"] w_res = space.execute(""" class Y < X end return [Y.singleton_methods, Y.singleton_methods(false)] """) assert self.unwrap(space, w_res) == [["foo"], []] def test_raise(self, space): with self.raises(space, "RuntimeError", "foo"): space.execute("raise 'foo'") with self.raises(space, "TypeError", "foo"): space.execute("raise TypeError, 'foo'") with self.raises(space, "TypeError", "foo"): space.execute("fail TypeError, 'foo'") with self.raises(space, "TypeError", "exception class/object expected"): space.execute("fail nil") with self.raises(space, "TypeError", "exception object expected"): space.execute(""" class A def exception(msg=nil) end end raise A.new """) with self.raises(space, "RuntimeError"): space.execute(""" class A def exception(msg=nil); RuntimeError.new(msg); end end raise A.new """) with self.raises(space, "RuntimeError"): 
space.execute("raise") def test_overriding_raise(self, space): w_res = space.execute(""" class A def raise(*args); args; end def do_raise; raise 'foo'; end end return A.new.do_raise """) assert self.unwrap(space, w_res) == ['foo'] def test_raise_error_subclass(self, space): with self.raises(space, "CustomError", 'foo'): space.execute(""" class CustomError < StandardError; end raise CustomError, 'foo' """) def test_Array(self, space): w_res = space.execute(""" class A def to_ary; ["to_ary"]; end def to_a; ["to_a"]; end end class B def to_a; ["to_a"]; end end return Array(A.new), Array(B.new) """) assert self.unwrap(space, w_res) == [["to_ary"], ["to_a"]] assert self.unwrap(space, space.execute("return Array(1)")) == [1] def test_String(self, space): w_res = space.execute("return [String('hello'), String(4)]") assert self.unwrap(space, w_res) == ["hello", "4"] def test_Integer(self, space): w_res = space.execute("return [Integer(4), Integer('123')]") assert self.unwrap(space, w_res) == [4, 123] def test_exit(self, space): with self.raises(space, "SystemExit"): space.execute("Kernel.exit") with self.raises(space, "SystemExit"): space.execute("exit") def test_block_given_p(self, space): assert space.execute("return block_given?") is space.w_false assert space.execute("return iterator?") is space.w_false assert space.execute("return (proc { block_given? })[]") is space.w_false w_res = space.execute(""" def foo block_given? end return foo, foo { } """) assert self.unwrap(space, w_res) == [False, True] w_res = space.execute(""" def foo bar { block_given? 
} end def bar yield end return foo, foo { } """) assert self.unwrap(space, w_res) == [False, True] def test_eqlp(self, space): w_res = space.execute(""" x = Object.new return [x.eql?(x), x.eql?(4)] """) assert self.unwrap(space, w_res) == [True, False] def test_eval(self, space): w_res = space.execute(""" a = 4 return eval('a + 2') """) assert space.int_w(w_res) == 6 def test_responds_to(self, space): w_res = space.execute("return [4.respond_to?(:foo_bar), nil.respond_to?(:object_id)]") assert self.unwrap(space, w_res) == [False, True] def test_Float(self, space): assert space.float_w(space.execute("return Float(1)")) == 1.0 assert space.float_w(space.execute("return Float(1.1)")) == 1.1 assert space.float_w(space.execute("return Float('1.1')")) == 1.1 assert space.float_w(space.execute("return Float('1.1e10')")) == 11000000000.0 with self.raises(space, "TypeError"): space.execute("Float(nil)") with self.raises(space, "ArgumentError"): space.execute("Float('a')") w_res = space.execute(""" class A; def to_f; 1.1; end; end return Float(A.new) """) assert space.float_w(w_res) == 1.1 def test_loop(self, space): w_res = space.execute(""" res = [] i = 0 loop { i += 1 res << i break if i == 3 } return res """) assert self.unwrap(space, w_res) == [1, 2, 3] def test_sleep(self, space): now = time.time() w_res = space.execute("return sleep 0.001") assert space.int_w(w_res) == 0 assert time.time() - now >= 0.001 now = time.time() w_res = space.execute("return sleep 0.002") assert space.int_w(w_res) == 0 assert time.time() - now >= 0.002 def test_trust(self, space): w_res = space.execute("return 'a'.untrusted?") assert self.unwrap(space, w_res) is False w_res = space.execute(""" a = 'a' a.untrust return a.untrusted?, a.dup.untrusted?, a.clone.untrusted? """) assert self.unwrap(space, w_res) == [True, True, True] w_res = space.execute(""" a = 'a' a.untrust a.trust return a.untrusted?, a.dup.untrusted?, a.clone.untrusted? 
""") assert self.unwrap(space, w_res) == [False, False, False] def test_taint(self, space): w_res = space.execute("return 'a'.tainted?") assert self.unwrap(space, w_res) is False w_res = space.execute(""" a = 'a' a.taint return a.tainted?, a.dup.tainted?, a.clone.tainted? """) assert self.unwrap(space, w_res) == [True, True, True] w_res = space.execute(""" a = 'a' a.taint a.untaint return a.tainted?, a.dup.tainted?, a.clone.tainted? """) assert self.unwrap(space, w_res) == [False, False, False] def test_freeze(self, space): w_res = space.execute("return 'a'.frozen?") assert self.unwrap(space, w_res) is False w_res = space.execute(""" a = 'a' a.freeze return a.frozen?, a.dup.frozen?, a.clone.frozen? """) assert self.unwrap(space, w_res) == [True, False, True] def test_backtick(self, space): w_res = space.execute("return `echo 10`") assert self.unwrap(space, w_res) == "10\n" def test_backtick_sets_process_status(self, space): w_res = space.execute(""" $? = nil `echo` return $?.class.name """) assert self.unwrap(space, w_res) == "Process::Status" class TestRequire(BaseTopazTest): def test_simple(self, space, tmpdir): f = tmpdir.join("t.rb") f.write(""" def t(a, b) a - b end """) w_res = space.execute(""" require '%s' return t(5, 10) """ % f) assert space.int_w(w_res) == -5 def test_no_ext(self, space, tmpdir): f = tmpdir.join("t.rb") f.write(""" def t(a, b) a - b end """) w_res = space.execute(""" require '%s' return t(12, 21) """ % str(f)[:-3]) assert space.int_w(w_res) == -9 def test_load_path(self, space, tmpdir): f = tmpdir.join("t.rb") f.write(""" def t(a, b) a - b end """) w_res = space.execute(""" $LOAD_PATH[0..-1] = ['%s'] require 't.rb' return t(2, 5) """ % tmpdir) assert space.int_w(w_res) == -3 def test_stdlib_default_load_path(self, space): w_res = space.execute(""" return require 'prettyprint' """) assert w_res is space.w_true def test_nonexistance(self, space): with self.raises(space, "LoadError"): space.execute("require 'xxxxxxx'") def 
test_already_loaded(self, space, tmpdir): f = tmpdir.join("f.rb") f.write(""" @a += 1 """) w_res = space.execute(""" @a = 0 require '%s' require '%s' require '%s' return @a """ % (f, f, f)) assert space.int_w(w_res) == 1 def test_load(self, space, tmpdir): f = tmpdir.join("f.rb") f.write(""" @a += 1 """) w_res = space.execute(""" @a = 0 load '%s' load '%s' load '%s' return @a """ % (f, f, f)) assert space.int_w(w_res) == 3 def test_no_ext_on_path(self, space, tmpdir): f = tmpdir.join("t.txt") f.write(""" @a = 5 """) w_res = space.execute(""" require '%s' return @a """ % f) assert space.int_w(w_res) == 5 def test_null_bytes(self, space): with self.raises(space, "ArgumentError", "string contains null byte"): space.execute('require "b\\0"') with self.raises(space, "ArgumentError", "string contains null byte"): space.execute(""" $LOAD_PATH.unshift "\\0" require 'pp' """) def test_load_path_element_coerce(self, space, tmpdir): f = tmpdir.join("t.rb") f.write(""" $success = true """) w_res = space.execute(""" class A def to_path "%s" end end $LOAD_PATH.unshift A.new require 't' return $success """ % tmpdir) assert w_res is space.w_true def test_path_ambigious_directory_file(self, space, tmpdir): f = tmpdir.join("t.rb") f.write(""" $success = true """) tmpdir.join("t").ensure(dir=True) w_res = space.execute(""" $LOAD_PATH << '%s' require '%s' return $success """ % (tmpdir, tmpdir.join("t"))) assert w_res is space.w_true class TestExec(BaseTopazTest): def fork_and_wait(self, space, capfd, code): cpid = os.fork() if cpid == 0: try: space.execute(code) finally: os._exit(0) else: os.waitpid(cpid, 0) out, err = capfd.readouterr() return out def test_exec_with_sh(self, space, capfd): out = self.fork_and_wait(space, capfd, "exec 'echo $0'") assert out == "sh\n" def test_exec_directly(self, space, capfd): out = self.fork_and_wait(space, capfd, "exec '/bin/echo', '$0'") assert out == "$0\n" def test_exec_with_custom_argv0(self, space, capfd): out = self.fork_and_wait(space, capfd, 
"exec ['/bin/sh', 'argv0'], '-c', 'echo $0'") assert out == "argv0\n" @pytest.mark.xfail def test_exec_with_path_search(self, space, capfd): out = self.fork_and_wait(space, capfd, "exec 'echo', '$0'") assert out == "$0\n" def test_exec_with_null_bytes(self, space): with self.raises(space, "ArgumentError", "string contains null byte"): space.execute('exec "\\0"') with self.raises(space, "ArgumentError", "string contains null byte"): space.execute('exec ["\\0", "none"]') with self.raises(space, "ArgumentError", "string contains null byte"): space.execute('exec ["none", "\\0"]') with self.raises(space, "ArgumentError", "string contains null byte"): space.execute('exec "none", "\\0"') class TestSetTraceFunc(BaseTopazTest): def test_class(self, space): w_res = space.execute(""" output = [] set_trace_func proc { |event, file, line, id, binding, classname| output << [event, file, line, id, classname] } class << self end set_trace_func nil return output """) assert self.unwrap(space, w_res) == [ ["c-return", "-e", 3, "set_trace_func", "Kernel"], ["line", "-e", 7, None, None], ["class", "-e", 7, None, None], ["end", "-e", 7, None, None], ["line", "-e", 10, None, None], ["c-call", "-e", 10, "set_trace_func", "Kernel"] ]
unknown
codeparrot/codeparrot-clean
package daemon import ( "github.com/moby/moby/v2/daemon/container" ) func (daemon *Daemon) saveAppArmorConfig(container *container.Container) error { return nil }
go
github
https://github.com/moby/moby
daemon/container_windows.go
from typing import List, Dict, Any from data_tools.wrappers.users import is_read_permitted, is_write_permitted, get_all_read_permitted_records from data_tools.db_models import Analysis, Collection, User, db from data_tools.util import AuthException def get_analyses(user: User, filter_by: Dict[str, Any] = None) -> List[Analysis]: """ Get all the analyses the user is allowed to view :param user: :param filter_by: :return: """ return get_all_read_permitted_records(user, Analysis, filter_by) def get_analysis(user: User, analysis_id: int) -> Analysis: """ Get analysis information :param user: :param analysis_id: :return: """ analysis = Analysis.query.filter_by(id=analysis_id).first() if is_read_permitted(user, analysis): return analysis raise AuthException(f'User {user.email} is not permitted to access analysis {analysis_id}') def update_analysis(user: User, analysis: Analysis, new_data: Dict[str, Any]) -> Analysis: """ Update the analysis with the data in new_data :param user: :param analysis: :param new_data: :return: """ if is_write_permitted(user, analysis): if 'id' in new_data: if analysis.id != int(new_data['id']) and Analysis.query.filter_by(id=new_data['id']) is not None: raise ValueError(f'Analysis with id {new_data["id"]} already exists!') analysis.update(new_data) analysis.last_editor = user db.session.commit() return analysis raise AuthException(f'User {user.email} is not permitted to modify analysis {analysis.id}') def create_analysis(user: User, data: Dict[str, Any], collections: List[Collection] = None) -> Analysis: """ Create a new analysis with the metdata in data :param user: :param data: :param collections: :return: """ if 'id' in data: # cannot create with designated id del data['id'] analysis = Analysis(creator=user, owner=user, last_editor=user, name=data['name']) if collections is not None: analysis.collections = collections db.session.add(analysis) db.session.commit() update_analysis(user, analysis, data) return analysis def delete_analysis(user: 
User, analysis: Analysis) -> Dict[str, str]: """ Remove the record associated with this analysis from the database :param user: :param analysis: :return: """ if is_write_permitted(user, analysis): db.session.delete(analysis) db.session.commit() return {'message': f'Analysis {analysis.id} deleted'} raise AuthException(f'User {user.email} is not permitted to modify analysis {analysis.id}') def attach_collection(user: User, analysis: Analysis, collection: Collection) -> Dict[str, Any]: """ Add a collection to the list of collections belonging to an analysis :param user: :param analysis: :param collection: :return: """ # check read permissions on analysis and collection if is_read_permitted(user, collection) and is_write_permitted(user, analysis): if collection not in analysis.collections: analysis.collections.append(collection) db.session.commit() return {'message': f'collection {collection.id} attached to analysis {analysis.id}'} return {'message': f'Collection {collection.id} already attached to analysis {analysis.id}'} raise AuthException(f'User {user.email} is not permitted to attach collection {collection.id} ' f'to analysis {analysis.id}') def detach_collection(user: User, analysis: Analysis, collection: Collection) -> Dict[str, Any]: """ Remove a collection from the list of collections belonging to an analysis :param user: :param analysis: :param collection: :return: """ if is_write_permitted(user, analysis): analysis.collections.remove(collection) db.session.commit() return {'message': f'collection {collection.id} detached from analysis {analysis.id}'} raise AuthException(f'User {user.email} is not permitted to modify analysis {analysis.id}') def get_attached_collections(user: User, analysis: Analysis) -> List[Collection]: """ Get all collections which belong to an analysis :param user: :param analysis: :return: """ if is_read_permitted(user, analysis): return analysis.collections raise AuthException(f'User {user.email} is not permitted to access analysis 
{analysis.id}') def get_attached_analyses(user: User, collection: Collection) -> List[Analysis]: """ Get all analysis that a collection belongs to :param user: :param collection: :return: """ if is_read_permitted(user, collection): return get_all_read_permitted_records(user, collection.analyses) raise AuthException(f'User {user.email} not permitted to access collection {collection.id}')
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<i@binux.me> # http://binux.me # Created on 2012-11-14 17:09:50 from __future__ import unicode_literals, division, absolute_import import time import logging from collections import deque try: from UserDict import DictMixin except ImportError: from collections import Mapping as DictMixin import six from six import iteritems from six.moves import cPickle class BaseCounter(object): def __init__(self): raise NotImplementedError def event(self, value=1): """Fire a event.""" raise NotImplementedError def value(self, value): """Set counter value.""" raise NotImplementedError @property def avg(self): """Get average value""" raise NotImplementedError @property def sum(self): """Get sum of counter""" raise NotImplementedError def empty(self): """Clear counter""" raise NotImplementedError class TotalCounter(BaseCounter): """Total counter""" def __init__(self): self.cnt = 0 def event(self, value=1): self.cnt += value def value(self, value): self.cnt = value @property def avg(self): return self.cnt @property def sum(self): return self.cnt def empty(self): return self.cnt == 0 class AverageWindowCounter(BaseCounter): """ Record last N(window) value """ def __init__(self, window_size=300): self.window_size = window_size self.values = deque(maxlen=window_size) def event(self, value=1): self.values.append(value) value = event @property def avg(self): return self.sum / len(self.values) @property def sum(self): return sum(self.values) def empty(self): if not self.values: return True class TimebaseAverageEventCounter(BaseCounter): """ Record last window_size * window_interval seconds event. 
records will trim ever window_interval seconds """ def __init__(self, window_size=30, window_interval=10): self.max_window_size = window_size self.window_size = 0 self.window_interval = window_interval self.values = deque(maxlen=window_size) self.events = deque(maxlen=window_size) self.times = deque(maxlen=window_size) self.cache_value = 0 self.cache_event = 0 self.cache_start = None self._first_data_time = None def event(self, value=1): now = time.time() if self._first_data_time is None: self._first_data_time = now if self.cache_start is None: self.cache_value = value self.cache_event = 1 self.cache_start = now elif now - self.cache_start > self.window_interval: self.values.append(self.cache_value) self.events.append(self.cache_event) self.times.append(self.cache_start) self.on_append(self.cache_value, self.cache_start) self.cache_value = value self.cache_event = 1 self.cache_start = now else: self.cache_value += value self.cache_event += 1 return self def value(self, value): self.cache_value = value def _trim_window(self): now = time.time() if self.cache_start and now - self.cache_start > self.window_interval: self.values.append(self.cache_value) self.events.append(self.cache_event) self.times.append(self.cache_start) self.on_append(self.cache_value, self.cache_start) self.cache_value = 0 self.cache_start = None if self.window_size != self.max_window_size and self._first_data_time is not None: time_passed = now - self._first_data_time self.window_size = min(self.max_window_size, time_passed / self.window_interval) window_limit = now - self.window_size * self.window_interval while self.times and self.times[0] < window_limit: self.times.popleft() self.events.popleft() self.values.popleft() @property def avg(self): events = (sum(self.events) + self.cache_event) if not events: return 0 return float(self.sum) / events @property def sum(self): self._trim_window() return sum(self.values) + self.cache_value def empty(self): self._trim_window() if not self.values and not 
self.cache_start: return True def on_append(self, value, time): pass class TimebaseAverageWindowCounter(BaseCounter): """ Record last window_size * window_interval seconds values. records will trim ever window_interval seconds """ def __init__(self, window_size=30, window_interval=10): self.max_window_size = window_size self.window_size = 0 self.window_interval = window_interval self.values = deque(maxlen=window_size) self.times = deque(maxlen=window_size) self.cache_value = 0 self.cache_start = None self._first_data_time = None def event(self, value=1): now = time.time() if self._first_data_time is None: self._first_data_time = now if self.cache_start is None: self.cache_value = value self.cache_start = now elif now - self.cache_start > self.window_interval: self.values.append(self.cache_value) self.times.append(self.cache_start) self.on_append(self.cache_value, self.cache_start) self.cache_value = value self.cache_start = now else: self.cache_value += value return self def value(self, value): self.cache_value = value def _trim_window(self): now = time.time() if self.cache_start and now - self.cache_start > self.window_interval: self.values.append(self.cache_value) self.times.append(self.cache_start) self.on_append(self.cache_value, self.cache_start) self.cache_value = 0 self.cache_start = None if self.window_size != self.max_window_size and self._first_data_time is not None: time_passed = now - self._first_data_time self.window_size = min(self.max_window_size, time_passed / self.window_interval) window_limit = now - self.window_size * self.window_interval while self.times and self.times[0] < window_limit: self.times.popleft() self.values.popleft() @property def avg(self): sum = float(self.sum) if not self.window_size: return 0 return sum / self.window_size / self.window_interval @property def sum(self): self._trim_window() return sum(self.values) + self.cache_value def empty(self): self._trim_window() if not self.values and not self.cache_start: return True def 
on_append(self, value, time): pass class CounterValue(DictMixin): """ A dict like value item for CounterManager. """ def __init__(self, manager, keys): self.manager = manager self._keys = keys def __getitem__(self, key): if key == '__value__': key = self._keys return self.manager.counters[key] else: key = self._keys + (key, ) available_keys = [] for _key in self.manager.counters: if _key[:len(key)] == key: available_keys.append(_key) if len(available_keys) == 0: raise KeyError elif len(available_keys) == 1: if available_keys[0] == key: return self.manager.counters[key] else: return CounterValue(self.manager, key) else: return CounterValue(self.manager, key) def __len__(self): return len(self.keys()) def __iter__(self): return iter(self.keys()) def __contains__(self, key): return key in self.keys() def keys(self): result = set() for key in self.manager.counters: if key[:len(self._keys)] == self._keys: key = key[len(self._keys):] result.add(key[0] if key else '__value__') return result def to_dict(self, get_value=None): """Dump counters as a dict""" result = {} for key, value in iteritems(self): if isinstance(value, BaseCounter): if get_value is not None: value = getattr(value, get_value) result[key] = value else: result[key] = value.to_dict(get_value) return result class CounterManager(DictMixin): """ A dict like counter manager. When using a tuple as event key, say: ('foo', 'bar'), You can visite counter with manager['foo']['bar']. Or get all counters which first element is 'foo' by manager['foo']. It's useful for a group of counters. 
""" def __init__(self, cls=TimebaseAverageWindowCounter): """init manager with Counter cls""" self.cls = cls self.counters = {} def event(self, key, value=1): """Fire a event of a counter by counter key""" if isinstance(key, six.string_types): key = (key, ) assert isinstance(key, tuple), "event key type error" if key not in self.counters: self.counters[key] = self.cls() self.counters[key].event(value) return self def value(self, key, value=1): """Set value of a counter by counter key""" if isinstance(key, six.string_types): key = (key, ) assert isinstance(key, tuple), "event key type error" if key not in self.counters: self.counters[key] = self.cls() self.counters[key].value(value) return self def trim(self): """Clear not used counters""" for key, value in list(iteritems(self.counters)): if value.empty(): del self.counters[key] def __getitem__(self, key): key = (key, ) available_keys = [] for _key in self.counters: if _key[:len(key)] == key: available_keys.append(_key) if len(available_keys) == 0: raise KeyError elif len(available_keys) == 1: if available_keys[0] == key: return self.counters[key] else: return CounterValue(self, key) else: return CounterValue(self, key) def __iter__(self): return iter(self.keys()) def __len__(self): return len(self.keys()) def keys(self): result = set() for key in self.counters: result.add(key[0] if key else ()) return result def to_dict(self, get_value=None): """Dump counters as a dict""" self.trim() result = {} for key, value in iteritems(self): if isinstance(value, BaseCounter): if get_value is not None: value = getattr(value, get_value) result[key] = value else: result[key] = value.to_dict(get_value) return result def dump(self, filename): """Dump counters to file""" try: with open(filename, 'wb') as fp: cPickle.dump(self.counters, fp) except: logging.error("can't dump counter to file: %s" % filename) return False return True def load(self, filename): """Load counters to file""" try: with open(filename) as fp: self.counters = 
cPickle.load(fp) except: logging.debug("can't load counter from file: %s" % filename) return False return True
unknown
codeparrot/codeparrot-clean
import sys from unittest import TestCase from django.core.exceptions import ImproperlyConfigured from embed_video.utils import import_by_path class ModuleImportTestCase(TestCase): """ Taken from Django: https://github.com/django/django/blob/master/tests/utils_tests/test_module_loading.py """ def test_incorrect_path(self): self.assertRaises(ImproperlyConfigured, import_by_path, 'wrongpath') def test_incorrect_classname(self): self.assertRaises(ImproperlyConfigured, import_by_path, 'embed_video.foo') def test_import_by_path(self): cls = import_by_path( 'embed_video.utils.import_by_path') self.assertEqual(cls, import_by_path) # Test exceptions raised for path in ('no_dots_in_path', 'unexistent.path', 'utils_tests.unexistent'): self.assertRaises(ImproperlyConfigured, import_by_path, path) with self.assertRaises(ImproperlyConfigured) as cm: import_by_path('unexistent.module.path', error_prefix="Foo") self.assertTrue(str(cm.exception).startswith('Foo'))
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python #--------------------------------------------------------------------------- ## Copyright (C) 2010- Alexey Petrov ## Copyright (C) 2009-2010 Pebble Bed Modular Reactor (Pty) Limited (PBMR) ## ## This program is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program. If not, see <http://www.gnu.org/licenses/>. ## ## See http://sourceforge.net/projects/pythonflu ## ## Author : Ivor CLIFFORD ## #--------------------------------------------------------------------------- """ Example icoFoam PISO algorithm """ from salome_version import getVersion as SalomeVersion if SalomeVersion() > '5.1.4': import os print "Not supported Salome version. 
Use Salome 5.1.4 or 5.1.3" os._exit( os.EX_OK ) pass from Foam import ref, man from Foam import fvm, fvc from Tkinter import * import Pmw class pyIcoFoam: def __init__(self, runTime, U, p, phi, transportProperties, pRefCell=0, pRefValue=0.0): self.runTime = runTime self.U = U self.p = p self.phi = phi self.transportProperties = transportProperties self.pRefCell=pRefCell self.pRefValue=pRefValue self.pressureRes = 0.0 self.velocityRes = 0.0 def step(self, nCorr=1, nNonOrthCorr=1): U_ = self.U p_ = self.p phi_ = self.phi runTime_ = self.runTime mesh_ = U_.mesh() runTime_.increment() # Read transport properties nu = ref.dimensionedScalar(self.transportProperties.lookup(ref.word("nu"))) tmp_UEqn = ( ref.fvm.ddt( U_ ) + ref.fvm.div( phi_, U_ ) - ref.fvm.laplacian( nu, U_ ) ) UEqn = tmp_UEqn() self.velocityRes = ref.solve( UEqn == -ref.fvc.grad( p_ ) ).initialResidual() # --- PISO loop for corr in range(nCorr): tmp_rUA = 1.0 / UEqn.A() rUA = tmp_rUA() U_ << rUA * UEqn.H() phi_ << ( ref.fvc.interpolate(U_) & mesh_.Sf() ) for nonOrth in range(nNonOrthCorr): tmp_pEqn = ( ref.fvm.laplacian( rUA, p_ ) == ref.fvc.div( phi_ ) ) pEqn = tmp_pEqn() pEqn.setReference( self.pRefCell, self.pRefValue ) pressureRes = pEqn.solve().initialResidual() if nonOrth == 0: self.pressureRes = pressureRes if nonOrth == nNonOrthCorr: phi_ -= pEqn.flux() # Continuity errors tmp_contErr = ref.fvc.div( phi_ ); contErr = tmp_contErr() sumLocalContErr = ( runTime_.deltaT().value() * contErr.mag().weightedAverage( mesh_.V() ).value() ) globalContErr = ( runTime_.deltaT().value() * contErr.weightedAverage( mesh_.V() ).value() ) print "time step continuity errors : sum local = " + str(sumLocalContErr) + ", global = " + str(globalContErr) # Correct velocity U_-= rUA * ref.fvc.grad( p_ ) U_.correctBoundaryConditions() # Create root and case import os root = ref.fileName( os.path.join( os.environ[ "HYBRIDFLU_ROOT_DIR" ], 'hybridFlu', 'examples' ) ) case = ref.fileName( "case_icoFoam_piso" ) # Create time 
runTime = man.Time(ref.word("controlDict"), root, case) runTime.controlDict().remove(ref.word("startTime")) runTime.controlDict().remove(ref.word("endTime")) runTime.controlDict().remove(ref.word("deltaT")) runTime.controlDict().add(ref.word("startTime"), 0) runTime.controlDict().add(ref.word("endTime"), 0.5) runTime.controlDict().add(ref.word("deltaT"), 0.005) runTime.read() # Create mesh mesh = man.fvMesh( man.IOobject( ref.word("region0"), ref.fileName(runTime.timeName()), runTime, ref.IOobject.MUST_READ, ref.IOobject.NO_WRITE)) # Create transport properties transportProperties = ref.IOdictionary(ref.IOobject( ref.word("transportProperties"), ref.fileName(runTime.constant()), mesh, ref.IOobject.MUST_READ, ref.IOobject.AUTO_WRITE)) nu = ref.dimensionedScalar(transportProperties.lookup(ref.word("nu"))) nu.setValue(0.05) # Create pressure field: read p = man.volScalarField( man.IOobject( ref.word("p"), ref.fileName(runTime.timeName()), mesh, ref.IOobject.MUST_READ, ref.IOobject.AUTO_WRITE ), mesh ) # Create velocity field: read U = man.volVectorField( man.IOobject( ref.word("U"), ref.fileName(runTime.timeName()), mesh, ref.IOobject.MUST_READ, ref.IOobject.AUTO_WRITE ), mesh) phi = ref.createPhi( runTime, mesh, U ) print "Time: " + str(runTime.timeName()) solver = pyIcoFoam(runTime, U, p, phi, transportProperties, 0, 0.0) pRes = [] #initial pressure residual uRes = [] #initial velocity residual it = [] iteration = [] iteration.append(0) # Graphics related stuff master = Tk() g = Pmw.Blt.Graph(master) g.pack(expand=1,fill='both') # Graph related commands: g.line_create("p-Residual", xdata=iteration[0], ydata=None) g.element_configure("p-Residual", color = "red", dashes = 1, symbol = "", linewidth = 1) g.line_create("u-Residual", xdata=iteration[0], ydata=None) g.element_configure("u-Residual", color = "blue", dashes = 1, symbol = "", linewidth = 1) g.axis_configure("y",logscale = 1) # Main iterate function def iterate(niter): for i in xrange(niter): i += iteration[0] 
it.append(i) solver.step(2,1) runTime.value() pRes.append(solver.pressureRes) uRes.append(solver.velocityRes) pResTpl = tuple(pRes) uResTpl = tuple(uRes) # Update residual plot g.axis_configure("y",logscale = 1) g.element_configure("p-Residual", xdata=tuple(it), ydata = pResTpl, color = "red", dashes = 0, symbol = "", linewidth = 1) g.element_configure("u-Residual", xdata=tuple(it), ydata = uResTpl, color = "blue", dashes = 0, symbol = "", linewidth = 1) master.update_idletasks() iteration[0] += niter iterate(10) runTime.writeNow() #--------------------------------------------------------------------------------------
unknown
codeparrot/codeparrot-clean
""" Tests the usecols functionality during parsing for all of the parsers defined in parsers.py """ from io import StringIO import pytest from pandas import ( DataFrame, Index, Timestamp, ) import pandas._testing as tm pytestmark = pytest.mark.filterwarnings( "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") _msg_pyarrow_requires_names = ( "The pyarrow engine does not allow 'usecols' to be integer column " "positions. Pass a list of string column names instead." ) @skip_pyarrow # pyarrow.lib.ArrowKeyError: Column 'fdate' in include_columns def test_usecols_with_parse_dates2(all_parsers): # see gh-13604 parser = all_parsers data = """2008-02-07 09:40,1032.43 2008-02-07 09:50,1042.54 2008-02-07 10:00,1051.65""" names = ["date", "values"] usecols = names[:] parse_dates = [0] index = Index( [ Timestamp("2008-02-07 09:40"), Timestamp("2008-02-07 09:50"), Timestamp("2008-02-07 10:00"), ], name="date", ) cols = {"values": [1032.43, 1042.54, 1051.65]} expected = DataFrame(cols, index=index) result = parser.read_csv( StringIO(data), parse_dates=parse_dates, index_col=0, usecols=usecols, header=None, names=names, ) tm.assert_frame_equal(result, expected) def test_usecols_with_parse_dates3(all_parsers): # see gh-14792 parser = all_parsers data = """a,b,c,d,e,f,g,h,i,j 2016/09/21,1,1,2,3,4,5,6,7,8""" usecols = list("abcdefghij") parse_dates = [0] cols = { "a": Timestamp("2016-09-21"), "b": [1], "c": [1], "d": [2], "e": [3], "f": [4], "g": [5], "h": [6], "i": [7], "j": [8], } expected = DataFrame(cols, columns=usecols) result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates) tm.assert_frame_equal(result, expected)
python
github
https://github.com/pandas-dev/pandas
pandas/tests/io/parser/usecols/test_parse_dates.py
import os
import platform

from twisted.internet import defer

from .. import data, helper
from p2pool.util import pack

# p2pool network definition for Litecoin mainnet.
# NOTE(review): this is Python 2 only code -- str.decode('hex') does not
# exist on Python 3 (bytes.fromhex would be the py3 equivalent).

# Magic bytes prefixing messages on the P2P wire protocol.
P2P_PREFIX = 'fbc0b6db'.decode('hex')
P2P_PORT = 9333
# Base58 address version byte.
ADDRESS_VERSION = 48
RPC_PORT = 9332
# Sanity-check an RPC connection: the daemon's help text must mention
# 'litecoinaddress' (i.e. it is actually litecoind) and it must not be
# running on testnet.  The lambda contains yields, making it a generator
# function suitable for defer.inlineCallbacks (a Python 2 quirk).
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
    'litecoinaddress' in (yield bitcoind.rpc_help()) and
    not (yield bitcoind.rpc_getinfo())['testnet']
))
# Block subsidy in base units (1e8 per coin): starts at 50 coins and halves
# every 840000 blocks via the right shift.
SUBSIDY_FUNC = lambda height: 50*100000000 >> (height + 1)//840000
# Proof-of-work hash: scrypt via the ltc_scrypt extension, interpreted as a
# 256-bit little-endian integer.  The module is imported lazily at call
# time rather than at module import.
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 150 # s -- target time between blocks
SYMBOL = 'LTC'
# Default litecoin.conf location: %APPDATA%\Litecoin on Windows,
# ~/Library/Application Support/Litecoin on macOS, ~/.litecoin elsewhere.
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'Litecoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/Litecoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.litecoin'), 'litecoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/address/'
TX_EXPLORER_URL_PREFIX = 'http://explorer.litecoin.net/tx/'
# Acceptable range for share targets (presumably used to reject insane
# difficulty values -- TODO confirm against the p2pool caller).
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
# Scale factor relating scrypt difficulty to displayed difficulty --
# NOTE(review): semantics inferred from the name; verify against callers.
DUMB_SCRYPT_DIFF = 2**16
# Minimum output value, in base units (0.03 coins).
DUST_THRESHOLD = 0.03e8
unknown
codeparrot/codeparrot-clean
# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu """ from __future__ import division, absolute_import, print_function __author__ = "Pierre GF Gerard-Marchant" import warnings import pickle import operator import itertools from functools import reduce import numpy as np import numpy.ma.core import numpy.core.fromnumeric as fromnumeric import numpy.core.umath as umath from numpy.testing import TestCase, run_module_suite, assert_raises from numpy import ndarray from numpy.compat import asbytes, asbytes_nested from numpy.ma.testutils import ( assert_, assert_array_equal, assert_equal, assert_almost_equal, assert_equal_records, fail_if_equal, assert_not_equal, assert_mask_equal, ) from numpy.ma.core import ( MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, allclose, allequal, alltrue, angle, anom, arange, arccos, arctan2, arcsin, arctan, argsort, array, asarray, choose, concatenate, conjugate, cos, cosh, count, default_fill_value, diag, divide, empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid, flatten_structured_array, fromflex, getmask, getmaskarray, greater, greater_equal, identity, inner, isMaskedArray, less, less_equal, log, log10, make_mask, make_mask_descr, mask_or, masked, masked_array, masked_equal, masked_greater, masked_greater_equal, masked_inside, masked_less, masked_less_equal, masked_not_equal, masked_outside, masked_print_option, masked_values, masked_where, max, maximum, maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, mvoid, nomask, not_equal, ones, outer, power, product, put, putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, ) pi = np.pi class TestMaskedArray(TestCase): # Base test class for MaskedArrays. def setUp(self): # Base data definition. 
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) def test_basicattributes(self): # Tests some basic array attributes. a = array([1, 3, 2]) b = array([1, 3, 2], mask=[1, 0, 1]) assert_equal(a.ndim, 1) assert_equal(b.ndim, 1) assert_equal(a.size, 3) assert_equal(b.size, 3) assert_equal(a.shape, (3,)) assert_equal(b.shape, (3,)) def test_basic0d(self): # Checks masking a scalar x = masked_array(0) assert_equal(str(x), '0') x = masked_array(0, mask=True) assert_equal(str(x), str(masked_print_option)) x = masked_array(0, mask=False) assert_equal(str(x), '0') x = array(0, mask=1) self.assertTrue(x.filled().dtype is x._data.dtype) def test_basic1d(self): # Test of basic array creation and properties in 1 dimension. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d self.assertTrue(not isMaskedArray(x)) self.assertTrue(isMaskedArray(xm)) self.assertTrue((xm - ym).filled(0).any()) fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) s = x.shape assert_equal(np.shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) assert_equal(zm.dtype, z.dtype) assert_equal(xm.size, reduce(lambda x, y:x * y, s)) assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) assert_array_equal(xm, xf) assert_array_equal(filled(xm, 1.e20), xf) assert_array_equal(x, xm) def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. 
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d for s in [(4, 3), (6, 2)]: x.shape = s y.shape = s xm.shape = s ym.shape = s xf.shape = s self.assertTrue(not isMaskedArray(x)) self.assertTrue(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.size, reduce(lambda x, y:x * y, s)) assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) assert_equal(xm, xf) assert_equal(filled(xm, 1.e20), xf) assert_equal(x, xm) def test_concatenate_basic(self): # Tests concatenations. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # basic concatenation assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) assert_equal(np.concatenate((x, y)), concatenate((x, y))) assert_equal(np.concatenate((x, y)), concatenate((xm, y))) assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) def test_concatenate_alongaxis(self): # Tests concatenations. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # Concatenation along an axis s = (3, 4) x.shape = y.shape = xm.shape = ym.shape = s assert_equal(xm.mask, np.reshape(m1, s)) assert_equal(ym.mask, np.reshape(m2, s)) xmym = concatenate((xm, ym), 1) assert_equal(np.concatenate((x, y), 1), xmym) assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) x = zeros(2) y = array(ones(2), mask=[False, True]) z = concatenate((x, y)) assert_array_equal(z, [0, 0, 1, 1]) assert_array_equal(z.mask, [False, False, False, True]) z = concatenate((y, x)) assert_array_equal(z, [1, 1, 0, 0]) assert_array_equal(z.mask, [False, True, False, False]) def test_concatenate_flexible(self): # Tests the concatenation on flexible arrays. 
data = masked_array(list(zip(np.random.rand(10), np.arange(10))), dtype=[('a', float), ('b', int)]) test = concatenate([data[:5], data[5:]]) assert_equal_records(test, data) def test_creation_ndmin(self): # Check the use of ndmin x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) assert_equal(x.shape, (1, 3)) assert_equal(x._data, [[1, 2, 3]]) assert_equal(x._mask, [[1, 0, 0]]) def test_creation_ndmin_from_maskedarray(self): # Make sure we're not losing the original mask w/ ndmin x = array([1, 2, 3]) x[-1] = masked xx = array(x, ndmin=2, dtype=float) assert_equal(x.shape, x._mask.shape) assert_equal(xx.shape, xx._mask.shape) def test_creation_maskcreation(self): # Tests how masks are initialized at the creation of Maskedarrays. data = arange(24, dtype=float) data[[3, 6, 15]] = masked dma_1 = MaskedArray(data) assert_equal(dma_1.mask, data.mask) dma_2 = MaskedArray(dma_1) assert_equal(dma_2.mask, dma_1.mask) dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) fail_if_equal(dma_3.mask, dma_1.mask) def test_creation_with_list_of_maskedarrays(self): # Tests creaating a masked array from alist of masked arrays. x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) data = array((x, x[::-1])) assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) x.mask = nomask data = array((x, x[::-1])) assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) self.assertTrue(data.mask is nomask) def test_asarray(self): (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d xm.fill_value = -9999 xm._hardmask = True xmm = asarray(xm) assert_equal(xmm._data, xm._data) assert_equal(xmm._mask, xm._mask) assert_equal(xmm.fill_value, xm.fill_value) assert_equal(xmm._hardmask, xm._hardmask) def test_fix_invalid(self): # Checks fix_invalid. 
with np.errstate(invalid='ignore'): data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) data_fixed = fix_invalid(data) assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) assert_equal(data_fixed._mask, [1., 0., 1.]) def test_maskedelement(self): # Test of masked element x = arange(6) x[1] = masked self.assertTrue(str(masked) == '--') self.assertTrue(x[1] is masked) assert_equal(filled(x[1], 0), 0) # don't know why these should raise an exception... #self.assertRaises(Exception, lambda x,y: x+y, masked, masked) #self.assertRaises(Exception, lambda x,y: x+y, masked, 2) #self.assertRaises(Exception, lambda x,y: x+y, masked, xx) #self.assertRaises(Exception, lambda x,y: x+y, xx, masked) def test_set_element_as_object(self): # Tests setting elements with object a = empty(1, dtype=object) x = (1, 2, 3, 4, 5) a[0] = x assert_equal(a[0], x) self.assertTrue(a[0] is x) import datetime dt = datetime.datetime.now() a[0] = dt self.assertTrue(a[0] is dt) def test_indexing(self): # Tests conversions and indexing x1 = np.array([1, 2, 4, 3]) x2 = array(x1, mask=[1, 0, 0, 0]) x3 = array(x1, mask=[0, 1, 0, 1]) x4 = array(x1) # test conversion to strings str(x2) # raises? repr(x2) # raises? 
assert_equal(np.sort(x1), sort(x2, endwith=False)) # tests of indexing assert_(type(x2[1]) is type(x1[1])) assert_(x1[1] == x2[1]) assert_(x2[0] is masked) assert_equal(x1[2], x2[2]) assert_equal(x1[2:5], x2[2:5]) assert_equal(x1[:], x2[:]) assert_equal(x1[1:], x3[1:]) x1[2] = 9 x2[2] = 9 assert_equal(x1, x2) x1[1:3] = 99 x2[1:3] = 99 assert_equal(x1, x2) x2[1] = masked assert_equal(x1, x2) x2[1:3] = masked assert_equal(x1, x2) x2[:] = x1 x2[1] = masked assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) assert_(allequal(x4, array([1, 2, 3, 4]))) x1 = np.arange(5) * 1.0 x2 = masked_values(x1, 3.0) assert_equal(x1, x2) assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) assert_equal(3.0, x2.fill_value) x1 = array([1, 'hello', 2, 3], object) x2 = np.array([1, 'hello', 2, 3], object) s1 = x1[1] s2 = x2[1] assert_equal(type(s2), str) assert_equal(type(s1), str) assert_equal(s1, s2) assert_(x1[1:1].shape == (0,)) def test_matrix_indexing(self): # Tests conversions and indexing x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]]) x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]]) x4 = array(x1) # test conversion to strings str(x2) # raises? repr(x2) # raises? 
# tests of indexing assert_(type(x2[1, 0]) is type(x1[1, 0])) assert_(x1[1, 0] == x2[1, 0]) assert_(x2[1, 1] is masked) assert_equal(x1[0, 2], x2[0, 2]) assert_equal(x1[0, 1:], x2[0, 1:]) assert_equal(x1[:, 2], x2[:, 2]) assert_equal(x1[:], x2[:]) assert_equal(x1[1:], x3[1:]) x1[0, 2] = 9 x2[0, 2] = 9 assert_equal(x1, x2) x1[0, 1:] = 99 x2[0, 1:] = 99 assert_equal(x1, x2) x2[0, 1] = masked assert_equal(x1, x2) x2[0, 1:] = masked assert_equal(x1, x2) x2[0, :] = x1[0, :] x2[0, 1] = masked assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) assert_(allequal(getmask(x3)[1], array([1, 1, 0]))) assert_(allequal(getmask(x3[1]), array([1, 1, 0]))) x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) assert_(allequal(getmask(x4[1]), array([1, 1, 0]))) assert_(allequal(x4[1], array([1, 2, 3]))) x1 = np.matrix(np.arange(5) * 1.0) x2 = masked_values(x1, 3.0) assert_equal(x1, x2) assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) assert_equal(3.0, x2.fill_value) def test_copy(self): # Tests of some subtle points of copying and sizing. 
n = [0, 0, 1, 0, 0] m = make_mask(n) m2 = make_mask(m) self.assertTrue(m is m2) m3 = make_mask(m, copy=1) self.assertTrue(m is not m3) x1 = np.arange(5) y1 = array(x1, mask=m) #self.assertTrue( y1._data is x1) assert_equal(y1._data.__array_interface__, x1.__array_interface__) self.assertTrue(allequal(x1, y1.data)) #self.assertTrue( y1.mask is m) assert_equal(y1._mask.__array_interface__, m.__array_interface__) y1a = array(y1) self.assertTrue(y1a._data.__array_interface__ == y1._data.__array_interface__) self.assertTrue(y1a.mask is y1.mask) y2 = array(x1, mask=m) self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__) #self.assertTrue( y2.mask is m) self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__) self.assertTrue(y2[2] is masked) y2[2] = 9 self.assertTrue(y2[2] is not masked) #self.assertTrue( y2.mask is not m) self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__) self.assertTrue(allequal(y2.mask, 0)) y3 = array(x1 * 1.0, mask=m) self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) x4 = arange(4) x4[2] = masked y4 = resize(x4, (8,)) assert_equal(concatenate([x4, x4]), y4) assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) y5 = repeat(x4, (2, 2, 2, 2), axis=0) assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) y6 = repeat(x4, 2, axis=0) assert_equal(y5, y6) y7 = x4.repeat((2, 2, 2, 2), axis=0) assert_equal(y5, y7) y8 = x4.repeat(2, 0) assert_equal(y5, y8) y9 = x4.copy() assert_equal(y9._data, x4._data) assert_equal(y9._mask, x4._mask) x = masked_array([1, 2, 3], mask=[0, 1, 0]) # Copy is False by default y = masked_array(x) assert_equal(y._data.ctypes.data, x._data.ctypes.data) assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) y = masked_array(x, copy=True) assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) def test_deepcopy(self): from copy import deepcopy a = array([0, 1, 2], mask=[False, True, False]) copied = deepcopy(a) 
assert_equal(copied.mask, a.mask) assert_not_equal(id(a._mask), id(copied._mask)) copied[1] = 1 assert_equal(copied.mask, [0, 0, 0]) assert_equal(a.mask, [0, 1, 0]) copied = deepcopy(a) assert_equal(copied.mask, a.mask) copied.mask[1] = False assert_equal(copied.mask, [0, 0, 0]) assert_equal(a.mask, [0, 1, 0]) def test_str_repr(self): a = array([0, 1, 2], mask=[False, True, False]) assert_equal(str(a), '[0 -- 2]') assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' ' mask = [False True False],\n' ' fill_value = 999999)\n') def test_pickling(self): # Tests pickling a = arange(10) a[::3] = masked a.fill_value = 999 a_pickled = pickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled._data, a._data) assert_equal(a_pickled.fill_value, 999) def test_pickling_subbaseclass(self): # Test pickling w/ a subclass of ndarray a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) a_pickled = pickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) self.assertTrue(isinstance(a_pickled._data, np.matrix)) def test_pickling_maskedconstant(self): # Test pickling MaskedConstant mc = np.ma.masked mc_pickled = pickle.loads(mc.dumps()) assert_equal(mc_pickled._baseclass, mc._baseclass) assert_equal(mc_pickled._mask, mc._mask) assert_equal(mc_pickled._data, mc._data) def test_pickling_wstructured(self): # Tests pickling w/ structured array a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], dtype=[('a', int), ('b', float)]) a_pickled = pickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) def test_pickling_keepalignment(self): # Tests pickling w/ F_CONTIGUOUS arrays a = arange(10) a.shape = (-1, 2) b = a.T test = pickle.loads(pickle.dumps(b)) assert_equal(test, b) def test_single_element_subscript(self): # Tests single element subscripts of Maskedarrays. 
a = array([1, 3, 2]) b = array([1, 3, 2], mask=[1, 0, 1]) assert_equal(a[0].shape, ()) assert_equal(b[0].shape, ()) assert_equal(b[1].shape, ()) def test_topython(self): # Tests some communication issues with Python. assert_equal(1, int(array(1))) assert_equal(1.0, float(array(1))) assert_equal(1, int(array([[[1]]]))) assert_equal(1.0, float(array([[1]]))) self.assertRaises(TypeError, float, array([1, 1])) with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) assert_(np.isnan(float(array([1], mask=[1])))) a = array([1, 2, 3], mask=[1, 0, 0]) self.assertRaises(TypeError, lambda:float(a)) assert_equal(float(a[-1]), 3.) self.assertTrue(np.isnan(float(a[0]))) self.assertRaises(TypeError, int, a) assert_equal(int(a[-1]), 3) self.assertRaises(MAError, lambda:int(a[0])) def test_oddfeatures_1(self): # Test of other odd features x = arange(20) x = x.reshape(4, 5) x.flat[5] = 12 assert_(x[1, 0] == 12) z = x + 10j * x assert_equal(z.real, x) assert_equal(z.imag, 10 * x) assert_equal((z * conjugate(z)).real, 101 * x * x) z.imag[...] = 0.0 x = arange(10) x[3] = masked assert_(str(x[3]) == str(masked)) c = x >= 8 assert_(count(where(c, masked, masked)) == 0) assert_(shape(where(c, masked, masked)) == c.shape) z = masked_where(c, x) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is not masked) assert_(z[7] is not masked) assert_(z[8] is masked) assert_(z[9] is masked) assert_equal(x, z) def test_oddfeatures_2(self): # Tests some more features. 
x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) c[0] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) assert_(z[0] is masked) assert_(z[1] is not masked) assert_(z[2] is masked) def test_oddfeatures_3(self): # Tests some generic features atest = array([10], mask=True) btest = array([20]) idx = atest.mask atest[idx] = btest[idx] assert_equal(atest, [20]) def test_filled_w_object_dtype(self): a = np.ma.masked_all(1, dtype='O') assert_equal(a.filled('x')[0], 'x') def test_filled_w_flexible_dtype(self): # Test filled w/ flexible dtype flexi = array([(1, 1, 1)], dtype=[('i', int), ('s', '|S8'), ('f', float)]) flexi[0] = masked assert_equal(flexi.filled(), np.array([(default_fill_value(0), default_fill_value('0'), default_fill_value(0.),)], dtype=flexi.dtype)) flexi[0] = masked assert_equal(flexi.filled(1), np.array([(1, '1', 1.)], dtype=flexi.dtype)) def test_filled_w_mvoid(self): # Test filled w/ mvoid ndtype = [('a', int), ('b', float)] a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) # Filled using default test = a.filled() assert_equal(tuple(test), (1, default_fill_value(1.))) # Explicit fill_value test = a.filled((-1, -1)) assert_equal(tuple(test), (1, -1)) # Using predefined filling values a.fill_value = (-999, -999) assert_equal(tuple(a.filled()), (1, -999)) def test_filled_w_nested_dtype(self): # Test filled w/ nested dtype ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] a = array([(1, (1, 1)), (2, (2, 2))], mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) test = a.filled(0) control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) assert_equal(test, control) test = a['B'].filled(0) control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) assert_equal(test, control) def test_filled_w_f_order(self): # Test filled w/ F-contiguous array a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), order='F') # 
this is currently ignored self.assertTrue(a.flags['F_CONTIGUOUS']) self.assertTrue(a.filled(0).flags['F_CONTIGUOUS']) def test_optinfo_propagation(self): # Checks that _optinfo dictionary isn't back-propagated x = array([1, 2, 3, ], dtype=float) x._optinfo['info'] = '???' y = x.copy() assert_equal(y._optinfo['info'], '???') y._optinfo['info'] = '!!!' assert_equal(x._optinfo['info'], '???') def test_fancy_printoptions(self): # Test printing a masked array w/ fancy dtype. fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) test = array([(1, (2, 3.0)), (4, (5, 6.0))], mask=[(1, (0, 1)), (0, (1, 0))], dtype=fancydtype) control = "[(--, (2, --)) (4, (--, 6.0))]" assert_equal(str(test), control) def test_flatten_structured_array(self): # Test flatten_structured_array on arrays # On ndarray ndtype = [('a', int), ('b', float)] a = np.array([(1, 1), (2, 2)], dtype=ndtype) test = flatten_structured_array(a) control = np.array([[1., 1.], [2., 2.]], dtype=np.float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) # On masked_array a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) test = flatten_structured_array(a) control = array([[1., 1.], [2., 2.]], mask=[[0, 1], [1, 0]], dtype=np.float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) assert_equal(test.mask, control.mask) # On masked array with nested structure ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] a = array([(1, (1, 1.1)), (2, (2, 2.2))], mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) test = flatten_structured_array(a) control = array([[1., 1., 1.1], [2., 2., 2.2]], mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) assert_equal(test.mask, control.mask) # Keeping the initial shape ndtype = [('a', int), ('b', float)] a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) test = flatten_structured_array(a) control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float) 
assert_equal(test, control) assert_equal(test.dtype, control.dtype) def test_void0d(self): # Test creating a mvoid object ndtype = [('a', int), ('b', int)] a = np.array([(1, 2,)], dtype=ndtype)[0] f = mvoid(a) assert_(isinstance(f, mvoid)) a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] assert_(isinstance(a, mvoid)) a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) f = mvoid(a._data[0], a._mask[0]) assert_(isinstance(f, mvoid)) def test_mvoid_getitem(self): # Test mvoid.__getitem__ ndtype = [('a', int), ('b', int)] a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype) # w/o mask f = a[0] self.assertTrue(isinstance(f, mvoid)) assert_equal((f[0], f['a']), (1, 1)) assert_equal(f['b'], 2) # w/ mask f = a[1] self.assertTrue(isinstance(f, mvoid)) self.assertTrue(f[0] is masked) self.assertTrue(f['a'] is masked) assert_equal(f[1], 4) def test_mvoid_iter(self): # Test iteration on __getitem__ ndtype = [('a', int), ('b', int)] a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype) # w/o mask assert_equal(list(a[0]), [1, 2]) # w/ mask assert_equal(list(a[1]), [masked, 4]) def test_mvoid_print(self): # Test printing a mvoid mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) assert_equal(str(mx[0]), "(1, 1)") mx['b'][0] = masked ini_display = masked_print_option._display masked_print_option.set_display("-X-") try: assert_equal(str(mx[0]), "(1, -X-)") assert_equal(repr(mx[0]), "(1, -X-)") finally: masked_print_option.set_display(ini_display) def test_mvoid_multidim_print(self): # regression test for gh-6019 t_ma = masked_array(data = [([1, 2, 3],)], mask = [([False, True, False],)], fill_value = ([999999, 999999, 999999],), dtype = [('a', '<i4', (3,))]) assert_(str(t_ma[0]) == "([1, --, 3],)") assert_(repr(t_ma[0]) == "([1, --, 3],)") # additonal tests with structured arrays t_2d = masked_array(data = [([[1, 2], [3,4]],)], mask = [([[False, True], [True, False]],)], dtype = [('a', '<i4', (2,2))]) 
assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)") assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)") t_0d = masked_array(data = [(1,2)], mask = [(True,False)], dtype = [('a', '<i4'), ('b', '<i4')]) assert_(str(t_0d[0]) == "(--, 2)") assert_(repr(t_0d[0]) == "(--, 2)") t_2d = masked_array(data = [([[1, 2], [3,4]], 1)], mask = [([[False, True], [True, False]], False)], dtype = [('a', '<i4', (2,2)), ('b', float)]) assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)") assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)") t_ne = masked_array(data=[(1, (1, 1))], mask=[(True, (True, False))], dtype = [('a', '<i4'), ('b', 'i4,i4')]) assert_(str(t_ne[0]) == "(--, (--, 1))") assert_(repr(t_ne[0]) == "(--, (--, 1))") def test_object_with_array(self): mx1 = masked_array([1.], mask=[True]) mx2 = masked_array([1., 2.]) mx = masked_array([mx1, mx2], mask=[False, True]) assert mx[0] is mx1 assert mx[1] is not mx2 assert np.all(mx[1].data == mx2.data) assert np.all(mx[1].mask) # check that we return a view. mx[1].data[0] = 0. assert mx2[0] == 0. class TestMaskedArrayArithmetic(TestCase): # Base test class for MaskedArrays. def setUp(self): # Base data definition. x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) self.err_status = np.geterr() np.seterr(divide='ignore', invalid='ignore') def tearDown(self): np.seterr(**self.err_status) def test_basic_arithmetic(self): # Test of basic arithmetic. 
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_equal(a2d * a2d, a2d * a2dm) assert_equal(a2d + a2d, a2d + a2dm) assert_equal(a2d - a2d, a2d - a2dm) for s in [(12,), (4, 3), (2, 6)]: x = x.reshape(s) y = y.reshape(s) xm = xm.reshape(s) ym = ym.reshape(s) xf = xf.reshape(s) assert_equal(-x, -xm) assert_equal(x + y, xm + ym) assert_equal(x - y, xm - ym) assert_equal(x * y, xm * ym) assert_equal(x / y, xm / ym) assert_equal(a10 + y, a10 + ym) assert_equal(a10 - y, a10 - ym) assert_equal(a10 * y, a10 * ym) assert_equal(a10 / y, a10 / ym) assert_equal(x + a10, xm + a10) assert_equal(x - a10, xm - a10) assert_equal(x * a10, xm * a10) assert_equal(x / a10, xm / a10) assert_equal(x ** 2, xm ** 2) assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5) assert_equal(x ** y, xm ** ym) assert_equal(np.add(x, y), add(xm, ym)) assert_equal(np.subtract(x, y), subtract(xm, ym)) assert_equal(np.multiply(x, y), multiply(xm, ym)) assert_equal(np.divide(x, y), divide(xm, ym)) def test_divide_on_different_shapes(self): x = arange(6, dtype=float) x.shape = (2, 3) y = arange(3, dtype=float) z = x / y assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) z = x / y[None,:] assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) y = arange(2, dtype=float) z = x / y[:, None] assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]]) assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]]) def test_mixed_arithmetic(self): # Tests mixed arithmetics. na = np.array([1]) ma = array([1]) self.assertTrue(isinstance(na + ma, MaskedArray)) self.assertTrue(isinstance(ma + na, MaskedArray)) def test_limits_arithmetic(self): tiny = np.finfo(float).tiny a = array([tiny, 1. / tiny, 0.]) assert_equal(getmaskarray(a / 2), [0, 0, 0]) assert_equal(getmaskarray(2 / a), [1, 0, 1]) def test_masked_singleton_arithmetic(self): # Tests some scalar arithmetics on MaskedArrays. 
# Masked singleton should remain masked no matter what xm = array(0, mask=1) self.assertTrue((1 / array(0)).mask) self.assertTrue((1 + xm).mask) self.assertTrue((-xm).mask) self.assertTrue(maximum(xm, xm).mask) self.assertTrue(minimum(xm, xm).mask) def test_masked_singleton_equality(self): # Tests (in)equality on masked snigleton a = array([1, 2, 3], mask=[1, 1, 0]) assert_((a[0] == 0) is masked) assert_((a[0] != 0) is masked) assert_equal((a[-1] == 0), False) assert_equal((a[-1] != 0), True) def test_arithmetic_with_masked_singleton(self): # Checks that there's no collapsing to masked x = masked_array([1, 2]) y = x * masked assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) y = x[0] * masked assert_(y is masked) y = x + masked assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) def test_arithmetic_with_masked_singleton_on_1d_singleton(self): # Check that we're not losing the shape of a singleton x = masked_array([1, ]) y = x + masked assert_equal(y.shape, x.shape) assert_equal(y.mask, [True, ]) def test_scalar_arithmetic(self): x = array(0, mask=0) assert_equal(x.filled().ctypes.data, x.ctypes.data) # Make sure we don't lose the shape in some circumstances xm = array((0, 0)) / 0. assert_equal(xm.shape, (2,)) assert_equal(xm.mask, [1, 1]) def test_basic_ufuncs(self): # Test various functions such as sin, cos. 
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d assert_equal(np.cos(x), cos(xm)) assert_equal(np.cosh(x), cosh(xm)) assert_equal(np.sin(x), sin(xm)) assert_equal(np.sinh(x), sinh(xm)) assert_equal(np.tan(x), tan(xm)) assert_equal(np.tanh(x), tanh(xm)) assert_equal(np.sqrt(abs(x)), sqrt(xm)) assert_equal(np.log(abs(x)), log(xm)) assert_equal(np.log10(abs(x)), log10(xm)) assert_equal(np.exp(x), exp(xm)) assert_equal(np.arcsin(z), arcsin(zm)) assert_equal(np.arccos(z), arccos(zm)) assert_equal(np.arctan(z), arctan(zm)) assert_equal(np.arctan2(x, y), arctan2(xm, ym)) assert_equal(np.absolute(x), absolute(xm)) assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym)) assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True)) assert_equal(np.equal(x, y), equal(xm, ym)) assert_equal(np.not_equal(x, y), not_equal(xm, ym)) assert_equal(np.less(x, y), less(xm, ym)) assert_equal(np.greater(x, y), greater(xm, ym)) assert_equal(np.less_equal(x, y), less_equal(xm, ym)) assert_equal(np.greater_equal(x, y), greater_equal(xm, ym)) assert_equal(np.conjugate(x), conjugate(xm)) def test_count_func(self): # Tests count assert_equal(1, count(1)) assert_equal(0, array(1, mask=[1])) ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) res = count(ott) self.assertTrue(res.dtype.type is np.intp) assert_equal(3, res) ott = ott.reshape((2, 2)) res = count(ott) assert_(res.dtype.type is np.intp) assert_equal(3, res) res = count(ott, 0) assert_(isinstance(res, ndarray)) assert_equal([1, 2], res) assert_(getmask(res) is nomask) ott = array([0., 1., 2., 3.]) res = count(ott, 0) assert_(isinstance(res, ndarray)) assert_(res.dtype.type is np.intp) assert_raises(IndexError, ott.count, 1) def test_minmax_func(self): # Tests minimum and maximum. 
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # max doesn't work if shaped xr = np.ravel(x) xmr = ravel(xm) # following are true because of careful selection of data assert_equal(max(xr), maximum(xmr)) assert_equal(min(xr), minimum(xmr)) assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]) assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]) x = arange(5) y = arange(5) - 2 x[3] = masked y[0] = masked assert_equal(minimum(x, y), where(less(x, y), x, y)) assert_equal(maximum(x, y), where(greater(x, y), x, y)) assert_(minimum(x) == 0) assert_(maximum(x) == 4) x = arange(4).reshape(2, 2) x[-1, -1] = masked assert_equal(maximum(x), 2) def test_minimummaximum_func(self): a = np.ones((2, 2)) aminimum = minimum(a, a) self.assertTrue(isinstance(aminimum, MaskedArray)) assert_equal(aminimum, np.minimum(a, a)) aminimum = minimum.outer(a, a) self.assertTrue(isinstance(aminimum, MaskedArray)) assert_equal(aminimum, np.minimum.outer(a, a)) amaximum = maximum(a, a) self.assertTrue(isinstance(amaximum, MaskedArray)) assert_equal(amaximum, np.maximum(a, a)) amaximum = maximum.outer(a, a) self.assertTrue(isinstance(amaximum, MaskedArray)) assert_equal(amaximum, np.maximum.outer(a, a)) def test_minmax_reduce(self): # Test np.min/maximum.reduce on array w/ full False mask a = array([1, 2, 3], mask=[False, False, False]) b = np.maximum.reduce(a) assert_equal(b, 3) def test_minmax_funcs_with_output(self): # Tests the min/max functions with explicit outputs mask = np.random.rand(12).round() xm = array(np.random.uniform(0, 10, 12), mask=mask) xm.shape = (3, 4) for funcname in ('min', 'max'): # Initialize npfunc = getattr(np, funcname) mafunc = getattr(numpy.ma.core, funcname) # Use the np version nout = np.empty((4,), dtype=int) try: result = npfunc(xm, axis=0, out=nout) except MaskError: pass nout = np.empty((4,), dtype=float) result = npfunc(xm, axis=0, out=nout) self.assertTrue(result is nout) # Use the ma version nout.fill(-999) result = mafunc(xm, axis=0, out=nout) 
self.assertTrue(result is nout) def test_minmax_methods(self): # Additional tests on max/min (_, _, _, _, _, xm, _, _, _, _) = self.d xm.shape = (xm.size,) assert_equal(xm.max(), 10) self.assertTrue(xm[0].max() is masked) self.assertTrue(xm[0].max(0) is masked) self.assertTrue(xm[0].max(-1) is masked) assert_equal(xm.min(), -10.) self.assertTrue(xm[0].min() is masked) self.assertTrue(xm[0].min(0) is masked) self.assertTrue(xm[0].min(-1) is masked) assert_equal(xm.ptp(), 20.) self.assertTrue(xm[0].ptp() is masked) self.assertTrue(xm[0].ptp(0) is masked) self.assertTrue(xm[0].ptp(-1) is masked) x = array([1, 2, 3], mask=True) self.assertTrue(x.min() is masked) self.assertTrue(x.max() is masked) self.assertTrue(x.ptp() is masked) def test_addsumprod(self): # Tests add, sum, product. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d assert_equal(np.add.reduce(x), add.reduce(x)) assert_equal(np.add.accumulate(x), add.accumulate(x)) assert_equal(4, sum(array(4), axis=0)) assert_equal(4, sum(array(4), axis=0)) assert_equal(np.sum(x, axis=0), sum(x, axis=0)) assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)) assert_equal(np.sum(x, 0), sum(x, 0)) assert_equal(np.product(x, axis=0), product(x, axis=0)) assert_equal(np.product(x, 0), product(x, 0)) assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0)) s = (3, 4) x.shape = y.shape = xm.shape = ym.shape = s if len(s) > 1: assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) assert_equal(np.sum(x, 1), sum(x, 1)) assert_equal(np.product(x, 1), product(x, 1)) def test_binops_d2D(self): # Test binary operations on 2D data a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) b = array([[2., 3.], [4., 5.], [6., 7.]]) test = a * b control = array([[2., 3.], [2., 2.], [3., 3.]], mask=[[0, 0], [1, 1], [1, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) test = b * a control = 
array([[2., 3.], [4., 5.], [6., 7.]], mask=[[0, 0], [1, 1], [1, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) a = array([[1.], [2.], [3.]]) b = array([[2., 3.], [4., 5.], [6., 7.]], mask=[[0, 0], [0, 0], [0, 1]]) test = a * b control = array([[2, 3], [8, 10], [18, 3]], mask=[[0, 0], [0, 0], [0, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) test = b * a control = array([[2, 3], [8, 10], [18, 7]], mask=[[0, 0], [0, 0], [0, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) def test_domained_binops_d2D(self): # Test domained binary operations on 2D data a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) b = array([[2., 3.], [4., 5.], [6., 7.]]) test = a / b control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]], mask=[[0, 0], [1, 1], [1, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) test = b / a control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]], mask=[[0, 0], [1, 1], [1, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) a = array([[1.], [2.], [3.]]) b = array([[2., 3.], [4., 5.], [6., 7.]], mask=[[0, 0], [0, 0], [0, 1]]) test = a / b control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. 
/ 6, 3]], mask=[[0, 0], [0, 0], [0, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) test = b / a control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], mask=[[0, 0], [0, 0], [0, 1]]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) def test_noshrinking(self): # Check that we don't shrink a mask when not wanted # Binary operations a = masked_array([1., 2., 3.], mask=[False, False, False], shrink=False) b = a + 1 assert_equal(b.mask, [0, 0, 0]) # In place binary operation a += 1 assert_equal(a.mask, [0, 0, 0]) # Domained binary operation b = a / 1. assert_equal(b.mask, [0, 0, 0]) # In place binary operation a /= 1. assert_equal(a.mask, [0, 0, 0]) def test_noshink_on_creation(self): # Check that the mask is not shrunk on array creation when not wanted a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False) assert_equal(a.mask, [0, 0, 0]) def test_mod(self): # Tests mod (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d assert_equal(mod(x, y), mod(xm, ym)) test = mod(ym, xm) assert_equal(test, np.mod(ym, xm)) assert_equal(test.mask, mask_or(xm.mask, ym.mask)) test = mod(xm, ym) assert_equal(test, np.mod(xm, ym)) assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) def test_TakeTransposeInnerOuter(self): # Test of take, transpose, inner, outer products x = arange(24) y = np.arange(24) x[5:6] = masked x = x.reshape(2, 3, 4) y = y.reshape(2, 3, 4) assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) assert_equal(np.inner(filled(x, 0), filled(y, 0)), inner(x, y)) assert_equal(np.outer(filled(x, 0), filled(y, 0)), outer(x, y)) y = array(['abc', 1, 'def', 2, 3], object) y[2] = masked t = take(y, [0, 3, 4]) assert_(t[0] == 'abc') assert_(t[1] == 2) assert_(t[2] == 3) def test_imag_real(self): # Check complex xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) 
assert_equal(xx.imag, [10, 2]) assert_equal(xx.imag.filled(), [1e+20, 2]) assert_equal(xx.imag.dtype, xx._data.imag.dtype) assert_equal(xx.real, [1, 20]) assert_equal(xx.real.filled(), [1e+20, 20]) assert_equal(xx.real.dtype, xx._data.real.dtype) def test_methods_with_output(self): xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) xm[:, 0] = xm[0] = xm[-1, -1] = masked funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',) for funcname in funclist: npfunc = getattr(np, funcname) xmmeth = getattr(xm, funcname) # A ndarray as explicit input output = np.empty(4, dtype=float) output.fill(-9999) result = npfunc(xm, axis=0, out=output) # ... the result should be the given output assert_(result is output) assert_equal(result, xmmeth(axis=0, out=output)) output = empty(4, dtype=int) result = xmmeth(axis=0, out=output) assert_(result is output) assert_(output[0] is masked) def test_eq_on_structured(self): # Test the equality of structured arrays ndtype = [('A', int), ('B', int)] a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) test = (a == a) assert_equal(test, [True, True]) assert_equal(test.mask, [False, False]) b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) test = (a == b) assert_equal(test, [False, True]) assert_equal(test.mask, [True, False]) b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) test = (a == b) assert_equal(test, [True, False]) assert_equal(test.mask, [False, False]) def test_ne_on_structured(self): # Test the equality of structured arrays ndtype = [('A', int), ('B', int)] a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) test = (a != a) assert_equal(test, [False, False]) assert_equal(test.mask, [False, False]) b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) test = (a != b) assert_equal(test, [True, False]) assert_equal(test.mask, [True, False]) b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) test = (a != b) assert_equal(test, [False, 
True]) assert_equal(test.mask, [False, False]) def test_eq_w_None(self): # Really, comparisons with None should not be done, but check them # anyway. Note that pep8 will flag these tests. # With partial mask a = array([1, 2], mask=[0, 1]) assert_equal(a == None, False) assert_equal(a.data == None, False) assert_equal(a.mask == None, False) assert_equal(a != None, True) # With nomask a = array([1, 2], mask=False) assert_equal(a == None, False) assert_equal(a != None, True) # With complete mask a = array([1, 2], mask=True) assert_equal(a == None, False) assert_equal(a != None, True) # Fully masked, even comparison to None should return "masked" a = masked assert_equal(a == None, masked) def test_eq_w_scalar(self): a = array(1) assert_equal(a == 1, True) assert_equal(a == 0, False) assert_equal(a != 1, False) assert_equal(a != 0, True) def test_numpyarithmetics(self): # Check that the mask is not back-propagated when using numpy functions a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) control = masked_array([np.nan, np.nan, 0, np.log(2), -1], mask=[1, 1, 0, 0, 1]) test = log(a) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(a.mask, [0, 0, 0, 0, 1]) test = np.log(a) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(a.mask, [0, 0, 0, 0, 1]) class TestMaskedArrayAttributes(TestCase): def test_keepmask(self): # Tests the keep mask flag x = masked_array([1, 2, 3], mask=[1, 0, 0]) mx = masked_array(x) assert_equal(mx.mask, x.mask) mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) assert_equal(mx.mask, [0, 1, 0]) mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) assert_equal(mx.mask, [1, 1, 0]) # We default to true mx = masked_array(x, mask=[0, 1, 0]) assert_equal(mx.mask, [1, 1, 0]) def test_hardmask(self): # Test hard_mask d = arange(5) n = [0, 0, 0, 1, 1] m = make_mask(n) xh = array(d, mask=m, hard_mask=True) # We need to copy, to avoid updating d in xh ! 
xs = array(d, mask=m, hard_mask=False, copy=True) xh[[1, 4]] = [10, 40] xs[[1, 4]] = [10, 40] assert_equal(xh._data, [0, 10, 2, 3, 4]) assert_equal(xs._data, [0, 10, 2, 3, 40]) #assert_equal(xh.mask.ctypes._data, m.ctypes._data) assert_equal(xs.mask, [0, 0, 0, 1, 0]) self.assertTrue(xh._hardmask) self.assertTrue(not xs._hardmask) xh[1:4] = [10, 20, 30] xs[1:4] = [10, 20, 30] assert_equal(xh._data, [0, 10, 20, 3, 4]) assert_equal(xs._data, [0, 10, 20, 30, 40]) #assert_equal(xh.mask.ctypes._data, m.ctypes._data) assert_equal(xs.mask, nomask) xh[0] = masked xs[0] = masked assert_equal(xh.mask, [1, 0, 0, 1, 1]) assert_equal(xs.mask, [1, 0, 0, 0, 0]) xh[:] = 1 xs[:] = 1 assert_equal(xh._data, [0, 1, 1, 3, 4]) assert_equal(xs._data, [1, 1, 1, 1, 1]) assert_equal(xh.mask, [1, 0, 0, 1, 1]) assert_equal(xs.mask, nomask) # Switch to soft mask xh.soften_mask() xh[:] = arange(5) assert_equal(xh._data, [0, 1, 2, 3, 4]) assert_equal(xh.mask, nomask) # Switch back to hard mask xh.harden_mask() xh[xh < 3] = masked assert_equal(xh._data, [0, 1, 2, 3, 4]) assert_equal(xh._mask, [1, 1, 1, 0, 0]) xh[filled(xh > 1, False)] = 5 assert_equal(xh._data, [0, 1, 2, 5, 5]) assert_equal(xh._mask, [1, 1, 1, 0, 0]) xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) xh[0] = 0 assert_equal(xh._data, [[1, 0], [3, 4]]) assert_equal(xh._mask, [[1, 0], [0, 0]]) xh[-1, -1] = 5 assert_equal(xh._data, [[1, 0], [3, 5]]) assert_equal(xh._mask, [[1, 0], [0, 0]]) xh[filled(xh < 5, False)] = 2 assert_equal(xh._data, [[1, 2], [2, 5]]) assert_equal(xh._mask, [[1, 0], [0, 0]]) def test_hardmask_again(self): # Another test of hardmask d = arange(5) n = [0, 0, 0, 1, 1] m = make_mask(n) xh = array(d, mask=m, hard_mask=True) xh[4:5] = 999 #assert_equal(xh.mask.ctypes._data, m.ctypes._data) xh[0:1] = 999 assert_equal(xh._data, [999, 1, 2, 3, 4]) def test_hardmask_oncemore_yay(self): # OK, yet another test of hardmask # Make sure that harden_mask/soften_mask//unshare_mask returns self a = array([1, 2, 
3], mask=[1, 0, 0]) b = a.harden_mask() assert_equal(a, b) b[0] = 0 assert_equal(a, b) assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) a = b.soften_mask() a[0] = 0 assert_equal(a, b) assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) def test_smallmask(self): # Checks the behaviour of _smallmask a = arange(10) a[1] = masked a[1] = 1 assert_equal(a._mask, nomask) a = arange(10) a._smallmask = False a[1] = masked a[1] = 1 assert_equal(a._mask, zeros(10)) def test_shrink_mask(self): # Tests .shrink_mask() a = array([1, 2, 3], mask=[0, 0, 0]) b = a.shrink_mask() assert_equal(a, b) assert_equal(a.mask, nomask) def test_flat(self): # Test that flat can return all types of items [#4585, #4615] # test simple access test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) assert_equal(test.flat[1], 2) assert_equal(test.flat[2], masked) self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2])) # Test flat on masked_matrices test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) assert_equal(test, control) # Test setting test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) testflat = test.flat testflat[:] = testflat[[2, 1, 0]] assert_equal(test, control) testflat[0] = 9 assert_equal(test[0, 0], 9) # test 2-D record array # ... 
on structured array w/ masked records x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], dtype=[('a', int), ('b', float), ('c', '|S8')]) x['a'][0, 1] = masked x['b'][1, 0] = masked x['c'][0, 2] = masked x[-1, -1] = masked xflat = x.flat assert_equal(xflat[0], x[0, 0]) assert_equal(xflat[1], x[0, 1]) assert_equal(xflat[2], x[0, 2]) assert_equal(xflat[:3], x[0]) assert_equal(xflat[3], x[1, 0]) assert_equal(xflat[4], x[1, 1]) assert_equal(xflat[5], x[1, 2]) assert_equal(xflat[3:], x[1]) assert_equal(xflat[-1], x[-1, -1]) i = 0 j = 0 for xf in xflat: assert_equal(xf, x[j, i]) i += 1 if i >= x.shape[-1]: i = 0 j += 1 # test that matrices keep the correct shape (#4615) a = masked_array(np.matrix(np.eye(2)), mask=0) b = a.flat b01 = b[:2] assert_equal(b01.data, array([[1., 0.]])) assert_equal(b01.mask, array([[False, False]])) def test_assign_dtype(self): # check that the mask's dtype is updated when dtype is changed a = np.zeros(4, dtype='f4,i4') m = np.ma.array(a) m.dtype = np.dtype('f4') repr(m) # raises? assert_equal(m.dtype, np.dtype('f4')) # check that dtype changes that change shape of mask too much # are not allowed def assign(): m = np.ma.array(a) m.dtype = np.dtype('f8') assert_raises(ValueError, assign) b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? 
assert_equal(b.dtype, np.dtype('f4')) # check that nomask is preserved a = np.zeros(4, dtype='f4') m = np.ma.array(a) m.dtype = np.dtype('f4,i4') assert_equal(m.dtype, np.dtype('f4,i4')) assert_equal(m._mask, np.ma.nomask) class TestFillingValues(TestCase): def test_check_on_scalar(self): # Test _check_fill_value set to valid and invalid values _check_fill_value = np.ma.core._check_fill_value fval = _check_fill_value(0, int) assert_equal(fval, 0) fval = _check_fill_value(None, int) assert_equal(fval, default_fill_value(0)) fval = _check_fill_value(0, "|S3") assert_equal(fval, asbytes("0")) fval = _check_fill_value(None, "|S3") assert_equal(fval, default_fill_value("|S3")) self.assertRaises(TypeError, _check_fill_value, 1e+20, int) self.assertRaises(TypeError, _check_fill_value, 'stuff', int) def test_check_on_fields(self): # Tests _check_fill_value with records _check_fill_value = np.ma.core._check_fill_value ndtype = [('a', int), ('b', float), ('c', "|S3")] # A check on a list should return a single record fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) self.assertTrue(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) # A check on None should output the defaults fval = _check_fill_value(None, ndtype) self.assertTrue(isinstance(fval, ndarray)) assert_equal(fval.item(), [default_fill_value(0), default_fill_value(0.), asbytes(default_fill_value("0"))]) #.....Using a structured type as fill_value should work fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) fval = _check_fill_value(fill_val, ndtype) self.assertTrue(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) #.....Using a flexible type w/ a different type shouldn't matter # BEHAVIOR in 1.5 and earlier: match structured types by position #fill_val = np.array((-999, -12345678.9, "???"), # dtype=[("A", int), ("B", float), ("C", "|S3")]) # BEHAVIOR in 1.6 and later: match structured types by name fill_val = 
np.array(("???", -999, -12345678.9), dtype=[("c", "|S3"), ("a", int), ("b", float), ]) fval = _check_fill_value(fill_val, ndtype) self.assertTrue(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) #.....Using an object-array shouldn't matter either fill_val = np.ndarray(shape=(1,), dtype=object) fill_val[0] = (-999, -12345678.9, asbytes("???")) fval = _check_fill_value(fill_val, object) self.assertTrue(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) # NOTE: This test was never run properly as "fill_value" rather than # "fill_val" was assigned. Written properly, it fails. #fill_val = np.array((-999, -12345678.9, "???")) #fval = _check_fill_value(fill_val, ndtype) #self.assertTrue(isinstance(fval, ndarray)) #assert_equal(fval.item(), [-999, -12345678.9, asbytes("???")]) #.....One-field-only flexible type should work as well ndtype = [("a", int)] fval = _check_fill_value(-999999999, ndtype) self.assertTrue(isinstance(fval, ndarray)) assert_equal(fval.item(), (-999999999,)) def test_fillvalue_conversion(self): # Tests the behavior of fill_value during conversion # We had a tailored comment to make sure special attributes are # properly dealt with a = array(asbytes_nested(['3', '4', '5'])) a._optinfo.update({'comment':"updated!"}) b = array(a, dtype=int) assert_equal(b._data, [3, 4, 5]) assert_equal(b.fill_value, default_fill_value(0)) b = array(a, dtype=float) assert_equal(b._data, [3, 4, 5]) assert_equal(b.fill_value, default_fill_value(0.)) b = a.astype(int) assert_equal(b._data, [3, 4, 5]) assert_equal(b.fill_value, default_fill_value(0)) assert_equal(b._optinfo['comment'], "updated!") b = a.astype([('a', '|S3')]) assert_equal(b['a']._data, a._data) assert_equal(b['a'].fill_value, a.fill_value) def test_fillvalue(self): # Yet more fun with the fill_value data = masked_array([1, 2, 3], fill_value=-999) series = data[[0, 2, 1]] assert_equal(series._fill_value, data._fill_value) mtype = 
[('f', float), ('s', '|S3')] x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) x.fill_value = 999 assert_equal(x.fill_value.item(), [999., asbytes('999')]) assert_equal(x['f'].fill_value, 999) assert_equal(x['s'].fill_value, asbytes('999')) x.fill_value = (9, '???') assert_equal(x.fill_value.item(), (9, asbytes('???'))) assert_equal(x['f'].fill_value, 9) assert_equal(x['s'].fill_value, asbytes('???')) x = array([1, 2, 3.1]) x.fill_value = 999 assert_equal(np.asarray(x.fill_value).dtype, float) assert_equal(x.fill_value, 999.) assert_equal(x._fill_value, np.array(999.)) def test_fillvalue_exotic_dtype(self): # Tests yet more exotic flexible dtypes _check_fill_value = np.ma.core._check_fill_value ndtype = [('i', int), ('s', '|S8'), ('f', float)] control = np.array((default_fill_value(0), default_fill_value('0'), default_fill_value(0.),), dtype=ndtype) assert_equal(_check_fill_value(None, ndtype), control) # The shape shouldn't matter ndtype = [('f0', float, (2, 2))] control = np.array((default_fill_value(0.),), dtype=[('f0', float)]).astype(ndtype) assert_equal(_check_fill_value(None, ndtype), control) control = np.array((0,), dtype=[('f0', float)]).astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) ndtype = np.dtype("int, (2,3)float, float") control = np.array((default_fill_value(0), default_fill_value(0.), default_fill_value(0.),), dtype="int, float, float").astype(ndtype) test = _check_fill_value(None, ndtype) assert_equal(test, control) control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) def test_fillvalue_datetime_timedelta(self): # Test default fillvalue for datetime64 and timedelta64 types. # See issue #4476, this would return '?' 
which would cause errors # elsewhere for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m", "h", "D", "W", "M", "Y"): control = numpy.datetime64("NaT", timecode) test = default_fill_value(numpy.dtype("<M8[" + timecode + "]")) assert_equal(test, control) control = numpy.timedelta64("NaT", timecode) test = default_fill_value(numpy.dtype("<m8[" + timecode + "]")) assert_equal(test, control) def test_extremum_fill_value(self): # Tests extremum fill values for flexible type. a = array([(1, (2, 3)), (4, (5, 6))], dtype=[('A', int), ('B', [('BA', int), ('BB', int)])]) test = a.fill_value assert_equal(test['A'], default_fill_value(a['A'])) assert_equal(test['B']['BA'], default_fill_value(a['B']['BA'])) assert_equal(test['B']['BB'], default_fill_value(a['B']['BB'])) test = minimum_fill_value(a) assert_equal(test[0], minimum_fill_value(a['A'])) assert_equal(test[1][0], minimum_fill_value(a['B']['BA'])) assert_equal(test[1][1], minimum_fill_value(a['B']['BB'])) assert_equal(test[1], minimum_fill_value(a['B'])) test = maximum_fill_value(a) assert_equal(test[0], maximum_fill_value(a['A'])) assert_equal(test[1][0], maximum_fill_value(a['B']['BA'])) assert_equal(test[1][1], maximum_fill_value(a['B']['BB'])) assert_equal(test[1], maximum_fill_value(a['B'])) def test_fillvalue_individual_fields(self): # Test setting fill_value on individual fields ndtype = [('a', int), ('b', int)] # Explicit fill_value a = array(list(zip([1, 2, 3], [4, 5, 6])), fill_value=(-999, -999), dtype=ndtype) aa = a['a'] aa.set_fill_value(10) assert_equal(aa._fill_value, np.array(10)) assert_equal(tuple(a.fill_value), (10, -999)) a.fill_value['b'] = -10 assert_equal(tuple(a.fill_value), (10, -10)) # Implicit fill_value t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype) tt = t['a'] tt.set_fill_value(10) assert_equal(tt._fill_value, np.array(10)) assert_equal(tuple(t.fill_value), (10, default_fill_value(0))) def test_fillvalue_implicit_structured_array(self): # Check that fill_value is always 
defined for structured arrays ndtype = ('b', float) adtype = ('a', float) a = array([(1.,), (2.,)], mask=[(False,), (False,)], fill_value=(np.nan,), dtype=np.dtype([adtype])) b = empty(a.shape, dtype=[adtype, ndtype]) b['a'] = a['a'] b['a'].set_fill_value(a['a'].fill_value) f = b._fill_value[()] assert_(np.isnan(f[0])) assert_equal(f[-1], default_fill_value(1.)) def test_fillvalue_as_arguments(self): # Test adding a fill_value parameter to empty/ones/zeros a = empty(3, fill_value=999.) assert_equal(a.fill_value, 999.) a = ones(3, fill_value=999., dtype=float) assert_equal(a.fill_value, 999.) a = zeros(3, fill_value=0., dtype=complex) assert_equal(a.fill_value, 0.) a = identity(3, fill_value=0., dtype=complex) assert_equal(a.fill_value, 0.) def test_fillvalue_in_view(self): # Test the behavior of fill_value in view # Create initial masked array x = array([1, 2, 3], fill_value=1, dtype=np.int64) # Check that fill_value is preserved by default y = x.view() assert_(y.fill_value == 1) # Check that fill_value is preserved if dtype is specified and the # dtype is an ndarray sub-class and has a _fill_value attribute y = x.view(MaskedArray) assert_(y.fill_value == 1) # Check that fill_value is preserved if type is specified and the # dtype is an ndarray sub-class and has a _fill_value attribute (by # default, the first argument is dtype, not type) y = x.view(type=MaskedArray) assert_(y.fill_value == 1) # Check that code does not crash if passed an ndarray sub-class that # does not have a _fill_value attribute y = x.view(np.ndarray) y = x.view(type=np.ndarray) # Check that fill_value can be overriden with view y = x.view(MaskedArray, fill_value=2) assert_(y.fill_value == 2) # Check that fill_value can be overriden with view (using type=) y = x.view(type=MaskedArray, fill_value=2) assert_(y.fill_value == 2) # Check that fill_value gets reset if passed a dtype but not a # fill_value. This is because even though in some cases one can safely # cast the fill_value, e.g. 
if taking an int64 view of an int32 array, # in other cases, this cannot be done (e.g. int32 view of an int64 # array with a large fill_value). y = x.view(dtype=np.int32) assert_(y.fill_value == 999999) class TestUfuncs(TestCase): # Test class for the application of ufuncs on MaskedArrays. def setUp(self): # Base data definition. self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) self.err_status = np.geterr() np.seterr(divide='ignore', invalid='ignore') def tearDown(self): np.seterr(**self.err_status) def test_testUfuncRegression(self): # Tests new ufuncs on MaskedArrays. for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh', 'absolute', 'fabs', 'negative', # 'nonzero', 'around', 'floor', 'ceil', # 'sometrue', 'alltrue', 'logical_not', 'add', 'subtract', 'multiply', 'divide', 'true_divide', 'floor_divide', 'remainder', 'fmod', 'hypot', 'arctan2', 'equal', 'not_equal', 'less_equal', 'greater_equal', 'less', 'greater', 'logical_and', 'logical_or', 'logical_xor', ]: try: uf = getattr(umath, f) except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(numpy.ma.core, f) args = self.d[:uf.nin] ur = uf(*args) mr = mf(*args) assert_equal(ur.filled(0), mr.filled(0), f) assert_mask_equal(ur.mask, mr.mask, err_msg=f) def test_reduce(self): # Tests reduce on MaskedArrays. a = self.d[0] self.assertTrue(not alltrue(a, axis=0)) self.assertTrue(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) assert_equal(product(a, axis=0), 0) assert_equal(add.reduce(a), pi) def test_minmax(self): # Tests extrema on MaskedArrays. 
a = arange(1, 13).reshape(3, 4) amask = masked_where(a < 5, a) assert_equal(amask.max(), a.max()) assert_equal(amask.min(), 5) assert_equal(amask.max(0), a.max(0)) assert_equal(amask.min(0), [5, 6, 7, 8]) self.assertTrue(amask.max(1)[0].mask) self.assertTrue(amask.min(1)[0].mask) def test_ndarray_mask(self): # Check that the mask of the result is a ndarray (not a MaskedArray...) a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) test = np.sqrt(a) control = masked_array([-1, 0, 1, np.sqrt(2), -1], mask=[1, 0, 0, 0, 1]) assert_equal(test, control) assert_equal(test.mask, control.mask) self.assertTrue(not isinstance(test.mask, MaskedArray)) def test_treatment_of_NotImplemented(self): # Check that NotImplemented is returned at appropriate places a = masked_array([1., 2.], mask=[1, 0]) self.assertRaises(TypeError, operator.mul, a, "abc") self.assertRaises(TypeError, operator.truediv, a, "abc") class MyClass(object): __array_priority__ = a.__array_priority__ + 1 def __mul__(self, other): return "My mul" def __rmul__(self, other): return "My rmul" me = MyClass() assert_(me * a == "My mul") assert_(a * me == "My rmul") # and that __array_priority__ is respected class MyClass2(object): __array_priority__ = 100 def __mul__(self, other): return "Me2mul" def __rmul__(self, other): return "Me2rmul" def __rdiv__(self, other): return "Me2rdiv" __rtruediv__ = __rdiv__ me_too = MyClass2() assert_(a.__mul__(me_too) is NotImplemented) assert_(all(multiply.outer(a, me_too) == "Me2rmul")) assert_(a.__truediv__(me_too) is NotImplemented) assert_(me_too * a == "Me2mul") assert_(a * me_too == "Me2rmul") assert_(a / me_too == "Me2rdiv") class TestMaskedArrayInPlaceArithmetics(TestCase): # Test MaskedArray Arithmetics def setUp(self): x = arange(10) y = arange(10) xm = arange(10) xm[2] = masked self.intdata = (x, y, xm) self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] self.othertypes = 
[np.dtype(_).type for _ in self.othertypes] self.uint8data = ( x.astype(np.uint8), y.astype(np.uint8), xm.astype(np.uint8) ) def test_inplace_addition_scalar(self): # Test of inplace additions (x, y, xm) = self.intdata xm[2] = masked x += 1 assert_equal(x, y + 1) xm += 1 assert_equal(xm, y + 1) (x, _, xm) = self.floatdata id1 = x.data.ctypes._data x += 1. assert_(id1 == x.data.ctypes._data) assert_equal(x, y + 1.) def test_inplace_addition_array(self): # Test of inplace additions (x, y, xm) = self.intdata m = xm.mask a = arange(10, dtype=np.int16) a[-1] = masked x += a xm += a assert_equal(x, y + a) assert_equal(xm, y + a) assert_equal(xm.mask, mask_or(m, a.mask)) def test_inplace_subtraction_scalar(self): # Test of inplace subtractions (x, y, xm) = self.intdata x -= 1 assert_equal(x, y - 1) xm -= 1 assert_equal(xm, y - 1) def test_inplace_subtraction_array(self): # Test of inplace subtractions (x, y, xm) = self.floatdata m = xm.mask a = arange(10, dtype=float) a[-1] = masked x -= a xm -= a assert_equal(x, y - a) assert_equal(xm, y - a) assert_equal(xm.mask, mask_or(m, a.mask)) def test_inplace_multiplication_scalar(self): # Test of inplace multiplication (x, y, xm) = self.floatdata x *= 2.0 assert_equal(x, y * 2) xm *= 2.0 assert_equal(xm, y * 2) def test_inplace_multiplication_array(self): # Test of inplace multiplication (x, y, xm) = self.floatdata m = xm.mask a = arange(10, dtype=float) a[-1] = masked x *= a xm *= a assert_equal(x, y * a) assert_equal(xm, y * a) assert_equal(xm.mask, mask_or(m, a.mask)) def test_inplace_division_scalar_int(self): # Test of inplace division (x, y, xm) = self.intdata x = arange(10) * 2 xm = arange(10) * 2 xm[2] = masked x //= 2 assert_equal(x, y) xm //= 2 assert_equal(xm, y) def test_inplace_division_scalar_float(self): # Test of inplace division (x, y, xm) = self.floatdata x /= 2.0 assert_equal(x, y / 2.0) xm /= arange(10) assert_equal(xm, ones((10,))) def test_inplace_division_array_float(self): # Test of inplace division (x, 
        # NOTE(review): this chunk resumes inside an in-place-division test
        # whose opening line lies above this view; the fragment below is
        # preserved token-for-token.
        y, xm) = self.floatdata
        m = xm.mask
        a = arange(10, dtype=float)
        a[-1] = masked
        x /= a
        xm /= a
        assert_equal(x, y / a)
        assert_equal(xm, y / a)
        # The result mask is the union of both operand masks plus the
        # divide-by-zero positions.
        assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))

    def test_inplace_division_misc(self):
        # Division with masked entries on both sides: masks propagate and
        # masked slots keep the left-hand data untouched.
        x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
        y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
        xm = masked_array(x, mask=m1)
        ym = masked_array(y, mask=m2)
        z = xm / ym
        assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(z._data,
                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
        #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])

        xm = xm.copy()
        xm /= ym
        assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(z._data,
                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
        #assert_equal(xm._data,
        #             [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])

    def test_datafriendly_add(self):
        # Test keeping data w/ (inplace) addition
        x = array([1, 2, 3], mask=[0, 0, 1])
        # Test add w/ scalar
        xx = x + 1
        assert_equal(xx.data, [2, 3, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test iadd w/ scalar
        x += 1
        assert_equal(x.data, [2, 3, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test add w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x + array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 4, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test iadd w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x += array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(x.data, [1, 4, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_sub(self):
        # Test keeping data w/ (inplace) subtraction
        # Test sub w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x - 1
        assert_equal(xx.data, [0, 1, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test isub w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        x -= 1
        assert_equal(x.data, [0, 1, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test sub w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x - array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 0, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test isub w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x -= array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(x.data, [1, 0, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_mul(self):
        # Test keeping data w/ (inplace) multiplication
        # Test mul w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x * 2
        assert_equal(xx.data, [2, 4, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test imul w/ scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        x *= 2
        assert_equal(x.data, [2, 4, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test mul w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x * array([10, 20, 30], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 40, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test imul w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x *= array([10, 20, 30], mask=[1, 0, 0])
        assert_equal(x.data, [1, 40, 3])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_div(self):
        # Test keeping data w/ (inplace) division
        # Test div on scalar
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x / 2.
        assert_equal(xx.data, [1 / 2., 2 / 2., 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test idiv on scalar
        x = array([1., 2., 3.], mask=[0, 0, 1])
        x /= 2.
        assert_equal(x.data, [1 / 2., 2 / 2., 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test div on array
        x = array([1., 2., 3.], mask=[0, 0, 1])
        xx = x / array([10., 20., 30.], mask=[1, 0, 0])
        assert_equal(xx.data, [1., 2. / 20., 3.])
        assert_equal(xx.mask, [1, 0, 1])
        # Test idiv on array
        x = array([1., 2., 3.], mask=[0, 0, 1])
        x /= array([10., 20., 30.], mask=[1, 0, 0])
        assert_equal(x.data, [1., 2 / 20., 3.])
        assert_equal(x.mask, [1, 0, 1])

    def test_datafriendly_pow(self):
        # Test keeping data w/ (inplace) power
        # Test pow on scalar
        x = array([1., 2., 3.], mask=[0, 0, 1])
        xx = x ** 2.5
        assert_equal(xx.data, [1., 2. ** 2.5, 3.])
        assert_equal(xx.mask, [0, 0, 1])
        # Test ipow on scalar
        x **= 2.5
        assert_equal(x.data, [1., 2. ** 2.5, 3])
        assert_equal(x.mask, [0, 0, 1])

    def test_datafriendly_add_arrays(self):
        # Broadcasting an unmasked-row add should not create a mask ...
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 0])
        a += b
        assert_equal(a, [[2, 2], [4, 4]])
        if a.mask is not nomask:
            assert_equal(a.mask, [[0, 0], [0, 0]])
        # ... while a masked row broadcasts its mask to every row.
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 1])
        a += b
        assert_equal(a, [[2, 2], [4, 4]])
        assert_equal(a.mask, [[0, 1], [0, 1]])

    def test_datafriendly_sub_arrays(self):
        # Same broadcasting/mask semantics as the add case, for subtraction.
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 0])
        a -= b
        assert_equal(a, [[0, 0], [2, 2]])
        if a.mask is not nomask:
            assert_equal(a.mask, [[0, 0], [0, 0]])
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 1])
        a -= b
        assert_equal(a, [[0, 0], [2, 2]])
        assert_equal(a.mask, [[0, 1], [0, 1]])

    def test_datafriendly_mul_arrays(self):
        # Same broadcasting/mask semantics as the add case, for multiplication.
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 0])
        a *= b
        assert_equal(a, [[1, 1], [3, 3]])
        if a.mask is not nomask:
            assert_equal(a.mask, [[0, 0], [0, 0]])
        a = array([[1, 1], [3, 3]])
        b = array([1, 1], mask=[0, 1])
        a *= b
        assert_equal(a, [[1, 1], [3, 3]])
        assert_equal(a.mask, [[0, 1], [0, 1]])

    def test_inplace_addition_scalar_type(self):
        # Test of inplace additions
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                xm[2] = masked
                x += t(1)
                assert_equal(x, y + t(1))
                xm += t(1)
                assert_equal(xm, y + t(1))
                assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_addition_array_type(self):
        # Test of inplace additions
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                m = xm.mask
                a = arange(10, dtype=t)
                a[-1] = masked
                x += a
                xm += a
                assert_equal(x, y + a)
                assert_equal(xm, y + a)
                assert_equal(xm.mask, mask_or(m, a.mask))
                assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_subtraction_scalar_type(self):
        # Test of inplace subtractions
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                x -= t(1)
                assert_equal(x, y - t(1))
                xm -= t(1)
                assert_equal(xm, y - t(1))
                assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_subtraction_array_type(self):
        # Test of inplace subtractions
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                m = xm.mask
                a = arange(10, dtype=t)
                a[-1] = masked
                x -= a
                xm -= a
                assert_equal(x, y - a)
                assert_equal(xm, y - a)
                assert_equal(xm.mask, mask_or(m, a.mask))
                assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_multiplication_scalar_type(self):
        # Test of inplace multiplication
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                x *= t(2)
                assert_equal(x, y * t(2))
                xm *= t(2)
                assert_equal(xm, y * t(2))
                assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_multiplication_array_type(self):
        # Test of inplace multiplication
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                m = xm.mask
                a = arange(10, dtype=t)
                a[-1] = masked
                x *= a
                xm *= a
                assert_equal(x, y * a)
                assert_equal(xm, y * a)
                assert_equal(xm.mask, mask_or(m, a.mask))
                assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_floor_division_scalar_type(self):
        # Test of inplace division
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                x = arange(10, dtype=t) * t(2)
                xm = arange(10, dtype=t) * t(2)
                xm[2] = masked
                x //= t(2)
                xm //= t(2)
                assert_equal(x, y)
                assert_equal(xm, y)
                assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_floor_division_array_type(self):
        # Test of inplace division
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                m = xm.mask
                a = arange(10, dtype=t)
                a[-1] = masked
                x //= a
                xm //= a
                assert_equal(x, y // a)
                assert_equal(xm, y // a)
                assert_equal(
                    xm.mask,
                    mask_or(mask_or(m, a.mask), (a == t(0)))
                )
                assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_division_scalar_type(self):
        # Test of inplace division
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                x = arange(10, dtype=t) * t(2)
                xm = arange(10, dtype=t) * t(2)
                xm[2] = masked

                # May get a DeprecationWarning or a TypeError.
                #
                # This is a consequence of the fact that this is true divide
                # and will require casting to float for calculation and
                # casting back to the original type. This will only be raised
                # with integers. Whether it is an error or warning is only
                # dependent on how stringent the casting rules are.
                #
                # Will handle the same way.
                try:
                    x /= t(2)
                    assert_equal(x, y)
                except (DeprecationWarning, TypeError) as e:
                    warnings.warn(str(e))
                try:
                    xm /= t(2)
                    assert_equal(xm, y)
                except (DeprecationWarning, TypeError) as e:
                    warnings.warn(str(e))

                if issubclass(t, np.integer):
                    assert_equal(len(w), 2, "Failed on type=%s." % t)
                else:
                    assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_division_array_type(self):
        # Test of inplace division
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                (x, y, xm) = (_.astype(t) for _ in self.uint8data)
                m = xm.mask
                a = arange(10, dtype=t)
                a[-1] = masked

                # May get a DeprecationWarning or a TypeError.
                #
                # This is a consequence of the fact that this is true divide
                # and will require casting to float for calculation and
                # casting back to the original type. This will only be raised
                # with integers. Whether it is an error or warning is only
                # dependent on how stringent the casting rules are.
                #
                # Will handle the same way.
                try:
                    x /= a
                    assert_equal(x, y / a)
                except (DeprecationWarning, TypeError) as e:
                    warnings.warn(str(e))
                try:
                    xm /= a
                    assert_equal(xm, y / a)
                    assert_equal(
                        xm.mask,
                        mask_or(mask_or(m, a.mask), (a == t(0)))
                    )
                except (DeprecationWarning, TypeError) as e:
                    warnings.warn(str(e))

                if issubclass(t, np.integer):
                    assert_equal(len(w), 2, "Failed on type=%s." % t)
                else:
                    assert_equal(len(w), 0, "Failed on type=%s." % t)

    def test_inplace_pow_type(self):
        # Test keeping data w/ (inplace) power
        for t in self.othertypes:
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("always")
                # Test pow on scalar
                x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
                xx = x ** t(2)
                xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
                assert_equal(xx.data, xx_r.data)
                assert_equal(xx.mask, xx_r.mask)
                # Test ipow on scalar
                x **= t(2)
                assert_equal(x.data, xx_r.data)
                assert_equal(x.mask, xx_r.mask)
                assert_equal(len(w), 0, "Failed on type=%s." % t)


class TestMaskedArrayMethods(TestCase):
    # Test class for miscellaneous MaskedArrays methods.
    def setUp(self):
        # Base data definition.
        # NOTE(review): continuation of TestMaskedArrayMethods.setUp, whose
        # `def` line is just above this span.  Shared fixtures: a 36-element
        # float vector in 1D/2D/4D layouts plus two alternative masks.
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        X = x.reshape(6, 6)
        XX = x.reshape(3, 2, 2, 3)
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(data=x, mask=m)
        mX = array(data=X, mask=m.reshape(X.shape))
        mXX = array(data=XX, mask=m.reshape(XX.shape))
        m2 = np.array([1, 1, 0, 1, 0, 0,
                       1, 1, 1, 1, 0, 1,
                       0, 0, 1, 1, 0, 1,
                       0, 0, 0, 1, 1, 1,
                       1, 0, 0, 1, 1, 0,
                       0, 0, 1, 0, 1, 1])
        m2x = array(data=x, mask=m2)
        m2X = array(data=X, mask=m2.reshape(X.shape))
        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
        self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)

    def test_generic_methods(self):
        # Tests some MaskedArray methods.
        a = array([1, 3, 2])
        assert_equal(a.any(), a._data.any())
        assert_equal(a.all(), a._data.all())
        assert_equal(a.argmax(), a._data.argmax())
        assert_equal(a.argmin(), a._data.argmin())
        assert_equal(a.choose(0, 1, 2, 3, 4),
                     a._data.choose(0, 1, 2, 3, 4))
        assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
        assert_equal(a.conj(), a._data.conj())
        assert_equal(a.conjugate(), a._data.conjugate())
        m = array([[1, 2], [3, 4]])
        assert_equal(m.diagonal(), m._data.diagonal())
        assert_equal(a.sum(), a._data.sum())
        assert_equal(a.take([1, 2]), a._data.take([1, 2]))
        assert_equal(m.transpose(), m._data.transpose())

    def test_allclose(self):
        # Tests allclose on arrays
        a = np.random.rand(10)
        b = a + np.random.rand(10) * 1e-8
        self.assertTrue(allclose(a, b))
        # Test allclose w/ infs
        a[0] = np.inf
        self.assertTrue(not allclose(a, b))
        b[0] = np.inf
        self.assertTrue(allclose(a, b))
        # Test all close w/ masked
        a = masked_array(a)
        a[-1] = masked
        self.assertTrue(allclose(a, b, masked_equal=True))
        self.assertTrue(not allclose(a, b, masked_equal=False))
        # Test comparison w/ scalar
        a *= 1e-8
        a[0] = 0
        self.assertTrue(allclose(a, 0, masked_equal=True))
        # Test that the function works for MIN_INT integer typed arrays
        a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
        self.assertTrue(allclose(a, a))

    def test_allany(self):
        # Checks the any/all methods/functions.
        x = np.array([[0.13, 0.26, 0.90],
                      [0.28, 0.33, 0.63],
                      [0.31, 0.87, 0.70]])
        m = np.array([[True, False, False],
                      [False, False, False],
                      [True, True, False]], dtype=np.bool_)
        mx = masked_array(x, mask=m)
        mxbig = (mx > 0.5)
        mxsmall = (mx < 0.5)

        self.assertFalse(mxbig.all())
        self.assertTrue(mxbig.any())
        assert_equal(mxbig.all(0), [False, False, True])
        assert_equal(mxbig.all(1), [False, False, True])
        assert_equal(mxbig.any(0), [False, False, True])
        assert_equal(mxbig.any(1), [True, True, True])

        self.assertFalse(mxsmall.all())
        self.assertTrue(mxsmall.any())
        assert_equal(mxsmall.all(0), [True, True, False])
        assert_equal(mxsmall.all(1), [False, False, False])
        assert_equal(mxsmall.any(0), [True, True, False])
        assert_equal(mxsmall.any(1), [True, True, False])

    def test_allany_onmatrices(self):
        # Same any/all checks, but on np.matrix subclasses.
        x = np.array([[0.13, 0.26, 0.90],
                      [0.28, 0.33, 0.63],
                      [0.31, 0.87, 0.70]])
        X = np.matrix(x)
        m = np.array([[True, False, False],
                      [False, False, False],
                      [True, True, False]], dtype=np.bool_)
        mX = masked_array(X, mask=m)
        mXbig = (mX > 0.5)
        mXsmall = (mX < 0.5)

        self.assertFalse(mXbig.all())
        self.assertTrue(mXbig.any())
        assert_equal(mXbig.all(0), np.matrix([False, False, True]))
        assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
        assert_equal(mXbig.any(0), np.matrix([False, False, True]))
        assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)

        self.assertFalse(mXsmall.all())
        self.assertTrue(mXsmall.any())
        assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
        assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
        assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
        assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)

    def test_allany_oddities(self):
        # Some fun with all and any
        store = empty((), dtype=bool)
        full = array([1, 2, 3], mask=True)

        self.assertTrue(full.all() is masked)
        full.all(out=store)
        self.assertTrue(store)
        self.assertTrue(store._mask, True)
        self.assertTrue(store is not masked)

        store = empty((), dtype=bool)
        self.assertTrue(full.any() is masked)
        full.any(out=store)
        self.assertTrue(not store)
        self.assertTrue(store._mask, True)
        self.assertTrue(store is not masked)

    def test_argmax_argmin(self):
        # Tests argmin & argmax on MaskedArrays.
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        assert_equal(mx.argmin(), 35)
        assert_equal(mX.argmin(), 35)
        assert_equal(m2x.argmin(), 4)
        assert_equal(m2X.argmin(), 4)
        assert_equal(mx.argmax(), 28)
        assert_equal(mX.argmax(), 28)
        assert_equal(m2x.argmax(), 31)
        assert_equal(m2X.argmax(), 31)
        assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
        assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
        assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
        assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
        assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])
        assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
        assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
        assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])

    def test_clip(self):
        # Tests clip on MaskedArrays.
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(x, mask=m)
        clipped = mx.clip(2, 8)
        assert_equal(clipped.mask, mx.mask)
        assert_equal(clipped._data, x.clip(2, 8))
        assert_equal(clipped._data, mx._data.clip(2, 8))

    def test_compress(self):
        # test compress
        a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
        condition = (a > 1.5) & (a < 3.5)
        assert_equal(a.compress(condition), [2., 3.])

        a[[2, 3]] = masked
        b = a.compress(condition)
        assert_equal(b._data, [2., 3.])
        assert_equal(b._mask, [0, 1])
        assert_equal(b.fill_value, 9999)
        assert_equal(b, a[condition])

        condition = (a < 4.)
        b = a.compress(condition)
        assert_equal(b._data, [1., 2., 3.])
        assert_equal(b._mask, [0, 0, 1])
        assert_equal(b.fill_value, 9999)
        assert_equal(b, a[condition])

        a = masked_array([[10, 20, 30], [40, 50, 60]],
                         mask=[[0, 0, 1], [1, 0, 0]])
        b = a.compress(a.ravel() >= 22)
        assert_equal(b._data, [30, 40, 50, 60])
        assert_equal(b._mask, [1, 1, 0, 0])

        x = np.array([3, 1, 2])
        b = a.compress(x >= 2, axis=1)
        assert_equal(b._data, [[10, 30], [40, 60]])
        assert_equal(b._mask, [[0, 1], [1, 0]])

    def test_compressed(self):
        # Tests compressed
        a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
        b = a.compressed()
        assert_equal(b, a)
        a[0] = masked
        b = a.compressed()
        assert_equal(b, [2, 3, 4])

        # compressed() preserves the np.matrix subclass.
        a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
        b = a.compressed()
        assert_equal(b, a)
        self.assertTrue(isinstance(b, np.matrix))
        a[0, 0] = masked
        b = a.compressed()
        assert_equal(b, [[2, 3, 4]])

    def test_empty(self):
        # Tests empty/like
        datatype = [('a', int), ('b', float), ('c', '|S8')]
        a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
                         dtype=datatype)
        assert_equal(len(a.fill_value.item()), len(datatype))

        b = empty_like(a)
        assert_equal(b.shape, a.shape)
        assert_equal(b.fill_value, a.fill_value)

        b = empty(len(a), dtype=datatype)
        assert_equal(b.shape, a.shape)
        assert_equal(b.fill_value, a.fill_value)

        # check empty_like mask handling
        a = masked_array([1, 2, 3], mask=[False, True, False])
        b = empty_like(a)
        assert_(not np.may_share_memory(a.mask, b.mask))
        b = a.view(masked_array)
        assert_(np.may_share_memory(a.mask, b.mask))

    def test_put(self):
        # Tests put.
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        x = array(d, mask=m)
        self.assertTrue(x[3] is masked)
        self.assertTrue(x[4] is masked)
        x[[1, 4]] = [10, 40]
        #self.assertTrue(x.mask is not m)
        self.assertTrue(x[3] is masked)
        self.assertTrue(x[4] is not masked)
        assert_equal(x, [0, 10, 2, -1, 40])

        x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
        i = [0, 2, 4, 6]
        x.put(i, [6, 4, 2, 0])
        assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
        assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
        x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
        assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
        assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])

        # Same checks through the free function put().
        x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
        put(x, i, [6, 4, 2, 0])
        assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
        assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
        put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
        assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
        assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])

    def test_put_hardmask(self):
        # Tests put on hardmask
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        xh = array(d + 1, mask=m, hard_mask=True, copy=True)
        xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
        assert_equal(xh._data, [3, 4, 2, 4, 5])

    def test_putmask(self):
        x = arange(6) + 1
        mx = array(x, mask=[0, 0, 0, 1, 1, 1])
        mask = [0, 0, 1, 0, 0, 1]
        # w/o mask, w/o masked values
        xx = x.copy()
        putmask(xx, mask, 99)
        assert_equal(xx, [1, 2, 99, 4, 5, 99])
        # w/ mask, w/o masked values
        mxx = mx.copy()
        putmask(mxx, mask, 99)
        assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
        assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
        # w/o mask, w/ masked values
        values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
        xx = x.copy()
        putmask(xx, mask, values)
        assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
        assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
        # w/ mask, w/ masked values
        mxx = mx.copy()
        putmask(mxx, mask, values)
        assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
        assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
        # w/ mask, w/ masked values + hardmask
        mxx = mx.copy()
        mxx.harden_mask()
        putmask(mxx, mask, values)
        assert_equal(mxx, [1, 2, 30, 4, 5, 60])

    def test_ravel(self):
        # Tests ravel
        a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
        aravel = a.ravel()
        assert_equal(aravel._mask.shape, aravel.shape)
        a = array([0, 0], mask=[1, 1])
        aravel = a.ravel()
        assert_equal(aravel._mask.shape, a.shape)
        # np.matrix stays 2D even when raveled.
        a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
        aravel = a.ravel()
        assert_equal(aravel.shape, (1, 5))
        assert_equal(aravel._mask.shape, a.shape)
        # Checks that small_mask is preserved
        a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
        assert_equal(a.ravel()._mask, [0, 0, 0, 0])
        # Test that the fill_value is preserved
        a.fill_value = -99
        a.shape = (2, 2)
        ar = a.ravel()
        assert_equal(ar._mask, [0, 0, 0, 0])
        assert_equal(ar._data, [1, 2, 3, 4])
        assert_equal(ar.fill_value, -99)
        # Test index ordering
        assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
        assert_equal(a.ravel(order='F'), [1, 3, 2, 4])

    def test_reshape(self):
        # Tests reshape
        x = arange(4)
        x[0] = masked
        y = x.reshape(2, 2)
        assert_equal(y.shape, (2, 2,))
        assert_equal(y._mask.shape, (2, 2,))
        assert_equal(x.shape, (4,))
        assert_equal(x._mask.shape, (4,))

    def test_sort(self):
        # Test sort
        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)

        sortedx = sort(x)
        assert_equal(sortedx._data, [1, 2, 3, 4])
        assert_equal(sortedx._mask, [0, 0, 0, 1])

        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [4, 1, 2, 3])
        assert_equal(sortedx._mask, [1, 0, 0, 0])

        x.sort()
        assert_equal(x._data, [1, 2, 3, 4])
        assert_equal(x._mask, [0, 0, 0, 1])

        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
        x.sort(endwith=False)
        assert_equal(x._data, [4, 1, 2, 3])
        assert_equal(x._mask, [1, 0, 0, 0])

        x = [1, 4, 2, 3]
        sortedx = sort(x)
        self.assertTrue(not isinstance(sorted, MaskedArray))

        x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
        x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
        sortedx = sort(x, endwith=False)
        assert_equal(sortedx._data, [1, 2, -2, -1, 0])
        assert_equal(sortedx._mask, [1, 1, 0, 0, 0])

    def test_sort_2d(self):
        # Check sort of 2D array.
        # 2D array w/o mask
        a = masked_array([[8, 4, 1], [2, 0, 9]])
        a.sort(0)
        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
        a = masked_array([[8, 4, 1], [2, 0, 9]])
        a.sort(1)
        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
        # 2D array w/mask
        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
        a.sort(0)
        assert_equal(a, [[2, 0, 1], [8, 4, 9]])
        assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
        a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
        a.sort(1)
        assert_equal(a, [[1, 4, 8], [0, 2, 9]])
        assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
        # 3D
        a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
                          [[1, 2, 3], [7, 8, 9], [4, 5, 6]],
                          [[7, 8, 9], [1, 2, 3], [4, 5, 6]],
                          [[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
        a[a % 4 == 0] = masked
        # Sorting the masked array along each axis must agree with sorting
        # the same data with masked slots filled by a large sentinel.
        am = a.copy()
        an = a.filled(99)
        am.sort(0)
        an.sort(0)
        assert_equal(am, an)
        am = a.copy()
        an = a.filled(99)
        am.sort(1)
        an.sort(1)
        assert_equal(am, an)
        am = a.copy()
        an = a.filled(99)
        am.sort(2)
        an.sort(2)
        assert_equal(am, an)

    def test_sort_flexible(self):
        # Test sort on flexible dtype.
        a = array(
            data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
            mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
            dtype=[('A', int), ('B', int)])

        test = sort(a)
        b = array(
            data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
            mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
            dtype=[('A', int), ('B', int)])
        assert_equal(test, b)
        assert_equal(test.mask, b.mask)

        test = sort(a, endwith=False)
        b = array(
            data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
            mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
            dtype=[('A', int), ('B', int)])
        assert_equal(test, b)
        assert_equal(test.mask, b.mask)

    def test_argsort(self):
        # Test argsort
        a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
        assert_equal(np.argsort(a), argsort(a))

    def test_squeeze(self):
        # Check squeeze
        data = masked_array([[1, 2, 3]])
        assert_equal(data.squeeze(), [1, 2, 3])
        data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
        assert_equal(data.squeeze(), [1, 2, 3])
        assert_equal(data.squeeze()._mask, [1, 1, 1])
        data = masked_array([[1]], mask=True)
        self.assertTrue(data.squeeze() is masked)

    def test_swapaxes(self):
        # Tests swapaxes on MaskedArrays.
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mX = array(x, mask=m).reshape(6, 6)
        mXX = mX.reshape(3, 2, 2, 3)

        mXswapped = mX.swapaxes(0, 1)
        assert_equal(mXswapped[-1], mX[:, -1])

        mXXswapped = mXX.swapaxes(0, 2)
        assert_equal(mXXswapped.shape, (2, 2, 3, 3))

    def test_take(self):
        # Tests take
        x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
        assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
        assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
        assert_equal(x.take([[0, 1], [0, 1]]),
                     masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))

        x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
        assert_equal(x.take([0, 2], axis=1),
                     array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
        assert_equal(take(x, [0, 2], axis=1),
                     array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))

    def test_take_masked_indices(self):
        # Test take w/ masked indices
        a = np.array((40, 18, 37, 9, 22))
        indices = np.arange(3)[None,:] + np.arange(5)[:, None]
        mindices = array(indices, mask=(indices >= len(a)))
        # No mask
        test = take(a, mindices, mode='clip')
        ctrl = array([[40, 18, 37],
                      [18, 37, 9],
                      [37, 9, 22],
                      [9, 22, 22],
                      [22, 22, 22]])
        assert_equal(test, ctrl)
        # Masked indices
        test = take(a, mindices)
        ctrl = array([[40, 18, 37],
                      [18, 37, 9],
                      [37, 9, 22],
                      [9, 22, 40],
                      [22, 40, 40]])
        ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
        assert_equal(test, ctrl)
        assert_equal(test.mask, ctrl.mask)
        # Masked input + masked indices
        a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
        test = take(a, mindices)
        ctrl[0, 1] = ctrl[1, 0] = masked
        assert_equal(test, ctrl)
        assert_equal(test.mask, ctrl.mask)

    def test_tolist(self):
        # Tests to list
        # ... on 1D
        x = array(np.arange(12))
        x[[1, -2]] = masked
        xlist = x.tolist()
        self.assertTrue(xlist[1] is None)
        self.assertTrue(xlist[-2] is None)
        # ... on 2D
        x.shape = (3, 4)
        xlist = x.tolist()
        ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
        assert_equal(xlist[0], [0, None, 2, 3])
        assert_equal(xlist[1], [4, 5, 6, 7])
        assert_equal(xlist[2], [8, 9, None, 11])
        assert_equal(xlist, ctrl)
        # ... on structured array w/ masked records
        x = array(list(zip([1, 2, 3],
                           [1.1, 2.2, 3.3],
                           ['one', 'two', 'thr'])),
                  dtype=[('a', int), ('b', float), ('c', '|S8')])
        x[-1] = masked
        assert_equal(x.tolist(),
                     [(1, 1.1, asbytes('one')),
                      (2, 2.2, asbytes('two')),
                      (None, None, None)])
        # ... on structured array w/ masked fields
        a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
                  dtype=[('a', int), ('b', int)])
        test = a.tolist()
        assert_equal(test, [[1, None], [3, 4]])
        # ... on mvoid
        a = a[0]
        test = a.tolist()
        assert_equal(test, [1, None])

    def test_tolist_specialcase(self):
        # Test mvoid.tolist: make sure we return a standard Python object
        a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
        # w/o mask: each entry is a np.void whose elements are standard Python
        for entry in a:
            for item in entry.tolist():
                assert_(not isinstance(item, np.generic))
        # w/ mask: each entry is a ma.void whose elements should be
        # standard Python
        a.mask[0] = (0, 1)
        for entry in a:
            for item in entry.tolist():
                assert_(not isinstance(item, np.generic))

    def test_toflex(self):
        # Test the conversion to records
        data = arange(10)
        record = data.toflex()
        assert_equal(record['_data'], data._data)
        assert_equal(record['_mask'], data._mask)

        data[[0, 1, 2, -1]] = masked
        record = data.toflex()
        assert_equal(record['_data'], data._data)
        assert_equal(record['_mask'], data._mask)

        ndtype = [('i', int), ('s', '|S3'), ('f', float)]
        data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
                                                     'ABCDEFGHIJKLM',
                                                     np.random.rand(10))],
                     dtype=ndtype)
        data[[0, 1, 2, -1]] = masked
        record = data.toflex()
        assert_equal(record['_data'], data._data)
        # NOTE(review): continuation of test_toflex, begun just above.
        assert_equal(record['_mask'], data._mask)

        # Nested (sub-array) field dtype.
        ndtype = np.dtype("int, (2,3)float, float")
        data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
                                                       np.random.rand(10),
                                                       np.random.rand(10))],
                     dtype=ndtype)
        data[[0, 1, 2, -1]] = masked
        record = data.toflex()
        assert_equal_records(record['_data'], data._data)
        assert_equal_records(record['_mask'], data._mask)

    def test_fromflex(self):
        # Test the reconstruction of a masked_array from a record
        a = array([1, 2, 3])
        test = fromflex(a.toflex())
        assert_equal(test, a)
        assert_equal(test.mask, a.mask)

        a = array([1, 2, 3], mask=[0, 0, 1])
        test = fromflex(a.toflex())
        assert_equal(test, a)
        assert_equal(test.mask, a.mask)

        a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
                  dtype=[('A', int), ('B', float)])
        test = fromflex(a.toflex())
        assert_equal(test, a)
        assert_equal(test.data, a.data)

    def test_arraymethod(self):
        # Test a _arraymethod w/ n argument
        marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
        control = masked_array([[1], [2], [3], [4], [5]],
                               mask=[0, 0, 1, 0, 0])
        assert_equal(marray.T, control)
        assert_equal(marray.transpose(), control)

        assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))


class TestMaskedArrayMathMethods(TestCase):
    def setUp(self):
        # Base data definition.
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        X = x.reshape(6, 6)
        XX = x.reshape(3, 2, 2, 3)
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(data=x, mask=m)
        mX = array(data=X, mask=m.reshape(X.shape))
        mXX = array(data=XX, mask=m.reshape(XX.shape))
        m2 = np.array([1, 1, 0, 1, 0, 0,
                       1, 1, 1, 1, 0, 1,
                       0, 0, 1, 1, 0, 1,
                       0, 0, 0, 1, 1, 1,
                       1, 0, 0, 1, 1, 0,
                       0, 0, 1, 0, 1, 1])
        m2x = array(data=x, mask=m2)
        m2X = array(data=X, mask=m2.reshape(X.shape))
        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
        self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)

    def test_cumsumprod(self):
        # Tests cumsum & cumprod on MaskedArrays.
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        mXcp = mX.cumsum(0)
        assert_equal(mXcp._data, mX.filled(0).cumsum(0))
        mXcp = mX.cumsum(1)
        assert_equal(mXcp._data, mX.filled(0).cumsum(1))

        mXcp = mX.cumprod(0)
        assert_equal(mXcp._data, mX.filled(1).cumprod(0))
        mXcp = mX.cumprod(1)
        assert_equal(mXcp._data, mX.filled(1).cumprod(1))

    def test_cumsumprod_with_output(self):
        # Tests cumsum/cumprod w/ output
        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
        xm[:, 0] = xm[0] = xm[-1, -1] = masked

        for funcname in ('cumsum', 'cumprod'):
            npfunc = getattr(np, funcname)
            xmmeth = getattr(xm, funcname)

            # A ndarray as explicit input
            output = np.empty((3, 4), dtype=float)
            output.fill(-9999)
            result = npfunc(xm, axis=0, out=output)
            # ... the result should be the given output
            self.assertTrue(result is output)
            assert_equal(result, xmmeth(axis=0, out=output))

            output = empty((3, 4), dtype=int)
            result = xmmeth(axis=0, out=output)
            self.assertTrue(result is output)

    def test_ptp(self):
        # Tests ptp on MaskedArrays.
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        (n, m) = X.shape
        assert_equal(mx.ptp(), mx.compressed().ptp())
        # Per-axis ptp must agree with ptp of each compressed row/column.
        rows = np.zeros(n, np.float)
        cols = np.zeros(m, np.float)
        for k in range(m):
            cols[k] = mX[:, k].compressed().ptp()
        for k in range(n):
            rows[k] = mX[k].compressed().ptp()
        assert_equal(mX.ptp(0), cols)
        assert_equal(mX.ptp(1), rows)

    def test_add_object(self):
        x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
        y = x + 'x'
        assert_equal(y[1], 'bx')
        assert_(y.mask[0])

    def test_sum_object(self):
        # Test sum on object dtype
        a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
        assert_equal(a.sum(), 5)
        a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
        assert_equal(a.sum(axis=0), [5, 7, 9])

    def test_prod_object(self):
        # Test prod on object dtype
        a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
        assert_equal(a.prod(), 2 * 3)
        a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
        assert_equal(a.prod(axis=0), [4, 10, 18])

    def test_meananom_object(self):
        # Test mean/anom on object dtype
        a = masked_array([1, 2, 3], dtype=np.object)
        assert_equal(a.mean(), 2)
        assert_equal(a.anom(), [-1, 0, 1])

    def test_trace(self):
        # Tests trace on MaskedArrays.
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        mXdiag = mX.diagonal()
        assert_equal(mX.trace(), mX.diagonal().compressed().sum())
        assert_almost_equal(mX.trace(),
                            X.trace() - sum(mXdiag.mask * X.diagonal(),
                                            axis=0))
        assert_equal(np.trace(mX), mX.trace())

    def test_dot(self):
        # Tests dot on MaskedArrays.
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        fx = mx.filled(0)
        r = mx.dot(mx)
        assert_almost_equal(r.filled(0), fx.dot(fx))
        assert_(r.mask is nomask)

        fX = mX.filled(0)
        r = mX.dot(mX)
        assert_almost_equal(r.filled(0), fX.dot(fX))
        assert_(r.mask[1,3])
        r1 = empty_like(r)
        mX.dot(mX, out=r1)
        assert_almost_equal(r, r1)

        mYY = mXX.swapaxes(-1, -2)
        fXX, fYY = mXX.filled(0), mYY.filled(0)
        r = mXX.dot(mYY)
        assert_almost_equal(r.filled(0), fXX.dot(fYY))
        r1 = empty_like(r)
        mXX.dot(mYY, out=r1)
        assert_almost_equal(r, r1)

    def test_varstd(self):
        # Tests var & std on MaskedArrays.
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        assert_almost_equal(mX.var(axis=None), mX.compressed().var())
        assert_almost_equal(mX.std(axis=None), mX.compressed().std())
        assert_almost_equal(mX.std(axis=None, ddof=1),
                            mX.compressed().std(ddof=1))
        assert_almost_equal(mX.var(axis=None, ddof=1),
                            mX.compressed().var(ddof=1))
        assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
        assert_equal(mX.var().shape, X.var().shape)
        (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
        assert_almost_equal(mX.var(axis=None, ddof=2),
                            mX.compressed().var(ddof=2))
        assert_almost_equal(mX.std(axis=None, ddof=2),
                            mX.compressed().std(ddof=2))
        for k in range(6):
            assert_almost_equal(mXvar1[k], mX[k].compressed().var())
            assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
            assert_almost_equal(np.sqrt(mXvar0[k]),
                                mX[:, k].compressed().std())

    def test_varstd_specialcases(self):
        # Test a special case for var
        nout = np.array(-1, dtype=float)
        mout = array(-1, dtype=float)

        x = array(arange(10), mask=True)
        for methodname in ('var', 'std'):
            method = getattr(x, methodname)
            self.assertTrue(method() is masked)
            self.assertTrue(method(0) is masked)
            self.assertTrue(method(-1) is masked)
            # Using a masked array as explicit output
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                method(out=mout)
            self.assertTrue(mout is not masked)
            assert_equal(mout.mask, True)
            # Using a ndarray as explicit output
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                method(out=nout)
            self.assertTrue(np.isnan(nout))

        x = array(arange(10), mask=True)
        x[-1] = 9
        for methodname in ('var', 'std'):
            method = getattr(x, methodname)
            self.assertTrue(method(ddof=1) is masked)
            self.assertTrue(method(0, ddof=1) is masked)
            self.assertTrue(method(-1, ddof=1) is masked)
            # Using a masked array as explicit output
            method(out=mout, ddof=1)
            self.assertTrue(mout is not masked)
            assert_equal(mout.mask, True)
            # Using a ndarray as explicit output
            method(out=nout, ddof=1)
            self.assertTrue(np.isnan(nout))

    def test_varstd_ddof(self):
        a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
        test = a.std(axis=0, ddof=0)
        assert_equal(test.filled(0), [0, 0, 0])
        assert_equal(test.mask, [0, 0, 1])
        test = a.std(axis=0, ddof=1)
        assert_equal(test.filled(0), [0, 0, 0])
        assert_equal(test.mask, [0, 0, 1])
        # ddof=2 exceeds the unmasked count, so everything becomes masked.
        test = a.std(axis=0, ddof=2)
        assert_equal(test.filled(0), [0, 0, 0])
        assert_equal(test.mask, [1, 1, 1])

    def test_diag(self):
        # Test diag
        x = arange(9).reshape((3, 3))
        x[1, 1] = masked
        out = np.diag(x)
        assert_equal(out, [0, 4, 8])
        out = diag(x)
        assert_equal(out, [0, 4, 8])
        assert_equal(out.mask, [0, 1, 0])
        out = diag(out)
        control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
                        mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        assert_equal(out, control)

    def test_axis_methods_nomask(self):
        # Test the combination nomask & methods w/ axis
        a = array([[1, 2, 3], [4, 5, 6]])

        assert_equal(a.sum(0), [5, 7, 9])
        assert_equal(a.sum(-1), [6, 15])
        assert_equal(a.sum(1), [6, 15])

        assert_equal(a.prod(0), [4, 10, 18])
        assert_equal(a.prod(-1), [6, 120])
        assert_equal(a.prod(1), [6, 120])

        assert_equal(a.min(0), [1, 2, 3])
        assert_equal(a.min(-1), [1, 4])
        assert_equal(a.min(1), [1, 4])

        assert_equal(a.max(0), [4, 5, 6])
        assert_equal(a.max(-1), [3, 6])
        assert_equal(a.max(1), [3, 6])


class TestMaskedArrayMathMethodsComplex(TestCase):
    # Test class for miscellaneous MaskedArrays methods.
    def setUp(self):
        # Base data definition.
        x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
                      7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
        X = x.reshape(6, 6)
        XX = x.reshape(3, 2, 2, 3)
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(data=x, mask=m)
        mX = array(data=X, mask=m.reshape(X.shape))
        mXX = array(data=XX, mask=m.reshape(XX.shape))
        m2 = np.array([1, 1, 0, 1, 0, 0,
                       1, 1, 1, 1, 0, 1,
                       0, 0, 1, 1, 0, 1,
                       0, 0, 0, 1, 1, 1,
                       1, 0, 0, 1, 1, 0,
                       0, 0, 1, 0, 1, 1])
        m2x = array(data=x, mask=m2)
        m2X = array(data=X, mask=m2.reshape(X.shape))
        m2XX = array(data=XX, mask=m2.reshape(XX.shape))
        self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)

    def test_varstd(self):
        # Tests var & std on MaskedArrays.
        (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
        assert_almost_equal(mX.var(axis=None), mX.compressed().var())
        assert_almost_equal(mX.std(axis=None), mX.compressed().std())
        assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
        assert_equal(mX.var().shape, X.var().shape)
        (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
        assert_almost_equal(mX.var(axis=None, ddof=2),
                            mX.compressed().var(ddof=2))
        assert_almost_equal(mX.std(axis=None, ddof=2),
                            mX.compressed().std(ddof=2))
        for k in range(6):
            assert_almost_equal(mXvar1[k], mX[k].compressed().var())
            assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
            assert_almost_equal(np.sqrt(mXvar0[k]),
                                mX[:, k].compressed().std())


class TestMaskedArrayFunctions(TestCase):
    # Test class for miscellaneous functions.
    def setUp(self):
        x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
        y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
        xm = masked_array(x, mask=m1)
        ym = masked_array(y, mask=m2)
        xm.set_fill_value(1e+20)
        self.info = (xm, ym)

    def test_masked_where_bool(self):
        x = [1, 2]
        y = masked_where(False, x)
        assert_equal(y, [1, 2])
        assert_equal(y[1], 2)

    def test_masked_equal_wlist(self):
        x = [1, 2, 3]
        mx = masked_equal(x, 3)
        assert_equal(mx, x)
        assert_equal(mx._mask, [0, 0, 1])
        mx = masked_not_equal(x, 3)
        assert_equal(mx, x)
        assert_equal(mx._mask, [1, 1, 0])

    def test_masked_equal_fill_value(self):
        x = [1, 2, 3]
        mx = masked_equal(x, 3)
        assert_equal(mx._mask, [0, 0, 1])
        assert_equal(mx.fill_value, 3)

    def test_masked_where_condition(self):
        # Tests masking functions.
        x = array([1., 2., 3., 4., 5.])
        x[2] = masked
        assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
        assert_equal(masked_where(greater_equal(x, 2), x),
                     masked_greater_equal(x, 2))
        assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
        assert_equal(masked_where(less_equal(x, 2), x),
                     masked_less_equal(x, 2))
        assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
        assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
        assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
        assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                     [99, 99, 3, 4, 5])

    def test_masked_where_oddities(self):
        # Tests some generic features.
        atest = ones((10, 10, 10), dtype=float)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_equal(atest, ctest)

    def test_masked_where_shape_constraint(self):
        a = arange(10)
        # Scalar data with an array condition is a shape mismatch.
        try:
            test = masked_equal(1, a)
        except IndexError:
            pass
        else:
            raise AssertionError("Should have failed...")
        test = masked_equal(a, 1)
        assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])

    def test_masked_where_structured(self):
        # test that masked_where on a structured array sets a structured
        # mask (see issue #2972)
        a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
        am = np.ma.masked_where(a["A"] < 5, a)
        assert_equal(am.mask.dtype.names, am.dtype.names)
        assert_equal(am["A"],
                     np.ma.masked_array(np.zeros(10), np.ones(10)))

    def test_masked_otherfunctions(self):
        assert_equal(masked_inside(list(range(5)), 1, 3),
                     [0, 199, 199, 199, 4])
        assert_equal(masked_outside(list(range(5)), 1, 3),
                     [199, 1, 2, 3, 199])
        assert_equal(masked_inside(array(list(range(5)),
                                         mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                     [1, 1, 1, 1, 0])
        assert_equal(masked_outside(array(list(range(5)),
                                          mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                     [1, 1, 0, 0, 1])
        assert_equal(masked_equal(array(list(range(5)),
                                        mask=[1, 0, 0, 0, 0]), 2).mask,
                     [1, 0, 1, 0, 0])
        assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
                                            mask=[1, 0, 0, 0, 0]), 2).mask,
                     [1, 0, 1, 0, 1])

    def test_round(self):
        a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
                  mask=[0, 1, 0, 0, 0])
        assert_equal(a.round(), [1., 2., 3., 5., 6.])
        assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
        assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
        b = empty_like(a)
        a.round(out=b)
        assert_equal(b, [1., 2., 3., 5., 6.])

        # NOTE(review): the remainder exercises where() with masked
        # condition/data; presumably folded into this test historically.
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_equal(z, [1., 2., 0., -4., -5])
        c[0] = masked
        z = where(c, x, -x)
        assert_equal(z, [1., 2., 0., -4., -5])
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)

    def test_round_with_output(self):
        # Testing round with an explicit
output xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) xm[:, 0] = xm[0] = xm[-1, -1] = masked # A ndarray as explicit input output = np.empty((3, 4), dtype=float) output.fill(-9999) result = np.round(xm, decimals=2, out=output) # ... the result should be the given output self.assertTrue(result is output) assert_equal(result, xm.round(decimals=2, out=output)) output = empty((3, 4), dtype=float) result = xm.round(decimals=2, out=output) self.assertTrue(result is output) def test_identity(self): a = identity(5) self.assertTrue(isinstance(a, MaskedArray)) assert_equal(a, np.identity(5)) def test_power(self): x = -1.1 assert_almost_equal(power(x, 2.), 1.21) self.assertTrue(power(x, masked) is masked) x = array([-1.1, -1.1, 1.1, 1.1, 0.]) b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) y = power(x, b) assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) assert_equal(y._mask, [1, 0, 0, 0, 1]) b.mask = nomask y = power(x, b) assert_equal(y._mask, [1, 0, 0, 0, 1]) z = x ** b assert_equal(z._mask, y._mask) assert_almost_equal(z, y) assert_almost_equal(z._data, y._data) x **= b assert_equal(x._mask, y._mask) assert_almost_equal(x, y) assert_almost_equal(x._data, y._data) def test_power_w_broadcasting(self): # Test power w/ broadcasting a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) b1 = np.array([2, 4, 3]) b2 = np.array([b1, b1]) b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], mask=[[1, 1, 0], [0, 1, 1]]) # No broadcasting, base & exp w/ mask test = a2m ** b2m assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) # No broadcasting, base w/ mask, exp w/o mask test = a2m ** b2 assert_equal(test, ctrl) assert_equal(test.mask, a2m.mask) # No broadcasting, base w/o mask, exp w/ mask test = a2 ** b2m assert_equal(test, ctrl) assert_equal(test.mask, b2m.mask) ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], mask=[[0, 1, 0], [0, 1, 
0]]) test = b1 ** b2m assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) test = b2m ** b1 assert_equal(test, ctrl) assert_equal(test.mask, ctrl.mask) def test_where(self): # Test the where function x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) xm.set_fill_value(1e+20) d = where(xm > 2, xm, -9) assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.]) assert_equal(d._mask, xm._mask) d = where(xm > 2, -9, ym) assert_equal(d, [5., 0., 3., 2., -1., -9., -9., -10., -9., 1., 0., -9.]) assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) d = where(xm > 2, xm, masked) assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.]) tmp = xm._mask.copy() tmp[(xm <= 2).filled(True)] = True assert_equal(d._mask, tmp) ixm = xm.astype(int) d = where(ixm > 2, ixm, masked) assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) assert_equal(d.dtype, ixm.dtype) def test_where_object(self): a = np.array(None) b = masked_array(None) r = b.copy() assert_equal(np.ma.where(True, a, a), r) assert_equal(np.ma.where(True, b, b), r) def test_where_with_masked_choice(self): x = arange(10) x[3] = masked c = x >= 8 # Set False to masked z = where(c, x, masked) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is masked) assert_(z[7] is masked) assert_(z[8] is not masked) assert_(z[9] is not masked) assert_equal(x, z) # Set True to masked z = where(c, masked, x) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is not masked) assert_(z[7] is not masked) assert_(z[8] is masked) assert_(z[9] is masked) def test_where_with_masked_condition(self): x = array([1., 2., 3., 4., 5.]) c = array([1, 1, 1, 0, 0]) x[2] = masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) c[0] = 
masked z = where(c, x, -x) assert_equal(z, [1., 2., 0., -4., -5]) assert_(z[0] is masked) assert_(z[1] is not masked) assert_(z[2] is masked) x = arange(1, 6) x[-1] = masked y = arange(1, 6) * 10 y[2] = masked c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) cm = c.filled(1) z = where(c, x, y) zm = where(cm, x, y) assert_equal(z, zm) assert_(getmask(zm) is nomask) assert_equal(zm, [1, 2, 3, 40, 50]) z = where(c, masked, 1) assert_equal(z, [99, 99, 99, 1, 1]) z = where(c, 1, masked) assert_equal(z, [99, 1, 1, 99, 99]) def test_where_type(self): # Test the type conservation with where x = np.arange(4, dtype=np.int32) y = np.arange(4, dtype=np.float32) * 2.2 test = where(x > 1.5, y, x).dtype control = np.find_common_type([np.int32, np.float32], []) assert_equal(test, control) def test_choose(self): # Test choose choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] chosen = choose([2, 3, 1, 0], choices) assert_equal(chosen, array([20, 31, 12, 3])) chosen = choose([2, 4, 1, 0], choices, mode='clip') assert_equal(chosen, array([20, 31, 12, 3])) chosen = choose([2, 4, 1, 0], choices, mode='wrap') assert_equal(chosen, array([20, 1, 12, 3])) # Check with some masked indices indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) chosen = choose(indices_, choices, mode='wrap') assert_equal(chosen, array([99, 1, 12, 99])) assert_equal(chosen.mask, [1, 0, 0, 1]) # Check with some masked choices choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], [1, 0, 0, 0], [0, 0, 0, 0]]) indices_ = [2, 3, 1, 0] chosen = choose(indices_, choices, mode='wrap') assert_equal(chosen, array([20, 31, 12, 3])) assert_equal(chosen.mask, [1, 0, 0, 1]) def test_choose_with_out(self): # Test choose with an explicit out keyword choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] store = empty(4, dtype=int) chosen = choose([2, 3, 1, 0], choices, out=store) assert_equal(store, array([20, 31, 12, 3])) self.assertTrue(store is chosen) # Check with some 
masked indices + out store = empty(4, dtype=int) indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) chosen = choose(indices_, choices, mode='wrap', out=store) assert_equal(store, array([99, 31, 12, 99])) assert_equal(store.mask, [1, 0, 0, 1]) # Check with some masked choices + out ina ndarray ! choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], [1, 0, 0, 0], [0, 0, 0, 0]]) indices_ = [2, 3, 1, 0] store = empty(4, dtype=int).view(ndarray) chosen = choose(indices_, choices, mode='wrap', out=store) assert_equal(store, array([999999, 31, 12, 999999])) def test_reshape(self): a = arange(10) a[0] = masked # Try the default b = a.reshape((5, 2)) assert_equal(b.shape, (5, 2)) self.assertTrue(b.flags['C']) # Try w/ arguments as list instead of tuple b = a.reshape(5, 2) assert_equal(b.shape, (5, 2)) self.assertTrue(b.flags['C']) # Try w/ order b = a.reshape((5, 2), order='F') assert_equal(b.shape, (5, 2)) self.assertTrue(b.flags['F']) # Try w/ order b = a.reshape(5, 2, order='F') assert_equal(b.shape, (5, 2)) self.assertTrue(b.flags['F']) c = np.reshape(a, (2, 5)) self.assertTrue(isinstance(c, MaskedArray)) assert_equal(c.shape, (2, 5)) self.assertTrue(c[0, 0] is masked) self.assertTrue(c.flags['C']) def test_make_mask_descr(self): # Test make_mask_descr # Flexible ntype = [('a', np.float), ('b', np.float)] test = make_mask_descr(ntype) assert_equal(test, [('a', np.bool), ('b', np.bool)]) # Standard w/ shape ntype = (np.float, 2) test = make_mask_descr(ntype) assert_equal(test, (np.bool, 2)) # Standard standard ntype = np.float test = make_mask_descr(ntype) assert_equal(test, np.dtype(np.bool)) # Nested ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])] test = make_mask_descr(ntype) control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) assert_equal(test, control) # Named+ shape ntype = [('a', (np.float, 2))] test = make_mask_descr(ntype) assert_equal(test, np.dtype([('a', (np.bool, 2))])) # 2 names ntype = [(('A', 'a'), float)] 
test = make_mask_descr(ntype) assert_equal(test, np.dtype([(('A', 'a'), bool)])) def test_make_mask(self): # Test make_mask # w/ a list as an input mask = [0, 1] test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [0, 1]) # w/ a ndarray as an input mask = np.array([0, 1], dtype=np.bool) test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [0, 1]) # w/ a flexible-type ndarray as an input - use default mdtype = [('a', np.bool), ('b', np.bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [1, 1]) # w/ a flexible-type ndarray as an input - use input dtype mdtype = [('a', np.bool), ('b', np.bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask, dtype=mask.dtype) assert_equal(test.dtype, mdtype) assert_equal(test, mask) # w/ a flexible-type ndarray as an input - use input dtype mdtype = [('a', np.float), ('b', np.float)] bdtype = [('a', np.bool), ('b', np.bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask, dtype=mask.dtype) assert_equal(test.dtype, bdtype) assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) # test that nomask is returned when m is nomask. bools = [True, False] dtypes = [MaskType, np.float] msgformat = 'copy=%s, shrink=%s, dtype=%s' for cpy, shr, dt in itertools.product(bools, bools, dtypes): res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) assert_(res is nomask, msgformat % (cpy, shr, dt)) def test_mask_or(self): # Initialize mtype = [('a', np.bool), ('b', np.bool)] mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) # Test using nomask as input test = mask_or(mask, nomask) assert_equal(test, mask) test = mask_or(nomask, mask) assert_equal(test, mask) # Using False as input test = mask_or(mask, False) assert_equal(test, mask) # Using True as input. 
Won't work, but keep it for the kicks # test = mask_or(mask, True) # control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype) # assert_equal(test, control) # Using another array w / the same dtype other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) test = mask_or(mask, other) control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) assert_equal(test, control) # Using another array w / a different dtype othertype = [('A', np.bool), ('B', np.bool)] other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) try: test = mask_or(mask, other) except ValueError: pass # Using nested arrays dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])] amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) assert_equal(mask_or(amask, bmask), cntrl) def test_flatten_mask(self): # Tests flatten mask # Standarad dtype mask = np.array([0, 0, 1], dtype=np.bool) assert_equal(flatten_mask(mask), mask) # Flexible dtype mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) test = flatten_mask(mask) control = np.array([0, 0, 0, 1], dtype=bool) assert_equal(test, control) mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] data = [(0, (0, 0)), (0, (0, 1))] mask = np.array(data, dtype=mdtype) test = flatten_mask(mask) control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) assert_equal(test, control) def test_on_ndarray(self): # Test functions on ndarrays a = np.array([1, 2, 3, 4]) m = array(a, mask=False) test = anom(a) assert_equal(test, m.anom()) test = reshape(a, (2, 2)) assert_equal(test, m.reshape(2, 2)) def test_compress(self): # Test compress function on ndarray and masked array # Address Github #2495. 
arr = np.arange(8) arr.shape = 4, 2 cond = np.array([True, False, True, True]) control = arr[[0, 2, 3]] test = np.ma.compress(cond, arr, axis=0) assert_equal(test, control) marr = np.ma.array(arr) test = np.ma.compress(cond, marr, axis=0) assert_equal(test, control) def test_compressed(self): # Test ma.compressed function. # Address gh-4026 a = np.ma.array([1, 2]) test = np.ma.compressed(a) assert_(type(test) is np.ndarray) # Test case when input data is ndarray subclass class A(np.ndarray): pass a = np.ma.array(A(shape=0)) test = np.ma.compressed(a) assert_(type(test) is A) # Test that compress flattens test = np.ma.compressed([[1],[2]]) assert_equal(test.ndim, 1) test = np.ma.compressed([[[[[1]]]]]) assert_equal(test.ndim, 1) # Test case when input is MaskedArray subclass class M(MaskedArray): pass test = np.ma.compressed(M(shape=(0,1,2))) assert_equal(test.ndim, 1) # with .compessed() overriden class M(MaskedArray): def compressed(self): return 42 test = np.ma.compressed(M(shape=(0,1,2))) assert_equal(test, 42) class TestMaskedFields(TestCase): def setUp(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] ddtype = [('a', int), ('b', float), ('c', '|S8')] mdtype = [('a', bool), ('b', bool), ('c', bool)] mask = [0, 1, 0, 0, 1] base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) def test_set_records_masks(self): base = self.data['base'] mdtype = self.data['mdtype'] # Set w/ nomask or masked base.mask = nomask assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) base.mask = masked assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) # Set w/ simple boolean base.mask = False assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) base.mask = True assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) # Set w/ list base.mask = [0, 0, 0, 1, 1] 
assert_equal_records(base._mask, np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], dtype=mdtype)) def test_set_record_element(self): # Check setting an element of a record) base = self.data['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = (pi, pi, 'pi') assert_equal(base_a.dtype, int) assert_equal(base_a._data, [3, 2, 3, 4, 5]) assert_equal(base_b.dtype, float) assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) assert_equal(base_c.dtype, '|S8') assert_equal(base_c._data, asbytes_nested(['pi', 'two', 'three', 'four', 'five'])) def test_set_record_slice(self): base = self.data['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[:3] = (pi, pi, 'pi') assert_equal(base_a.dtype, int) assert_equal(base_a._data, [3, 3, 3, 4, 5]) assert_equal(base_b.dtype, float) assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) assert_equal(base_c.dtype, '|S8') assert_equal(base_c._data, asbytes_nested(['pi', 'pi', 'pi', 'four', 'five'])) def test_mask_element(self): "Check record access" base = self.data['base'] base[0] = masked for n in ('a', 'b', 'c'): assert_equal(base[n].mask, [1, 1, 0, 0, 1]) assert_equal(base[n]._data, base._data[n]) def test_getmaskarray(self): # Test getmaskarray on flexible dtype ndtype = [('a', int), ('b', float)] test = empty(3, dtype=ndtype) assert_equal(getmaskarray(test), np.array([(0, 0), (0, 0), (0, 0)], dtype=[('a', '|b1'), ('b', '|b1')])) test[:] = masked assert_equal(getmaskarray(test), np.array([(1, 1), (1, 1), (1, 1)], dtype=[('a', '|b1'), ('b', '|b1')])) def test_view(self): # Test view w/ flexible dtype iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) # Transform globally to simple dtype test = a.view(float) assert_equal(test, data.ravel()) assert_equal(test.mask, controlmask) # Transform globally to dty test = a.view((float, 2)) 
assert_equal(test, data) assert_equal(test.mask, controlmask.reshape(-1, 2)) test = a.view((float, 2), np.matrix) assert_equal(test, data) self.assertTrue(isinstance(test, np.matrix)) def test_getitem(self): ndtype = [('a', float), ('b', float)] a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), dtype=[('a', bool), ('b', bool)]) # No mask self.assertTrue(isinstance(a[1], MaskedArray)) # One element masked self.assertTrue(isinstance(a[0], MaskedArray)) assert_equal_records(a[0]._data, a._data[0]) assert_equal_records(a[0]._mask, a._mask[0]) # All element masked self.assertTrue(isinstance(a[-2], MaskedArray)) assert_equal_records(a[-2]._data, a._data[-2]) assert_equal_records(a[-2]._mask, a._mask[-2]) def test_setitem(self): # Issue 4866: check that one can set individual items in [record][col] # and [col][record] order ndtype = np.dtype([('a', float), ('b', int)]) ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) ma['a'][1] = 3.0 assert_equal(ma['a'], np.array([1.0, 3.0])) ma[1]['a'] = 4.0 assert_equal(ma['a'], np.array([1.0, 4.0])) # Issue 2403 mdtype = np.dtype([('a', bool), ('b', bool)]) # soft mask control = np.array([(False, True), (True, True)], dtype=mdtype) a = np.ma.masked_all((2,), dtype=ndtype) a['a'][0] = 2 assert_equal(a.mask, control) a = np.ma.masked_all((2,), dtype=ndtype) a[0]['a'] = 2 assert_equal(a.mask, control) # hard mask control = np.array([(True, True), (True, True)], dtype=mdtype) a = np.ma.masked_all((2,), dtype=ndtype) a.harden_mask() a['a'][0] = 2 assert_equal(a.mask, control) a = np.ma.masked_all((2,), dtype=ndtype) a.harden_mask() a[0]['a'] = 2 assert_equal(a.mask, control) def test_element_len(self): # check that len() works for mvoid (Github issue #576) for rec in self.data['base']: assert_equal(len(rec), len(self.data['ddtype'])) class TestMaskedView(TestCase): def setUp(self): iterator = list(zip(np.arange(10), 
np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) self.data = (data, a, controlmask) def test_view_to_nothing(self): (data, a, controlmask) = self.data test = a.view() self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) def test_view_to_type(self): (data, a, controlmask) = self.data test = a.view(np.ndarray) self.assertTrue(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) def test_view_to_simple_dtype(self): (data, a, controlmask) = self.data # View globally test = a.view(float) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test, data.ravel()) assert_equal(test.mask, controlmask) def test_view_to_flexible_dtype(self): (data, a, controlmask) = self.data test = a.view([('A', float), ('B', float)]) assert_equal(test.mask.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a']) assert_equal(test['B'], a['b']) test = a[0].view([('A', float), ('B', float)]) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test.mask.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][0]) assert_equal(test['B'], a['b'][0]) test = a[-1].view([('A', float), ('B', float)]) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][-1]) assert_equal(test['B'], a['b'][-1]) def test_view_to_subdtype(self): (data, a, controlmask) = self.data # View globally test = a.view((float, 2)) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test, data) assert_equal(test.mask, controlmask.reshape(-1, 2)) # View on 1 masked element test = a[0].view((float, 2)) self.assertTrue(isinstance(test, MaskedArray)) assert_equal(test, data[0]) assert_equal(test.mask, (1, 0)) # View on 1 unmasked element test = a[-1].view((float, 2)) self.assertTrue(isinstance(test, 
MaskedArray)) assert_equal(test, data[-1]) def test_view_to_dtype_and_type(self): (data, a, controlmask) = self.data test = a.view((float, 2), np.matrix) assert_equal(test, data) self.assertTrue(isinstance(test, np.matrix)) self.assertTrue(not isinstance(test, MaskedArray)) def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) def test_append_masked_array(): a = np.ma.masked_equal([1,2,3], value=2) b = np.ma.masked_equal([4,3,2], value=2) result = np.ma.append(a, b) expected_data = [1, 2, 3, 4, 3, 2] expected_mask = [False, True, False, False, False, True] assert_array_equal(result.data, expected_data) assert_array_equal(result.mask, expected_mask) a = np.ma.masked_all((2,2)) b = np.ma.ones((3,1)) result = np.ma.append(a, b) expected_data = [1] * 3 expected_mask = [True] * 4 + [False] * 3 assert_array_equal(result.data[-3], expected_data) assert_array_equal(result.mask, expected_mask) result = np.ma.append(a, b, axis=None) assert_array_equal(result.data[-3], expected_data) assert_array_equal(result.mask, expected_mask) def test_append_masked_array_along_axis(): a = np.ma.masked_equal([1,2,3], value=2) b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) # When `axis` is specified, `values` must have the correct shape. assert_raises(ValueError, np.ma.append, a, b, axis=0) result = np.ma.append(a[np.newaxis,:], b, axis=0) expected = np.ma.arange(1, 10) expected[[1, 6]] = np.ma.masked expected = expected.reshape((3,3)) assert_array_equal(result.data, expected.data) assert_array_equal(result.mask, expected.mask) if __name__ == "__main__": run_module_suite()
unknown
codeparrot/codeparrot-clean
from unittest import mock from django.core.checks import Error from django.db import connections, models from django.test import SimpleTestCase from django.test.utils import isolate_apps def dummy_allow_migrate(db, app_label, **hints): # Prevent checks from being run on the 'other' database, which doesn't have # its check_field() method mocked in the test. return db == "default" @isolate_apps("invalid_models_tests") class BackendSpecificChecksTests(SimpleTestCase): @mock.patch("django.db.models.fields.router.allow_migrate", new=dummy_allow_migrate) def test_check_field(self): """Test if backend specific checks are performed.""" error = Error("an error") class Model(models.Model): field = models.IntegerField() field = Model._meta.get_field("field") with mock.patch.object( connections["default"].validation, "check_field", return_value=[error] ): self.assertEqual(field.check(databases={"default"}), [error])
python
github
https://github.com/django/django
tests/invalid_models_tests/test_backend_specific.py
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: rax_scaling_policy short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy description: - Manipulate Rackspace Cloud Autoscale Scaling Policy version_added: 1.7 options: at: description: - The UTC time when this policy will be executed. The time must be formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as C(2013-05-19T08:07:08Z) change: description: - The change, either as a number of servers or as a percentage, to make in the scaling group. If this is a percentage, you must set I(is_percent) to C(true) also. cron: description: - The time when the policy will be executed, as a cron entry. For example, if this is parameter is set to C(1 0 * * *) cooldown: description: - The period of time, in seconds, that must pass before any scaling can occur after the previous scaling. Must be an integer between 0 and 86400 (24 hrs). desired_capacity: description: - The desired server capacity of the scaling the group; that is, how many servers should be in the scaling group. is_percent: description: - Whether the value in I(change) is a percent value default: false name: description: - Name to give the policy required: true policy_type: description: - The type of policy that will be executed for the current release. 
choices: - webhook - schedule required: true scaling_group: description: - Name of the scaling group that this policy will be added to required: true state: description: - Indicate desired state of the resource choices: - present - absent default: present author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' EXAMPLES = ''' --- - hosts: localhost gather_facts: false connection: local tasks: - rax_scaling_policy: credentials: ~/.raxpub region: ORD at: '2013-05-19T08:07:08Z' change: 25 cooldown: 300 is_percent: true name: ASG Test Policy - at policy_type: schedule scaling_group: ASG Test register: asps_at - rax_scaling_policy: credentials: ~/.raxpub region: ORD cron: '1 0 * * *' change: 25 cooldown: 300 is_percent: true name: ASG Test Policy - cron policy_type: schedule scaling_group: ASG Test register: asp_cron - rax_scaling_policy: credentials: ~/.raxpub region: ORD cooldown: 300 desired_capacity: 5 name: ASG Test Policy - webhook policy_type: webhook scaling_group: ASG Test register: asp_webhook ''' try: import pyrax HAS_PYRAX = True except ImportError: HAS_PYRAX = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module) def rax_asp(module, at=None, change=0, cron=None, cooldown=300, desired_capacity=0, is_percent=False, name=None, policy_type=None, scaling_group=None, state='present'): changed = False au = pyrax.autoscale if not au: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') try: UUID(scaling_group) except ValueError: try: sg = au.find(name=scaling_group) except Exception as e: module.fail_json(msg='%s' % e.message) else: try: sg = au.get(scaling_group) except Exception as e: module.fail_json(msg='%s' % e.message) if state == 'present': policies = filter(lambda p: name == p.name, sg.list_policies()) if len(policies) > 1: module.fail_json(msg='No unique policy match found by name') if at: args = dict(at=at) elif cron: args = dict(cron=cron) else: args = None if not policies: try: policy = sg.add_policy(name, policy_type=policy_type, cooldown=cooldown, change=change, is_percent=is_percent, desired_capacity=desired_capacity, args=args) changed = True except Exception as e: module.fail_json(msg='%s' % e.message) else: policy = policies[0] kwargs = {} if policy_type != policy.type: kwargs['policy_type'] = policy_type if cooldown != policy.cooldown: kwargs['cooldown'] = cooldown if hasattr(policy, 'change') and change != policy.change: kwargs['change'] = change if hasattr(policy, 'changePercent') and is_percent is False: kwargs['change'] = change kwargs['is_percent'] = False elif hasattr(policy, 'change') and is_percent is True: kwargs['change'] = change kwargs['is_percent'] = True if hasattr(policy, 'desiredCapacity') and change: kwargs['change'] = change elif ((hasattr(policy, 'change') or hasattr(policy, 'changePercent')) and desired_capacity): kwargs['desired_capacity'] = desired_capacity if hasattr(policy, 'args') and args != policy.args: kwargs['args'] = args if kwargs: policy.update(**kwargs) changed = True policy.get() module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) else: try: policies = filter(lambda p: name == p.name, sg.list_policies()) if len(policies) > 1: module.fail_json(msg='No unique policy match found by name') elif not policies: policy = {} else: policy.delete() changed = True except Exception as e: 
module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( at=dict(), change=dict(type='int'), cron=dict(), cooldown=dict(type='int', default=300), desired_capacity=dict(type='int'), is_percent=dict(type='bool', default=False), name=dict(required=True), policy_type=dict(required=True, choices=['webhook', 'schedule']), scaling_group=dict(required=True), state=dict(default='present', choices=['present', 'absent']), ) ) module = AnsibleModule( argument_spec=argument_spec, required_together=rax_required_together(), mutually_exclusive=[ ['cron', 'at'], ['change', 'desired_capacity'], ] ) if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') at = module.params.get('at') change = module.params.get('change') cron = module.params.get('cron') cooldown = module.params.get('cooldown') desired_capacity = module.params.get('desired_capacity') is_percent = module.params.get('is_percent') name = module.params.get('name') policy_type = module.params.get('policy_type') scaling_group = module.params.get('scaling_group') state = module.params.get('state') if (at or cron) and policy_type == 'webhook': module.fail_json(msg='policy_type=schedule is required for a time ' 'based policy') setup_rax_module(module, pyrax) rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown, desired_capacity=desired_capacity, is_percent=is_percent, name=name, policy_type=policy_type, scaling_group=scaling_group, state=state) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#include <c10/util/Exception.h> #include <gtest/gtest.h> #include <stdexcept> using c10::Error; namespace { template <class Functor> inline void expectThrowsEq(Functor&& functor, const char* expectedMessage) { try { std::forward<Functor>(functor)(); } catch (const Error& e) { EXPECT_STREQ(e.what_without_backtrace(), expectedMessage); return; } ADD_FAILURE() << "Expected to throw exception with message \"" << expectedMessage << "\" but didn't throw"; } } // namespace TEST(ExceptionTest, TORCH_INTERNAL_ASSERT_DEBUG_ONLY) { #ifdef NDEBUG // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false)); // Does nothing - `throw ...` should not be evaluated // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY( (throw std::runtime_error("I'm throwing..."), true))); #else ASSERT_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false), c10::Error); ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(true)); #endif } // On these platforms there's no assert #if !defined(__ANDROID__) && !defined(__APPLE__) TEST(ExceptionTest, CUDA_KERNEL_ASSERT) { // This function always throws even in NDEBUG mode ASSERT_DEATH_IF_SUPPORTED({ CUDA_KERNEL_ASSERT(false); }, "Assert"); } #endif TEST(WarningTest, JustPrintWarning) { TORCH_WARN("I'm a warning"); } TEST(ExceptionTest, ErrorFormatting) { expectThrowsEq( []() { TORCH_CHECK(false, "This is invalid"); }, "This is invalid"); expectThrowsEq( []() { try { TORCH_CHECK(false, "This is invalid"); } catch (Error& e) { TORCH_RETHROW(e, "While checking X"); } }, "This is invalid (While checking X)"); expectThrowsEq( []() { try { try { TORCH_CHECK(false, "This is invalid"); } catch (Error& e) { TORCH_RETHROW(e, "While checking X"); } } catch (Error& e) { TORCH_RETHROW(e, "While checking Y"); } }, R"msg(This is invalid While checking X While checking Y)msg"); } static int assertionArgumentCounter = 0; static int getAssertionArgument() { return 
++assertionArgumentCounter; } static void failCheck() { TORCH_CHECK(false, "message ", getAssertionArgument()); } static void failInternalAssert() { TORCH_INTERNAL_ASSERT(false, "message ", getAssertionArgument()); } TEST(ExceptionTest, DontCallArgumentFunctionsTwiceOnFailure) { assertionArgumentCounter = 0; // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) EXPECT_ANY_THROW(failCheck()); EXPECT_EQ(assertionArgumentCounter, 1) << "TORCH_CHECK called argument twice"; // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) EXPECT_ANY_THROW(failInternalAssert()); EXPECT_EQ(assertionArgumentCounter, 2) << "TORCH_INTERNAL_ASSERT called argument twice"; }
cpp
github
https://github.com/pytorch/pytorch
c10/test/util/exception_test.cpp
# Documentation ## Realm Swift The documentation can be found at [https://www.mongodb.com/docs/atlas/device-sdks/sdk/swift/](https://www.mongodb.com/docs/atlas/device-sdks/sdk/swift/). The API reference is located at [www.mongodb.com/docs/realm-sdks/swift/latest/](https://www.mongodb.com/docs/realm-sdks/swift/latest/). ## Realm Objective-C The documentation can be found at [https://www.mongodb.com/docs/atlas/device-sdks/sdk/swift/](https://www.mongodb.com/docs/atlas/device-sdks/sdk/swift/). The API reference is located at [www.mongodb.com/docs/realm-sdks/objc/latest](https://www.mongodb.com/docs/realm-sdks/objc/latest/). ## Generating Docs You can generate the API docs locally by running `sh build.sh docs` from the root of this repository. This requires installation of [jazzy](https://github.com/realm/jazzy/). You will find the output in `docs/swift_output/` and `docs/objc_output/`.
unknown
github
https://github.com/realm/realm-swift
docs/README.md
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # # Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import osv from openerp.report import report_sxw class bank_statement_balance_report(report_sxw.rml_parse): def set_context(self, objects, data, ids, report_type=None): cr = self.cr cr.execute('SELECT s.name as s_name, s.date AS s_date, j.code as j_code, s.balance_end_real as s_balance ' \ 'FROM account_bank_statement s ' \ 'INNER JOIN account_journal j on s.journal_id = j.id ' \ 'INNER JOIN ' \ '(SELECT journal_id, max(date) as max_date FROM account_bank_statement ' \ 'GROUP BY journal_id) d ' \ 'ON (s.journal_id = d.journal_id AND s.date = d.max_date) ' \ 'ORDER BY j.code') lines = cr.dictfetchall() self.localcontext.update( { 'lines': lines, }) super(bank_statement_balance_report, self).set_context(objects, data, ids, report_type=report_type) def __init__(self, cr, uid, name, context): if context is None: context = {} super(bank_statement_balance_report, self).__init__(cr, uid, name, context=context) self.localcontext.update( { 'time': time, }) self.context = context class 
report_bankstatementbalance(osv.AbstractModel): _name = 'report.account_bank_statement_extensions.report_bankstatementbalance' _inherit = 'report.abstract_report' _template = 'account_bank_statement_extensions.report_bankstatementbalance' _wrapped_report_class = bank_statement_balance_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Adam.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests.xla_test import XLATestCase from tensorflow.python.framework import constant_op from tensorflow.python.ops import array_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adam def adam_update_numpy(param, g_t, t, m, v, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t) m_t = beta1 * m + (1 - beta1) * g_t v_t = beta2 * v + (1 - beta2) * g_t * g_t param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon) return param_t, m_t, v_t class AdamOptimizerTest(XLATestCase): def testBasic(self): for dtype in self.float_types: with self.test_session(), self.test_scope(): variable_scope.get_variable_scope().set_use_resource(True) # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype) var1_np = np.array([3.0, 4.0], dtype=dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = array_ops.placeholder(dtype) grads1 = array_ops.placeholder(dtype) opt = adam.AdamOptimizer() update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) beta1_power, beta2_power = opt._get_beta_accumulators() # Run 3 steps of Adam for t in range(1, 4): self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval()) self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval()) update.run(feed_dict={grads0: grads0_np, grads1: grads1_np}) var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) def testTensorLearningRate(self): for dtype in self.float_types: with self.test_session(), self.test_scope(): variable_scope.get_variable_scope().set_use_resource(True) # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype) var1_np = np.array([3.0, 4.0], dtype=dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = array_ops.placeholder(dtype) grads1 = array_ops.placeholder(dtype) opt = adam.AdamOptimizer(constant_op.constant(0.001)) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) beta1_power, beta2_power = opt._get_beta_accumulators() # Run 3 steps of Adam for t in range(1, 4): self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval()) self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval()) update.run(feed_dict={grads0: grads0_np, grads1: grads1_np}) var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) def testSharing(self): for dtype in self.float_types: with self.test_session(), self.test_scope(): variable_scope.get_variable_scope().set_use_resource(True) # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype) var1_np = np.array([3.0, 4.0], dtype=dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype) var0 = resource_variable_ops.ResourceVariable(var0_np) var1 = resource_variable_ops.ResourceVariable(var1_np) grads0 = array_ops.placeholder(dtype) grads1 = array_ops.placeholder(dtype) opt = adam.AdamOptimizer() update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() beta1_power, beta2_power = opt._get_beta_accumulators() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Run 3 steps of intertwined Adam1 and Adam2. for t in range(1, 4): self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval()) self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval()) if t % 2 == 0: update1.run(feed_dict={grads0: grads0_np, grads1: grads1_np}) else: update2.run(feed_dict={grads0: grads0_np, grads1: grads1_np}) var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) if __name__ == "__main__": test.main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python -t # -*- coding: utf-8 -*- # Copyright (C) 2017 Jonathan Delvaux <pyshell@djoproject.net> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import threading from pyshell.system.manager.abstract import AbstractParentManager from pyshell.system.manager.context import ContextParameterManager from pyshell.system.manager.environment import EnvironmentParameterManager from pyshell.system.manager.key import CryptographicKeyParameterManager from pyshell.system.manager.procedure import ProcedureParameterManager from pyshell.system.manager.variable import VariableParameterManager from pyshell.utils.abstract.flushable import Flushable from pyshell.utils.constants import DEFAULT_GROUP_NAME from pyshell.utils.exception import DefaultPyshellException from pyshell.utils.raises import raiseIfNotString class ParentManager(AbstractParentManager, Flushable): def __init__(self): self._environement = EnvironmentParameterManager(self) self._context = ContextParameterManager(self) self._key = CryptographicKeyParameterManager(self) self._procedure = ProcedureParameterManager(self) self._variable = VariableParameterManager(self) self._managers = (self._environement, self._context, self._key, self._procedure, self._variable) self._group_name = DEFAULT_GROUP_NAME def getEnvironmentManager(self): return self._environement def getContextManager(self): return self._context def getKeyManager(self): return self._key def 
getProcedureManager(self): return self._procedure def getVariableManager(self): return self._variable def flush(self): for m in self._managers: m.flush() def getCurrentId(self): return threading.current_thread().ident def setDefaultGroupName(self, group_name): raiseIfNotString(group_name, "group_name", DefaultPyshellException, "setDefaultGroupName", self.__class__.__name__) self._group_name = group_name def getDefaultGroupName(self): return self._group_name def checkForSetGlobalParameter(self, group_name, loader_name): pass # do nothing def checkForUnsetGlobalParameter(self, group_name, loader_name): pass # do nothing
unknown
codeparrot/codeparrot-clean