repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
aviciimaxwell/odoo
openerp/service/wsgi_server.py
335
9490
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011-2012 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ WSGI stack, common code. """ import httplib import urllib import xmlrpclib import StringIO import errno import logging import platform import socket import sys import threading import traceback import werkzeug.serving import werkzeug.contrib.fixers import openerp import openerp.tools.config as config import websrv_lib _logger = logging.getLogger(__name__) # XML-RPC fault codes. Some care must be taken when changing these: the # constants are also defined client-side and must remain in sync. # User code must use the exceptions defined in ``openerp.exceptions`` (not # create directly ``xmlrpclib.Fault`` objects). RPC_FAULT_CODE_CLIENT_ERROR = 1 # indistinguishable from app. error. RPC_FAULT_CODE_APPLICATION_ERROR = 1 RPC_FAULT_CODE_WARNING = 2 RPC_FAULT_CODE_ACCESS_DENIED = 3 RPC_FAULT_CODE_ACCESS_ERROR = 4 def xmlrpc_return(start_response, service, method, params, string_faultcode=False): """ Helper to call a service's method with some params, using a wsgi-supplied ``start_response`` callback. 
This is the place to look at to see the mapping between core exceptions and XML-RPC fault codes. """ # Map OpenERP core exceptions to XML-RPC fault codes. Specific exceptions # defined in ``openerp.exceptions`` are mapped to specific fault codes; # all the other exceptions are mapped to the generic # RPC_FAULT_CODE_APPLICATION_ERROR value. # This also mimics SimpleXMLRPCDispatcher._marshaled_dispatch() for # exception handling. try: result = openerp.http.dispatch_rpc(service, method, params) response = xmlrpclib.dumps((result,), methodresponse=1, allow_none=False, encoding=None) except Exception, e: if string_faultcode: response = xmlrpc_handle_exception_string(e) else: response = xmlrpc_handle_exception_int(e) start_response("200 OK", [('Content-Type','text/xml'), ('Content-Length', str(len(response)))]) return [response] def xmlrpc_handle_exception_int(e): if isinstance(e, openerp.osv.orm.except_orm): # legacy fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value)) response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) elif isinstance(e, openerp.exceptions.Warning) or isinstance(e, openerp.exceptions.RedirectWarning): fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e)) response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) elif isinstance (e, openerp.exceptions.AccessError): fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_ERROR, str(e)) response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) elif isinstance(e, openerp.exceptions.AccessDenied): fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e)) response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) elif isinstance(e, openerp.exceptions.DeferredException): info = e.traceback # Which one is the best ? 
formatted_info = "".join(traceback.format_exception(*info)) #formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info) response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) else: if hasattr(e, 'message') and e.message == 'AccessDenied': # legacy fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e)) response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) else: info = sys.exc_info() # Which one is the best ? formatted_info = "".join(traceback.format_exception(*info)) #formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info) response = xmlrpclib.dumps(fault, allow_none=None, encoding=None) return response def xmlrpc_handle_exception_string(e): if isinstance(e, openerp.osv.orm.except_orm): fault = xmlrpclib.Fault('warning -- ' + e.name + '\n\n' + e.value, '') response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) elif isinstance(e, openerp.exceptions.Warning): fault = xmlrpclib.Fault('warning -- Warning\n\n' + str(e), '') response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) elif isinstance(e, openerp.exceptions.AccessError): fault = xmlrpclib.Fault('warning -- AccessError\n\n' + str(e), '') response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) elif isinstance(e, openerp.exceptions.AccessDenied): fault = xmlrpclib.Fault('AccessDenied', str(e)) response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) elif isinstance(e, openerp.exceptions.DeferredException): info = e.traceback formatted_info = "".join(traceback.format_exception(*info)) fault = xmlrpclib.Fault(openerp.tools.ustr(e.message), formatted_info) response = xmlrpclib.dumps(fault, allow_none=False, encoding=None) else: info = sys.exc_info() formatted_info = "".join(traceback.format_exception(*info)) fault = xmlrpclib.Fault(openerp.tools.exception_to_unicode(e), 
formatted_info) response = xmlrpclib.dumps(fault, allow_none=None, encoding=None) return response def wsgi_xmlrpc(environ, start_response): """ Two routes are available for XML-RPC /xmlrpc/<service> route returns faultCode as strings. This is a historic violation of the protocol kept for compatibility. /xmlrpc/2/<service> is a new route that returns faultCode as int and is therefore fully compliant. """ if environ['REQUEST_METHOD'] == 'POST' and environ['PATH_INFO'].startswith('/xmlrpc/'): length = int(environ['CONTENT_LENGTH']) data = environ['wsgi.input'].read(length) # Distinguish betweed the 2 faultCode modes string_faultcode = True if environ['PATH_INFO'].startswith('/xmlrpc/2/'): service = environ['PATH_INFO'][len('/xmlrpc/2/'):] string_faultcode = False else: service = environ['PATH_INFO'][len('/xmlrpc/'):] params, method = xmlrpclib.loads(data) return xmlrpc_return(start_response, service, method, params, string_faultcode) # WSGI handlers registered through the register_wsgi_handler() function below. module_handlers = [] # RPC endpoints registered through the register_rpc_endpoint() function below. rpc_handlers = {} def register_wsgi_handler(handler): """ Register a WSGI handler. Handlers are tried in the order they are added. We might provide a way to register a handler for specific routes later. """ module_handlers.append(handler) def register_rpc_endpoint(endpoint, handler): """ Register a handler for a given RPC enpoint. """ rpc_handlers[endpoint] = handler def application_unproxied(environ, start_response): """ WSGI entry point.""" # cleanup db/uid trackers - they're set at HTTP dispatch in # web.session.OpenERPSession.send() and at RPC dispatch in # openerp.service.web_services.objects_proxy.dispatch(). 
# /!\ The cleanup cannot be done at the end of this `application` # method because werkzeug still produces relevant logging afterwards if hasattr(threading.current_thread(), 'uid'): del threading.current_thread().uid if hasattr(threading.current_thread(), 'dbname'): del threading.current_thread().dbname with openerp.api.Environment.manage(): # Try all handlers until one returns some result (i.e. not None). wsgi_handlers = [wsgi_xmlrpc] wsgi_handlers += module_handlers for handler in wsgi_handlers: result = handler(environ, start_response) if result is None: continue return result # We never returned from the loop. response = 'No handler found.\n' start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', str(len(response)))]) return [response] def application(environ, start_response): if config['proxy_mode'] and 'HTTP_X_FORWARDED_HOST' in environ: return werkzeug.contrib.fixers.ProxyFix(application_unproxied)(environ, start_response) else: return application_unproxied(environ, start_response) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
boehm-s/dotfiles
.emacs.d/elpy/rpc-venv/lib/python3.7/site-packages/pkg_resources/extern/__init__.py
245
2487
import sys class VendorImporter: """ A PEP 302 meta path importer for finding optionally-vendored or otherwise naturally-installed packages from root_name. """ def __init__(self, root_name, vendored_names=(), vendor_pkg=None): self.root_name = root_name self.vendored_names = set(vendored_names) self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') @property def search_path(self): """ Search first the vendor package then as a natural package. """ yield self.vendor_pkg + '.' yield '' def find_module(self, fullname, path=None): """ Return self when fullname starts with root_name and the target module is one vendored through this importer. """ root, base, target = fullname.partition(self.root_name + '.') if root: return if not any(map(target.startswith, self.vendored_names)): return return self def load_module(self, fullname): """ Iterate over the search path to locate and load fullname. """ root, base, target = fullname.partition(self.root_name + '.') for prefix in self.search_path: try: extant = prefix + target __import__(extant) mod = sys.modules[extant] sys.modules[fullname] = mod # mysterious hack: # Remove the reference to the extant package/module # on later Python versions to cause relative imports # in the vendor package to resolve the same modules # as those going through this importer. if sys.version_info > (3, 3): del sys.modules[extant] return mod except ImportError: pass else: raise ImportError( "The '{target}' package is required; " "normally this is bundled with this package so if you get " "this warning, consult the packager of your " "distribution.".format(**locals()) ) def install(self): """ Install this importer into sys.meta_path if not already present. """ if self not in sys.meta_path: sys.meta_path.append(self) names = 'packaging', 'pyparsing', 'six', 'appdirs' VendorImporter(__name__, names).install()
gpl-3.0
kayak/fireant
fireant/queries/builder/dimension_latest_query_builder.py
1
2026
import pandas as pd from fireant.dataset.fields import Field from fireant.utils import ( alias_for_alias_selector, immutable, ) from fireant.queries.builder.query_builder import QueryBuilder, QueryException, add_hints from fireant.queries.execution import fetch_data class DimensionLatestQueryBuilder(QueryBuilder): def __init__(self, dataset): super().__init__(dataset) @immutable def __call__(self, dimension: Field, *dimensions: Field): self._dimensions += [dimension] + list(dimensions) @property def sql(self): """ Serializes this query builder as a set of SQL queries. This method will always return a list of one query since only one query is required to retrieve dimension choices. This function only handles dimensions (select+group by) and filtering (where/having), which is everything needed for the query to fetch choices for dimensions. The dataset query extends this with metrics, references, and totals. """ if not self.dimensions: raise QueryException("Must select at least one dimension to query latest values") query = self.dataset.database.make_latest_query( base_table=self.table, joins=self.dataset.joins, dimensions=self.dimensions, ) return [query] def fetch(self, hint=None): queries = add_hints(self.sql, hint) max_rows_returned, data = fetch_data(self.dataset.database, queries, self.dimensions) data = self._get_latest_data_from_df(data) return self._transform_for_return(data, max_rows_returned=max_rows_returned) def _get_latest_data_from_df(self, df: pd.DataFrame) -> pd.Series: latest = df.reset_index().iloc[0] # Remove the row index as the name and trim the special dimension key characters from the dimension key latest.name = None latest.index = [alias_for_alias_selector(alias) for alias in latest.index] return latest
apache-2.0
nkhuyu/commons
tests/python/twitter/common/collections/test_ringbuffer.py
13
1710
# ================================================================================================== # Copyright 2012 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== import pytest from twitter.common.collections import RingBuffer def test_append(): r = RingBuffer(5) for i in xrange(0, 5): r.append(i) assert (r[0], r[1], r[2], r[3], r[4]) == (0, 1, 2, 3, 4) for i in xrange(5, 10): r.append(i) assert (r[0], r[1], r[2], r[3], r[4]) == (5, 6, 7, 8, 9) def test_circularity(): r = RingBuffer(3) r.append(1) r.append(2) r.append(3) assert 1 in r assert (r[0], r[3], r[6], r[-3]) == (1, 1, 1, 1) r.append(4) assert 1 not in r assert (r[0], r[3], r[6], r[-3]) == (2, 2, 2, 2) def test_bad_operations(): with pytest.raises(ValueError): RingBuffer(0) r = RingBuffer() with pytest.raises(IndexError): r[1] with pytest.raises(IndexError): r[-1] r.append(1) with pytest.raises(RingBuffer.InvalidOperation): del r[0]
apache-2.0
bdarnell/tornado
tornado/routing.py
1
25147
# Copyright 2015 The Tornado Authors # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Flexible routing implementation. Tornado routes HTTP requests to appropriate handlers using `Router` class implementations. The `tornado.web.Application` class is a `Router` implementation and may be used directly, or the classes in this module may be used for additional flexibility. The `RuleRouter` class can match on more criteria than `.Application`, or the `Router` interface can be subclassed for maximum customization. `Router` interface extends `~.httputil.HTTPServerConnectionDelegate` to provide additional routing capabilities. This also means that any `Router` implementation can be used directly as a ``request_callback`` for `~.httpserver.HTTPServer` constructor. `Router` subclass must implement a ``find_handler`` method to provide a suitable `~.httputil.HTTPMessageDelegate` instance to handle the request: .. 
code-block:: python class CustomRouter(Router): def find_handler(self, request, **kwargs): # some routing logic providing a suitable HTTPMessageDelegate instance return MessageDelegate(request.connection) class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def finish(self): self.connection.write_headers( ResponseStartLine("HTTP/1.1", 200, "OK"), HTTPHeaders({"Content-Length": "2"}), b"OK") self.connection.finish() router = CustomRouter() server = HTTPServer(router) The main responsibility of `Router` implementation is to provide a mapping from a request to `~.httputil.HTTPMessageDelegate` instance that will handle this request. In the example above we can see that routing is possible even without instantiating an `~.web.Application`. For routing to `~.web.RequestHandler` implementations we need an `~.web.Application` instance. `~.web.Application.get_handler_delegate` provides a convenient way to create `~.httputil.HTTPMessageDelegate` for a given request and `~.web.RequestHandler`. Here is a simple example of how we can we route to `~.web.RequestHandler` subclasses by HTTP method: .. code-block:: python resources = {} class GetResource(RequestHandler): def get(self, path): if path not in resources: raise HTTPError(404) self.finish(resources[path]) class PostResource(RequestHandler): def post(self, path): resources[path] = self.request.body class HTTPMethodRouter(Router): def __init__(self, app): self.app = app def find_handler(self, request, **kwargs): handler = GetResource if request.method == "GET" else PostResource return self.app.get_handler_delegate(request, handler, path_args=[request.path]) router = HTTPMethodRouter(Application()) server = HTTPServer(router) `ReversibleRouter` interface adds the ability to distinguish between the routes and reverse them to the original urls using route's name and additional arguments. `~.web.Application` is itself an implementation of `ReversibleRouter` class. 
`RuleRouter` and `ReversibleRuleRouter` are implementations of `Router` and `ReversibleRouter` interfaces and can be used for creating rule-based routing configurations. Rules are instances of `Rule` class. They contain a `Matcher`, which provides the logic for determining whether the rule is a match for a particular request and a target, which can be one of the following. 1) An instance of `~.httputil.HTTPServerConnectionDelegate`: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/handler"), ConnectionDelegate()), # ... more rules ]) class ConnectionDelegate(HTTPServerConnectionDelegate): def start_request(self, server_conn, request_conn): return MessageDelegate(request_conn) 2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/callable"), request_callable) ]) def request_callable(request): request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK") request.finish() 3) Another `Router` instance: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/router.*"), CustomRouter()) ]) Of course a nested `RuleRouter` or a `~.web.Application` is allowed: .. code-block:: python router = RuleRouter([ Rule(HostMatches("example.com"), RuleRouter([ Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)])), ])) ]) server = HTTPServer(router) In the example below `RuleRouter` is used to route between applications: .. code-block:: python app1 = Application([ (r"/app1/handler", Handler1), # other handlers ... ]) app2 = Application([ (r"/app2/handler", Handler2), # other handlers ... ]) router = RuleRouter([ Rule(PathMatches("/app1.*"), app1), Rule(PathMatches("/app2.*"), app2) ]) server = HTTPServer(router) For more information on application-level routing see docs for `~.web.Application`. .. 
versionadded:: 4.5 """ import re from functools import partial from tornado import httputil from tornado.httpserver import _CallableAdapter from tornado.escape import url_escape, url_unescape, utf8 from tornado.log import app_log from tornado.util import basestring_type, import_object, re_unescape, unicode_type from typing import Any, Union, Optional, Awaitable, List, Dict, Pattern, Tuple, overload class Router(httputil.HTTPServerConnectionDelegate): """Abstract router interface.""" def find_handler( self, request: httputil.HTTPServerRequest, **kwargs: Any ) -> Optional[httputil.HTTPMessageDelegate]: """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate` that can serve the request. Routing implementations may pass additional kwargs to extend the routing logic. :arg httputil.HTTPServerRequest request: current HTTP request. :arg kwargs: additional keyword arguments passed by routing implementation. :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to process the request. """ raise NotImplementedError() def start_request( self, server_conn: object, request_conn: httputil.HTTPConnection ) -> httputil.HTTPMessageDelegate: return _RoutingDelegate(self, server_conn, request_conn) class ReversibleRouter(Router): """Abstract router interface for routers that can handle named routes and support reversing them to original urls. """ def reverse_url(self, name: str, *args: Any) -> Optional[str]: """Returns url string for a given route name and arguments or ``None`` if no match is found. :arg str name: route name. :arg args: url parameters. :returns: parametrized url string for a given route name (or ``None``). 
""" raise NotImplementedError() class _RoutingDelegate(httputil.HTTPMessageDelegate): def __init__( self, router: Router, server_conn: object, request_conn: httputil.HTTPConnection ) -> None: self.server_conn = server_conn self.request_conn = request_conn self.delegate = None # type: Optional[httputil.HTTPMessageDelegate] self.router = router # type: Router def headers_received( self, start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], headers: httputil.HTTPHeaders, ) -> Optional[Awaitable[None]]: assert isinstance(start_line, httputil.RequestStartLine) request = httputil.HTTPServerRequest( connection=self.request_conn, server_connection=self.server_conn, start_line=start_line, headers=headers, ) self.delegate = self.router.find_handler(request) if self.delegate is None: app_log.debug( "Delegate for %s %s request not found", start_line.method, start_line.path, ) self.delegate = _DefaultMessageDelegate(self.request_conn) return self.delegate.headers_received(start_line, headers) def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]: assert self.delegate is not None return self.delegate.data_received(chunk) def finish(self) -> None: assert self.delegate is not None self.delegate.finish() def on_connection_close(self) -> None: assert self.delegate is not None self.delegate.on_connection_close() class _DefaultMessageDelegate(httputil.HTTPMessageDelegate): def __init__(self, connection: httputil.HTTPConnection) -> None: self.connection = connection def finish(self) -> None: self.connection.write_headers( httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"), httputil.HTTPHeaders(), ) self.connection.finish() # _RuleList can either contain pre-constructed Rules or a sequence of # arguments to be passed to the Rule constructor. _RuleList = List[ Union[ "Rule", List[Any], # Can't do detailed typechecking of lists. 
Tuple[Union[str, "Matcher"], Any], Tuple[Union[str, "Matcher"], Any, Dict[str, Any]], Tuple[Union[str, "Matcher"], Any, Dict[str, Any], str], ] ] class RuleRouter(Router): """Rule-based router implementation.""" def __init__(self, rules: Optional[_RuleList] = None) -> None: """Constructs a router from an ordered list of rules:: RuleRouter([ Rule(PathMatches("/handler"), Target), # ... more rules ]) You can also omit explicit `Rule` constructor and use tuples of arguments:: RuleRouter([ (PathMatches("/handler"), Target), ]) `PathMatches` is a default matcher, so the example above can be simplified:: RuleRouter([ ("/handler", Target), ]) In the examples above, ``Target`` can be a nested `Router` instance, an instance of `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument. :arg rules: a list of `Rule` instances or tuples of `Rule` constructor arguments. """ self.rules = [] # type: List[Rule] if rules: self.add_rules(rules) def add_rules(self, rules: _RuleList) -> None: """Appends new rules to the router. :arg rules: a list of Rule instances (or tuples of arguments, which are passed to Rule constructor). """ for rule in rules: if isinstance(rule, (tuple, list)): assert len(rule) in (2, 3, 4) if isinstance(rule[0], basestring_type): rule = Rule(PathMatches(rule[0]), *rule[1:]) else: rule = Rule(*rule) self.rules.append(self.process_rule(rule)) def process_rule(self, rule: "Rule") -> "Rule": """Override this method for additional preprocessing of each rule. :arg Rule rule: a rule to be processed. :returns: the same or modified Rule instance. 
""" return rule def find_handler( self, request: httputil.HTTPServerRequest, **kwargs: Any ) -> Optional[httputil.HTTPMessageDelegate]: for rule in self.rules: target_params = rule.matcher.match(request) if target_params is not None: if rule.target_kwargs: target_params["target_kwargs"] = rule.target_kwargs delegate = self.get_target_delegate( rule.target, request, **target_params ) if delegate is not None: return delegate return None def get_target_delegate( self, target: Any, request: httputil.HTTPServerRequest, **target_params: Any ) -> Optional[httputil.HTTPMessageDelegate]: """Returns an instance of `~.httputil.HTTPMessageDelegate` for a Rule's target. This method is called by `~.find_handler` and can be extended to provide additional target types. :arg target: a Rule's target. :arg httputil.HTTPServerRequest request: current request. :arg target_params: additional parameters that can be useful for `~.httputil.HTTPMessageDelegate` creation. """ if isinstance(target, Router): return target.find_handler(request, **target_params) elif isinstance(target, httputil.HTTPServerConnectionDelegate): assert request.connection is not None return target.start_request(request.server_connection, request.connection) elif callable(target): assert request.connection is not None return _CallableAdapter( partial(target, **target_params), request.connection ) return None class ReversibleRuleRouter(ReversibleRouter, RuleRouter): """A rule-based router that implements ``reverse_url`` method. Each rule added to this router may have a ``name`` attribute that can be used to reconstruct an original uri. The actual reconstruction takes place in a rule's matcher (see `Matcher.reverse`). 
""" def __init__(self, rules: Optional[_RuleList] = None) -> None: self.named_rules = {} # type: Dict[str, Any] super(ReversibleRuleRouter, self).__init__(rules) def process_rule(self, rule: "Rule") -> "Rule": rule = super(ReversibleRuleRouter, self).process_rule(rule) if rule.name: if rule.name in self.named_rules: app_log.warning( "Multiple handlers named %s; replacing previous value", rule.name ) self.named_rules[rule.name] = rule return rule def reverse_url(self, name: str, *args: Any) -> Optional[str]: if name in self.named_rules: return self.named_rules[name].matcher.reverse(*args) for rule in self.rules: if isinstance(rule.target, ReversibleRouter): reversed_url = rule.target.reverse_url(name, *args) if reversed_url is not None: return reversed_url return None class Rule(object): """A routing rule.""" def __init__( self, matcher: "Matcher", target: Any, target_kwargs: Optional[Dict[str, Any]] = None, name: Optional[str] = None, ) -> None: """Constructs a Rule instance. :arg Matcher matcher: a `Matcher` instance used for determining whether the rule should be considered a match for a specific request. :arg target: a Rule's target (typically a ``RequestHandler`` or `~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`, depending on routing implementation). :arg dict target_kwargs: a dict of parameters that can be useful at the moment of target instantiation (for example, ``status_code`` for a ``RequestHandler`` subclass). They end up in ``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate` method. :arg str name: the name of the rule that can be used to find it in `ReversibleRouter.reverse_url` implementation. 
""" if isinstance(target, str): # import the Module and instantiate the class # Must be a fully qualified name (module.ClassName) target = import_object(target) self.matcher = matcher # type: Matcher self.target = target self.target_kwargs = target_kwargs if target_kwargs else {} self.name = name def reverse(self, *args: Any) -> Optional[str]: return self.matcher.reverse(*args) def __repr__(self) -> str: return "%s(%r, %s, kwargs=%r, name=%r)" % ( self.__class__.__name__, self.matcher, self.target, self.target_kwargs, self.name, ) class Matcher(object): """Represents a matcher for request features.""" def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: """Matches current instance against the request. :arg httputil.HTTPServerRequest request: current HTTP request :returns: a dict of parameters to be passed to the target handler (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs`` can be passed for proper `~.web.RequestHandler` instantiation). An empty dict is a valid (and common) return value to indicate a match when the argument-passing features are not used. 
``None`` must be returned to indicate that there is no match.""" raise NotImplementedError() def reverse(self, *args: Any) -> Optional[str]: """Reconstructs full url from matcher instance and additional arguments.""" return None class AnyMatches(Matcher): """Matches any request.""" def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: return {} class HostMatches(Matcher): """Matches requests from hosts specified by ``host_pattern`` regex.""" def __init__(self, host_pattern: Union[str, Pattern]) -> None: if isinstance(host_pattern, basestring_type): if not host_pattern.endswith("$"): host_pattern += "$" self.host_pattern = re.compile(host_pattern) else: self.host_pattern = host_pattern def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: if self.host_pattern.match(request.host_name): return {} return None class DefaultHostMatches(Matcher): """Matches requests from host that is equal to application's default_host. Always returns no match if ``X-Real-Ip`` header is present. 
""" def __init__(self, application: Any, host_pattern: Pattern) -> None: self.application = application self.host_pattern = host_pattern def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: # Look for default host if not behind load balancer (for debugging) if "X-Real-Ip" not in request.headers: if self.host_pattern.match(self.application.default_host): return {} return None class PathMatches(Matcher): """Matches requests with paths specified by ``path_pattern`` regex.""" def __init__(self, path_pattern: Union[str, Pattern]) -> None: if isinstance(path_pattern, basestring_type): if not path_pattern.endswith("$"): path_pattern += "$" self.regex = re.compile(path_pattern) else: self.regex = path_pattern assert len(self.regex.groupindex) in (0, self.regex.groups), ( "groups in url regexes must either be all named or all " "positional: %r" % self.regex.pattern ) self._path, self._group_count = self._find_groups() def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: match = self.regex.match(request.path) if match is None: return None if not self.regex.groups: return {} path_args = [] # type: List[bytes] path_kwargs = {} # type: Dict[str, bytes] # Pass matched groups to the handler. Since # match.groups() includes both named and # unnamed groups, we want to use either groups # or groupdict but not both. 
if self.regex.groupindex: path_kwargs = dict( (str(k), _unquote_or_none(v)) for (k, v) in match.groupdict().items() ) else: path_args = [_unquote_or_none(s) for s in match.groups()] return dict(path_args=path_args, path_kwargs=path_kwargs) def reverse(self, *args: Any) -> Optional[str]: if self._path is None: raise ValueError("Cannot reverse url regex " + self.regex.pattern) assert len(args) == self._group_count, ( "required number of arguments " "not found" ) if not len(args): return self._path converted_args = [] for a in args: if not isinstance(a, (unicode_type, bytes)): a = str(a) converted_args.append(url_escape(utf8(a), plus=False)) return self._path % tuple(converted_args) def _find_groups(self) -> Tuple[Optional[str], Optional[int]]: """Returns a tuple (reverse string, group count) for a url. For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method would return ('/%s/%s/', 2). """ pattern = self.regex.pattern if pattern.startswith("^"): pattern = pattern[1:] if pattern.endswith("$"): pattern = pattern[:-1] if self.regex.groups != pattern.count("("): # The pattern is too complicated for our simplistic matching, # so we can't support reversing it. return None, None pieces = [] for fragment in pattern.split("("): if ")" in fragment: paren_loc = fragment.index(")") if paren_loc >= 0: try: unescaped_fragment = re_unescape(fragment[paren_loc + 1 :]) except ValueError: # If we can't unescape part of it, we can't # reverse this url. return (None, None) pieces.append("%s" + unescaped_fragment) else: try: unescaped_fragment = re_unescape(fragment) except ValueError: # If we can't unescape part of it, we can't # reverse this url. return (None, None) pieces.append(unescaped_fragment) return "".join(pieces), self.regex.groups class URLSpec(Rule): """Specifies mappings between URLs and handlers. .. versionchanged: 4.5 `URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for backwards compatibility. 
""" def __init__( self, pattern: Union[str, Pattern], handler: Any, kwargs: Optional[Dict[str, Any]] = None, name: Optional[str] = None, ) -> None: """Parameters: * ``pattern``: Regular expression to be matched. Any capturing groups in the regex will be passed in to the handler's get/post/etc methods as arguments (by keyword if named, by position if unnamed. Named and unnamed capturing groups may not be mixed in the same rule). * ``handler``: `~.web.RequestHandler` subclass to be invoked. * ``kwargs`` (optional): A dictionary of additional arguments to be passed to the handler's constructor. * ``name`` (optional): A name for this handler. Used by `~.web.Application.reverse_url`. """ matcher = PathMatches(pattern) super(URLSpec, self).__init__(matcher, handler, kwargs, name) self.regex = matcher.regex self.handler_class = self.target self.kwargs = kwargs def __repr__(self) -> str: return "%s(%r, %s, kwargs=%r, name=%r)" % ( self.__class__.__name__, self.regex.pattern, self.handler_class, self.kwargs, self.name, ) @overload def _unquote_or_none(s: str) -> bytes: pass @overload # noqa: F811 def _unquote_or_none(s: None) -> None: pass def _unquote_or_none(s: Optional[str]) -> Optional[bytes]: # noqa: F811 """None-safe wrapper around url_unescape to handle unmatched optional groups correctly. Note that args are passed as bytes so the handler can decide what encoding to use. """ if s is None: return s return url_unescape(s, encoding=None, plus=False)
apache-2.0
BigAN/OztrendDelpoy
cartridge/shop/management/commands/product_db.py
7
8543
from __future__ import print_function import csv import os import shutil import sys import datetime from optparse import make_option from django.core.management.base import BaseCommand from django.core.management.base import CommandError from django.utils.translation import ugettext as _ from django.db.utils import IntegrityError from mezzanine.conf import settings from cartridge.shop.models import Product from cartridge.shop.models import ProductOption from cartridge.shop.models import ProductImage from cartridge.shop.models import ProductVariation from cartridge.shop.models import Category from mezzanine.core.models import CONTENT_STATUS_PUBLISHED # images get copied from thie directory LOCAL_IMAGE_DIR = "/tmp/orig" # images get copied to this directory under STATIC_ROOT IMAGE_SUFFIXES = [".jpg", ".JPG", ".jpeg", ".JPEG", ".tif", ".gif", ".GIF"] EMPTY_IMAGE_ENTRIES = ["Please add", "N/A", ""] DATE_FORMAT = "%Y-%m-%d" TIME_FORMAT = "%H:%M" # Here we define what column headings are used in the csv. 
TITLE = _("Title") CONTENT = _("Content") DESCRIPTION = _("Description") SKU = _("SKU") IMAGE = _("Image") CATEGORY = _("Category") SUB_CATEGORY = _("Sub-Category") #SIZE = _("Size") NUM_IN_STOCK = _("Number in Stock") UNIT_PRICE = _("Unit Price") SALE_PRICE = _("Sale Price") SALE_START_DATE = _("Sale Start Date") SALE_START_TIME = _("Sale Start Time") SALE_END_DATE = _("Sale End Date") SALE_END_TIME = _("Sale End Time") DATETIME_FORMAT = "%s %s" % (DATE_FORMAT, TIME_FORMAT) SITE_MEDIA_IMAGE_DIR = _("product") PRODUCT_IMAGE_DIR = os.path.join(settings.STATIC_ROOT, SITE_MEDIA_IMAGE_DIR) # python < 2.7 doesn't have dictionary comprehensions ;( #TYPE_CHOICES = {choice:id for id, choice in settings.SHOP_OPTION_TYPE_CHOICES} TYPE_CHOICES = dict() for id, choice in settings.SHOP_OPTION_TYPE_CHOICES: TYPE_CHOICES[choice] = id fieldnames = [TITLE, CONTENT, DESCRIPTION, CATEGORY, SUB_CATEGORY, SKU, IMAGE, NUM_IN_STOCK, UNIT_PRICE, SALE_PRICE, SALE_START_DATE, SALE_START_TIME, SALE_END_DATE, SALE_END_TIME] # TODO: Make sure no options conflict with other fieldnames. 
fieldnames += TYPE_CHOICES.keys() class Command(BaseCommand): args = '--import/--export <csv_file>' help = _('Import/Export products from a csv file.') option_list = BaseCommand.option_list + ( make_option('--import', action='store_true', dest='import', default=False, help=_('Import products from csv file.')), make_option('--export', action='store_true', dest='export', default=False, help=_('Export products from csv file.')), ) def handle(self, *args, **options): if sys.version_info[0] == 3: raise CommandError("Python 3 not supported") try: csv_file = args[0] except IndexError: raise CommandError(_("Please provide csv file to import")) if options["import"] and options["export"]: raise CommandError("can't both import and export") if not options["import"] and not options["export"]: raise CommandError(_("need to import or export")) if options['import']: import_products(csv_file) elif options['export']: export_products(csv_file) def _product_from_row(row): product, created = Product.objects.get_or_create(title=row[TITLE]) product.content = row[CONTENT] product.description = row[DESCRIPTION] # TODO: set the 2 below from spreadsheet. product.status = CONTENT_STATUS_PUBLISHED product.available = True # TODO: allow arbitrary level/number of categories. base_cat, created = Category.objects.get_or_create(title=row[CATEGORY]) sub_cat, created = Category.objects.get_or_create( title=row[SUB_CATEGORY], parent=base_cat) product.categories.add(sub_cat) shop_cat, created = Category.objects.get_or_create(title="Shop") product.categories.add(shop_cat) return product def _make_image(image_str, product): if image_str in EMPTY_IMAGE_ENTRIES: return None # try adding various image suffixes, if none given in original filename. 
root, suffix = os.path.splitext(image_str) if suffix not in IMAGE_SUFFIXES: raise CommandError("INCORRECT SUFFIX: %s" % image_str) image_path = os.path.join(LOCAL_IMAGE_DIR, image_str) if not os.path.exists(image_path): raise CommandError("NO FILE %s" % image_path) shutil.copy(image_path, PRODUCT_IMAGE_DIR) #shutil.copy(image_path, os.path.join(PRODUCT_IMAGE_DIR, "orig")) image, created = ProductImage.objects.get_or_create( file="%s" % (os.path.join(SITE_MEDIA_IMAGE_DIR, image_str)), description=image_str, # TODO: handle column for this. product=product) return image def _make_date(date_str, time_str): date_string = '%s %s' % (date_str, time_str) date = datetime.datetime.strptime(date_string, DATETIME_FORMAT) return date def import_products(csv_file): print(_("Importing ..")) # More appropriate for testing. #Product.objects.all().delete() reader = csv.DictReader(open(csv_file), delimiter=',') for row in reader: print(row) product = _product_from_row(row) try: variation = ProductVariation.objects.create( # strip whitespace sku=row[SKU].replace(" ", ""), product=product, ) except IntegrityError: raise CommandError("Product with SKU exists! 
sku: %s" % row[SKU]) if row[NUM_IN_STOCK]: variation.num_in_stock = row[NUM_IN_STOCK] if row[UNIT_PRICE]: variation.unit_price = row[UNIT_PRICE] if row[SALE_PRICE]: variation.sale_price = row[SALE_PRICE] if row[SALE_START_DATE] and row[SALE_START_TIME]: variation.sale_from = _make_date(row[SALE_START_DATE], row[SALE_START_TIME]) if row[SALE_END_DATE] and row[SALE_END_TIME]: variation.sale_to = _make_date(row[SALE_END_DATE], row[SALE_END_TIME]) for option in TYPE_CHOICES: if row[option]: name = "option%s" % TYPE_CHOICES[option] setattr(variation, name, row[option]) new_option, created = ProductOption.objects.get_or_create( type=TYPE_CHOICES[option], # TODO: set dynamically name=row[option]) variation.save() image = _make_image(row[IMAGE], product) if image: variation.image = image product.variations.manage_empty() product.variations.set_default_images([]) product.copy_default_variation() product.save() print("Variations: %s" % ProductVariation.objects.all().count()) print("Products: %s" % Product.objects.all().count()) def export_products(csv_file): print(_("Exporting ..")) filehandle = open(csv_file, 'w') writer = csv.DictWriter(filehandle, delimiter=',', fieldnames=fieldnames) headers = dict() for field in fieldnames: headers[field] = field writer.writerow(headers) for pv in ProductVariation.objects.all(): row = dict() row[TITLE] = pv.product.title row[CONTENT] = pv.product.content row[DESCRIPTION] = pv.product.description row[SKU] = pv.sku row[IMAGE] = pv.image # TODO: handle multiple categories, and multiple levels of categories cat = pv.product.categories.all()[0] if cat.parent: row[SUB_CATEGORY] = cat.title row[CATEGORY] = cat.parent.title else: row[CATEGORY] = cat.title row[SUB_CATEGORY] = "" for option in TYPE_CHOICES: row[option] = getattr(pv, "option%s" % TYPE_CHOICES[option]) row[NUM_IN_STOCK] = pv.num_in_stock row[UNIT_PRICE] = pv.unit_price row[SALE_PRICE] = pv.sale_price try: row[SALE_START_DATE] = pv.sale_from.strftime(DATE_FORMAT) 
row[SALE_START_TIME] = pv.sale_from.strftime(TIME_FORMAT) except AttributeError: pass try: row[SALE_END_DATE] = pv.sale_to.strftime(DATE_FORMAT) row[SALE_END_TIME] = pv.sale_to.strftime(TIME_FORMAT) except AttributeError: pass writer.writerow(row) filehandle.close()
bsd-2-clause
keimlink/django-cms
cms/extensions/extension_pool.py
26
5091
from cms.exceptions import SubClassNeededError from .models import PageExtension, TitleExtension class ExtensionPool(object): def __init__(self): self.page_extensions = set() self.title_extensions = set() self.signaling_activated = False def register(self, extension): """ Registers the given extension. """ if issubclass(extension, PageExtension): self.page_extensions.add(extension) elif issubclass(extension, TitleExtension): self.title_extensions.add(extension) else: raise SubClassNeededError( 'Extension has to subclass either %r or %r. %r does not!' % (PageExtension, TitleExtension, extension) ) self._activate_signaling() def unregister(self, extension): """ Unregisters the given extension. No error is thrown if given extension isn't an extension or wasn't registered yet. """ try: if issubclass(extension, PageExtension): self.page_extensions.remove(extension) elif issubclass(extension, TitleExtension): self.title_extensions.remove(extension) except KeyError: pass def _activate_signaling(self): """ Activates the post_publish signal receiver if not already done. """ if not self.signaling_activated: from cms.signals import post_publish post_publish.connect(self._receiver) self.signaling_activated = True def _receiver(self, sender, **kwargs): """ Receiver for the post_publish signal. Gets the published page from kwargs. 
""" # instance from kwargs is the draft page draft_page = kwargs.get('instance') language = kwargs.get('language') # get the new public page from the draft page public_page = draft_page.publisher_public if self.page_extensions: self._copy_page_extensions(draft_page, public_page, language, clone=False) self._remove_orphaned_page_extensions() if self.title_extensions: self._copy_title_extensions(draft_page, None, language, clone=False) self._remove_orphaned_title_extensions() def _copy_page_extensions(self, source_page, target_page, language, clone=False): for extension in self.page_extensions: for instance in extension.objects.filter(extended_object=source_page): if clone: instance.copy(target_page, language) else: instance.copy_to_public(target_page, language) def _copy_title_extensions(self, source_page, target_page, language, clone=False): source_title = source_page.title_set.get(language=language) if target_page: target_title = target_page.title_set.get(language=language) else: target_title = source_title.publisher_public for extension in self.title_extensions: for instance in extension.objects.filter(extended_object=source_title): if clone: instance.copy(target_title, language) else: instance.copy_to_public(target_title, language) def copy_extensions(self, source_page, target_page, languages=None): if not languages: languages = target_page.get_languages() if self.page_extensions: self._copy_page_extensions(source_page, target_page, None, clone=True) self._remove_orphaned_page_extensions() for language in languages: if self.title_extensions: self._copy_title_extensions(source_page, target_page, language, clone=True) self._remove_orphaned_title_extensions() def _remove_orphaned_page_extensions(self): for extension in self.page_extensions: extension.objects.filter( extended_object__publisher_is_draft=False, draft_extension=None ).delete() def _remove_orphaned_title_extensions(self): for extension in self.title_extensions: extension.objects.filter( 
extended_object__page__publisher_is_draft=False, draft_extension=None ).delete() def get_page_extensions(self, page=None): extensions = [] for extension in self.page_extensions: if page: extensions.extend(list(extension.objects.filter(extended_object=page))) else: extensions.extend(list(extension.objects.all())) return extensions def get_title_extensions(self, title=None): extensions = [] for extension in self.title_extensions: if title: extensions.extend(list(extension.objects.filter(extended_object=title))) else: extensions.extend(list(extension.objects.all())) return extensions extension_pool = ExtensionPool()
bsd-3-clause
trnewman/VT-USRP-daughterboard-drivers
gnuradio-examples/python/digital/benchmark_rx.py
10
3329
#!/usr/bin/env python # # Copyright 2005,2006,2007,2009 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, gru, modulation_utils from gnuradio import usrp from gnuradio import eng_notation from gnuradio.eng_option import eng_option from optparse import OptionParser import random import struct import sys # from current dir import usrp_receive_path #import os #print os.getpid() #raw_input('Attach and press enter: ') class my_top_block(gr.top_block): def __init__(self, demodulator, rx_callback, options): gr.top_block.__init__(self) # Set up receive path self.rxpath = usrp_receive_path.usrp_receive_path(demodulator, rx_callback, options) self.connect(self.rxpath) # ///////////////////////////////////////////////////////////////////////////// # main # ///////////////////////////////////////////////////////////////////////////// global n_rcvd, n_right def main(): global n_rcvd, n_right n_rcvd = 0 n_right = 0 def rx_callback(ok, payload): global n_rcvd, n_right (pktno,) = struct.unpack('!H', payload[0:2]) n_rcvd += 1 if ok: n_right += 1 print "ok = %5s pktno = %4d n_rcvd = %4d n_right = %4d" % ( ok, pktno, n_rcvd, n_right) demods = modulation_utils.type_1_demods() # Create Options Parser: parser = OptionParser (option_class=eng_option, 
conflict_handler="resolve") expert_grp = parser.add_option_group("Expert") parser.add_option("-m", "--modulation", type="choice", choices=demods.keys(), default='gmsk', help="Select modulation from: %s [default=%%default]" % (', '.join(demods.keys()),)) usrp_receive_path.add_options(parser, expert_grp) for mod in demods.values(): mod.add_options(expert_grp) (options, args) = parser.parse_args () if len(args) != 0: parser.print_help(sys.stderr) sys.exit(1) if options.rx_freq is None: sys.stderr.write("You must specify -f FREQ or --freq FREQ\n") parser.print_help(sys.stderr) sys.exit(1) # build the graph tb = my_top_block(demods[options.modulation], rx_callback, options) r = gr.enable_realtime_scheduling() if r != gr.RT_OK: print "Warning: Failed to enable realtime scheduling." tb.start() # start flow graph tb.wait() # wait for it to finish if __name__ == '__main__': try: main() except KeyboardInterrupt: pass
gpl-3.0
jmt4/Selenium2
py/selenium/webdriver/common/alert.py
44
1289
#Copyright 2007-2009 WebDriver committers #Copyright 2007-2009 Google Inc. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. from selenium.webdriver.remote.command import Command class Alert(object): def __init__(self, driver): self.driver = driver @property def text(self): """ Gets the text of the Alert """ return self.driver.execute(Command.GET_ALERT_TEXT)["value"] def dismiss(self): """ Dismisses the alert available """ self.driver.execute(Command.DISMISS_ALERT) def accept(self): """ Accepts the alert available """ self.driver.execute(Command.ACCEPT_ALERT) def send_keys(self, keysToSend): """ Send Keys to the Alert """ self.driver.execute(Command.SET_ALERT_VALUE, {'text': keysToSend})
apache-2.0
quinot/ansible
test/units/playbook/test_taggable.py
119
4453
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest from ansible.playbook.taggable import Taggable from units.mock.loader import DictDataLoader class TaggableTestObj(Taggable): def __init__(self): self._loader = DictDataLoader({}) self.tags = [] class TestTaggable(unittest.TestCase): def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags): taggable_obj = TaggableTestObj() taggable_obj.tags = tags evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {}) self.assertEqual(test_value, evaluate) def test_evaluate_tags_tag_in_only_tags(self): self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], []) def test_evaluate_tags_tag_in_skip_tags(self): self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1']) def test_evaluate_tags_special_always_in_object_tags(self): self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], []) def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self): self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag']) def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self): self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always']) def 
test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self): self.assert_evaluate_equal(True, ['tag'], ['tagged'], []) def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self): self.assert_evaluate_equal(False, [], ['tagged'], []) def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self): self.assert_evaluate_equal(False, ['tag'], [], ['tagged']) def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self): self.assert_evaluate_equal(True, [], [], ['tagged']) def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self): self.assert_evaluate_equal(False, ['tag'], ['untagged'], []) def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self): self.assert_evaluate_equal(True, [], ['untagged'], []) def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self): self.assert_evaluate_equal(True, ['tag'], [], ['untagged']) def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self): self.assert_evaluate_equal(False, [], [], ['untagged']) def test_evaluate_tags_special_all_in_only_tags(self): self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged']) def test_evaluate_tags_special_all_in_skip_tags(self): self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all']) def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self): self.assert_evaluate_equal(False, ['tag'], ['all'], ['all']) def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self): self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all']) def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self): self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always']) def test_evaluate_tags_accepts_lists(self): self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], []) def test_evaluate_tags_accepts_strings(self): self.assert_evaluate_equal(True, 'tag1,tag2', 
['tag2'], []) def test_evaluate_tags_with_repeated_tags(self): self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag'])
gpl-3.0
aserranoh/picc
picc/coff.py
1
21846
'''Inspect Microchip's PIC objects in COFF format. Copyright 2016 Antonio Serrano Hernandez This file is part of picc. picc is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. picc is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with picc; see the file COPYING. If not, see <http://www.gnu.org/licenses/>. ''' import datetime import struct from . import error __author__ = 'Antonio Serrano Hernandez' __copyright__ = 'Copyright (C) 2016 Antonio Serrano Hernandez' __version__ = '0.2.2' __license__ = 'GPL' __maintainer__ = 'Antonio Serrano Hernandez' __email__ = 'toni.serranoh@gmail.com' __status__ = 'Development' _BASE_TYPES = {0: 'T_NULL'} _C_EXT = 2 _C_FILE = 103 _C_SECTION = 109 _DERIVED_TYPES = {0: 'DT_NON'} _HDR_SIZE = 20 _LINENO_SIZE = 16 _MAGIC = 0x1240 _PROCESSORS = { 0x2550: '18f2550', 0xd616: '18f26j13', } _RELOC_SIZE = 12 _RELOC_TYPES = { 1: 'RELOCT_CALL', 2: 'RELOCT_GOTO', 3: 'RELOCT_HIGH', 4: 'RELOCT_LOW', 5: 'RELOCT_P', 6: 'RELOCT_BANKSEL', 7: 'RELOCT_PAGESEL', 8: 'RELOCT_ALL', 9: 'RELOCT_IBANKSEL', 10: 'RELOCT_F', 11: 'RELOCT_TRIS', 12: 'RELOCT_MOVLR', 13: 'RELOCT_MOVLB', 14: 'RELOCT_GOTO2/CALL2', 15: 'RELOCT_FF1', 16: 'RELOCT_FF2', 17: 'RELOCT_LFSR1', 18: 'RELOCT_LFSR2', 19: 'RELOCT_BRA/RCALL', 20: 'RELOCT_CONDBRA', 21: 'RELOCT_UPPER', 22: 'RELOCT_ACCESS', 23: 'RELOCT_PAGESEL_WREG', 24: 'RELOCT_PAGESEL_BITS', 25: 'RELOCT_SCNSZ_LOW', 26: 'RELOCT_SCNSZ_HIGH', 27: 'RELOCT_SCNSZ_UPPER', 28: 'RELOCT_SCNEND_LOW', 29: 'RELOCT_SCNEND_HIGH', 30: 'RELOCT_SCNEND_UPPER', 31: 'RELOCT_SCNEND_LFSR1', 32: 'RELOCT_SCNEND_LFSR2', } _SHDR_SIZE = 40 
_STORAGE_CLASSES = { 2: 'C_EXT', 3: 'C_STAT', 6: 'C_LABEL', 103: 'C_FILE', 107: 'C_EOF', 108: 'C_LIST', 109: 'C_SECTION', } _STYP_ABS = 0x01000 _STYP_ACCESS = 0x08000 _STYP_BSS = 0x00080 _STYP_DATA_ROM = 0x00100 _STYP_TEXT = 0x00020 _STYP_FLAGS = [_STYP_TEXT, _STYP_BSS, _STYP_DATA_ROM, _STYP_ACCESS, _STYP_ABS] _STYP_FLAGS_STR = { _STYP_TEXT: ' Executable code.', _STYP_BSS: ' Uninitialized data.', _STYP_DATA_ROM: ' Initialized data for ROM.', _STYP_ACCESS: ' Available using access bit.', _STYP_ABS: ' Absolute.' } _SYMENT_SIZE = 20 class Section(object): '''Represents a section in the COFF object file.''' def __init__(self, name, paddress, vaddress, flags): self.name = name self.paddress = paddress self.vaddress = vaddress self.flags = flags self.data = [] self.relocations = [] self.linenumbers = [] def isabsolute(self): return self.flags & _STYP_ABS def iscode(self): return self.flags & _STYP_TEXT def isaccess(self): return self.flags & _STYP_ACCESS def isudata(self): return self.flags & _STYP_BSS def isprogramdata(self): return self.flags & _STYP_DATA_ROM @property def size(self): return self._size if hasattr(self, '_size') else len(self.data) @size.setter def size(self, value): self._size = value def __str__(self): # Build the header text = ['Section Header', '\nName ', self.name, '\nPhysical address ', hex(self.paddress), '\nVirtual address ', hex(self.vaddress), '\nSize of Section ', str(self.size), '\nNumber of Relocations ', str(len(self.relocations)), '\nNumber of Line Numbers ', str(len(self.linenumbers)), '\nFlags ', hex(self.flags), '\n'] for f in _STYP_FLAGS: if f & self.flags: text.append(_STYP_FLAGS_STR[f]) text.append('\n') # Add the data if type text or program data index = 0 if self.iscode(): text.append('\nData\n') while index < len(self.data): text.append('{:06x}: {:02x}{:02x}\n'.format( index + self.paddress, self.data[index + 1], self.data[index])) index += 2 elif self.isprogramdata(): text.append('\nData\n') for b in self.data: 
text.append('{:06x}: {:04x}\n'.format( index + self.paddress, b)) index += 1 # Add relocation and line number tables if len(self.relocations) > 0: text.append('\nRelocations Table\n') text.append( 'Address Offset Type Symbol\n') for r in self.relocations: text.append(str(r)) text.append('\n') if len(self.linenumbers) > 0: text.append('\nLine Number Table\n') text.append('Line Address Symbol\n') for l in self.linenumbers: text.append(str(l)) text.append('\n') return ''.join(text) class Relocation(object): '''Represents a relocation entry in the COFF file.''' def __init__(self, address, symbol, offset, reltype): self.address = address self.symbol = symbol self.offset = offset self.reltype = reltype def __str__(self): return '{:<#10x} {:<10} {:<#4x} {:20} {}'.format(self.address, self.offset, self.reltype, _RELOC_TYPES[self.reltype], self.symbol.name) class Symbol(object): '''Represents a program's symbol.''' def __init__(self, name, value, section, base_type, derived_type, storage_class): self.name = name self.value = value self.section = section self.base_type = base_type self.derived_type = derived_type self.storage_class = storage_class self.auxsymbols = [] def addauxsymbol(self, auxsymbol): self.auxsymbols.append(auxsymbol) def isexternal(self): return self.storage_class == _C_EXT def isdefined(self): return isinstance(self.section, Section) def __str__(self): if type(self.section) == int: if self.section < 0: section = 'DEBUG' elif self.section == 0: section = 'UNDEFINED' else: section = self.section.name return '{:24} {!s:16} {:<#10x} {:8} {:12} {:9} {}'.format( self.name, section, self.value, _BASE_TYPES[self.base_type], _DERIVED_TYPES[self.derived_type], _STORAGE_CLASSES[self.storage_class], len(self.auxsymbols)) class FileAuxSymbol(object): '''Represents additional information for a symbol of type C_FILE.''' def __init__(self, filename, incline, flags): self.filename = filename self.incline = incline self.flags = flags def isexternal(self): return False def 
__str__(self): return (' file = {}\n' ' line included = {}\n' ' flags = {}').format(self.filename, self.incline, self.flags) class SectionAuxSymbol(object): '''Represents additional information for a symbol of type C_SECTION.''' def __init__(self, sectionlen, numreloc, numlinenumbers): self.sectionlen = sectionlen self.numreloc = numreloc self.numlinenumbers = numlinenumbers def isexternal(self): return False def __str__(self): return(' length = {}\n' ' number of relocations = {}\n' ' number of line numbers = {}').format(self.sectionlen, self.numreloc, self.numlinenumbers) class LineNumber(object): '''Represents a Line Number entry in a COFF file.''' def __init__(self, srcsymbol, linenumber, paddr, flags, fcnsymbol): self.srcsymbol = srcsymbol self.linenumber = linenumber self.paddr = paddr self.flags = flags self.fcnsymbol = fcnsymbol def __str__(self): return '{:<8d} {:<#8x} {}'.format(self.linenumber, self.paddr, self.srcsymbol.auxsymbols[0].filename) class Coff(object): '''Represents a COFF object in the Microchip format.''' def __init__(self, filename, timestamp, flags, magic=None, version=None, processor=None, rom_width=None, ram_width=None): '''Initialize this object with some values from the headers.''' self.filename = filename self.timestamp = timestamp self.flags = flags self.magic = magic self.version = version self.processor = processor self.rom_width = rom_width self.ram_width = ram_width # Initialize other fields self.strtable = None self.symbols = [] self.sections = [None] def getstrfromoffset(self, offset): '''Returns a string from the string table pointed by offset.''' # Substract 4 from the offset. This is because the offset includes the # 4 bytes of the strtable len, at the beginning of the string table. 
# However, this field has been stripped in the Coff object offset -= 4 end = offset if offset >= len(self.strtable): raise Exception('string table offset passed the end') while self.strtable[end] != '\0': end += 1 return self.strtable[offset:end] def getstring(self, stroffset): '''Returns a string according to the parameter stroffset. If the first 4 characters of stroffset are the character 0, that parameter is interpreted as a long value pointing to the string table. Otherwise, stroffset is the proper string. ''' zeroes, offset = struct.unpack('=LL', stroffset) if not zeroes: s = self.getstrfromoffset(offset) else: s = stroffset.decode('ascii').rstrip('\0') return s def addsymbol(self, symbol): '''Adds a new symbol to the symbols table.''' self.symbols.append(symbol) def addsection(self, section): self.sections.append(section) def __str__(self): # Add the header text = ['COFF File and Optional Headers', '\nCOFF version ', hex(_MAGIC), '\nProcessor Type ', self.processor, '\nTime Stamp ', self.timestamp.strftime('%c'), # The null section must be substracted from the total number '\nNumber of Sections ', str(len(self.sections) - 1), '\nNumber of Symbols ', str(len(self.symbols)), '\nCharacteristics ', str(self.flags), '\n', '\n'] # Add the sections for s in self.sections[1:]: text.append(str(s)) text.append('\n') # Add the symbols table text.append('Symbol Table') text.append(('\nIdx Name Section ' 'Value Type DT Class NumAux\n')) index = 1 for s in self.symbols: if isinstance(s, Symbol): text.append('{:04} {}\n'.format(index, str(s))) else: text.append(str(s)) text.append('\n') index += 1 return ''.join(text) def _readstrtable(obj, stream, offset): '''Read the string table, located at the end of the COFF file. obj: the Coff object. stream: the opened file that points to the COFF file. offset: the offset inside that file where the string table starts. 
''' # Save the current file pointer cur = stream.tell() # Go to the string table and read it stream.seek(offset) # Read the size of the table try: size, = struct.unpack('=l', stream.read(4)) size -= 4 obj.strtable = stream.read(size).decode('ascii') except struct.error: error.fatalf(obj.filename, 'truncated string table size') except UnicodeDecodeError: error.fatalf(obj.filename, 'non ASCII characters in string table') if len(obj.strtable) != size: error.fatalf(obj.filename, 'truncated string table') if obj.strtable[-1] != '\0': error.fatalf(obj.filename, 'last character of string table is not NULL') # Restore the original file pointer stream.seek(cur) def _readsymtable(obj, stream, offset, num): '''Read the symbols table. obj: the Coff objet being built. stream: the opened file that points to the COFF file. offset: the offset inside that file where the symbols table starts. num: the number of symbols in the symbols table. ''' # Save the current file pointer cur = stream.tell() # Go to the symbols table and read each entry stream.seek(offset) entry = 0 try: while entry < num: (_n, n_value, n_scnum, n_btype, n_dtype, n_sclass, n_numaux ) = struct.unpack('=8sLhHHbb', stream.read(_SYMENT_SIZE)) # Get the proper name of the symbol (maybe the string table must be # checked) name = obj.getstring(_n) # Create the symbol entry s = Symbol(name, n_value, n_scnum, n_btype, n_dtype, n_sclass) obj.addsymbol(s) # Read the aux symbols entry += 1 for i in range(n_numaux): if n_sclass == _C_FILE: x_offset, x_incline, x_flags = struct.unpack('=LLB11x', stream.read(_SYMENT_SIZE)) filename = obj.getstrfromoffset(x_offset) auxs = FileAuxSymbol(filename, x_incline, x_flags) elif n_sclass == _C_SECTION: x_scnlen, x_nreloc, x_nlinno = struct.unpack('=LHH12x', stream.read(_SYMENT_SIZE)) auxs = SectionAuxSymbol(x_scnlen, x_nreloc, x_nlinno) s.addauxsymbol(auxs) obj.addsymbol(auxs) entry += 1 # Restore the original file pointer stream.seek(cur) except struct.error: error.fatalf(obj.filename, 
'truncated symbol at position {}'.format(entry)) except UnicodeDecodeError: error.fatalf(obj.filename, 'in symbol at position {}: non ASCII ' 'characters in symbol name'.format(entry)) except Exception as e: error.fatalf(obj.filename, 'in symbol at position {}: {}'.format( entry, e)) def _readreloc(obj, section, stream, ptr, num): '''Read the relocation table for a section. obj: the parent Coff object. section: current section being read. stream: the file stream from where the data is read. ptr: the absolute offset in the coff file where the table begins. num: number of realocation entries. ''' stream.seek(ptr) try: reloc_num = 0 for i in range(num): (r_vaddr, r_symndx, r_offset, r_type ) = struct.unpack('=LLhH', stream.read(_RELOC_SIZE)) symbol = obj.symbols[r_symndx] section.relocations.append( Relocation(r_vaddr, symbol, r_offset, r_type)) reloc_num += 1 except struct.error: error.fatalf(obj.filename, "in section {b}'{name}'{re}: truncated " "relocation info at position {pos}".format( b=error.BOLD, re=error.RESET, name=section.name, pos=reloc_num)) except IndexError: error.fatalf(obj.filename, "in section {b}'{name}'{re}: relocation info at position {pos} " "points to nonexistent symbol with index {idx}".format( b=error.BOLD, re=error.RESET, name=section.name, pos=reloc_num, idx=r_symndx)) def _readlinenumbers(obj, section, stream, ptr, num): '''Read the line numbers table for a section. obj: the parent Coff object. section: current section being read. stream: the file stream from where the data is read. ptr: the absolute offset in the coff file where the table begins. num: number of line numbers entries. 
''' stream.seek(ptr) try: linenum_count = 0 for i in range(num): (l_srcndx, l_lnno, l_paddr, l_flags, l_fcnndx ) = struct.unpack('=LHLHL', stream.read(_LINENO_SIZE)) # This is just for error handling purposes symindex = l_srcndx src_symbol = obj.symbols[l_srcndx] # This is just for error handling purposes symindex = l_fcnndx fcn_symbol = obj.symbols[l_fcnndx] section.linenumbers.append( LineNumber(src_symbol, l_lnno, l_paddr, l_flags, fcn_symbol)) linenum_count += 1 except struct.error: error.fatalf(obj.filename, "in section {b}'{name}'{re}: truncated line" " number info at position {pos}".format(b=error.BOLD, re=error.RESET, name=section.name, pos=linenum_count)) except IndexError: error.fatalf(obj.filename, "in section {b}'{name}'{re}: line number " "info at position {pos} points to nonexistent symbol with index " "{idx}".format(b=error.BOLD, re=error.RESET, name=section.name, pos=linenum_count, idx=symindex)) def readcoff(stream): '''Read the contents of a COFF file. stream: from where the COFF file is read. Return a Coff object with the contents of the COFF file. 
''' filename = stream.name try: # Read filehdr where = 'header' (f_magic, f_nscns, f_timdat, f_symptr, f_nsyms, f_opthdr, f_flags ) = struct.unpack('=HHLLLHH', stream.read(_HDR_SIZE)) # Check that it's a COFF file if f_magic != _MAGIC: error.fatalf(filename, 'not a Microchip COFF file') timestamp = datetime.datetime.fromtimestamp(f_timdat) # Read optional header, if any if f_opthdr: where = 'optional header' (magic, vstamp, proc_type, rom_width_bits, ram_width_bits ) = struct.unpack('=HH2xHLL2x', stream.read(f_opthdr)) obj = Coff(filename, timestamp, f_flags, magic, vstamp, _PROCESSORS[proc_type], rom_width_bits, ram_width_bits) else: obj = Coff(filename, timestamp, f_flags) # Read the strings table _readstrtable(obj, stream, f_symptr + _SYMENT_SIZE * f_nsyms) # Read the symbols table _readsymtable(obj, stream, f_symptr, f_nsyms) # Read the sections section_num = 0 for i in range(f_nscns): where = 'section header at position {}'.format(section_num) (_s_name, s_paddr, s_vaddr, s_size, s_scnptr, s_relptr, s_lnnoptr, s_nreloc, s_nlnno, s_flags ) = struct.unpack('=8sLLLLLLHHL', stream.read(_SHDR_SIZE)) name = obj.getstring(_s_name) section = Section(name, s_paddr, s_vaddr, s_flags) # For the code sections, check that the size of the data is even if section.iscode() and s_size % 2 != 0: error.fatalf(filename, "in section {b}'{name}'{re}: code " "section data size must be multiple of 2".format( b=error.BOLD, re=error.RESET, name=name)) # For the udata section, set size attribure. 
For the others, read # the raw data if section.isudata(): section.size = s_size elif section.iscode() or section.isprogramdata(): cur = stream.tell() stream.seek(s_scnptr) where = 'data from section {}'.format(name) section.data = bytearray(stream.read(s_size)) # Read the relocation table for this section _readreloc(obj, section, stream, s_relptr, s_nreloc) # Read the line numbers table for this section _readlinenumbers(obj, section, stream, s_lnnoptr, s_nlnno) # Restore the file pointer stream.seek(cur) else: error.fatalf(filename, "in section {b}'{name}'{re}: " "unimplemented section type".format( b=error.BOLD, re=error.RESET, name=name)) obj.addsection(section) section_num += 1 # Patch the names of the sections in the symbols table symbol_num = 0 for s in obj.symbols: if isinstance(s, Symbol) and s.section > 0: # May throw IndexError s.section = obj.sections[s.section] symbol_num += 1 return obj except struct.error: error.fatalf(filename, 'truncated {}'.format(where)) except UnicodeDecodeError: error.fatalf(filename, 'in section at position {}: non ASCII ' 'characters in section name'.format(section_num)) except IndexError: error.fatalf(filename, "in symbol '{b}'{name}'{re}': points to " "nonexistent section with index {idx}".format( b=error.BOLD, re=error.RESET, name=s.name, idx=s.section)) except Exception as e: error.fatalf(filename, 'in section at position {}: {}'.format( section_num, e))
gpl-3.0
jean/sentry
src/sentry/api/endpoints/organization_shortid.py
2
1666
from __future__ import absolute_import

import six
from rest_framework.response import Response

from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.models import Group
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.utils.apidocs import scenario, attach_scenarios


@scenario('ResolveShortId')
def resolve_short_id_scenario(runner):
    # Grab any group from the default project so the docs example has a
    # real qualified short ID to resolve.
    group = Group.objects.filter(project=runner.default_project).first()
    runner.request(
        method='GET',
        path='/organizations/%s/shortids/%s/' % (runner.org.slug, group.qualified_short_id, )
    )


class ShortIdLookupEndpoint(OrganizationEndpoint):
    doc_section = DocSection.ORGANIZATIONS

    @attach_scenarios([resolve_short_id_scenario])
    def get(self, request, organization, short_id):
        """
        Resolve a Short ID
        ``````````````````

        This resolves a short ID to the project slug and internal issue ID.

        :pparam string organization_slug: the slug of the organization the
                                          short ID should be looked up in.
        :pparam string short_id: the short ID to look up.
        :auth: required
        """
        try:
            group = Group.objects.by_qualified_short_id(organization.id, short_id)
        except Group.DoesNotExist:
            # Surface an unknown short ID as a 404 rather than a 500.
            raise ResourceDoesNotExist()

        payload = {
            'organizationSlug': organization.slug,
            'projectSlug': group.project.slug,
            'groupId': six.text_type(group.id),
            'shortId': group.qualified_short_id,
        }
        return Response(payload)
bsd-3-clause
throwable-one/lettuce
tests/integration/lib/Django-1.2.5/django/contrib/localflavor/in_/in_states.py
296
1859
""" A mapping of state misspellings/abbreviations to normalized abbreviations, and an alphabetical list of states for use as `choices` in a formfield. This exists in this standalone file so that it's only imported into memory when explicitly needed. """ STATE_CHOICES = ( ('KA', 'Karnataka'), ('AP', 'Andhra Pradesh'), ('KL', 'Kerala'), ('TN', 'Tamil Nadu'), ('MH', 'Maharashtra'), ('UP', 'Uttar Pradesh'), ('GA', 'Goa'), ('GJ', 'Gujarat'), ('RJ', 'Rajasthan'), ('HP', 'Himachal Pradesh'), ('JK', 'Jammu and Kashmir'), ('AR', 'Arunachal Pradesh'), ('AS', 'Assam'), ('BR', 'Bihar'), ('CG', 'Chattisgarh'), ('HR', 'Haryana'), ('JH', 'Jharkhand'), ('MP', 'Madhya Pradesh'), ('MN', 'Manipur'), ('ML', 'Meghalaya'), ('MZ', 'Mizoram'), ('NL', 'Nagaland'), ('OR', 'Orissa'), ('PB', 'Punjab'), ('SK', 'Sikkim'), ('TR', 'Tripura'), ('UA', 'Uttarakhand'), ('WB', 'West Bengal'), # Union Territories ('AN', 'Andaman and Nicobar'), ('CH', 'Chandigarh'), ('DN', 'Dadra and Nagar Haveli'), ('DD', 'Daman and Diu'), ('DL', 'Delhi'), ('LD', 'Lakshadweep'), ('PY', 'Pondicherry'), ) STATES_NORMALIZED = { 'ka': 'KA', 'karnatka': 'KA', 'tn': 'TN', 'tamilnad': 'TN', 'tamilnadu': 'TN', 'andra pradesh': 'AP', 'andrapradesh': 'AP', 'andhrapradesh': 'AP', 'maharastra': 'MH', 'mh': 'MH', 'ap': 'AP', 'dl': 'DL', 'dd': 'DD', 'br': 'BR', 'ar': 'AR', 'sk': 'SK', 'kl': 'KL', 'ga': 'GA', 'rj': 'RJ', 'rajastan': 'RJ', 'rajasthan': 'RJ', 'hp': 'HP', 'ua': 'UA', 'up': 'UP', 'mp': 'MP', 'mz': 'MZ', 'bengal': 'WB', 'westbengal': 'WB', 'mizo': 'MZ', 'orisa': 'OR', 'odisa': 'OR', 'or': 'OR', 'ar': 'AR', }
gpl-3.0
RichardLitt/wyrd-django-dev
django/utils/dates.py
488
2237
"Commonly-used date structures" from django.utils.translation import ugettext_lazy as _, pgettext_lazy WEEKDAYS = { 0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'), 5:_('Saturday'), 6:_('Sunday') } WEEKDAYS_ABBR = { 0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'), 5:_('Sat'), 6:_('Sun') } WEEKDAYS_REV = { 'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4, 'saturday':5, 'sunday':6 } MONTHS = { 1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'), 7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'), 12:_('December') } MONTHS_3 = { 1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'), 7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec') } MONTHS_3_REV = { 'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12 } MONTHS_AP = { # month names in Associated Press style 1: pgettext_lazy('abbrev. month', 'Jan.'), 2: pgettext_lazy('abbrev. month', 'Feb.'), 3: pgettext_lazy('abbrev. month', 'March'), 4: pgettext_lazy('abbrev. month', 'April'), 5: pgettext_lazy('abbrev. month', 'May'), 6: pgettext_lazy('abbrev. month', 'June'), 7: pgettext_lazy('abbrev. month', 'July'), 8: pgettext_lazy('abbrev. month', 'Aug.'), 9: pgettext_lazy('abbrev. month', 'Sept.'), 10: pgettext_lazy('abbrev. month', 'Oct.'), 11: pgettext_lazy('abbrev. month', 'Nov.'), 12: pgettext_lazy('abbrev. month', 'Dec.') } MONTHS_ALT = { # required for long date representation by some locales 1: pgettext_lazy('alt. month', 'January'), 2: pgettext_lazy('alt. month', 'February'), 3: pgettext_lazy('alt. month', 'March'), 4: pgettext_lazy('alt. month', 'April'), 5: pgettext_lazy('alt. month', 'May'), 6: pgettext_lazy('alt. month', 'June'), 7: pgettext_lazy('alt. month', 'July'), 8: pgettext_lazy('alt. month', 'August'), 9: pgettext_lazy('alt. month', 'September'), 10: pgettext_lazy('alt. 
month', 'October'), 11: pgettext_lazy('alt. month', 'November'), 12: pgettext_lazy('alt. month', 'December') }
bsd-3-clause
Kotaimen/georest
georest/geo/feature.py
1
6761
# -*- encoding: utf-8 -*- __author__ = 'kotaimen' __date__ = '5/29/14' """ georest.geo.feature ~~~~~~~~~~~~~~~~~~~ Geo Feature object """ import copy import six import shapely.geometry.base import geojson.base from . import jsonhelper as json from .key import Key from .geometry import Geometry from .spatialref import SpatialReference from .metadata import Metadata from .exceptions import InvalidFeature, InvalidGeometry, InvalidGeoJsonInput, \ InvalidProperties class Feature(object): """ A Geo Feature with optional properties and CRS Available props: - `key`: key of the feature - `geometry`: the geometry, now a `shapely` geometry object - `properties`: property list - `crs`: coordinate reference system - `metadata`: index metadata of the geometry A `Feature` object is mutable, hashable as well as pickleable, plus satisfies python geo interface. GeoJson IO is slightly faster than `osgro.ogr` and `geojson` because internal implementation avoided this: string = json.dumps(json.loads(stuff)) Validation and check is performed in the GeoJson build method so its safe to create a feature from request GeoJson data. 
""" def __init__(self, key, geometry, crs, properties, metadata): assert isinstance(key, Key) assert Geometry.is_geometry(geometry) assert isinstance(crs, SpatialReference) assert isinstance(metadata, Metadata) self._geometry = geometry self._properties = properties self._key = key self._crs = crs self._metadata = metadata @property def geometry(self): return self._geometry @geometry.setter def geometry(self, geometry): assert Geometry.is_geometry(geometry) self._geometry = geometry self._crs = geometry.crs self.refresh_metadata() @property def properties(self): return self._properties @property def key(self): return self._key @property def crs(self): return self._crs @property def metadata(self): return self._metadata def refresh_metadata(self): self._metadata = self._metadata.spawn(self._geometry) def equals(self, other): assert isinstance(other, Feature) return self._geometry.equals(other._geometry) and \ self._crs.equals(other._crs) and \ self._properties == other._properties def almost_equals(self, other): return self._geometry.almost_equals(other._geometry) and \ self._crs.equals(other._crs) and \ self._properties == other._properties def duplicate(self): srid = self._crs.srid geometry = Geometry.build_geometry(self._geometry, srid=srid, copy=True) properties = copy.deepcopy(self._properties) return Feature.build_from_geometry(geometry, key=self._key, properties=properties) @property def __geo_interface__(self): geo_obj = dict(type='Feature', geometry=shapely.geometry.mapping(self._geometry), properties=self._properties, id=self._key.qualified_name) if not self._crs or self._crs.srid != 4326: geo_obj['crs'] = self._crs.geojson return geo_obj @property def geojson(self): return json.dumps(self.__geo_interface__, double_precision=7) @classmethod def build_from_geometry(cls, geo_input, key=None, srid=4326, properties=None): geometry = Geometry.build_geometry(geo_input, srid=srid) metadata = Metadata.make_metadata(geometry=geometry) if key is None: key = 
Key.make_key() if properties is None: properties = dict() crs = geometry.crs return cls(key, geometry, crs, properties, metadata) @classmethod def build_from_geojson(cls, geo_input, key=None, srid=4326, precise_float=True): # load json as python literal if necessary if isinstance(geo_input, six.string_types): try: literal = json.loads(geo_input, precise_float=precise_float) except (KeyError, ValueError) as e: raise InvalidGeoJsonInput(e=e) elif isinstance(geo_input, dict): literal = geo_input else: raise InvalidGeoJsonInput('Not a GeoJson or a literal object') # basic feature structural check try: if literal['type'] != 'Feature': raise InvalidGeoJsonInput('Not a GeoFeature object') except KeyError as e: raise InvalidGeoJsonInput(e=e) if 'geometry' not in literal: raise InvalidGeoJsonInput('No geometry in the feature') # load crs from geojson first if 'crs' in literal and literal['crs']: crs = SpatialReference.build_from_geojson_crs(literal['crs']) srid = crs.srid else: crs = SpatialReference(srid) # build geojson feature object try: geojson_feature = geojson.base.GeoJSON.to_instance(literal) except (TypeError, ValueError) as e: raise InvalidFeature(e=e) if geojson_feature['geometry'] is None: raise InvalidGeometry('Invalid geometry') if geojson_feature['properties'] is None: properties = dict() else: properties = geojson_feature['properties'] # assemble the Feature geometry = Geometry.build_geometry(geojson_feature['geometry'], srid=srid) metadata = Metadata.make_metadata(geometry=geometry) if key is None: key = Key.make_key() return cls(key, geometry, crs, properties, metadata) def __repr__(self): feature = self.__geo_interface__ feature.update(self.metadata._asdict()) return 'Feature(%s)' % json.dumps(feature) def __hash__(self): return hash(self._key) def __getstate__(self): return (self._key, self._geometry.wkb, self._crs, self._properties, self._metadata) def __setstate__(self, state): key, wkb, crs, properties, metadata = state geometry = 
Geometry.build_geometry(wkb, srid=crs.srid) self._key = key self._geometry = geometry self._crs = crs self._properties = properties self._metadata = metadata
bsd-2-clause
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.1/Lib/distutils/command/install_scripts.py
3
2102
"""distutils.command.install_scripts Implements the Distutils 'install_scripts' command, for installing Python scripts.""" # contributed by Bastian Kleineidam __revision__ = "$Id: install_scripts.py 57699 2007-08-30 03:52:21Z collin.winter $" import os from distutils.core import Command from distutils import log from stat import ST_MODE class install_scripts(Command): description = "install scripts (Python or otherwise)" user_options = [ ('install-dir=', 'd', "directory to install scripts to"), ('build-dir=','b', "build directory (where to install from)"), ('force', 'f', "force installation (overwrite existing files)"), ('skip-build', None, "skip the build steps"), ] boolean_options = ['force', 'skip-build'] def initialize_options(self): self.install_dir = None self.force = 0 self.build_dir = None self.skip_build = None def finalize_options(self): self.set_undefined_options('build', ('build_scripts', 'build_dir')) self.set_undefined_options('install', ('install_scripts', 'install_dir'), ('force', 'force'), ('skip_build', 'skip_build'), ) def run(self): if not self.skip_build: self.run_command('build_scripts') self.outfiles = self.copy_tree(self.build_dir, self.install_dir) if os.name == 'posix': # Set the executable bits (owner, group, and world) on # all the scripts we just installed. for file in self.get_outputs(): if self.dry_run: log.info("changing mode of %s", file) else: mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777 log.info("changing mode of %s to %o", file, mode) os.chmod(file, mode) def get_inputs(self): return self.distribution.scripts or [] def get_outputs(self): return self.outfiles or []
mit
ddd332/presto
presto-docs/target/sphinx/jinja2/debug.py
112
11028
# -*- coding: utf-8 -*- """ jinja2.debug ~~~~~~~~~~~~ Implements the debug interface for Jinja. This module does some pretty ugly stuff with the Python traceback system in order to achieve tracebacks with correct line numbers, locals and contents. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import sys import traceback from types import TracebackType from jinja2.utils import CodeType, missing, internal_code from jinja2.exceptions import TemplateSyntaxError # on pypy we can take advantage of transparent proxies try: from __pypy__ import tproxy except ImportError: tproxy = None # how does the raise helper look like? try: exec "raise TypeError, 'foo'" except SyntaxError: raise_helper = 'raise __jinja_exception__[1]' except TypeError: raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]' class TracebackFrameProxy(object): """Proxies a traceback frame.""" def __init__(self, tb): self.tb = tb self._tb_next = None @property def tb_next(self): return self._tb_next def set_next(self, next): if tb_set_next is not None: try: tb_set_next(self.tb, next and next.tb or None) except Exception: # this function can fail due to all the hackery it does # on various python implementations. We just catch errors # down and ignore them if necessary. 
pass self._tb_next = next @property def is_jinja_frame(self): return '__jinja_template__' in self.tb.tb_frame.f_globals def __getattr__(self, name): return getattr(self.tb, name) def make_frame_proxy(frame): proxy = TracebackFrameProxy(frame) if tproxy is None: return proxy def operation_handler(operation, *args, **kwargs): if operation in ('__getattribute__', '__getattr__'): return getattr(proxy, args[0]) elif operation == '__setattr__': proxy.__setattr__(*args, **kwargs) else: return getattr(proxy, operation)(*args, **kwargs) return tproxy(TracebackType, operation_handler) class ProcessedTraceback(object): """Holds a Jinja preprocessed traceback for priting or reraising.""" def __init__(self, exc_type, exc_value, frames): assert frames, 'no frames for this traceback?' self.exc_type = exc_type self.exc_value = exc_value self.frames = frames # newly concatenate the frames (which are proxies) prev_tb = None for tb in self.frames: if prev_tb is not None: prev_tb.set_next(tb) prev_tb = tb prev_tb.set_next(None) def render_as_text(self, limit=None): """Return a string with the traceback.""" lines = traceback.format_exception(self.exc_type, self.exc_value, self.frames[0], limit=limit) return ''.join(lines).rstrip() def render_as_html(self, full=False): """Return a unicode string with the traceback as rendered HTML.""" from jinja2.debugrenderer import render_traceback return u'%s\n\n<!--\n%s\n-->' % ( render_traceback(self, full=full), self.render_as_text().decode('utf-8', 'replace') ) @property def is_template_syntax_error(self): """`True` if this is a template syntax error.""" return isinstance(self.exc_value, TemplateSyntaxError) @property def exc_info(self): """Exception info tuple with a proxy around the frame objects.""" return self.exc_type, self.exc_value, self.frames[0] @property def standard_exc_info(self): """Standard python exc_info for re-raising""" tb = self.frames[0] # the frame will be an actual traceback (or transparent proxy) if # we are on pypy or a 
python implementation with support for tproxy if type(tb) is not TracebackType: tb = tb.tb return self.exc_type, self.exc_value, tb def make_traceback(exc_info, source_hint=None): """Creates a processed traceback object from the exc_info.""" exc_type, exc_value, tb = exc_info if isinstance(exc_value, TemplateSyntaxError): exc_info = translate_syntax_error(exc_value, source_hint) initial_skip = 0 else: initial_skip = 1 return translate_exception(exc_info, initial_skip) def translate_syntax_error(error, source=None): """Rewrites a syntax error to please traceback systems.""" error.source = source error.translated = True exc_info = (error.__class__, error, None) filename = error.filename if filename is None: filename = '<unknown>' return fake_exc_info(exc_info, filename, error.lineno) def translate_exception(exc_info, initial_skip=0): """If passed an exc_info it will automatically rewrite the exceptions all the way down to the correct line numbers and frames. """ tb = exc_info[2] frames = [] # skip some internal frames if wanted for x in xrange(initial_skip): if tb is not None: tb = tb.tb_next initial_tb = tb while tb is not None: # skip frames decorated with @internalcode. These are internal # calls we can't avoid and that are useless in template debugging # output. if tb.tb_frame.f_code in internal_code: tb = tb.tb_next continue # save a reference to the next frame if we override the current # one with a faked one. next = tb.tb_next # fake template exceptions template = tb.tb_frame.f_globals.get('__jinja_template__') if template is not None: lineno = template.get_corresponding_lineno(tb.tb_lineno) tb = fake_exc_info(exc_info[:2] + (tb,), template.filename, lineno)[2] frames.append(make_frame_proxy(tb)) tb = next # if we don't have any exceptions in the frames left, we have to # reraise it unchanged. # XXX: can we backup here? when could this happen? 
if not frames: raise exc_info[0], exc_info[1], exc_info[2] return ProcessedTraceback(exc_info[0], exc_info[1], frames) def fake_exc_info(exc_info, filename, lineno): """Helper for `translate_exception`.""" exc_type, exc_value, tb = exc_info # figure the real context out if tb is not None: real_locals = tb.tb_frame.f_locals.copy() ctx = real_locals.get('context') if ctx: locals = ctx.get_all() else: locals = {} for name, value in real_locals.iteritems(): if name.startswith('l_') and value is not missing: locals[name[2:]] = value # if there is a local called __jinja_exception__, we get # rid of it to not break the debug functionality. locals.pop('__jinja_exception__', None) else: locals = {} # assamble fake globals we need globals = { '__name__': filename, '__file__': filename, '__jinja_exception__': exc_info[:2], # we don't want to keep the reference to the template around # to not cause circular dependencies, but we mark it as Jinja # frame for the ProcessedTraceback '__jinja_template__': None } # and fake the exception code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec') # if it's possible, change the name of the code. 
This won't work # on some python environments such as google appengine try: if tb is None: location = 'template' else: function = tb.tb_frame.f_code.co_name if function == 'root': location = 'top-level template code' elif function.startswith('block_'): location = 'block "%s"' % function[6:] else: location = 'template' code = CodeType(0, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, code.co_consts, code.co_names, code.co_varnames, filename, location, code.co_firstlineno, code.co_lnotab, (), ()) except: pass # execute the code and catch the new traceback try: exec code in globals, locals except: exc_info = sys.exc_info() new_tb = exc_info[2].tb_next # return without this frame return exc_info[:2] + (new_tb,) def _init_ugly_crap(): """This function implements a few ugly things so that we can patch the traceback objects. The function returned allows resetting `tb_next` on any python traceback object. Do not attempt to use this on non cpython interpreters """ import ctypes from types import TracebackType # figure out side of _Py_ssize_t if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): _Py_ssize_t = ctypes.c_int64 else: _Py_ssize_t = ctypes.c_int # regular python class _PyObject(ctypes.Structure): pass _PyObject._fields_ = [ ('ob_refcnt', _Py_ssize_t), ('ob_type', ctypes.POINTER(_PyObject)) ] # python with trace if hasattr(sys, 'getobjects'): class _PyObject(ctypes.Structure): pass _PyObject._fields_ = [ ('_ob_next', ctypes.POINTER(_PyObject)), ('_ob_prev', ctypes.POINTER(_PyObject)), ('ob_refcnt', _Py_ssize_t), ('ob_type', ctypes.POINTER(_PyObject)) ] class _Traceback(_PyObject): pass _Traceback._fields_ = [ ('tb_next', ctypes.POINTER(_Traceback)), ('tb_frame', ctypes.POINTER(_PyObject)), ('tb_lasti', ctypes.c_int), ('tb_lineno', ctypes.c_int) ] def tb_set_next(tb, next): """Set the tb_next attribute of a traceback object.""" if not (isinstance(tb, TracebackType) and (next is None or isinstance(next, TracebackType))): raise TypeError('tb_set_next 
arguments must be traceback objects') obj = _Traceback.from_address(id(tb)) if tb.tb_next is not None: old = _Traceback.from_address(id(tb.tb_next)) old.ob_refcnt -= 1 if next is None: obj.tb_next = ctypes.POINTER(_Traceback)() else: next = _Traceback.from_address(id(next)) next.ob_refcnt += 1 obj.tb_next = ctypes.pointer(next) return tb_set_next # try to get a tb_set_next implementation if we don't have transparent # proxies. tb_set_next = None if tproxy is None: try: from jinja2._debugsupport import tb_set_next except ImportError: try: tb_set_next = _init_ugly_crap() except: pass del _init_ugly_crap
apache-2.0
nathanial/lettuce
tests/integration/lib/Django-1.2.5/tests/regressiontests/modeladmin/tests.py
39
41732
from datetime import date import unittest from django import forms from django.conf import settings from django.contrib.admin.options import ModelAdmin, TabularInline, \ HORIZONTAL, VERTICAL from django.contrib.admin.sites import AdminSite from django.contrib.admin.validation import validate from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect from django.core.exceptions import ImproperlyConfigured from django.forms.models import BaseModelFormSet from django.forms.widgets import Select from django.test import TestCase from models import Band, Concert, ValidationTestModel, \ ValidationTestInlineModel # None of the following tests really depend on the content of the request, # so we'll just pass in None. request = None class ModelAdminTests(TestCase): def setUp(self): self.band = Band.objects.create( name='The Doors', bio='', sign_date=date(1965, 1, 1), ) self.site = AdminSite() # form/fields/fieldsets interaction ############################## def test_default_fields(self): ma = ModelAdmin(Band, self.site) self.assertEquals(ma.get_form(request).base_fields.keys(), ['name', 'bio', 'sign_date']) def test_default_fieldsets(self): # fieldsets_add and fieldsets_change should return a special data structure that # is used in the templates. They should generate the "right thing" whether we # have specified a custom form, the fields argument, or nothing at all. # # Here's the default case. There are no custom form_add/form_change methods, # no fields argument, and no fieldsets argument. ma = ModelAdmin(Band, self.site) self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name', 'bio', 'sign_date']})]) self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name', 'bio', 'sign_date']})]) def test_field_arguments(self): # If we specify the fields argument, fieldsets_add and fielsets_change should # just stick the fields into a formsets structure and return it. 
class BandAdmin(ModelAdmin): fields = ['name'] ma = BandAdmin(Band, self.site) self.assertEqual( ma.get_fieldsets(request), [(None, {'fields': ['name']})]) self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name']})]) def test_field_arguments_restricted_on_form(self): # If we specify fields or fieldsets, it should exclude fields on the Form class # to the fields specified. This may cause errors to be raised in the db layer if # required model fields arent in fields/fieldsets, but that's preferable to # ghost errors where you have a field in your Form class that isn't being # displayed because you forgot to add it to fields/fieldsets # Using `fields`. class BandAdmin(ModelAdmin): fields = ['name'] ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name']) self.assertEqual(ma.get_form(request, self.band).base_fields.keys(), ['name']) # Using `fieldsets`. class BandAdmin(ModelAdmin): fieldsets = [(None, {'fields': ['name']})] ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name']) self.assertEqual(ma.get_form(request, self.band).base_fields.keys(), ['name']) # Using `exclude`. class BandAdmin(ModelAdmin): exclude = ['bio'] ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name', 'sign_date']) # You can also pass a tuple to `exclude`. class BandAdmin(ModelAdmin): exclude = ('bio',) ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name', 'sign_date']) # Using `fields` and `exclude`. class BandAdmin(ModelAdmin): fields = ['name', 'bio'] exclude = ['bio'] ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name']) def test_custom_form_validation(self): # If we specify a form, it should use it allowing custom validation to work # properly. This won't, however, break any of the admin widgets or media. 
class AdminBandForm(forms.ModelForm): delete = forms.BooleanField() class Meta: model = Band class BandAdmin(ModelAdmin): form = AdminBandForm ma = BandAdmin(Band, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['name', 'bio', 'sign_date', 'delete']) self.assertEqual( type(ma.get_form(request).base_fields['sign_date'].widget), AdminDateWidget) def test_queryset_override(self): # If we need to override the queryset of a ModelChoiceField in our custom form # make sure that RelatedFieldWidgetWrapper doesn't mess that up. band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1)) band2.save() class ConcertAdmin(ModelAdmin): pass ma = ConcertAdmin(Concert, self.site) form = ma.get_form(request)() self.assertEqual(str(form["main_band"]), '<select name="main_band" id="id_main_band">\n' '<option value="" selected="selected">---------</option>\n' '<option value="%d">The Doors</option>\n' '<option value="%d">The Beatles</option>\n' '</select>' % (self.band.id, band2.id)) class AdminConcertForm(forms.ModelForm): class Meta: model = Concert def __init__(self, *args, **kwargs): super(AdminConcertForm, self).__init__(*args, **kwargs) self.fields["main_band"].queryset = Band.objects.filter(name='The Doors') class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) form = ma.get_form(request)() self.assertEqual(str(form["main_band"]), '<select name="main_band" id="id_main_band">\n' '<option value="" selected="selected">---------</option>\n' '<option value="%d">The Doors</option>\n' '</select>' % self.band.id) # radio_fields behavior ########################################### def test_default_foreign_key_widget(self): # First, without any radio_fields specified, the widgets for ForeignKey # and fields with choices specified ought to be a basic Select widget. # ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so # they need to be handled properly when type checking. 
For Select fields, all of # the choices lists have a first entry of dashes. cma = ModelAdmin(Concert, self.site) cmafa = cma.get_form(request) self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), Select) self.assertEqual( list(cmafa.base_fields['main_band'].widget.choices), [(u'', u'---------'), (self.band.id, u'The Doors')]) self.assertEqual( type(cmafa.base_fields['opening_band'].widget.widget), Select) self.assertEqual( list(cmafa.base_fields['opening_band'].widget.choices), [(u'', u'---------'), (self.band.id, u'The Doors')]) self.assertEqual(type(cmafa.base_fields['day'].widget), Select) self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [('', '---------'), (1, 'Fri'), (2, 'Sat')]) self.assertEqual(type(cmafa.base_fields['transport'].widget), Select) self.assertEqual( list(cmafa.base_fields['transport'].widget.choices), [('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]) def test_foreign_key_as_radio_field(self): # Now specify all the fields as radio_fields. Widgets should now be # RadioSelect, and the choices list should have a first entry of 'None' if # blank=True for the model field. Finally, the widget should have the # 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL. 
class ConcertAdmin(ModelAdmin): radio_fields = { 'main_band': HORIZONTAL, 'opening_band': VERTICAL, 'day': VERTICAL, 'transport': HORIZONTAL, } cma = ConcertAdmin(Concert, self.site) cmafa = cma.get_form(request) self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['main_band'].widget.attrs, {'class': 'radiolist inline'}) self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices), [(self.band.id, u'The Doors')]) self.assertEqual( type(cmafa.base_fields['opening_band'].widget.widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs, {'class': 'radiolist'}) self.assertEqual( list(cmafa.base_fields['opening_band'].widget.choices), [(u'', u'None'), (self.band.id, u'The Doors')]) self.assertEqual(type(cmafa.base_fields['day'].widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['day'].widget.attrs, {'class': 'radiolist'}) self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [(1, 'Fri'), (2, 'Sat')]) self.assertEqual(type(cmafa.base_fields['transport'].widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields['transport'].widget.attrs, {'class': 'radiolist inline'}) self.assertEqual(list(cmafa.base_fields['transport'].widget.choices), [('', u'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]) class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ('transport',) class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['main_band', 'opening_band', 'day']) class AdminConcertForm(forms.ModelForm): extra = forms.CharField() class Meta: model = Concert fields = ['extra', 'transport'] class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) self.assertEqual(ma.get_form(request).base_fields.keys(), ['extra', 'transport']) class ConcertInline(TabularInline): form = AdminConcertForm model = 
Concert fk_name = 'main_band' can_delete = True class BandAdmin(ModelAdmin): inlines = [ ConcertInline ] ma = BandAdmin(Band, self.site) self.assertEqual( list(ma.get_formsets(request))[0]().forms[0].fields.keys(), ['extra', 'transport', 'id', 'DELETE', 'main_band']) class ValidationTests(unittest.TestCase): def assertRaisesErrorWithMessage(self, error, message, callable, *args, **kwargs): self.assertRaises(error, callable, *args, **kwargs) try: callable(*args, **kwargs) except error, e: self.assertEqual(message, str(e)) def test_validation_only_runs_in_debug(self): # Ensure validation only runs when DEBUG = True try: settings.DEBUG = True class ValidationTestModelAdmin(ModelAdmin): raw_id_fields = 10 site = AdminSite() self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.", site.register, ValidationTestModel, ValidationTestModelAdmin, ) finally: settings.DEBUG = False site = AdminSite() site.register(ValidationTestModel, ValidationTestModelAdmin) def test_raw_id_fields_validation(self): class ValidationTestModelAdmin(ModelAdmin): raw_id_fields = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): raw_id_fields = ('non_existent_field',) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.raw_id_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): raw_id_fields = ('name',) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.raw_id_fields[0]', 'name' must be either a ForeignKey or ManyToManyField.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): raw_id_fields 
= ('users',) validate(ValidationTestModelAdmin, ValidationTestModel) def test_fieldsets_validation(self): class ValidationTestModelAdmin(ModelAdmin): fieldsets = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = ({},) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets[0]' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = ((),) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets[0]' does not have exactly two elements.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", ()),) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets[0][1]' must be a dictionary.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", {}),) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'fields' key is required in ValidationTestModelAdmin.fieldsets[0][1] field options dict.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", {"fields": ("non_existent_field",)}),) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.fieldsets[0][1]['fields']' refers to field 'non_existent_field' that is missing from the form.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", {"fields": ("name",)}),) validate(ValidationTestModelAdmin, ValidationTestModel) class ValidationTestModelAdmin(ModelAdmin): fieldsets = (("General", {"fields": ("name",)}),) fields = 
["name",] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "Both fieldsets and fields are specified in ValidationTestModelAdmin.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fieldsets = [(None, {'fields': ['name', 'name']})] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "There are duplicate field(s) in ValidationTestModelAdmin.fieldsets", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): fields = ["name", "name"] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "There are duplicate field(s) in ValidationTestModelAdmin.fields", validate, ValidationTestModelAdmin, ValidationTestModel, ) def test_form_validation(self): class FakeForm(object): pass class ValidationTestModelAdmin(ModelAdmin): form = FakeForm self.assertRaisesErrorWithMessage( ImproperlyConfigured, "ValidationTestModelAdmin.form does not inherit from BaseModelForm.", validate, ValidationTestModelAdmin, ValidationTestModel, ) def test_fieldsets_with_custom_form_validation(self): class BandAdmin(ModelAdmin): fieldsets = ( ('Band', { 'fields': ('non_existent_field',) }), ) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'BandAdmin.fieldsets[0][1]['fields']' refers to field 'non_existent_field' that is missing from the form.", validate, BandAdmin, Band, ) class BandAdmin(ModelAdmin): fieldsets = ( ('Band', { 'fields': ('name',) }), ) validate(BandAdmin, Band) class AdminBandForm(forms.ModelForm): class Meta: model = Band class BandAdmin(ModelAdmin): form = AdminBandForm fieldsets = ( ('Band', { 'fields': ('non_existent_field',) }), ) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'BandAdmin.fieldsets[0][1]['fields']' refers to field 'non_existent_field' that is missing from the form.", validate, BandAdmin, Band, ) class AdminBandForm(forms.ModelForm): delete = forms.BooleanField() class Meta: model = Band class BandAdmin(ModelAdmin): form = 
AdminBandForm fieldsets = ( ('Band', { 'fields': ('name', 'bio', 'sign_date', 'delete') }), ) validate(BandAdmin, Band) def test_filter_vertical_validation(self): class ValidationTestModelAdmin(ModelAdmin): filter_vertical = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_vertical' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_vertical = ("non_existent_field",) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_vertical' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_vertical = ("name",) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_vertical[0]' must be a ManyToManyField.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_vertical = ("users",) validate(ValidationTestModelAdmin, ValidationTestModel) def test_filter_horizontal_validation(self): class ValidationTestModelAdmin(ModelAdmin): filter_horizontal = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_horizontal' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_horizontal = ("non_existent_field",) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_horizontal' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_horizontal = ("name",) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.filter_horizontal[0]' must be a ManyToManyField.", 
validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): filter_horizontal = ("users",) validate(ValidationTestModelAdmin, ValidationTestModel) def test_radio_fields_validation(self): class ValidationTestModelAdmin(ModelAdmin): radio_fields = () self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.radio_fields' must be a dictionary.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): radio_fields = {"non_existent_field": None} self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.radio_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): radio_fields = {"name": None} self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.radio_fields['name']' is neither an instance of ForeignKey nor does have choices set.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): radio_fields = {"state": None} self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.radio_fields['state']' is neither admin.HORIZONTAL nor admin.VERTICAL.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): radio_fields = {"state": VERTICAL} validate(ValidationTestModelAdmin, ValidationTestModel) def test_prepopulated_fields_validation(self): class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = () self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.prepopulated_fields' must be a dictionary.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = {"non_existent_field": None} self.assertRaisesErrorWithMessage( ImproperlyConfigured, 
"'ValidationTestModelAdmin.prepopulated_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = {"slug": ("non_existent_field",)} self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.prepopulated_fields['slug'][0]' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = {"users": ("name",)} self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.prepopulated_fields['users']' is either a DateTimeField, ForeignKey or ManyToManyField. This isn't allowed.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): prepopulated_fields = {"slug": ("name",)} validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_display_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_display = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display = ('non_existent_field',) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "ValidationTestModelAdmin.list_display[0], 'non_existent_field' is not a callable or an attribute of 'ValidationTestModelAdmin' or found in the model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display = ('users',) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display[0]', 'users' is a ManyToManyField which is not supported.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class 
ValidationTestModelAdmin(ModelAdmin): list_display = ('name',) validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_display_links_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_display_links = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display_links' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display_links = ('non_existent_field',) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display_links[0]' refers to 'non_existent_field' that is neither a field, method or property of model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display_links = ('name',) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_display_links[0]'refers to 'name' which is not defined in 'list_display'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_display = ('name',) list_display_links = ('name',) validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_filter_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_filter = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_filter' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_filter = ('non_existent_field',) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_filter[0]' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_filter = ('is_active',) validate(ValidationTestModelAdmin, ValidationTestModel) def 
test_list_per_page_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_per_page = 'hello' self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_per_page' should be a integer.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_per_page = 100 validate(ValidationTestModelAdmin, ValidationTestModel) def test_search_fields_validation(self): class ValidationTestModelAdmin(ModelAdmin): search_fields = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.search_fields' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) def test_date_hierarchy_validation(self): class ValidationTestModelAdmin(ModelAdmin): date_hierarchy = 'non_existent_field' self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.date_hierarchy' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): date_hierarchy = 'name' self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): date_hierarchy = 'pub_date' validate(ValidationTestModelAdmin, ValidationTestModel) def test_ordering_validation(self): class ValidationTestModelAdmin(ModelAdmin): ordering = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.ordering' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): ordering = ('non_existent_field',) self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.ordering[0]' refers to field 'non_existent_field' that is missing from model 
'ValidationTestModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): ordering = ('?', 'name') self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.ordering' has the random ordering marker '?', but contains other fields as well. Please either remove '?' or the other fields.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): ordering = ('?',) validate(ValidationTestModelAdmin, ValidationTestModel) class ValidationTestModelAdmin(ModelAdmin): ordering = ('band__name',) validate(ValidationTestModelAdmin, ValidationTestModel) class ValidationTestModelAdmin(ModelAdmin): ordering = ('name',) validate(ValidationTestModelAdmin, ValidationTestModel) def test_list_select_related_validation(self): class ValidationTestModelAdmin(ModelAdmin): list_select_related = 1 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.list_select_related' should be a boolean.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): list_select_related = False validate(ValidationTestModelAdmin, ValidationTestModel) def test_save_as_validation(self): class ValidationTestModelAdmin(ModelAdmin): save_as = 1 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.save_as' should be a boolean.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): save_as = True validate(ValidationTestModelAdmin, ValidationTestModel) def test_save_on_top_validation(self): class ValidationTestModelAdmin(ModelAdmin): save_on_top = 1 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.save_on_top' should be a boolean.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestModelAdmin(ModelAdmin): save_on_top = True validate(ValidationTestModelAdmin, ValidationTestModel) 
def test_inlines_validation(self): class ValidationTestModelAdmin(ModelAdmin): inlines = 10 self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.inlines' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(object): pass class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.inlines[0]' does not inherit from BaseModelAdmin.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): pass class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'model' is a required attribute of 'ValidationTestModelAdmin.inlines[0]'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class SomethingBad(object): pass class ValidationTestInline(TabularInline): model = SomethingBad class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestModelAdmin.inlines[0].model' does not inherit from models.Model.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel) def test_fields_validation(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fields = 10 class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestInline.fields' must be a list or tuple.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fields = ("non_existent_field",) class 
ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestInline.fields' refers to field 'non_existent_field' that is missing from the form.", validate, ValidationTestModelAdmin, ValidationTestModel, ) def test_fk_name_validation(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fk_name = "non_existent_field" class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestInline.fk_name' refers to field 'non_existent_field' that is missing from model 'ValidationTestInlineModel'.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel fk_name = "parent" class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel) def test_extra_validation(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel extra = "hello" class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestInline.extra' should be a integer.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class ValidationTestInline(TabularInline): model = ValidationTestInlineModel extra = 2 class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel) def test_max_num_validation(self): class ValidationTestInline(TabularInline): model = ValidationTestInlineModel max_num = "hello" class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestInline.max_num' should be an integer or None (default).", validate, ValidationTestModelAdmin, ValidationTestModel, ) class 
ValidationTestInline(TabularInline): model = ValidationTestInlineModel max_num = 2 class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel) def test_formset_validation(self): class FakeFormSet(object): pass class ValidationTestInline(TabularInline): model = ValidationTestInlineModel formset = FakeFormSet class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] self.assertRaisesErrorWithMessage( ImproperlyConfigured, "'ValidationTestInline.formset' does not inherit from BaseModelFormSet.", validate, ValidationTestModelAdmin, ValidationTestModel, ) class RealModelFormSet(BaseModelFormSet): pass class ValidationTestInline(TabularInline): model = ValidationTestInlineModel formset = RealModelFormSet class ValidationTestModelAdmin(ModelAdmin): inlines = [ValidationTestInline] validate(ValidationTestModelAdmin, ValidationTestModel)
gpl-3.0
tareqalayan/ansible
test/legacy/cleanup_rax.py
160
6547
#!/usr/bin/env python import os import re import yaml import argparse try: import pyrax HAS_PYRAX = True except ImportError: HAS_PYRAX = False from ansible.module_utils.six.moves import input def rax_list_iterator(svc, *args, **kwargs): method = kwargs.pop('method', 'list') items = getattr(svc, method)(*args, **kwargs) while items: retrieved = getattr(svc, method)(*args, marker=items[-1].id, **kwargs) if items and retrieved and items[-1].id == retrieved[0].id: del items[-1] items.extend(retrieved) if len(retrieved) < 2: break return items def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-y', '--yes', action='store_true', dest='assumeyes', default=False, help="Don't prompt for confirmation") parser.add_argument('--match', dest='match_re', default='^ansible-testing', help='Regular expression used to find resources ' '(default: %(default)s)') return parser.parse_args() def authenticate(): try: with open(os.path.realpath('./credentials.yml')) as f: credentials = yaml.load(f) except Exception as e: raise SystemExit(e) try: pyrax.set_credentials(credentials.get('rackspace_username'), credentials.get('rackspace_api_key')) except Exception as e: raise SystemExit(e) def prompt_and_delete(item, prompt, assumeyes): if not assumeyes: assumeyes = input(prompt).lower() == 'y' assert hasattr(item, 'delete') or hasattr(item, 'terminate'), \ "Class <%s> has no delete or terminate attribute" % item.__class__ if assumeyes: if hasattr(item, 'delete'): item.delete() print("Deleted %s" % item) if hasattr(item, 'terminate'): item.terminate() print("Terminated %s" % item) def delete_rax(args): """Function for deleting CloudServers""" print("--- Cleaning CloudServers matching '%s'" % args.match_re) search_opts = dict(name='^%s' % args.match_re) for region in pyrax.identity.services.compute.regions: cs = pyrax.connect_to_cloudservers(region=region) servers = rax_list_iterator(cs.servers, search_opts=search_opts) for server in servers: prompt_and_delete(server, 
'Delete matching %s? [y/n]: ' % server, args.assumeyes) def delete_rax_clb(args): """Function for deleting Cloud Load Balancers""" print("--- Cleaning Cloud Load Balancers matching '%s'" % args.match_re) for region in pyrax.identity.services.load_balancer.regions: clb = pyrax.connect_to_cloud_loadbalancers(region=region) for lb in rax_list_iterator(clb): if re.search(args.match_re, lb.name): prompt_and_delete(lb, 'Delete matching %s? [y/n]: ' % lb, args.assumeyes) def delete_rax_keypair(args): """Function for deleting Rackspace Key pairs""" print("--- Cleaning Key Pairs matching '%s'" % args.match_re) for region in pyrax.identity.services.compute.regions: cs = pyrax.connect_to_cloudservers(region=region) for keypair in cs.keypairs.list(): if re.search(args.match_re, keypair.name): prompt_and_delete(keypair, 'Delete matching %s? [y/n]: ' % keypair, args.assumeyes) def delete_rax_network(args): """Function for deleting Cloud Networks""" print("--- Cleaning Cloud Networks matching '%s'" % args.match_re) for region in pyrax.identity.services.network.regions: cnw = pyrax.connect_to_cloud_networks(region=region) for network in cnw.list(): if re.search(args.match_re, network.name): prompt_and_delete(network, 'Delete matching %s? [y/n]: ' % network, args.assumeyes) def delete_rax_cbs(args): """Function for deleting Cloud Networks""" print("--- Cleaning Cloud Block Storage matching '%s'" % args.match_re) for region in pyrax.identity.services.network.regions: cbs = pyrax.connect_to_cloud_blockstorage(region=region) for volume in cbs.list(): if re.search(args.match_re, volume.name): prompt_and_delete(volume, 'Delete matching %s? 
[y/n]: ' % volume, args.assumeyes) def delete_rax_cdb(args): """Function for deleting Cloud Databases""" print("--- Cleaning Cloud Databases matching '%s'" % args.match_re) for region in pyrax.identity.services.database.regions: cdb = pyrax.connect_to_cloud_databases(region=region) for db in rax_list_iterator(cdb): if re.search(args.match_re, db.name): prompt_and_delete(db, 'Delete matching %s? [y/n]: ' % db, args.assumeyes) def _force_delete_rax_scaling_group(manager): def wrapped(uri): manager.api.method_delete('%s?force=true' % uri) return wrapped def delete_rax_scaling_group(args): """Function for deleting Autoscale Groups""" print("--- Cleaning Autoscale Groups matching '%s'" % args.match_re) for region in pyrax.identity.services.autoscale.regions: asg = pyrax.connect_to_autoscale(region=region) for group in rax_list_iterator(asg): if re.search(args.match_re, group.name): group.manager._delete = \ _force_delete_rax_scaling_group(group.manager) prompt_and_delete(group, 'Delete matching %s? [y/n]: ' % group, args.assumeyes) def main(): if not HAS_PYRAX: raise SystemExit('The pyrax python module is required for this script') args = parse_args() authenticate() funcs = [f for n, f in globals().items() if n.startswith('delete_rax')] for func in sorted(funcs, key=lambda f: f.__name__): try: func(args) except Exception as e: print("---- %s failed (%s)" % (func.__name__, e.message)) if __name__ == '__main__': try: main() except KeyboardInterrupt: print('\nExiting...')
gpl-3.0
ritchyteam/odoo
openerp/report/render/odt2odt/__init__.py
381
1085
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from odt2odt import parseNode # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
rsalmaso/python-social-auth
social/tests/actions/test_associate.py
80
2932
import json

from social.exceptions import AuthAlreadyAssociated

from social.tests.models import User
from social.tests.actions.actions import BaseActionTest


class AssociateActionTest(BaseActionTest):
    """Associating a social (github) account with an already logged-in user."""
    expected_username = 'foobar'

    def setUp(self):
        super(AssociateActionTest, self).setUp()
        self.user = User(username='foobar', email='foo@bar.com')
        self.backend.strategy.session_set('username', self.user.username)

    def test_associate(self):
        self.do_login()
        # BUG FIX: was ``self.assertTrue(len(self.user.social), 1)``.
        # assertTrue takes a single value plus an optional failure *message*,
        # so the original only checked that the list was non-empty and never
        # verified the count. Use assertEqual, matching the sibling test below.
        self.assertEqual(len(self.user.social), 1)
        self.assertEqual(self.user.social[0].provider, 'github')

    def test_associate_with_partial_pipeline(self):
        self.do_login_with_partial_pipeline()
        self.assertEqual(len(self.user.social), 1)
        self.assertEqual(self.user.social[0].provider, 'github')


class MultipleAccountsTest(AssociateActionTest):
    """Logging in twice with two different provider accounts should attach
    two social associations to the same user."""
    # Second github identity (id=2, login=foobar2) returned by the mocked
    # user-data endpoint on the second login.
    alternative_user_data_body = json.dumps({
        'login': 'foobar2',
        'id': 2,
        'avatar_url': 'https://github.com/images/error/foobar2_happy.gif',
        'gravatar_id': 'somehexcode',
        'url': 'https://api.github.com/users/foobar2',
        'name': 'monalisa foobar2',
        'company': 'GitHub',
        'blog': 'https://github.com/blog',
        'location': 'San Francisco',
        'email': 'foo@bar.com',
        'hireable': False,
        'bio': 'There once was...',
        'public_repos': 2,
        'public_gists': 1,
        'followers': 20,
        'following': 0,
        'html_url': 'https://github.com/foobar2',
        'created_at': '2008-01-14T04:33:35Z',
        'type': 'User',
        'total_private_repos': 100,
        'owned_private_repos': 100,
        'private_gists': 81,
        'disk_usage': 10000,
        'collaborators': 8,
        'plan': {
            'name': 'Medium',
            'space': 400,
            'collaborators': 10,
            'private_repos': 20
        }
    })

    def test_multiple_social_accounts(self):
        self.do_login()
        self.do_login(user_data_body=self.alternative_user_data_body)
        self.assertEqual(len(self.user.social), 2)
        self.assertEqual(self.user.social[0].provider, 'github')
        self.assertEqual(self.user.social[1].provider, 'github')


class AlreadyAssociatedErrorTest(BaseActionTest):
    """Re-using a social account that is already linked to another user must
    raise AuthAlreadyAssociated."""

    def setUp(self):
        super(AlreadyAssociatedErrorTest, self).setUp()
        self.user1 = User(username='foobar', email='foo@bar.com')
        self.user = None

    def tearDown(self):
        super(AlreadyAssociatedErrorTest, self).tearDown()
        self.user1 = None
        self.user = None

    def test_already_associated_error(self):
        # First login associates the github account with user1 ...
        self.user = self.user1
        self.do_login()
        # ... so a second, different user trying the same account must fail.
        self.user = User(username='foobar2', email='foo2@bar2.com')
        with self.assertRaisesRegexp(AuthAlreadyAssociated,
                                     'This github account is already in use.'):
            self.do_login()
bsd-3-clause
bema-ligo/pycbc
pycbc/filter/zpk.py
2
3957
# Copyright (C) 2014 Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
import numpy as np

from pycbc import future
from pycbc.types import TimeSeries


def filter_zpk(timeseries, z, p, k):
    """Return a new timeseries that was filtered with a zero-pole-gain filter.

    The transfer function in the s-domain looks like:

    .. math::

        H(s) = k \\frac{(s - z_1)(s - z_2) \\cdots (s - z_n)}
                       {(s - p_1)(s - p_2) \\cdots (s - p_m)}, \\quad m \\geq n

    The zeroes and poles entered in Hz are converted to angular frequency,
    along the imaginary axis in the s-domain s=i*omega. Then the zeroes and
    poles are bilinearly transformed via:

    .. math:: z(s) = \\frac{(1 + s*T/2)}{(1 - s*T/2)}

    Where z is the z-domain value, s is the s-domain value, and T is the
    sampling period. After the poles and zeroes have been bilinearly
    transformed, the second-order sections are found and used to filter the
    data with scipy.

    Parameters
    ----------
    timeseries: TimeSeries
        The TimeSeries instance to be filtered.
    z: array
        Array of zeros to include in zero-pole-gain filter design.
        In units of Hz.
    p: array
        Array of poles to include in zero-pole-gain filter design.
        In units of Hz.
    k: float
        Gain to include in zero-pole-gain filter design. This gain is a
        constant multiplied to the transfer function.

    Returns
    -------
    Time Series: TimeSeries
        A new TimeSeries that has been filtered.

    Examples
    --------
    To apply a 5 zeroes at 100Hz, 5 poles at 1Hz, and a gain of 1e-10 filter
    to a TimeSeries instance, do:

    >>> filtered_data = zpk_filter(timeseries, [100]*5, [1]*5, 1e-10)
    """
    # sanity check type
    if not isinstance(timeseries, TimeSeries):
        raise TypeError("Can only filter TimeSeries instances.")

    # sanity check causal filter: more zeroes than poles is not realizable
    # (typo fix: message previously read "casual")
    degree = len(p) - len(z)
    if degree < 0:
        raise TypeError("May not have more zeroes than poles. \
Filter is not causal.")

    # cast zeroes and poles as float arrays and gain as a float
    # BUG FIX: the explicit float dtype is required because the in-place
    # scaling below (z *= -2 * np.pi) raises a casting error on integer
    # input arrays -- e.g. the docstring example passes [100]*5.
    z = np.array(z, dtype=float)
    p = np.array(p, dtype=float)
    k = float(k)

    # put zeroes and poles in the s-domain
    # convert from frequency to angular frequency
    z *= -2 * np.pi
    p *= -2 * np.pi

    # get denominator of bilinear transform
    fs = 2.0 * timeseries.sample_rate

    # zeroes in the z-domain
    z_zd = (1 + z/fs) / (1 - z/fs)

    # any zeros that were at infinity are moved to the Nyquist frequency
    z_zd = z_zd[np.isfinite(z_zd)]
    z_zd = np.append(z_zd, -np.ones(degree))

    # poles in the z-domain
    p_zd = (1 + p/fs) / (1 - p/fs)

    # gain change in z-domain
    k_zd = k * np.prod(fs - z) / np.prod(fs - p)

    # get second-order sections
    sos = future.zpk2sos(z_zd, p_zd, k_zd)

    # filter
    filtered_data = future.sosfilt(sos, timeseries.numpy())

    return TimeSeries(filtered_data, delta_t = timeseries.delta_t,
                      dtype=timeseries.dtype,
                      epoch=timeseries._epoch)
gpl-3.0
RuudBurger/CouchPotatoServer
libs/html5lib/treebuilders/_base.py
715
13699
"""Abstract base classes shared by the concrete html5lib tree builders."""
from __future__ import absolute_import, division, unicode_literals
from six import text_type

from ..constants import scopingElements, tableInsertModeElements, namespaces

# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None

# Maps each elementInScope() variant name to a pair of
# (set of boundary element nameTuples, invert-match flag).
listElementsMap = {
    None: (frozenset(scopingElements), False),
    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
                                              (namespaces["html"], "ul")])), False),
    "table": (frozenset([(namespaces["html"], "html"),
                         (namespaces["html"], "table")]), False),
    "select": (frozenset([(namespaces["html"], "optgroup"),
                          (namespaces["html"], "option")]), True)
}


class Node(object):
    def __init__(self, name):
        """Node representing an item in the tree.
        name - The tag name associated with the node
        parent - The parent of the current node (or None for the document node)
        value - The value of the current node (applies to text nodes and
        comments
        attributes - a dict holding name, value pairs for attributes of the
        node
        childNodes - a list of child nodes of the current node. This must
        include all elements but not necessarily other node types
        _flags - A list of miscellaneous flags that can be set on the node
        """
        self.name = name
        self.parent = None
        self.value = None
        self.attributes = {}
        self.childNodes = []
        self._flags = []

    def __str__(self):
        # Render as a start-tag-like string including attributes.
        attributesStr = " ".join(["%s=\"%s\"" % (name, value)
                                  for name, value in
                                  self.attributes.items()])
        if attributesStr:
            return "<%s %s>" % (self.name, attributesStr)
        else:
            return "<%s>" % (self.name)

    def __repr__(self):
        return "<%s>" % (self.name)

    def appendChild(self, node):
        """Insert node as a child of the current node
        """
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert data as text in the current node, positioned before the
        start of node insertBefore or to the end of the node's text.
        """
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert node as a child of the current node, before refNode in the
        list of child nodes. Raises ValueError if refNode is not a child of
        the current node"""
        raise NotImplementedError

    def removeChild(self, node):
        """Remove node from the children of the current node
        """
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move all the children of the current node to newParent.
        This is needed so that trees that don't store text as nodes move the
        text in the correct way
        """
        # XXX - should this method be made more general?
        for child in self.childNodes:
            newParent.appendChild(child)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy of the current node i.e. a node with the same
        name and attributes but with no parent or child nodes
        """
        raise NotImplementedError

    def hasContent(self):
        """Return true if the node has children or text, false otherwise
        """
        raise NotImplementedError


class ActiveFormattingElements(list):
    # List of active formatting elements; ``Marker`` entries delimit scopes.
    def append(self, node):
        # Before appending, remove the earliest matching entry if three
        # equal entries already exist since the last Marker (the HTML5
        # "Noah's Ark" clause, capping duplicates at three).
        equalCount = 0
        if node != Marker:
            for element in self[::-1]:
                if element == Marker:
                    break
                if self.nodesEqual(element, node):
                    equalCount += 1
                if equalCount == 3:
                    self.remove(element)
                    break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        # Two nodes are "equal" here when both their qualified name and
        # their attribute dicts match.
        if not node1.nameTuple == node2.nameTuple:
            return False

        if not node1.attributes == node2.attributes:
            return False

        return True


class TreeBuilder(object):
    """Base treebuilder implementation
    documentClass - the class to use for the bottommost node of a document
    elementClass - the class to use for HTML Elements
    commentClass - the class to use for comments
    doctypeClass - the class to use for doctypes
    """

    # Document class
    documentClass = None

    # The class to use for creating a node
    elementClass = None

    # The class to use for creating comments
    commentClass = None

    # The class to use for creating doctypes
    doctypeClass = None

    # Fragment class
    fragmentClass = None

    def __init__(self, namespaceHTMLElements):
        # namespaceHTMLElements: when true, created HTML elements get the
        # XHTML namespace as their default namespace.
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        self.reset()

    def reset(self):
        # Clear all parser state so the builder can be reused.
        self.openElements = []
        self.activeFormattingElements = ActiveFormattingElements()

        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None

        self.insertFromTable = False

        self.document = self.documentClass()

    def elementInScope(self, target, variant=None):
        """Return True if target is in scope on the stack of open elements,
        using the boundary-element set selected by ``variant``."""

        # If we pass a node in we match that. if we pass a string
        # match any node with that name
        exactNode = hasattr(target, "nameTuple")

        listElements, invert = listElementsMap[variant]

        for node in reversed(self.openElements):
            if (node.name == target and not exactNode or
                    node == target and exactNode):
                return True
            elif (invert ^ (node.nameTuple in listElements)):
                return False

        assert False  # We should never reach this point

    def reconstructActiveFormattingElements(self):
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.

        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return

        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return

        # Step 6
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]

        while True:
            # Step 7
            i += 1

            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode()  # Mainly to get a new copy of the attributes

            # Step 9
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})

            # Step 10
            self.activeFormattingElements[i] = element

            # Step 11
            if element == self.activeFormattingElements[-1]:
                break

    def clearActiveFormattingElements(self):
        # Pop entries up to and including the most recent Marker.
        entry = self.activeFormattingElements.pop()
        while self.activeFormattingElements and entry != Marker:
            entry = self.activeFormattingElements.pop()

    def elementInActiveFormattingElements(self, name):
        """Check if an element exists between the end of the active
        formatting elements and the last marker. If it does, return it, else
        return false"""

        for item in self.activeFormattingElements[::-1]:
            # Check for Marker first because if it's a Marker it doesn't have a
            # name attribute.
            if item == Marker:
                break
            elif item.name == name:
                return item
        return False

    def insertRoot(self, token):
        # Create the root element from token and attach it to the document.
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)

    def insertDoctype(self, token):
        # Build a doctype node from the token and append it to the document.
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = self.doctypeClass(name, publicId, systemId)
        self.document.appendChild(doctype)

    def insertComment(self, token, parent=None):
        # Default parent is the current (innermost) open element.
        if parent is None:
            parent = self.openElements[-1]
        parent.appendChild(self.commentClass(token["data"]))

    def createElement(self, token):
        """Create an element but don't insert it anywhere"""
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        return element

    def _getInsertFromTable(self):
        return self._insertFromTable

    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal

    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)

    def insertElementNormal(self, token):
        name = token["name"]
        assert isinstance(name, text_type), "Element %s not unicode" % name
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        self.openElements[-1].appendChild(element)
        self.openElements.append(element)
        return element

    def insertElementTable(self, token):
        """Create an element and insert it into the tree"""
        element = self.createElement(token)
        if self.openElements[-1].name not in tableInsertModeElements:
            return self.insertElementNormal(token)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            if insertBefore is None:
                parent.appendChild(element)
            else:
                parent.insertBefore(element, insertBefore)
            self.openElements.append(element)
        return element

    def insertText(self, data, parent=None):
        """Insert text data."""
        if parent is None:
            parent = self.openElements[-1]

        if (not self.insertFromTable or (self.insertFromTable and
                                         self.openElements[-1].name
                                         not in tableInsertModeElements)):
            parent.insertText(data)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            parent.insertText(data, insertBefore)

    def getTableMisnestedNodePosition(self):
        """Get the foster parent element, and sibling to insert before
        (or None) when inserting a misnested table node"""
        # The foster parent element is the one which comes before the most
        # recently opened table element
        # XXX - this is really inelegant
        lastTable = None
        fosterParent = None
        insertBefore = None
        for elm in self.openElements[::-1]:
            if elm.name == "table":
                lastTable = elm
                break
        if lastTable:
            # XXX - we should really check that this parent is actually a
            # node here
            if lastTable.parent:
                fosterParent = lastTable.parent
                insertBefore = lastTable
            else:
                fosterParent = self.openElements[
                    self.openElements.index(lastTable) - 1]
        else:
            fosterParent = self.openElements[0]
        return fosterParent, insertBefore

    def generateImpliedEndTags(self, exclude=None):
        # Recursively pop elements whose end tags may be implied, stopping
        # at ``exclude`` so that element's own end tag is not implied.
        name = self.openElements[-1].name
        # XXX td, th and tr are not actually needed
        if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
                and name != exclude):
            self.openElements.pop()
            # XXX This is not entirely what the specification says. We should
            # investigate it more closely.
            self.generateImpliedEndTags(exclude)

    def getDocument(self):
        "Return the final tree"
        return self.document

    def getFragment(self):
        "Return the final fragment"
        # assert self.innerHTML
        fragment = self.fragmentClass()
        self.openElements[0].reparentChildren(fragment)
        return fragment

    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests
        node - the node from which to start serializing"""
        raise NotImplementedError
gpl-3.0
NeCTAR-RC/nova
nova/api/openstack/compute/legacy_v2/limits.py
12
15266
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. This module handles rate liming at a per-user level, so it should not be used to prevent intentional Denial of Service attacks, as we can assume a DOS can easily come through multiple user accounts. DOS protection should be done at a different layer. Instead this module should be used to protect against unintentional user actions. With that in mind the limits set here should be high enough as to not rate-limit any intentional actions. To find good rate-limit values, check how long requests are taking (see logs) in your environment to assess your capabilities and multiply out to get figures. NOTE: As the rate-limiting here is done in memory, this only works per process (each process will have its own rate limiting counter). """ import collections import copy import math import re import time from oslo_serialization import jsonutils from oslo_utils import importutils from six.moves import http_client as httplib import webob.dec import webob.exc from nova.api.openstack.compute.views import limits as limits_views from nova.api.openstack import wsgi from nova.i18n import _ from nova import quota from nova import utils from nova import wsgi as base_wsgi QUOTAS = quota.QUOTAS LIMITS_PREFIX = "limits." 
class LimitsController(object):
    """Controller for accessing limits in the OpenStack API."""

    def index(self, req):
        """Return all global and rate limit information."""
        context = req.environ['nova.context']
        project_id = req.params.get('tenant_id', context.project_id)
        quotas = QUOTAS.get_project_quotas(context, project_id,
                                           usages=False)
        abs_limits = {k: v['limit'] for k, v in quotas.items()}
        rate_limits = req.environ.get("nova.limits", [])

        builder = self._get_view_builder(req)
        return builder.build(rate_limits, abs_limits)

    def create(self, req, body):
        """Create a new limit."""
        raise webob.exc.HTTPNotImplemented()

    def delete(self, req, id):
        """Delete the limit."""
        raise webob.exc.HTTPNotImplemented()

    def show(self, req, id):
        """Show limit information."""
        raise webob.exc.HTTPNotImplemented()

    def update(self, req, id, body):
        """Update existing limit."""
        raise webob.exc.HTTPNotImplemented()

    def _get_view_builder(self, req):
        return limits_views.ViewBuilder()


def create_resource():
    return wsgi.Resource(LimitsController())


class Limit(object):
    """Stores information about a limit for HTTP requests."""

    # Reverse mapping: seconds-per-unit -> unit name ("MINUTE", etc.)
    UNITS = {v: k for k, v in utils.TIME_UNITS.items()}

    def __init__(self, verb, uri, regex, value, unit):
        """Initialize a new `Limit`.

        @param verb: HTTP verb (POST, PUT, etc.)
        @param uri: Human-readable URI
        @param regex: Regular expression format for this limit
        @param value: Integer number of requests which can be made
        @param unit: Unit of measure for the value parameter
        """
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = int(value)
        self.unit = unit
        self.unit_string = self.display_unit().lower()
        self.remaining = int(value)

        if value <= 0:
            raise ValueError("Limit value must be > 0")

        self.last_request = None
        self.next_request = None

        # Leaky-bucket style accounting: each request adds request_value to
        # the water level, which drains at one unit per second of wall time;
        # exceeding capacity means the request must be delayed.
        self.water_level = 0
        self.capacity = self.unit
        self.request_value = float(self.capacity) / float(self.value)
        msg = (_("Only %(value)s %(verb)s request(s) can be "
                 "made to %(uri)s every %(unit_string)s.") %
               {'value': self.value, 'verb': self.verb, 'uri': self.uri,
                'unit_string': self.unit_string})
        self.error_message = msg

    def __call__(self, verb, url):
        """Represents a call to this limit from a relevant request.

        Returns the delay in seconds if the request must wait, else None.

        @param verb: string http verb (POST, GET, etc.)
        @param url: string URL
        """
        if self.verb != verb or not re.match(self.regex, url):
            return

        now = self._get_time()
        if self.last_request is None:
            self.last_request = now

        # Drain the bucket by the elapsed time, then add this request.
        leak_value = now - self.last_request

        self.water_level -= leak_value
        self.water_level = max(self.water_level, 0)
        self.water_level += self.request_value

        difference = self.water_level - self.capacity

        self.last_request = now

        if difference > 0:
            # Over capacity: roll back this request and report the delay.
            self.water_level -= self.request_value
            self.next_request = now + difference
            return difference

        cap = self.capacity
        water = self.water_level
        val = self.value

        self.remaining = math.floor(((cap - water) / cap) * val)
        self.next_request = now

    def _get_time(self):
        """Retrieve the current time. Broken out for testability."""
        return time.time()

    def display_unit(self):
        """Display the string name of the unit."""
        return self.UNITS.get(self.unit, "UNKNOWN")

    def display(self):
        """Return a useful representation of this class."""
        return {
            "verb": self.verb,
            "URI": self.uri,
            "regex": self.regex,
            "value": self.value,
            "remaining": int(self.remaining),
            "unit": self.display_unit(),
            "resetTime": int(self.next_request or self._get_time()),
        }

# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)

DEFAULT_LIMITS = [
    Limit("POST", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("POST", "*/servers", "^/servers", 120, utils.TIME_UNITS['MINUTE']),
    Limit("PUT", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("GET", "*changes-since*", ".*changes-since.*", 120,
          utils.TIME_UNITS['MINUTE']),
    Limit("DELETE", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
    Limit("GET", "*/os-fping", "^/os-fping", 12, utils.TIME_UNITS['MINUTE']),
]


class RateLimitingMiddleware(base_wsgi.Middleware):
    """Rate-limits requests passing through this middleware. All limit
    information is stored in memory for this implementation.
    """

    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """Initialize new `RateLimitingMiddleware`.

        It wraps the given WSGI application and sets up the given limits.

        @param application: WSGI application to wrap
        @param limits: String describing limits
        @param limiter: String identifying class for representing limits

        Other parameters are passed to the constructor for the limiter.
        """
        base_wsgi.Middleware.__init__(self, application)

        # Select the limiter class
        if limiter is None:
            limiter = Limiter
        else:
            limiter = importutils.import_class(limiter)

        # Parse the limits, if any are provided
        if limits is not None:
            limits = limiter.parse_limits(limits)

        self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Represents a single call through this middleware.

        We should record the request if we have a limit relevant to it.
        If no limit is relevant to the request, ignore it. If the request
        should be rate limited, return a fault telling the user they are
        over the limit and need to retry later.
        """
        verb = req.method
        url = req.url
        context = req.environ.get("nova.context")

        if context:
            username = context.user_id
        else:
            username = None

        delay, error = self._limiter.check_for_delay(verb, url, username)

        if delay:
            msg = _("This request was rate-limited.")
            retry = time.time() + delay
            return wsgi.RateLimitFault(msg, error, retry)

        req.environ["nova.limits"] = self._limiter.get_limits(username)

        return self.application


class Limiter(object):
    """Rate-limit checking class which handles limits in memory."""

    def __init__(self, limits, **kwargs):
        """Initialize the new `Limiter`.

        @param limits: List of `Limit` objects
        """
        self.limits = copy.deepcopy(limits)
        self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))

        # Pick up any per-user limit information
        for key, value in kwargs.items():
            if key.startswith(LIMITS_PREFIX):
                username = key[len(LIMITS_PREFIX):]
                self.levels[username] = self.parse_limits(value)

    def get_limits(self, username=None):
        """Return the limits for a given user."""
        return [limit.display() for limit in self.levels[username]]

    def check_for_delay(self, verb, url, username=None):
        """Check the given verb/url/user triplet for limit.

        @return: Tuple of delay (in seconds) and error message (or None, None)
        """
        delays = []

        for limit in self.levels[username]:
            delay = limit(verb, url)
            if delay:
                delays.append((delay, limit.error_message))

        if delays:
            delays.sort()
            return delays[0]

        return None, None

    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method.  It is
    # used to develop a list of limits to feed to the constructor.  We
    # put this in the class so that subclasses can override the
    # default limit parsing.
    @staticmethod
    def parse_limits(limits):
        """Convert a string into a list of Limit instances.  This
        implementation expects a semicolon-separated sequence of
        parenthesized groups, where each group contains a
        comma-separated sequence consisting of HTTP method,
        user-readable URI, a URI reg-exp, an integer number of
        requests which can be made, and a unit of measure.  Valid
        values for the latter are "SECOND", "MINUTE", "HOUR", and
        "DAY".

        @return: List of Limit instances.
        """

        # Handle empty limit strings
        limits = limits.strip()
        if not limits:
            return []

        # Split up the limits by semicolon
        result = []
        for group in limits.split(';'):
            group = group.strip()
            if group[:1] != '(' or group[-1:] != ')':
                raise ValueError("Limit rules must be surrounded by "
                                 "parentheses")
            group = group[1:-1]

            # Extract the Limit arguments
            args = [a.strip() for a in group.split(',')]
            if len(args) != 5:
                raise ValueError("Limit rules must contain the following "
                                 "arguments: verb, uri, regex, value, unit")

            # Pull out the arguments
            verb, uri, regex, value, unit = args

            # Upper-case the verb
            verb = verb.upper()

            # Convert value--raises ValueError if it's not integer
            value = int(value)

            # Convert unit
            unit = unit.upper()
            if unit not in utils.TIME_UNITS:
                raise ValueError("Invalid units specified")
            unit = utils.TIME_UNITS[unit]

            # Build a limit
            result.append(Limit(verb, uri, regex, value, unit))

        return result


class WsgiLimiter(object):
    """Rate-limit checking from a WSGI application.

    Uses an in-memory `Limiter`.

    To use, POST ``/<username>`` with JSON data such as::

        {
            "verb" : GET,
            "path" : "/servers"
        }

    and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
    header containing the number of seconds to wait before the action would
    succeed.
    """

    def __init__(self, limits=None):
        """Initialize the new `WsgiLimiter`.

        @param limits: List of `Limit` objects
        """
        self._limiter = Limiter(limits or DEFAULT_LIMITS)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, request):
        """Handles a call to this application.

        Returns 204 if the request is acceptable to the limiter, else a 403
        is returned with a relevant header indicating when the request
        *will* succeed.
        """
        if request.method != "POST":
            raise webob.exc.HTTPMethodNotAllowed()

        try:
            info = dict(jsonutils.loads(request.body))
        except ValueError:
            raise webob.exc.HTTPBadRequest()

        username = request.path_info_pop()
        verb = info.get("verb")
        path = info.get("path")

        delay, error = self._limiter.check_for_delay(verb, path, username)

        if delay:
            headers = {"X-Wait-Seconds": "%.2f" % delay}
            return webob.exc.HTTPForbidden(headers=headers,
                                           explanation=error)
        else:
            return webob.exc.HTTPNoContent()


class WsgiLimiterProxy(object):
    """Rate-limit requests based on answers from a remote source."""

    def __init__(self, limiter_address):
        """Initialize the new `WsgiLimiterProxy`.

        @param limiter_address: IP/port combination of where to request limit
        """
        self.limiter_address = limiter_address

    def check_for_delay(self, verb, path, username=None):
        body = jsonutils.dumps({"verb": verb, "path": path})
        headers = {"Content-Type": "application/json"}

        conn = httplib.HTTPConnection(self.limiter_address)

        if username:
            conn.request("POST", "/%s" % (username), body, headers)
        else:
            conn.request("POST", "/", body, headers)

        resp = conn.getresponse()

        # BUG FIX: was "200 >= resp.status < 300", which (as a chained
        # comparison) means status <= 200 -- accepting 1xx responses as
        # success and rejecting legitimate 201-299 responses. The intended
        # check is the standard 2xx success range.
        if 200 <= resp.status < 300:
            return None, None

        return resp.getheader("X-Wait-Seconds"), resp.read() or None

    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method.  It is
    # used to develop a list of limits to feed to the constructor.
    # This implementation returns an empty list, since all limit
    # decisions are made by a remote server.
    @staticmethod
    def parse_limits(limits):
        """Ignore a limits string--simply doesn't apply for the limit
        proxy.

        @return: Empty list.
        """
        return []
apache-2.0
isralopez/geonode
geonode/proxy/tests.py
5
2790
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

"""
Tests for the proxy view's host validation: the PROXY_ALLOWED_HOSTS
setting combined with DEBUG decides whether a proxied request to an
external host is permitted.
"""
from django.contrib.auth import get_user_model

from django.test import TestCase, Client
from django.test.utils import override_settings

TEST_DOMAIN = '.github.com'
TEST_URL = 'https://help%s/' % TEST_DOMAIN


class ProxyTest(TestCase):
    def setUp(self):
        self.admin, created = get_user_model().objects.get_or_create(
            username='admin', password='admin', is_superuser=True)

        # FIXME(Ariel): These tests do not work when the computer is offline.
        self.url = TEST_URL

    @override_settings(DEBUG=True, PROXY_ALLOWED_HOSTS=())
    def test_validate_host_disabled_in_debug(self):
        """If PROXY_ALLOWED_HOSTS is empty and DEBUG is True, all hosts
        pass the proxy."""
        c = Client()
        response = c.get('/proxy?url=%s' % self.url, follow=True)
        self.assertEqual(response.status_code, 200)

    @override_settings(DEBUG=False, PROXY_ALLOWED_HOSTS=())
    def test_validate_host_disabled_not_in_debug(self):
        """If PROXY_ALLOWED_HOSTS is empty and DEBUG is False requests
        should return 403."""
        c = Client()
        response = c.get('/proxy?url=%s' % self.url, follow=True)
        self.assertEqual(response.status_code, 403)

    # BUG FIX: a second, stale @override_settings(...,
    # PROXY_ALLOWED_HOSTS=('.google.com',)) decorator was stacked here; the
    # innermost decorator wins for the overlapping keys, so it was dead code
    # and misleading -- removed.
    @override_settings(DEBUG=False, PROXY_ALLOWED_HOSTS=(TEST_DOMAIN,))
    def test_proxy_allowed_host(self):
        """If the request host is listed in PROXY_ALLOWED_HOSTS, the proxy
        permits the request even with DEBUG off (200)."""
        c = Client()
        response = c.get('/proxy?url=%s' % self.url, follow=True)
        self.assertEqual(response.status_code, 200)
gpl-3.0
anomam/pvlib-python
pvlib/tests/test_bifacial.py
2
5958
import pandas as pd import numpy as np from datetime import datetime from pvlib.bifacial import pvfactors_timeseries, PVFactorsReportBuilder from conftest import requires_pvfactors import pytest @requires_pvfactors @pytest.mark.parametrize('run_parallel_calculations', [False, True]) def test_pvfactors_timeseries(run_parallel_calculations): """ Test that pvfactors is functional, using the TLDR section inputs of the package github repo README.md file: https://github.com/SunPower/pvfactors/blob/master/README.md#tldr---quick-start""" # Create some inputs timestamps = pd.DatetimeIndex([datetime(2017, 8, 31, 11), datetime(2017, 8, 31, 12)] ).set_names('timestamps') solar_zenith = [20., 10.] solar_azimuth = [110., 140.] surface_tilt = [10., 0.] surface_azimuth = [90., 90.] axis_azimuth = 0. dni = [1000., 300.] dhi = [50., 500.] gcr = 0.4 pvrow_height = 1.75 pvrow_width = 2.44 albedo = 0.2 n_pvrows = 3 index_observed_pvrow = 1 rho_front_pvrow = 0.03 rho_back_pvrow = 0.05 horizon_band_angle = 15. 
# Expected values expected_ipoa_front = pd.Series([1034.95474708997, 795.4423259036623], index=timestamps, name=('total_inc_front')) expected_ipoa_back = pd.Series([91.88707460262768, 78.05831585685215], index=timestamps, name=('total_inc_back')) # Run calculation ipoa_front, ipoa_back = pvfactors_timeseries( solar_azimuth, solar_zenith, surface_azimuth, surface_tilt, axis_azimuth, timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo, n_pvrows=n_pvrows, index_observed_pvrow=index_observed_pvrow, rho_front_pvrow=rho_front_pvrow, rho_back_pvrow=rho_back_pvrow, horizon_band_angle=horizon_band_angle, run_parallel_calculations=run_parallel_calculations, n_workers_for_parallel_calcs=-1) pd.testing.assert_series_equal(ipoa_front, expected_ipoa_front) pd.testing.assert_series_equal(ipoa_back, expected_ipoa_back) @requires_pvfactors @pytest.mark.parametrize('run_parallel_calculations', [False, True]) def test_pvfactors_timeseries_pandas_inputs(run_parallel_calculations): """ Test that pvfactors is functional, using the TLDR section inputs of the package github repo README.md file, but converted to pandas Series: https://github.com/SunPower/pvfactors/blob/master/README.md#tldr---quick-start""" # Create some inputs timestamps = pd.DatetimeIndex([datetime(2017, 8, 31, 11), datetime(2017, 8, 31, 12)] ).set_names('timestamps') solar_zenith = pd.Series([20., 10.]) solar_azimuth = pd.Series([110., 140.]) surface_tilt = pd.Series([10., 0.]) surface_azimuth = pd.Series([90., 90.]) axis_azimuth = 0. dni = pd.Series([1000., 300.]) dhi = pd.Series([50., 500.]) gcr = 0.4 pvrow_height = 1.75 pvrow_width = 2.44 albedo = 0.2 n_pvrows = 3 index_observed_pvrow = 1 rho_front_pvrow = 0.03 rho_back_pvrow = 0.05 horizon_band_angle = 15. 
# Expected values expected_ipoa_front = pd.Series([1034.95474708997, 795.4423259036623], index=timestamps, name=('total_inc_front')) expected_ipoa_back = pd.Series([91.88707460262768, 78.05831585685215], index=timestamps, name=('total_inc_back')) # Run calculation ipoa_front, ipoa_back = pvfactors_timeseries( solar_azimuth, solar_zenith, surface_azimuth, surface_tilt, axis_azimuth, timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo, n_pvrows=n_pvrows, index_observed_pvrow=index_observed_pvrow, rho_front_pvrow=rho_front_pvrow, rho_back_pvrow=rho_back_pvrow, horizon_band_angle=horizon_band_angle, run_parallel_calculations=run_parallel_calculations, n_workers_for_parallel_calcs=-1) pd.testing.assert_series_equal(ipoa_front, expected_ipoa_front) pd.testing.assert_series_equal(ipoa_back, expected_ipoa_back) def test_build_1(): """Test that build correctly instantiates a dictionary, when passed a Nones for the report and pvarray arguments. """ report = None pvarray = None expected = {'total_inc_back': [np.nan], 'total_inc_front': [np.nan]} assert expected == PVFactorsReportBuilder.build(report, pvarray) def test_merge_1(): """Test that merge correctly returns the first element of the reports argument when there is only dictionary in reports. """ test_dict = {'total_inc_back': [1, 2, 3], 'total_inc_front': [4, 5, 6]} reports = [test_dict] assert test_dict == PVFactorsReportBuilder.merge(reports) def test_merge_2(): """Test that merge correctly combines two dictionary reports. """ test_dict_1 = {'total_inc_back': [1, 2], 'total_inc_front': [4, 5]} test_dict_2 = {'total_inc_back': [3], 'total_inc_front': [6]} expected = {'total_inc_back': [1, 2, 3], 'total_inc_front': [4, 5, 6]} reports = [test_dict_1, test_dict_2] assert expected == PVFactorsReportBuilder.merge(reports) def test_merge_3(): """Test that merge correctly combines three dictionary reports. 
""" test_dict_1 = {'total_inc_back': [1], 'total_inc_front': [4]} test_dict_2 = {'total_inc_back': [2], 'total_inc_front': [5]} test_dict_3 = {'total_inc_back': [3], 'total_inc_front': [6]} expected = {'total_inc_back': [1, 2, 3], 'total_inc_front': [4, 5, 6]} reports = [test_dict_1, test_dict_2, test_dict_3] assert expected == PVFactorsReportBuilder.merge(reports)
bsd-3-clause
dlazz/ansible
lib/ansible/modules/cloud/google/gcp_compute_backend_bucket_facts.py
11
5704
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_compute_backend_bucket_facts description: - Gather facts for GCP BackendBucket short_description: Gather facts for GCP BackendBucket version_added: 2.7 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: filters: description: - A list of filter value pairs. Available filters are listed here U(U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).) - Each additional filter in the list will act be added as an AND condition (filter1 and filter2) . 
extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: a backend bucket facts gcp_compute_backend_bucket_facts: filters: - name = test_object project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" ''' RETURN = ''' items: description: List of items returned: always type: complex contains: bucketName: description: - Cloud Storage bucket name. returned: success type: str creationTimestamp: description: - Creation timestamp in RFC3339 text format. returned: success type: str description: description: - An optional textual description of the resource; provided by the client when the resource is created. returned: success type: str enableCdn: description: - If true, enable Cloud CDN for this BackendBucket. returned: success type: bool id: description: - Unique identifier for the resource. returned: success type: int name: description: - Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest import json ################################################################################ # Main ################################################################################ def main(): module = GcpModule( argument_spec=dict( filters=dict(type='list', elements='str') ) ) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] items = fetch_list(module, collection(module), query_options(module.params['filters'])) if items.get('items'): items = items.get('items') else: items = [] return_value = { 'items': items } module.exit_json(**return_value) def collection(module): return "https://www.googleapis.com/compute/v1/projects/{project}/global/backendBuckets".format(**module.params) def fetch_list(module, link, query): auth = GcpSession(module, 'compute') response = auth.get(link, params={'filter': query}) return return_if_object(module, response) def query_options(filters): if not filters: return '' if len(filters) == 1: return filters[0] else: queries = [] for f in filters: # For multiple queries, all queries should have () if f[0] != '(' and f[-1] != ')': queries.append("(%s)" % ''.join(f)) else: queries.append(f) return ' '.join(queries) def return_if_object(module, response): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. 
if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result if __name__ == "__main__": main()
gpl-3.0
QiJune/Paddle
python/paddle/v2/dataset/tests/mnist_test.py
16
1421
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle.v2.dataset.mnist import unittest class TestMNIST(unittest.TestCase): def check_reader(self, reader): sum = 0 label = 0 for l in reader(): self.assertEqual(l[0].size, 784) if l[1] > label: label = l[1] sum += 1 return sum, label def test_train(self): instances, max_label_value = self.check_reader( paddle.v2.dataset.mnist.train()) self.assertEqual(instances, 60000) self.assertEqual(max_label_value, 9) def test_test(self): instances, max_label_value = self.check_reader( paddle.v2.dataset.mnist.test()) self.assertEqual(instances, 10000) self.assertEqual(max_label_value, 9) if __name__ == '__main__': unittest.main()
apache-2.0
MotorolaMobilityLLC/external-chromium_org
chrome/tools/check_grd_for_unused_strings.py
25
6425
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Without any args, this simply loads the IDs out of a bunch of the Chrome GRD files, and then checks the subset of the code that loads the strings to try and figure out what isn't in use any more. You can give paths to GRD files and source directories to control what is check instead. """ import os import re import sys import xml.sax # Extra messages along the way # 1 - Print ids that are found in sources but not in the found id set # 2 - Files that aren't processes (don't match the source name regex) DEBUG = 0 class GrdIDExtractor(xml.sax.handler.ContentHandler): """Extracts the IDs from messages in GRIT files""" def __init__(self): self.id_set_ = set() def startElement(self, name, attrs): if name == 'message': self.id_set_.add(attrs['name']) def allIDs(self): """Return all the IDs found""" return self.id_set_.copy() def CheckForUnusedGrdIDsInSources(grd_files, src_dirs): """Will collect the message ids out of the given GRD files and then scan the source directories to try and figure out what ids are not currently being used by any source. grd_files: A list of GRD files to collect the ids from. src_dirs: A list of directories to walk looking for source files. """ # Collect all the ids into a large map all_ids = set() file_id_map = {} for y in grd_files: handler = GrdIDExtractor() xml.sax.parse(y, handler) files_ids = handler.allIDs() file_id_map[y] = files_ids all_ids |= files_ids # The regex that will be used to check sources id_regex = re.compile('IDS_[A-Z0-9_]+') # Make sure the regex matches every id found. 
got_err = False for x in all_ids: match = id_regex.search(x) if match is None: print 'ERROR: "%s" did not match our regex' % (x) got_err = True if not match.group(0) is x: print 'ERROR: "%s" did not fully match our regex' % (x) got_err = True if got_err: return 1 # The regex for deciding what is a source file src_regex = re.compile('\.(([chm])|(mm)|(cc)|(cp)|(cpp)|(xib)|(py))$') ids_left = all_ids.copy() # Scanning time. for src_dir in src_dirs: for root, dirs, files in os.walk(src_dir): # Remove svn directories from recursion if '.svn' in dirs: dirs.remove('.svn') for file in files: if src_regex.search(file.lower()): full_path = os.path.join(root, file) src_file_contents = open(full_path).read() for match in sorted(set(id_regex.findall(src_file_contents))): if match in ids_left: ids_left.remove(match) if DEBUG: if not match in all_ids: print '%s had "%s", which was not in the found IDs' % \ (full_path, match) elif DEBUG > 1: full_path = os.path.join(root, file) print 'Skipping %s.' % (full_path) # Anything left? 
if len(ids_left) > 0: print 'The following ids are in GRD files, but *appear* to be unused:' for file_path, file_ids in file_id_map.iteritems(): missing = ids_left.intersection(file_ids) if len(missing) > 0: print ' %s:' % (file_path) print '\n'.join(' %s' % (x) for x in sorted(missing)) return 0 def main(): # script lives in src/chrome/tools chrome_tools_dir = os.path.dirname(os.path.abspath(sys.argv[0])) src_dir = os.path.dirname(os.path.dirname(chrome_tools_dir)) # Collect the args into the right buckets src_dirs = [] grd_files = [] for arg in sys.argv[1:]: if arg.lower().endswith('.grd'): grd_files.append(arg) else: src_dirs.append(arg) # If no GRD files were given, default them: if len(grd_files) == 0: ash_base_dir = os.path.join(src_dir, 'ash') chrome_dir = os.path.join(src_dir, 'chrome') chrome_app_dir = os.path.join(chrome_dir, 'app') chrome_app_res_dir = os.path.join(chrome_app_dir, 'resources') device_base_dir = os.path.join(src_dir, 'device') ui_base_dir = os.path.join(src_dir, 'ui', 'base',) ui_base_strings_dir = os.path.join(ui_base_dir, 'strings') grd_files = [ os.path.join(ash_base_dir, 'ash_strings.grd'), os.path.join(ash_base_dir, 'resources', 'ash_resources.grd'), os.path.join(chrome_app_dir, 'chromium_strings.grd'), os.path.join(chrome_app_dir, 'generated_resources.grd'), os.path.join(chrome_app_dir, 'google_chrome_strings.grd'), os.path.join(chrome_app_res_dir, 'locale_settings.grd'), os.path.join(chrome_app_res_dir, 'locale_settings_chromiumos.grd'), os.path.join(chrome_app_res_dir, 'locale_settings_google_chromeos.grd'), os.path.join(chrome_app_res_dir, 'locale_settings_linux.grd'), os.path.join(chrome_app_res_dir, 'locale_settings_mac.grd'), os.path.join(chrome_app_res_dir, 'locale_settings_win.grd'), os.path.join(chrome_app_dir, 'theme', 'theme_resources.grd'), os.path.join(chrome_dir, 'browser', 'browser_resources.grd'), os.path.join(chrome_dir, 'common', 'common_resources.grd'), os.path.join(chrome_dir, 'renderer', 'resources', 
'renderer_resources.grd'), os.path.join(device_base_dir, 'bluetooth', 'bluetooth_strings.grd'), os.path.join(src_dir, 'ui', 'resources', 'ui_resources.grd'), os.path.join(src_dir, 'ui', 'webui', 'resources', 'webui_resources.grd'), os.path.join(ui_base_strings_dir, 'app_locale_settings.grd'), os.path.join(ui_base_strings_dir, 'ui_strings.grd'), ] # If no source directories were given, default them: if len(src_dirs) == 0: src_dirs = [ os.path.join(src_dir, 'app'), os.path.join(src_dir, 'ash'), os.path.join(src_dir, 'chrome'), os.path.join(src_dir, 'chrome_frame'), os.path.join(src_dir, 'components'), os.path.join(src_dir, 'content'), os.path.join(src_dir, 'device'), os.path.join(src_dir, 'ui'), os.path.join(src_dir, 'views'), # nsNSSCertHelper.cpp has a bunch of ids os.path.join(src_dir, 'third_party', 'mozilla_security_manager'), os.path.join(chrome_dir, 'installer'), ] return CheckForUnusedGrdIDsInSources(grd_files, src_dirs) if __name__ == '__main__': sys.exit(main())
bsd-3-clause
jobovy/galpy
galpy/potential/SpiralArmsPotential.py
1
29343
############################################################################### # SpiralArmsPotential.py: class that implements the spiral arms potential # from Cox and Gomez (2002) # # https://arxiv.org/abs/astro-ph/0207635 # # Phi(r, phi, z) = -4*pi*G*H*rho0*exp(-(r-r0)/Rs)*sum(Cn/(Kn*Dn)*cos(n*gamma)*sech(Kn*z/Bn)^Bn) ############################################################################### from __future__ import division from .Potential import Potential from ..util import conversion import numpy class SpiralArmsPotential(Potential): """Class that implements the spiral arms potential from (`Cox and Gomez 2002 <https://arxiv.org/abs/astro-ph/0207635>`__). Should be used to modulate an existing potential (density is positive in the arms, negative outside; note that because of this, a contour plot of this potential will appear to have twice as many arms, where half are the underdense regions). .. math:: \\Phi(R, \\phi, z) = -4 \\pi GH \\,\\rho_0 exp \\left( -\\frac{R-r_{ref}}{R_s} \\right) \\sum{\\frac{C_n}{K_n D_n} \\,\cos(n \\gamma) \\,\\mathrm{sech}^{B_n} \\left( \\frac{K_n z}{B_n} \\right)} where .. math:: K_n &= \\frac{n N}{R \sin(\\alpha)} \\\\ B_n &= K_n H (1 + 0.4 K_n H) \\\\ D_n &= \\frac{1 + K_n H + 0.3 (K_n H)^2}{1 + 0.3 K_n H} \\\\ and .. math:: \\gamma = N \\left[\\phi - \\phi_{ref} - \\frac{\\ln(R/r_{ref})}{\\tan(\\alpha)} \\right] The default of :math:`C_n=[1]` gives a sinusoidal profile for the potential. An alternative from `Cox and Gomez (2002) <https://arxiv.org/abs/astro-ph/0207635>`__ creates a density that behaves approximately as a cosine squared in the arms but is separated by a flat interarm region by setting .. 
math:: C_n = \\left[\\frac{8}{3 \\pi}\,,\\frac{1}{2} \\,, \\frac{8}{15 \\pi}\\right] """ normalize= property() # turn off normalize def __init__(self, amp=1, ro=None, vo=None, amp_units='density', N=2, alpha=0.2, r_ref=1, phi_ref=0, Rs=0.3, H=0.125, omega=0, Cs=[1]): """ NAME: __init__ PURPOSE: initialize a spiral arms potential INPUT: :amp: amplitude to be applied to the potential (default: 1); can be a Quantity with units of density. (:math:`amp = 4 \\pi G \\rho_0`) :ro: distance scales for translation into internal units (default from configuration file) :vo: velocity scales for translation into internal units (default from configuration file) :N: number of spiral arms :alpha: pitch angle of the logarithmic spiral arms in radians (can be Quantity) :r_ref: fiducial radius where :math:`\\rho = \\rho_0` (:math:`r_0` in the paper by Cox and Gomez) (can be Quantity) :phi_ref: reference angle (:math:`\\phi_p(r_0)` in the paper by Cox and Gomez) (can be Quantity) :Rs: radial scale length of the drop-off in density amplitude of the arms (can be Quantity) :H: scale height of the stellar arm perturbation (can be Quantity) :Cs: list of constants multiplying the :math:`\cos(n \\gamma)` terms :omega: rotational pattern speed of the spiral arms (can be Quantity) OUTPUT: (none) HISTORY: Started - 2017-05-12 Jack Hong (UBC) Completed - 2017-07-04 Jack Hong (UBC) """ Potential.__init__(self, amp=amp, ro=ro, vo=vo, amp_units=amp_units) alpha= conversion.parse_angle(alpha) r_ref= conversion.parse_length(r_ref,ro=self._ro) phi_ref= conversion.parse_angle(phi_ref) Rs= conversion.parse_length(Rs,ro=self._ro) H= conversion.parse_length(H,ro=self._ro) omega= conversion.parse_frequency(omega,ro=self._ro,vo=self._vo) self._N = -N # trick to flip to left handed coordinate system; flips sign for phi and phi_ref, but also alpha. self._alpha = -alpha # we don't want sign for alpha to change, so flip alpha. (see eqn. 
3 in the paper) self._sin_alpha = numpy.sin(-alpha) self._tan_alpha = numpy.tan(-alpha) self._r_ref = r_ref self._phi_ref = phi_ref self._Rs = Rs self._H = H self._Cs = self._Cs0 = numpy.array(Cs) self._ns = self._ns0 = numpy.arange(1, len(Cs) + 1) self._omega = omega self._rho0 = 1 / (4 * numpy.pi) self._HNn = self._HNn0 = self._H * self._N * self._ns0 self.isNonAxi = True # Potential is not axisymmetric self.hasC = True # Potential has C implementation to speed up orbit integrations self.hasC_dxdv = True # Potential has C implementation of second derivatives def _evaluate(self, R, z, phi=0, t=0): """ NAME: _evaluate PURPOSE: Evaluate the potential at the given coordinates. (without the amp factor; handled by super class) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: Phi(R, z, phi, t) HISTORY: 2017-05-12 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) return -self._H * numpy.exp(-(R-self._r_ref) / self._Rs) \ * numpy.sum(self._Cs / Ks / Ds * numpy.cos(self._ns * self._gamma(R, phi - self._omega * t)) / numpy.cosh(Ks * z / Bs) ** Bs,axis=0) def _Rforce(self, R, z, phi=0, t=0): """ NAME: _Rforce PURPOSE: Evaluate the radial force for this potential at the given coordinates. 
(-dPhi/dR) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: the radial force HISTORY: 2017-05-12 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 He = self._H * numpy.exp(-(R-self._r_ref)/self._Rs) Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) dKs_dR = self._dK_dR(R) dBs_dR = self._dB_dR(R) dDs_dR = self._dD_dR(R) g = self._gamma(R, phi - self._omega * t) dg_dR = self._dgamma_dR(R) cos_ng = numpy.cos(self._ns * g) sin_ng = numpy.sin(self._ns * g) zKB = z * Ks / Bs sechzKB = 1 / numpy.cosh(zKB) return -He * numpy.sum(self._Cs * sechzKB**Bs / Ds * ((self._ns * dg_dR / Ks * sin_ng + cos_ng * (z * numpy.tanh(zKB) * (dKs_dR/Ks - dBs_dR/Bs) - dBs_dR / Ks * numpy.log(sechzKB) + dKs_dR / Ks**2 + dDs_dR / Ds / Ks)) + cos_ng / Ks / self._Rs),axis=0) def _zforce(self, R, z, phi=0, t=0): """ NAME: _zforce PURPOSE: Evaluate the vertical force for this potential at the given coordinates. 
(-dPhi/dz) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: the vertical force HISTORY: 2017-05-25 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) zK_B = z * Ks / Bs return -self._H * numpy.exp(-(R-self._r_ref) / self._Rs) \ * numpy.sum(self._Cs / Ds * numpy.cos(self._ns * self._gamma(R, phi - self._omega * t)) * numpy.tanh(zK_B) / numpy.cosh(zK_B)**Bs,axis=0) def _phiforce(self, R, z, phi=0, t=0): """ NAME: _phiforce PURPOSE: Evaluate the azimuthal force in cylindrical coordinates. (-dPhi/dphi) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: the azimuthal force HISTORY: 2017-05-25 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 g = self._gamma(R, phi - self._omega * t) Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) return -self._H * numpy.exp(-(R-self._r_ref) / self._Rs) \ * numpy.sum(self._N * self._ns * self._Cs / Ds / Ks / numpy.cosh(z * Ks / Bs)**Bs * numpy.sin(self._ns * g),axis=0) def _R2deriv(self, R, z, phi=0, t=0): """ NAME: _R2deriv PURPOSE: Evaluate the second (cylindrical) radial derivative of the potential. 
(d^2 potential / d R^2) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: the second radial derivative HISTORY: 2017-05-31 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 Rs = self._Rs He = self._H * numpy.exp(-(R-self._r_ref)/self._Rs) Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) dKs_dR = self._dK_dR(R) dBs_dR = self._dB_dR(R) dDs_dR = self._dD_dR(R) R_sina = R * self._sin_alpha HNn_R_sina = self._HNn / R_sina HNn_R_sina_2 = HNn_R_sina**2 x = R * (0.3 * HNn_R_sina + 1) * self._sin_alpha d2Ks_dR2 = 2 * self._N * self._ns / R**3 / self._sin_alpha d2Bs_dR2 = HNn_R_sina / R**2 * (2.4 * HNn_R_sina + 2) d2Ds_dR2 = self._sin_alpha / R / x * (self._HNn* (0.18 * self._HNn * (HNn_R_sina + 0.3 * HNn_R_sina_2 + 1) / x**2 + 2 / R_sina - 0.6 * HNn_R_sina * (1 + 0.6 * HNn_R_sina) / x - 0.6 * (HNn_R_sina + 0.3 * HNn_R_sina_2 + 1) / x + 1.8 * self._HNn / R_sina**2)) g = self._gamma(R, phi - self._omega * t) dg_dR = self._dgamma_dR(R) d2g_dR2 = self._N / R**2 / self._tan_alpha sin_ng = numpy.sin(self._ns * g) cos_ng = numpy.cos(self._ns * g) zKB = z * Ks / Bs sechzKB = 1 / numpy.cosh(zKB) sechzKB_Bs = sechzKB**Bs log_sechzKB = numpy.log(sechzKB) tanhzKB = numpy.tanh(zKB) ztanhzKB = z * tanhzKB return -He / Rs * (numpy.sum(self._Cs * sechzKB_Bs / Ds * ((self._ns * dg_dR / Ks * sin_ng + cos_ng * (ztanhzKB * (dKs_dR/Ks - dBs_dR/Bs) - dBs_dR / Ks * log_sechzKB + dKs_dR / Ks**2 + dDs_dR / Ds / Ks)) - (Rs * (1 / Ks * ((ztanhzKB * (dBs_dR / Bs * Ks - dKs_dR) + log_sechzKB * dBs_dR) - dDs_dR / Ds) * (self._ns * dg_dR * sin_ng + cos_ng * (ztanhzKB * Ks * (dKs_dR/Ks - dBs_dR/Bs) - dBs_dR * 
log_sechzKB + dKs_dR / Ks + dDs_dR / Ds)) + (self._ns * (sin_ng * (d2g_dR2 / Ks - dg_dR / Ks**2 * dKs_dR) + dg_dR**2 / Ks * cos_ng * self._ns) + z * (-sin_ng * self._ns * dg_dR * tanhzKB * (dKs_dR/Ks - dBs_dR/Bs) + cos_ng * (z * (dKs_dR/Bs - dBs_dR/Bs**2 * Ks) * (1-tanhzKB**2) * (dKs_dR/Ks - dBs_dR/Bs) + tanhzKB * (d2Ks_dR2/Ks-(dKs_dR/Ks)**2 - d2Bs_dR2/Bs + (dBs_dR/Bs)**2))) + (cos_ng * (dBs_dR/Ks * ztanhzKB * (dKs_dR/Bs - dBs_dR/Bs**2*Ks) -(d2Bs_dR2/Ks-dBs_dR*dKs_dR/Ks**2) * log_sechzKB) + dBs_dR/Ks * log_sechzKB * sin_ng * self._ns * dg_dR) + ((cos_ng * (d2Ks_dR2 / Ks**2 - 2 * dKs_dR**2 / Ks**3) - dKs_dR / Ks**2 * sin_ng * self._ns * dg_dR) + (cos_ng * (d2Ds_dR2 / Ds / Ks - (dDs_dR/Ds)**2 / Ks - dDs_dR / Ds / Ks**2 * dKs_dR) - sin_ng * self._ns * dg_dR * dDs_dR / Ds / Ks)))) - 1 / Ks * (cos_ng / Rs + (cos_ng * ((dDs_dR * Ks + Ds * dKs_dR) / (Ds * Ks) - (ztanhzKB * (dBs_dR / Bs * Ks - dKs_dR) + log_sechzKB * dBs_dR)) + sin_ng * self._ns * dg_dR)))),axis=0)) def _z2deriv(self, R, z, phi=0, t=0): """ NAME: _z2deriv PURPOSE: Evaluate the second (cylindrical) vertical derivative of the potential. 
(d^2 potential / d z^2) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: the second vertical derivative HISTORY: 2017-05-26 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 g = self._gamma(R, phi - self._omega * t) Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) zKB = z * Ks / Bs tanh2_zKB = numpy.tanh(zKB)**2 return -self._H * numpy.exp(-(R-self._r_ref)/self._Rs) \ * numpy.sum(self._Cs * Ks / Ds * ((tanh2_zKB - 1) / Bs + tanh2_zKB) * numpy.cos(self._ns * g) / numpy.cosh(zKB)**Bs,axis=0) def _phi2deriv(self, R, z, phi=0, t=0): """ NAME: _phi2deriv PURPOSE: Evaluate the second azimuthal derivative of the potential in cylindrical coordinates. (d^2 potential / d phi^2) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: d^2 potential / d phi^2 HISTORY: 2017-05-29 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 g = self._gamma(R, phi - self._omega * t) Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) return self._H * numpy.exp(-(R-self._r_ref) / self._Rs) \ * numpy.sum(self._Cs * self._N**2. * self._ns**2. 
/ Ds / Ks / numpy.cosh(z*Ks/Bs)**Bs * numpy.cos(self._ns*g),axis=0) def _Rzderiv(self, R, z, phi=0., t=0.): """ NAME: _Rzderiv PURPOSE: Evaluate the mixed (cylindrical) radial and vertical derivative of the potential (d^2 potential / dR dz). INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: d^2 potential / dR dz HISTORY: 2017-05-12 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 Rs = self._Rs He = self._H * numpy.exp(-(R-self._r_ref)/self._Rs) Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) dKs_dR = self._dK_dR(R) dBs_dR = self._dB_dR(R) dDs_dR = self._dD_dR(R) g = self._gamma(R, phi - self._omega * t) dg_dR = self._dgamma_dR(R) cos_ng = numpy.cos(self._ns * g) sin_ng = numpy.sin(self._ns * g) zKB = z * Ks / Bs sechzKB = 1 / numpy.cosh(zKB) sechzKB_Bs = sechzKB**Bs log_sechzKB = numpy.log(sechzKB) tanhzKB = numpy.tanh(zKB) return - He * numpy.sum(sechzKB_Bs * self._Cs / Ds * (Ks * tanhzKB * (self._ns * dg_dR / Ks * sin_ng + cos_ng * (z * tanhzKB * (dKs_dR/Ks - dBs_dR/Bs) - dBs_dR / Ks * log_sechzKB + dKs_dR / Ks**2 + dDs_dR / Ds / Ks)) - cos_ng * ((zKB * (dKs_dR/Ks - dBs_dR/Bs) * (1 - tanhzKB**2) + tanhzKB * (dKs_dR/Ks - dBs_dR/Bs) + dBs_dR / Bs * tanhzKB) - tanhzKB / Rs)),axis=0) def _Rphideriv(self, R, z, phi=0,t=0): """ NAME: _Rphideriv PURPOSE: Return the mixed radial and azimuthal derivative of the potential in cylindrical coordinates (d^2 potential / dR dphi) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: the mixed radial and azimuthal derivative HISTORY: 2017-06-09 Jack Hong (UBC) """ 
if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 He = self._H * numpy.exp(-(R - self._r_ref) / self._Rs) Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) dKs_dR = self._dK_dR(R) dBs_dR = self._dB_dR(R) dDs_dR = self._dD_dR(R) g = self._gamma(R, phi - self._omega * t) dg_dR = self._dgamma_dR(R) cos_ng = numpy.cos(self._ns * g) sin_ng = numpy.sin(self._ns * g) zKB = z * Ks / Bs sechzKB = 1 / numpy.cosh(zKB) sechzKB_Bs = sechzKB ** Bs return - He * numpy.sum(self._Cs * sechzKB_Bs / Ds * self._ns * self._N * (- self._ns * dg_dR / Ks * cos_ng + sin_ng * (z * numpy.tanh(zKB) * (dKs_dR / Ks - dBs_dR / Bs) + 1/Ks * (-dBs_dR * numpy.log(sechzKB) + dKs_dR / Ks + dDs_dR / Ds + 1 / self._Rs))),axis=0) def _phizderiv(self, R, z, phi=0, t=0): """ NAME: _phizderiv PURPOSE: Evaluate the mixed azimuthal, vertical derivative for this potential at the given coordinates. 
(-dPhi/dz) INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: mixed azimuthal, vertical derivative HISTORY: 2021-04-30 - Jo Bovy (UofT) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) zK_B = z * Ks / Bs return -self._H * numpy.exp(-(R-self._r_ref) / self._Rs) \ * numpy.sum(self._Cs / Ds * self._ns * self._N * numpy.sin(self._ns * self._gamma(R, phi - self._omega * t)) * numpy.tanh(zK_B) / numpy.cosh(zK_B)**Bs,axis=0) def _dens(self, R, z, phi=0, t=0): """ NAME: _dens PURPOSE: Evaluate the density. If not given, the density is computed using the Poisson equation from the first and second derivatives of the potential (if all are implemented). INPUT: :param R: galactocentric cylindrical radius :param z: vertical height :param phi: azimuth :param t: time OUTPUT: :return: the density HISTORY: 2017-05-12 Jack Hong (UBC) """ if isinstance(R,numpy.ndarray) or isinstance(z,numpy.ndarray): nR= len(R) if isinstance(R,numpy.ndarray) else len(z) self._Cs=numpy.transpose(numpy.array([self._Cs0,]*nR)) self._ns=numpy.transpose(numpy.array([self._ns0,]*nR)) self._HNn=numpy.transpose(numpy.array([self._HNn0,]*nR)) else: self._Cs=self._Cs0 self._ns=self._ns0 self._HNn=self._HNn0 g = self._gamma(R, phi - self._omega * t) Ks = self._K(R) Bs = self._B(R) Ds = self._D(R) ng = self._ns * g zKB = z * Ks / Bs sech_zKB = 1 / numpy.cosh(zKB) tanh_zKB = numpy.tanh(zKB) log_sech_zKB = numpy.log(sech_zKB) # numpy of E as defined in the appendix of the paper. 
E = 1 + Ks * self._H / Ds * (1 - 0.3 / (1 + 0.3 * Ks * self._H) ** 2) - R / self._Rs \ - (Ks * self._H) * (1 + 0.8 * Ks * self._H) * log_sech_zKB \ - 0.4 * (Ks * self._H) ** 2 * zKB * tanh_zKB # numpy array of rE' as define in the appendix of the paper. rE = -Ks * self._H / Ds * (1 - 0.3 * (1 - 0.3 * Ks * self._H) / (1 + 0.3 * Ks * self._H) ** 3) \ + (Ks * self._H / Ds * (1 - 0.3 / (1 + 0.3 * Ks * self._H) ** 2)) - R / self._Rs \ + Ks * self._H * (1 + 1.6 * Ks * self._H) * log_sech_zKB \ - (0.4 * (Ks * self._H) ** 2 * zKB * sech_zKB) ** 2 / Bs \ + 1.2 * (Ks * self._H) ** 2 * zKB * tanh_zKB return numpy.sum(self._Cs * self._rho0 * (self._H / (Ds * R)) * numpy.exp(-(R - self._r_ref) / self._Rs) * sech_zKB**Bs * (numpy.cos(ng) * (Ks * R * (Bs + 1) / Bs * sech_zKB**2 - 1 / Ks / R * (E**2 + rE)) - 2 * numpy.sin(ng)* E * numpy.cos(self._alpha)),axis=0) def OmegaP(self): """ NAME: OmegaP PURPOSE: Return the pattern speed. (used to compute the Jacobi integral for orbits). INPUT: :param self OUTPUT: :return: the pattern speed HISTORY: 2017-06-09 Jack Hong (UBC) """ return self._omega def _gamma(self, R, phi): """Return gamma. (eqn 3 in the paper)""" return self._N * (phi - self._phi_ref - numpy.log(R / self._r_ref) / self._tan_alpha) def _dgamma_dR(self, R): """Return the first derivative of gamma wrt R.""" return -self._N / R / self._tan_alpha def _K(self, R): """Return numpy array from K1 up to and including Kn. (eqn. 5)""" return self._ns * self._N / R / self._sin_alpha def _dK_dR(self, R): """Return numpy array of dK/dR from K1 up to and including Kn.""" return -self._ns * self._N / R**2 / self._sin_alpha def _B(self, R): """Return numpy array from B1 up to and including Bn. (eqn. 
6)""" HNn_R = self._HNn / R return HNn_R / self._sin_alpha * (0.4 * HNn_R / self._sin_alpha + 1) def _dB_dR(self, R): """Return numpy array of dB/dR from B1 up to and including Bn.""" return -self._HNn / R**3 / self._sin_alpha**2 * (0.8 * self._HNn + R * self._sin_alpha) def _D(self, R): """Return numpy array from D1 up to and including Dn. (eqn. 7)""" return (0.3 * self._HNn**2 / self._sin_alpha / R + self._HNn + R * self._sin_alpha) / (0.3 * self._HNn + R * self._sin_alpha) def _dD_dR(self, R): """Return numpy array of dD/dR from D1 up to and including Dn.""" HNn_R_sina = self._HNn / R / self._sin_alpha return HNn_R_sina * (0.3 * (HNn_R_sina + 0.3 * HNn_R_sina**2. + 1) / R / (0.3 * HNn_R_sina + 1)**2 - (1/R * (1 + 0.6 * HNn_R_sina) / (0.3 * HNn_R_sina + 1)))
bsd-3-clause
wemanuel/smry
server-auth/ls/google-cloud-sdk/lib/protorpc/transport.py
24
12805
#!/usr/bin/env python # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Transport library for ProtoRPC. Contains underlying infrastructure used for communicating RPCs over low level transports such as HTTP. Includes HTTP transport built over urllib2. """ import httplib import logging import os import socket import sys import urlparse from . import messages from . import protobuf from . import remote from . import util __all__ = [ 'RpcStateError', 'HttpTransport', 'LocalTransport', 'Rpc', 'Transport', ] class RpcStateError(messages.Error): """Raised when trying to put RPC in to an invalid state.""" class Rpc(object): """Represents a client side RPC. An RPC is created by the transport class and is used with a single RPC. While an RPC is still in process, the response is set to None. When it is complete the response will contain the response message. """ def __init__(self, request): """Constructor. Args: request: Request associated with this RPC. 
""" self.__request = request self.__response = None self.__state = remote.RpcState.RUNNING self.__error_message = None self.__error_name = None @property def request(self): """Request associated with RPC.""" return self.__request @property def response(self): """Response associated with RPC.""" self.wait() self.__check_status() return self.__response @property def state(self): """State associated with RPC.""" return self.__state @property def error_message(self): """Error, if any, associated with RPC.""" self.wait() return self.__error_message @property def error_name(self): """Error name, if any, associated with RPC.""" self.wait() return self.__error_name def wait(self): """Wait for an RPC to finish.""" if self.__state == remote.RpcState.RUNNING: self._wait_impl() def _wait_impl(self): """Implementation for wait().""" raise NotImplementedError() def __check_status(self): error_class = remote.RpcError.from_state(self.__state) if error_class is not None: if error_class is remote.ApplicationError: raise error_class(self.__error_message, self.__error_name) else: raise error_class(self.__error_message) def __set_state(self, state, error_message=None, error_name=None): if self.__state != remote.RpcState.RUNNING: raise RpcStateError( 'RPC must be in RUNNING state to change to %s' % state) if state == remote.RpcState.RUNNING: raise RpcStateError('RPC is already in RUNNING state') self.__state = state self.__error_message = error_message self.__error_name = error_name def set_response(self, response): # TODO: Even more specific type checking. if not isinstance(response, messages.Message): raise TypeError('Expected Message type, received %r' % (response)) self.__response = response self.__set_state(remote.RpcState.OK) def set_status(self, status): status.check_initialized() self.__set_state(status.state, status.error_message, status.error_name) class Transport(object): """Transport base class. 
Provides basic support for implementing a ProtoRPC transport such as one that can send and receive messages over HTTP. Implementations override _start_rpc. This method receives a RemoteInfo instance and a request Message. The transport is expected to set the rpc response or raise an exception before termination. """ @util.positional(1) def __init__(self, protocol=protobuf): """Constructor. Args: protocol: If string, will look up a protocol from the default Protocols instance by name. Can also be an instance of remote.ProtocolConfig. If neither, it must be an object that implements a protocol interface by implementing encode_message, decode_message and set CONTENT_TYPE. For example, the modules protobuf and protojson can be used directly. """ if isinstance(protocol, basestring): protocols = remote.Protocols.get_default() try: protocol = protocols.lookup_by_name(protocol) except KeyError: protocol = protocols.lookup_by_content_type(protocol) if isinstance(protocol, remote.ProtocolConfig): self.__protocol = protocol.protocol self.__protocol_config = protocol else: self.__protocol = protocol self.__protocol_config = remote.ProtocolConfig( protocol, 'default', default_content_type=protocol.CONTENT_TYPE) @property def protocol(self): """Protocol associated with this transport.""" return self.__protocol @property def protocol_config(self): """Protocol associated with this transport.""" return self.__protocol_config def send_rpc(self, remote_info, request): """Initiate sending an RPC over the transport. Args: remote_info: RemoteInfo instance describing remote method. request: Request message to send to service. Returns: An Rpc instance intialized with the request.. """ request.check_initialized() rpc = self._start_rpc(remote_info, request) return rpc def _start_rpc(self, remote_info, request): """Start a remote procedure call. Args: remote_info: RemoteInfo instance describing remote method. request: Request message to send to service. 
Returns: An Rpc instance initialized with the request. """ raise NotImplementedError() class HttpTransport(Transport): """Transport for communicating with HTTP servers.""" @util.positional(2) def __init__(self, service_url, protocol=protobuf): """Constructor. Args: service_url: URL where the service is located. All communication via the transport will go to this URL. protocol: The protocol implementation. Must implement encode_message and decode_message. Can also be an instance of remote.ProtocolConfig. """ super(HttpTransport, self).__init__(protocol=protocol) self.__service_url = service_url def __get_rpc_status(self, response, content): """Get RPC status from HTTP response. Args: response: HTTPResponse object. content: Content read from HTTP response. Returns: RpcStatus object parsed from response, else an RpcStatus with a generic HTTP error. """ # Status above 400 may have RpcStatus content. if response.status >= 400: content_type = response.getheader('content-type') if content_type == self.protocol_config.default_content_type: try: rpc_status = self.protocol.decode_message(remote.RpcStatus, content) except Exception, decode_err: logging.warning( 'An error occurred trying to parse status: %s\n%s', str(decode_err), content) else: if rpc_status.is_initialized(): return rpc_status else: logging.warning( 'Body does not result in an initialized RpcStatus message:\n%s', content) # If no RpcStatus message present, attempt to forward any content. If empty # use standard error message. if not content.strip(): content = httplib.responses.get(response.status, 'Unknown Error') return remote.RpcStatus(state=remote.RpcState.SERVER_ERROR, error_message='HTTP Error %s: %s' % ( response.status, content or 'Unknown Error')) def __set_response(self, remote_info, connection, rpc): """Set response on RPC. Sets response or status from HTTP request. Implements the wait method of Rpc instance. Args: remote_info: Remote info for invoked RPC. 
connection: HTTPConnection that is making request. rpc: Rpc instance. """ try: response = connection.getresponse() content = response.read() if response.status == httplib.OK: response = self.protocol.decode_message(remote_info.response_type, content) rpc.set_response(response) else: status = self.__get_rpc_status(response, content) rpc.set_status(status) finally: connection.close() def _start_rpc(self, remote_info, request): """Start a remote procedure call. Args: remote_info: A RemoteInfo instance for this RPC. request: The request message for this RPC. Returns: An Rpc instance initialized with a Request. """ method_url = '%s.%s' % (self.__service_url, remote_info.method.func_name) encoded_request = self.protocol.encode_message(request) url = urlparse.urlparse(method_url) if url.scheme == 'https': connection_type = httplib.HTTPSConnection else: connection_type = httplib.HTTPConnection connection = connection_type(url.hostname, url.port) try: self._send_http_request(connection, url.path, encoded_request) rpc = Rpc(request) except remote.RpcError: # Pass through all ProtoRPC errors connection.close() raise except socket.error, err: connection.close() raise remote.NetworkError('Socket error: %s %r' % (type(err).__name__, err.args), err) except Exception, err: connection.close() raise remote.NetworkError('Error communicating with HTTP server', err) else: wait_impl = lambda: self.__set_response(remote_info, connection, rpc) rpc._wait_impl = wait_impl return rpc def _send_http_request(self, connection, http_path, encoded_request): connection.request( 'POST', http_path, encoded_request, headers={'Content-type': self.protocol_config.default_content_type, 'Content-length': len(encoded_request)}) class LocalTransport(Transport): """Local transport that sends messages directly to services. Useful in tests or creating code that can work with either local or remote services. 
Using LocalTransport is preferrable to simply instantiating a single instance of a service and reusing it. The entire request process involves instantiating a new instance of a service, initializing it with request state and then invoking the remote method for every request. """ def __init__(self, service_factory): """Constructor. Args: service_factory: Service factory or class. """ super(LocalTransport, self).__init__() self.__service_class = getattr(service_factory, 'service_class', service_factory) self.__service_factory = service_factory @property def service_class(self): return self.__service_class @property def service_factory(self): return self.__service_factory def _start_rpc(self, remote_info, request): """Start a remote procedure call. Args: remote_info: RemoteInfo instance describing remote method. request: Request message to send to service. Returns: An Rpc instance initialized with the request. """ rpc = Rpc(request) def wait_impl(): instance = self.__service_factory() try: initalize_request_state = instance.initialize_request_state except AttributeError: pass else: host = unicode(os.uname()[1]) initalize_request_state(remote.RequestState(remote_host=host, remote_address=u'127.0.0.1', server_host=host, server_port=-1)) try: response = remote_info.method(instance, request) assert isinstance(response, remote_info.response_type) except remote.ApplicationError: raise except: exc_type, exc_value, traceback = sys.exc_info() message = 'Unexpected error %s: %s' % (exc_type.__name__, exc_value) raise remote.ServerError, message, traceback rpc.set_response(response) rpc._wait_impl = wait_impl return rpc
apache-2.0
bswartz/cinder
cinder/tests/unit/api/contrib/test_services.py
1
27106
# Copyright 2012 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from iso8601 import iso8601 import mock import webob.exc from cinder.api.contrib import services from cinder.api import extensions from cinder import context from cinder import exception from cinder import test from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake fake_services_list = [ {'binary': 'cinder-scheduler', 'host': 'host1', 'availability_zone': 'cinder', 'id': 1, 'disabled': True, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27), 'disabled_reason': 'test1', 'modified_at': ''}, {'binary': 'cinder-volume', 'host': 'host1', 'availability_zone': 'cinder', 'id': 2, 'disabled': True, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27), 'disabled_reason': 'test2', 'modified_at': ''}, {'binary': 'cinder-scheduler', 'host': 'host2', 'availability_zone': 'cinder', 'id': 3, 'disabled': False, 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': '', 'modified_at': ''}, {'binary': 'cinder-volume', 'host': 'host2', 'availability_zone': 'cinder', 'id': 4, 'disabled': True, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': 'test4', 'modified_at': ''}, 
{'binary': 'cinder-volume', 'host': 'host2', 'availability_zone': 'cinder', 'id': 5, 'disabled': True, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': 'test5', 'modified_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'availability_zone': 'cinder', 'id': 6, 'disabled': False, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': '', 'modified_at': datetime.datetime(2012, 9, 18, 8, 1, 38)}, {'binary': 'cinder-scheduler', 'host': 'host2', 'availability_zone': 'cinder', 'id': 6, 'disabled': False, 'updated_at': None, 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': '', 'modified_at': None}, ] class FakeRequest(object): environ = {"cinder.context": context.get_admin_context()} GET = {} # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compatibility sake. class FakeRequestWithService(object): environ = {"cinder.context": context.get_admin_context()} GET = {"service": "cinder-volume"} class FakeRequestWithBinary(object): environ = {"cinder.context": context.get_admin_context()} GET = {"binary": "cinder-volume"} class FakeRequestWithHost(object): environ = {"cinder.context": context.get_admin_context()} GET = {"host": "host1"} # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compatibility sake. 
class FakeRequestWithHostService(object): environ = {"cinder.context": context.get_admin_context()} GET = {"host": "host1", "service": "cinder-volume"} class FakeRequestWithHostBinary(object): environ = {"cinder.context": context.get_admin_context()} GET = {"host": "host1", "binary": "cinder-volume"} def fake_service_get_all(context, **filters): result = [] host = filters.pop('host', None) for service in fake_services_list: if (host and service['host'] != host and not service['host'].startswith(host + '@')): continue if all(v is None or service.get(k) == v for k, v in filters.items()): result.append(service) return result def fake_service_get(context, service_id=None, **filters): result = fake_service_get_all(context, id=service_id, **filters) if not result: raise exception.ServiceNotFound(service_id=service_id) return result[0] def fake_service_get_by_id(value): for service in fake_services_list: if service['id'] == value: return service return None def fake_service_update(context, service_id, values): service = fake_service_get_by_id(service_id) if service is None: raise exception.ServiceNotFound(service_id=service_id) else: {'host': 'host1', 'service': 'cinder-volume', 'disabled': values['disabled']} def fake_policy_enforce(context, action, target): pass def fake_utcnow(with_timezone=False): tzinfo = iso8601.Utc() if with_timezone else None return datetime.datetime(2012, 10, 29, 13, 42, 11, tzinfo=tzinfo) @mock.patch('cinder.db.service_get_all', fake_service_get_all) @mock.patch('cinder.db.service_get', fake_service_get) @mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow) @mock.patch('cinder.db.sqlalchemy.api.service_update', fake_service_update) @mock.patch('cinder.policy.enforce', fake_policy_enforce) class ServicesTest(test.TestCase): def setUp(self): super(ServicesTest, self).setUp() self.context = context.get_admin_context() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.controller = 
services.ServiceController(self.ext_mgr) def test_services_list(self): req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 2)}, {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-scheduler', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 19, 6, 55, 34)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38)}, {'binary': 'cinder-scheduler', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': None}, ]} self.assertEqual(response, res_dict) def test_services_detail(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 2), 'disabled_reason': 'test1'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}, {'binary': 'cinder-scheduler', 
'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 19, 6, 55, 34), 'disabled_reason': ''}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38), 'disabled_reason': 'test4'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test5'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38), 'disabled_reason': ''}, {'binary': 'cinder-scheduler', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': None, 'disabled_reason': ''}, ]} self.assertEqual(response, res_dict) def test_services_list_with_host(self): req = FakeRequestWithHost() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(response, res_dict) def test_services_detail_with_host(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = FakeRequestWithHost() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 
10, 29, 13, 42, 2), 'disabled_reason': 'test1'}, {'binary': 'cinder-volume', 'frozen': False, 'replication_status': None, 'active_backend_id': None, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}]} self.assertEqual(response, res_dict) def test_services_list_with_service(self): req = FakeRequestWithService() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]} self.assertEqual(response, res_dict) def test_services_detail_with_service(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = FakeRequestWithService() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'disabled_reason': 'test4'}, {'binary': 'cinder-volume', 'replication_status': None, 
'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test5'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'disabled_reason': ''}]} self.assertEqual(response, res_dict) def test_services_list_with_binary(self): req = FakeRequestWithBinary() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]} self.assertEqual(response, res_dict) def test_services_detail_with_binary(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = FakeRequestWithBinary() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'frozen': False, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'frozen': False, 
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'disabled_reason': 'test4'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'frozen': False, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test5'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'frozen': False, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'disabled_reason': ''}]} self.assertEqual(response, res_dict) def test_services_list_with_host_service(self): req = FakeRequestWithHostService() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(response, res_dict) def test_services_detail_with_host_service(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = FakeRequestWithHostService() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2', 'frozen': False}]} self.assertEqual(response, res_dict) def test_services_list_with_host_binary(self): req = FakeRequestWithHostBinary() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(response, res_dict) def test_services_detail_with_host_binary(self): self.ext_mgr.extensions['os-extended-services'] = True 
self.controller = services.ServiceController(self.ext_mgr) req = FakeRequestWithHostBinary() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}]} self.assertEqual(response, res_dict) def test_services_enable_with_service_key(self): body = {'host': 'host1', 'service': 'cinder-volume'} req = fakes.HTTPRequest.blank( '/v2/%s/os-services/enable' % fake.PROJECT_ID) res_dict = self.controller.update(req, "enable", body) self.assertEqual('enabled', res_dict['status']) def test_services_enable_with_binary_key(self): body = {'host': 'host1', 'binary': 'cinder-volume'} req = fakes.HTTPRequest.blank( '/v2/%s/os-services/enable' % fake.PROJECT_ID) res_dict = self.controller.update(req, "enable", body) self.assertEqual('enabled', res_dict['status']) def test_services_disable_with_service_key(self): req = fakes.HTTPRequest.blank( '/v2/%s/os-services/disable' % fake.PROJECT_ID) body = {'host': 'host1', 'service': 'cinder-volume'} res_dict = self.controller.update(req, "disable", body) self.assertEqual('disabled', res_dict['status']) def test_services_disable_with_binary_key(self): req = fakes.HTTPRequest.blank( '/v2/%s/os-services/disable' % fake.PROJECT_ID) body = {'host': 'host1', 'binary': 'cinder-volume'} res_dict = self.controller.update(req, "disable", body) self.assertEqual('disabled', res_dict['status']) def test_services_disable_log_reason(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = ( fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason')) body = {'host': 'host1', 'binary': 'cinder-scheduler', 'disabled_reason': 'test-reason', } res_dict = self.controller.update(req, "disable-log-reason", body) 
self.assertEqual('disabled', res_dict['status']) self.assertEqual('test-reason', res_dict['disabled_reason']) def test_services_disable_log_reason_none(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = ( fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason')) body = {'host': 'host1', 'binary': 'cinder-scheduler', 'disabled_reason': None, } self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, "disable-log-reason", body) def test_invalid_reason_field(self): # Check that empty strings are not allowed reason = ' ' * 10 self.assertFalse(self.controller._is_valid_as_reason(reason)) reason = 'a' * 256 self.assertFalse(self.controller._is_valid_as_reason(reason)) # Check that spaces at the end are also counted reason = 'a' * 255 + ' ' self.assertFalse(self.controller._is_valid_as_reason(reason)) reason = 'it\'s a valid reason.' self.assertTrue(self.controller._is_valid_as_reason(reason)) reason = None self.assertFalse(self.controller._is_valid_as_reason(reason))
apache-2.0
sumspr/scikit-learn
benchmarks/bench_plot_ward.py
290
1260
""" Benchmark scikit-learn's Ward implement compared to SciPy's """ import time import numpy as np from scipy.cluster import hierarchy import pylab as pl from sklearn.cluster import AgglomerativeClustering ward = AgglomerativeClustering(n_clusters=3, linkage='ward') n_samples = np.logspace(.5, 3, 9) n_features = np.logspace(1, 3.5, 7) N_samples, N_features = np.meshgrid(n_samples, n_features) scikits_time = np.zeros(N_samples.shape) scipy_time = np.zeros(N_samples.shape) for i, n in enumerate(n_samples): for j, p in enumerate(n_features): X = np.random.normal(size=(n, p)) t0 = time.time() ward.fit(X) scikits_time[j, i] = time.time() - t0 t0 = time.time() hierarchy.ward(X) scipy_time[j, i] = time.time() - t0 ratio = scikits_time / scipy_time pl.figure("scikit-learn Ward's method benchmark results") pl.imshow(np.log(ratio), aspect='auto', origin="lower") pl.colorbar() pl.contour(ratio, levels=[1, ], colors='k') pl.yticks(range(len(n_features)), n_features.astype(np.int)) pl.ylabel('N features') pl.xticks(range(len(n_samples)), n_samples.astype(np.int)) pl.xlabel('N samples') pl.title("Scikit's time, in units of scipy time (log)") pl.show()
bsd-3-clause
slohse/ansible
lib/ansible/modules/cloud/google/gcp_compute_instance_group_manager.py
12
22222
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_compute_instance_group_manager description: - Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances. - A managed instance group can have up to 1000 VM instances per group. short_description: Creates a GCP InstanceGroupManager version_added: 2.6 author: Google Inc. 
(@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: state: description: - Whether the given object should exist in GCP choices: ['present', 'absent'] default: 'present' base_instance_name: description: - The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. - The base instance name must comply with RFC1035. required: true description: description: - An optional description of this resource. Provide this property when you create the resource. required: false instance_template: description: - The instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group. required: true name: description: - The name of the managed instance group. The name must be 1-63 characters long, and comply with RFC1035. required: true named_ports: description: - Named ports configured for the Instance Groups complementary to this Instance Group Manager. required: false suboptions: name: description: - The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. required: false port: description: - The port number, which can be a value between 1 and 65535. required: false target_pools: description: - TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group. required: false target_size: description: - The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. Resizing the group changes this number. required: false zone: description: - The zone the managed instance group resides. 
required: true extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: create a network gcp_compute_network: name: "network-instancetemplate" project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: network - name: create a address gcp_compute_address: name: "address-instancetemplate" region: us-west1 project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: address - name: create a instance template gcp_compute_instance_template: name: "{{ resource_name }}" properties: disks: - auto_delete: true boot: true initialize_params: source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts machine_type: n1-standard-1 network_interfaces: - network: "{{ network }}" access_configs: - name: test-config type: ONE_TO_ONE_NAT nat_ip: "{{ address }}" project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: instancetemplate - name: create a instance group manager gcp_compute_instance_group_manager: name: "test_object" base_instance_name: test1-child instance_template: "{{ instancetemplate }}" target_size: 3 zone: us-west1-a project: "test_project" auth_kind: "service_account" service_account_file: "/tmp/auth.pem" state: present ''' RETURN = ''' base_instance_name: description: - The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. - The base instance name must comply with RFC1035. returned: success type: str creation_timestamp: description: - The creation timestamp for this managed instance group in RFC3339 text format. 
returned: success type: str current_actions: description: - The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions. returned: success type: complex contains: abandoning: description: - The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it. returned: success type: int creating: description: - The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully. - If you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated. returned: success type: int creating_without_retries: description: - The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's targetSize value accordingly. returned: success type: int deleting: description: - The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted. returned: success type: int none: description: - The number of instances in the managed instance group that are running and have no scheduled actions. returned: success type: int recreating: description: - The number of instances in the managed instance group that are scheduled to be recreated or are currently being being recreated. - Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template. 
returned: success type: int refreshing: description: - The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. For example, setting or removing target pools for the instance. returned: success type: int restarting: description: - The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted. returned: success type: int description: description: - An optional description of this resource. Provide this property when you create the resource. returned: success type: str id: description: - A unique identifier for this resource. returned: success type: int instance_group: description: - The instance group being managed. returned: success type: dict instance_template: description: - The instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group. returned: success type: dict name: description: - The name of the managed instance group. The name must be 1-63 characters long, and comply with RFC1035. returned: success type: str named_ports: description: - Named ports configured for the Instance Groups complementary to this Instance Group Manager. returned: success type: complex contains: name: description: - The name for this named port. The name must be 1-63 characters long, and comply with RFC1035. returned: success type: str port: description: - The port number, which can be a value between 1 and 65535. returned: success type: int region: description: - The region this managed instance group resides (for regional resources). returned: success type: str target_pools: description: - TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group. 
returned: success type: list target_size: description: - The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. Resizing the group changes this number. returned: success type: int zone: description: - The zone the managed instance group resides. returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict import json import re import time ################################################################################ # Main ################################################################################ def main(): """Main function""" module = GcpModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), base_instance_name=dict(required=True, type='str'), description=dict(type='str'), instance_template=dict(required=True, type='dict'), name=dict(required=True, type='str'), named_ports=dict(type='list', elements='dict', options=dict( name=dict(type='str'), port=dict(type='int') )), target_pools=dict(type='list', elements='dict'), target_size=dict(type='int'), zone=dict(required=True, type='str') ) ) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] state = module.params['state'] kind = 'compute#instanceGroupManager' fetch = fetch_resource(module, self_link(module), kind) changed = False if fetch: if state == 'present': if is_different(module, fetch): fetch = update(module, self_link(module), kind) changed = True else: delete(module, self_link(module), kind) fetch = {} changed = True else: if state == 'present': fetch = create(module, collection(module), kind) changed = True else: fetch = {} fetch.update({'changed': changed}) 
module.exit_json(**fetch) def create(module, link, kind): auth = GcpSession(module, 'compute') return wait_for_operation(module, auth.post(link, resource_to_request(module))) def update(module, link, kind): auth = GcpSession(module, 'compute') return wait_for_operation(module, auth.put(link, resource_to_request(module))) def delete(module, link, kind): auth = GcpSession(module, 'compute') return wait_for_operation(module, auth.delete(link)) def resource_to_request(module): request = { u'kind': 'compute#instanceGroupManager', u'baseInstanceName': module.params.get('base_instance_name'), u'description': module.params.get('description'), u'instanceTemplate': replace_resource_dict(module.params.get(u'instance_template', {}), 'selfLink'), u'name': module.params.get('name'), u'namedPorts': InstanceGroupManagerNamedPortsArray(module.params.get('named_ports', []), module).to_request(), u'targetPools': replace_resource_dict(module.params.get('target_pools', []), 'selfLink'), u'targetSize': module.params.get('target_size') } return_vals = {} for k, v in request.items(): if v: return_vals[k] = v return return_vals def fetch_resource(module, link, kind): auth = GcpSession(module, 'compute') return return_if_object(module, auth.get(link), kind) def self_link(module): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{name}".format(**module.params) def collection(module): return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers".format(**module.params) def return_if_object(module, response, kind): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. 
if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) if result['kind'] != kind: module.fail_json(msg="Incorrect result: {kind}".format(**result)) return result def is_different(module, response): request = resource_to_request(module) response = response_to_hash(module, response) # Remove all output-only from response. response_vals = {} for k, v in response.items(): if k in request: response_vals[k] = v request_vals = {} for k, v in request.items(): if k in response: request_vals[k] = v return GcpRequest(request_vals) != GcpRequest(response_vals) # Remove unnecessary properties from the response. # This is for doing comparisons with Ansible's current parameters. def response_to_hash(module, response): return { u'baseInstanceName': response.get(u'baseInstanceName'), u'creationTimestamp': response.get(u'creationTimestamp'), u'currentActions': InstanceGroupManagerCurrentActions(response.get(u'currentActions', {}), module).from_response(), u'description': module.params.get('description'), u'id': response.get(u'id'), u'instanceGroup': response.get(u'instanceGroup'), u'instanceTemplate': response.get(u'instanceTemplate'), u'name': response.get(u'name'), u'namedPorts': InstanceGroupManagerNamedPortsArray(response.get(u'namedPorts', []), module).from_response(), u'region': response.get(u'region'), u'targetPools': response.get(u'targetPools'), u'targetSize': response.get(u'targetSize') } def region_selflink(name, params): if name is None: return url = r"https://www.googleapis.com/compute/v1/projects/.*/regions/[a-z1-9\-]*" if not re.match(url, name): name = "https://www.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name return name def 
async_op_url(module, extra_data=None): if extra_data is None: extra_data = {} url = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}" combined = extra_data.copy() combined.update(module.params) return url.format(**combined) def wait_for_operation(module, response): op_result = return_if_object(module, response, 'compute#operation') if op_result is None: return {} status = navigate_hash(op_result, ['status']) wait_done = wait_for_completion(status, op_result, module) return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instanceGroupManager') def wait_for_completion(status, op_result, module): op_id = navigate_hash(op_result, ['name']) op_uri = async_op_url(module, {'op_id': op_id}) while status != 'DONE': raise_if_errors(op_result, ['error', 'errors'], 'message') time.sleep(1.0) if status not in ['PENDING', 'RUNNING', 'DONE']: module.fail_json(msg="Invalid result %s" % status) op_result = fetch_resource(module, op_uri, 'compute#operation') status = navigate_hash(op_result, ['status']) return op_result def raise_if_errors(response, err_path, module): errors = navigate_hash(response, err_path) if errors is not None: module.fail_json(msg=errors) class InstanceGroupManagerCurrentActions(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({ u'abandoning': self.request.get('abandoning'), u'creating': self.request.get('creating'), u'creatingWithoutRetries': self.request.get('creating_without_retries'), u'deleting': self.request.get('deleting'), u'none': self.request.get('none'), u'recreating': self.request.get('recreating'), u'refreshing': self.request.get('refreshing'), u'restarting': self.request.get('restarting') }) def from_response(self): return remove_nones_from_dict({ u'abandoning': self.request.get(u'abandoning'), u'creating': self.request.get(u'creating'), 
u'creatingWithoutRetries': self.request.get(u'creatingWithoutRetries'), u'deleting': self.request.get(u'deleting'), u'none': self.request.get(u'none'), u'recreating': self.request.get(u'recreating'), u'refreshing': self.request.get(u'refreshing'), u'restarting': self.request.get(u'restarting') }) class InstanceGroupManagerNamedPortsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict({ u'name': item.get('name'), u'port': item.get('port') }) def _response_from_item(self, item): return remove_nones_from_dict({ u'name': item.get(u'name'), u'port': item.get(u'port') }) if __name__ == '__main__': main()
gpl-3.0
drjeep/django
tests/many_to_one_null/tests.py
142
6484
from __future__ import unicode_literals from django.test import TestCase from .models import Article, Car, Driver, Reporter class ManyToOneNullTests(TestCase): def setUp(self): # Create a Reporter. self.r = Reporter(name='John Smith') self.r.save() # Create an Article. self.a = Article(headline="First", reporter=self.r) self.a.save() # Create an Article via the Reporter object. self.a2 = self.r.article_set.create(headline="Second") # Create an Article with no Reporter by passing "reporter=None". self.a3 = Article(headline="Third", reporter=None) self.a3.save() # Create another article and reporter self.r2 = Reporter(name='Paul Jones') self.r2.save() self.a4 = self.r2.article_set.create(headline='Fourth') def test_get_related(self): self.assertEqual(self.a.reporter.id, self.r.id) # Article objects have access to their related Reporter objects. r = self.a.reporter self.assertEqual(r.id, self.r.id) def test_created_via_related_set(self): self.assertEqual(self.a2.reporter.id, self.r.id) def test_related_set(self): # Reporter objects have access to their related Article objects. self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>']) self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'), ['<Article: First>']) self.assertEqual(self.r.article_set.count(), 2) def test_created_without_related(self): self.assertEqual(self.a3.reporter, None) # Need to reget a3 to refresh the cache a3 = Article.objects.get(pk=self.a3.pk) self.assertRaises(AttributeError, getattr, a3.reporter, 'id') # Accessing an article's 'reporter' attribute returns None # if the reporter is set to None. self.assertEqual(a3.reporter, None) # To retrieve the articles with no reporters set, use "reporter__isnull=True". self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), ['<Article: Third>']) # We can achieve the same thing by filtering for the case where the # reporter is None. 
self.assertQuerysetEqual(Article.objects.filter(reporter=None), ['<Article: Third>']) # Set the reporter for the Third article self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>']) self.r.article_set.add(a3) self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>', '<Article: Third>']) # Remove an article from the set, and check that it was removed. self.r.article_set.remove(a3) self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>']) self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), ['<Article: Third>']) def test_remove_from_wrong_set(self): self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>']) # Try to remove a4 from a set it does not belong to self.assertRaises(Reporter.DoesNotExist, self.r.article_set.remove, self.a4) self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>']) def test_set(self): # Use manager.set() to allocate ForeignKey. Null is legal, so existing # members of the set that are not in the assignment set are set to null. self.r2.article_set.set([self.a2, self.a3]) self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Second>', '<Article: Third>']) # Use manager.set(clear=True) self.r2.article_set.set([self.a3, self.a4], clear=True) self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>', '<Article: Third>']) # Clear the rest of the set self.r2.article_set.set([]) self.assertQuerysetEqual(self.r2.article_set.all(), []) self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), ['<Article: Fourth>', '<Article: Second>', '<Article: Third>']) def test_assign_clear_related_set(self): # Use descriptor assignment to allocate ForeignKey. Null is legal, so # existing members of the set that are not in the assignment set are # set to null. 
self.r2.article_set = [self.a2, self.a3] self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Second>', '<Article: Third>']) # Clear the rest of the set self.r.article_set.clear() self.assertQuerysetEqual(self.r.article_set.all(), []) self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), ['<Article: First>', '<Article: Fourth>']) def test_assign_with_queryset(self): # Ensure that querysets used in reverse FK assignments are pre-evaluated # so their value isn't affected by the clearing operation in # ForeignRelatedObjectsDescriptor.__set__. Refs #19816. self.r2.article_set = [self.a2, self.a3] qs = self.r2.article_set.filter(headline="Second") self.r2.article_set = qs self.assertEqual(1, self.r2.article_set.count()) self.assertEqual(1, qs.count()) def test_add_efficiency(self): r = Reporter.objects.create() articles = [] for _ in range(3): articles.append(Article.objects.create()) with self.assertNumQueries(1): r.article_set.add(*articles) self.assertEqual(r.article_set.count(), 3) def test_clear_efficiency(self): r = Reporter.objects.create() for _ in range(3): r.article_set.create() with self.assertNumQueries(1): r.article_set.clear() self.assertEqual(r.article_set.count(), 0) def test_related_null_to_field(self): c1 = Car.objects.create() d1 = Driver.objects.create() self.assertIs(d1.car, None) with self.assertNumQueries(0): self.assertEqual(list(c1.drivers.all()), [])
bsd-3-clause
echanna/EdxNotAFork
common/lib/xmodule/xmodule/combined_open_ended_module.py
10
22637
import logging from lxml import etree from pkg_resources import resource_string from xmodule.raw_module import RawDescriptor from .x_module import XModule, module_attr from xblock.fields import Integer, Scope, String, List, Float, Boolean from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor from collections import namedtuple from .fields import Date, Timedelta import textwrap log = logging.getLogger("edx.courseware") V1_SETTINGS_ATTRIBUTES = [ "display_name", "max_attempts", "graded", "accept_file_upload", "skip_spelling_checks", "due", "extended_due", "graceperiod", "weight", "min_to_calibrate", "max_to_calibrate", "peer_grader_count", "required_peer_grading", "peer_grade_finished_submissions_when_none_pending", ] V1_STUDENT_ATTRIBUTES = [ "current_task_number", "task_states", "state", "student_attempts", "ready_to_reset", "old_task_states", ] V1_ATTRIBUTES = V1_SETTINGS_ATTRIBUTES + V1_STUDENT_ATTRIBUTES VersionTuple = namedtuple('VersionTuple', ['descriptor', 'module', 'settings_attributes', 'student_attributes']) VERSION_TUPLES = { 1: VersionTuple(CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module, V1_SETTINGS_ATTRIBUTES, V1_STUDENT_ATTRIBUTES), } DEFAULT_VERSION = 1 DEFAULT_DATA = textwrap.dedent("""\ <combinedopenended> <prompt> <h3>Censorship in the Libraries</h3> <p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author </p> <p> Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? 
Support your position with convincing arguments from your own experience, observations, and/or reading. </p> </prompt> <rubric> <rubric> <category> <description> Ideas </description> <option> Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus. </option> <option> Attempts a main idea. Sometimes loses focus or ineffectively displays focus. </option> <option> Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task. </option> <option> Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task. </option> </category> <category> <description> Content </description> <option> Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic. </option> <option> Includes little information and few or no details. Explores only one or two facets of the topic. </option> <option> Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic. </option> <option> Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic. </option> </category> <category> <description> Organization </description> <option> Ideas organized illogically, transitions weak, and response difficult to follow. </option> <option> Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions. </option> <option> Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions. </option> </category> <category> <description> Style </description> <option> Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns. 
</option> <option> Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns). </option> <option> Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences. </option> </category> <category> <description> Voice </description> <option> Demonstrates language and tone that may be inappropriate to task and reader. </option> <option> Demonstrates an attempt to adjust language and tone to task and reader. </option> <option> Demonstrates effective adjustment of language and tone to task and reader. </option> </category> </rubric> </rubric> <task> <selfassessment/></task> <task> <openended min_score_to_attempt="4" max_score_to_attempt="12" > <openendedparam> <initial_display>Enter essay here.</initial_display> <answer_display>This is the answer.</answer_display> <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload> </openendedparam> </openended> </task> <task> <openended min_score_to_attempt="9" max_score_to_attempt="12" > <openendedparam> <initial_display>Enter essay here.</initial_display> <answer_display>This is the answer.</answer_display> <grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload> </openendedparam> </openended> </task> </combinedopenended> """) class VersionInteger(Integer): """ A model type that converts from strings to integers when reading from json. Also does error checking to see if version is correct or not. 
""" def from_json(self, value): try: value = int(value) if value not in VERSION_TUPLES: version_error_string = "Could not find version {0}, using version {1} instead" log.error(version_error_string.format(value, DEFAULT_VERSION)) value = DEFAULT_VERSION except: value = DEFAULT_VERSION return value class CombinedOpenEndedFields(object): display_name = String( display_name="Display Name", help="This name appears in the horizontal navigation at the top of the page.", default="Open Response Assessment", scope=Scope.settings ) current_task_number = Integer( help="Current task that the student is on.", default=0, scope=Scope.user_state ) old_task_states = List( help=("A list of lists of state dictionaries for student states that are saved." "This field is only populated if the instructor changes tasks after" "the module is created and students have attempted it (for example changes a self assessed problem to " "self and peer assessed."), scope = Scope.user_state ) task_states = List( help="List of state dictionaries of each task within this module.", scope=Scope.user_state ) state = String( help="Which step within the current task that the student is on.", default="initial", scope=Scope.user_state ) graded = Boolean( display_name="Graded", help='Defines whether the student gets credit for this problem. 
Credit is based on peer grades of this problem.', default=False, scope=Scope.settings ) student_attempts = Integer( help="Number of attempts taken by the student on this problem", default=0, scope=Scope.user_state ) ready_to_reset = Boolean( help="If the problem is ready to be reset or not.", default=False, scope=Scope.user_state ) max_attempts = Integer( display_name="Maximum Attempts", help="The number of times the student can try to answer this problem.", default=1, scope=Scope.settings, values={"min": 1 } ) accept_file_upload = Boolean( display_name="Allow File Uploads", help="Whether or not the student can submit files as a response.", default=False, scope=Scope.settings ) skip_spelling_checks = Boolean( display_name="Disable Quality Filter", help="If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed.", default=False, scope=Scope.settings ) due = Date( help="Date that this problem is due by", scope=Scope.settings ) extended_due = Date( help="Date that this problem is due by for a particular student. This " "can be set by an instructor, and will override the global due " "date if it is set to a date that is later than the global due " "date.", default=None, scope=Scope.user_state, ) graceperiod = Timedelta( help="Amount of time after the due date that submissions will be accepted", scope=Scope.settings ) version = VersionInteger(help="Current version number", default=DEFAULT_VERSION, scope=Scope.settings) data = String(help="XML data for the problem", scope=Scope.content, default=DEFAULT_DATA) weight = Float( display_name="Problem Weight", help="Defines the number of points each problem is worth. 
If the value is not set, each problem is worth one point.", scope=Scope.settings, values={"min": 0, "step": ".1"}, default=1 ) min_to_calibrate = Integer( display_name="Minimum Peer Grading Calibrations", help="The minimum number of calibration essays each student will need to complete for peer grading.", default=3, scope=Scope.settings, values={"min": 1, "max": 20, "step": "1"} ) max_to_calibrate = Integer( display_name="Maximum Peer Grading Calibrations", help="The maximum number of calibration essays each student will need to complete for peer grading.", default=6, scope=Scope.settings, values={"min": 1, "max": 20, "step": "1"} ) peer_grader_count = Integer( display_name="Peer Graders per Response", help="The number of peers who will grade each submission.", default=3, scope=Scope.settings, values={"min": 1, "step": "1", "max": 5} ) required_peer_grading = Integer( display_name="Required Peer Grading", help="The number of other students each student making a submission will have to grade.", default=3, scope=Scope.settings, values={"min": 1, "step": "1", "max": 5} ) peer_grade_finished_submissions_when_none_pending = Boolean( display_name='Allow "overgrading" of peer submissions', help=("EXPERIMENTAL FEATURE. Allow students to peer grade submissions that already have the requisite number of graders, " "but ONLY WHEN all submissions they are eligible to grade already have enough graders. " "This is intended for use when settings for `Required Peer Grading` > `Peer Graders per Response`"), default=False, scope=Scope.settings, ) markdown = String( help="Markdown source of this module", default=textwrap.dedent("""\ [prompt] <h3>Censorship in the Libraries</h3> <p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. 
And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author </p> <p> Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading. </p> [prompt] [rubric] + Ideas - Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus. - Attempts a main idea. Sometimes loses focus or ineffectively displays focus. - Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task. - Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task. + Content - Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic. - Includes little information and few or no details. Explores only one or two facets of the topic. - Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic. - Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic. + Organization - Ideas organized illogically, transitions weak, and response difficult to follow. - Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions. - Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions. + Style - Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns. - Contains basic vocabulary, with words that are predictable and common. 
Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns). - Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences. + Voice - Demonstrates language and tone that may be inappropriate to task and reader. - Demonstrates an attempt to adjust language and tone to task and reader. - Demonstrates effective adjustment of language and tone to task and reader. [rubric] [tasks] (Self), ({4-12}AI), ({9-12}Peer) [tasks] """), scope=Scope.settings ) class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule): """ This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc). It transitions between problems, and support arbitrary ordering. Each combined open ended module contains one or multiple "child" modules. Child modules track their own state, and can transition between states. They also implement get_html and handle_ajax. The combined open ended module transitions between child modules as appropriate, tracks its own state, and passess ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem) ajax actions implemented by all children are: 'save_answer' -- Saves the student answer 'save_assessment' -- Saves the student assessment (or external grader assessment) 'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc) ajax actions implemented by combined open ended module are: 'reset' -- resets the whole combined open ended module and returns to the first child module 'next_problem' -- moves to the next child module 'get_results' -- gets results from a given child module Types of children. 
Task is synonymous with child module, so each combined open ended module incorporates multiple children (tasks): openendedmodule selfassessmentmodule CombinedOpenEndedModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__ """ STATE_VERSION = 1 # states INITIAL = 'initial' ASSESSING = 'assessing' INTERMEDIATE_DONE = 'intermediate_done' DONE = 'done' icon_class = 'problem' js = { 'coffee': [ resource_string(__name__, 'js/src/combinedopenended/display.coffee'), resource_string(__name__, 'js/src/javascript_loader.coffee'), ], 'js': [ resource_string(__name__, 'js/src/collapsible.js'), ] } js_module_name = "CombinedOpenEnded" css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]} def __init__(self, *args, **kwargs): """ Definition file should have one or many task blocks, a rubric block, and a prompt block. See DEFAULT_DATA for a sample. """ super(CombinedOpenEndedModule, self).__init__(*args, **kwargs) self.system.set('location', self.location) if self.task_states is None: self.task_states = [] if self.old_task_states is None: self.old_task_states = [] version_tuple = VERSION_TUPLES[self.version] self.student_attributes = version_tuple.student_attributes self.settings_attributes = version_tuple.settings_attributes attributes = self.student_attributes + self.settings_attributes static_data = {} instance_state = {k: getattr(self, k) for k in attributes} self.child_descriptor = version_tuple.descriptor(self.system) self.child_definition = version_tuple.descriptor.definition_from_xml(etree.fromstring(self.data), self.system) self.child_module = version_tuple.module(self.system, self.location, self.child_definition, self.child_descriptor, instance_state=instance_state, static_data=static_data, attributes=attributes) self.save_instance_data() def get_html(self): self.save_instance_data() return_value = self.child_module.get_html() return return_value def handle_ajax(self, dispatch, data): self.save_instance_data() 
return_value = self.child_module.handle_ajax(dispatch, data) self.save_instance_data() return return_value def get_instance_state(self): return self.child_module.get_instance_state() def get_score(self): return self.child_module.get_score() def max_score(self): return self.child_module.max_score() def get_progress(self): return self.child_module.get_progress() @property def due_date(self): return self.child_module.due_date def save_instance_data(self): for attribute in self.student_attributes: setattr(self, attribute, getattr(self.child_module, attribute)) class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor): """ Module for adding combined open ended questions """ mako_template = "widgets/open-ended-edit.html" module_class = CombinedOpenEndedModule has_score = True always_recalculate_grades = True template_dir_name = "combinedopenended" #Specify whether or not to pass in S3 interface needs_s3_interface = True #Specify whether or not to pass in open ended interface needs_open_ended_interface = True metadata_attributes = RawDescriptor.metadata_attributes js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/edit.coffee')]} js_module_name = "OpenEndedMarkdownEditingDescriptor" css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/combinedopenended/edit.scss')]} metadata_translations = { 'is_graded': 'graded', 'attempts': 'max_attempts', } def get_context(self): _context = RawDescriptor.get_context(self) _context.update({'markdown': self.markdown, 'enable_markdown': self.markdown is not None}) return _context @property def non_editable_metadata_fields(self): non_editable_fields = super(CombinedOpenEndedDescriptor, self).non_editable_metadata_fields non_editable_fields.extend([CombinedOpenEndedDescriptor.due, CombinedOpenEndedDescriptor.graceperiod, CombinedOpenEndedDescriptor.markdown, CombinedOpenEndedDescriptor.version]) return non_editable_fields # Proxy to CombinedOpenEndedModule so 
that external callers don't have to know if they're working # with a module or a descriptor child_module = module_attr('child_module')
agpl-3.0
elainenaomi/sciwonc-dataflow-examples
sbbd2016/experiments/1-postgres/3_workflow_full_10files_primary_nosh_nors_annot_with_proj_3s/pegasus.bDkvI/pegasus-4.6.0/lib/pegasus/externals/python/boto/dynamodb/table.py
153
21808
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.dynamodb.batch import BatchList from boto.dynamodb.schema import Schema from boto.dynamodb.item import Item from boto.dynamodb import exceptions as dynamodb_exceptions import time class TableBatchGenerator(object): """ A low-level generator used to page through results from batch_get_item operations. :ivar consumed_units: An integer that holds the number of ConsumedCapacityUnits accumulated thus far for this generator. 
""" def __init__(self, table, keys, attributes_to_get=None, consistent_read=False): self.table = table self.keys = keys self.consumed_units = 0 self.attributes_to_get = attributes_to_get self.consistent_read = consistent_read def _queue_unprocessed(self, res): if u'UnprocessedKeys' not in res: return if self.table.name not in res[u'UnprocessedKeys']: return keys = res[u'UnprocessedKeys'][self.table.name][u'Keys'] for key in keys: h = key[u'HashKeyElement'] r = key[u'RangeKeyElement'] if u'RangeKeyElement' in key else None self.keys.append((h, r)) def __iter__(self): while self.keys: # Build the next batch batch = BatchList(self.table.layer2) batch.add_batch(self.table, self.keys[:100], self.attributes_to_get) res = batch.submit() # parse the results if self.table.name not in res[u'Responses']: continue self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits'] for elem in res[u'Responses'][self.table.name][u'Items']: yield elem # re-queue un processed keys self.keys = self.keys[100:] self._queue_unprocessed(res) class Table(object): """ An Amazon DynamoDB table. :ivar name: The name of the table. :ivar create_time: The date and time that the table was created. :ivar status: The current status of the table. One of: 'ACTIVE', 'UPDATING', 'DELETING'. :ivar schema: A :class:`boto.dynamodb.schema.Schema` object representing the schema defined for the table. :ivar item_count: The number of items in the table. This value is set only when the Table object is created or refreshed and may not reflect the actual count. :ivar size_bytes: Total size of the specified table, in bytes. Amazon DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value. :ivar read_units: The ReadCapacityUnits of the tables Provisioned Throughput. :ivar write_units: The WriteCapacityUnits of the tables Provisioned Throughput. :ivar schema: The Schema object associated with the table. 
""" def __init__(self, layer2, response): """ :type layer2: :class:`boto.dynamodb.layer2.Layer2` :param layer2: A `Layer2` api object. :type response: dict :param response: The output of `boto.dynamodb.layer1.Layer1.describe_table`. """ self.layer2 = layer2 self._dict = {} self.update_from_response(response) @classmethod def create_from_schema(cls, layer2, name, schema): """Create a Table object. If you know the name and schema of your table, you can create a ``Table`` object without having to make any API calls (normally an API call is made to retrieve the schema of a table). Example usage:: table = Table.create_from_schema( boto.connect_dynamodb(), 'tablename', Schema.create(hash_key=('keyname', 'N'))) :type layer2: :class:`boto.dynamodb.layer2.Layer2` :param layer2: A ``Layer2`` api object. :type name: str :param name: The name of the table. :type schema: :class:`boto.dynamodb.schema.Schema` :param schema: The schema associated with the table. :rtype: :class:`boto.dynamodb.table.Table` :return: A Table object representing the table. """ table = cls(layer2, {'Table': {'TableName': name}}) table._schema = schema return table def __repr__(self): return 'Table(%s)' % self.name @property def name(self): return self._dict['TableName'] @property def create_time(self): return self._dict.get('CreationDateTime', None) @property def status(self): return self._dict.get('TableStatus', None) @property def item_count(self): return self._dict.get('ItemCount', 0) @property def size_bytes(self): return self._dict.get('TableSizeBytes', 0) @property def schema(self): return self._schema @property def read_units(self): try: return self._dict['ProvisionedThroughput']['ReadCapacityUnits'] except KeyError: return None @property def write_units(self): try: return self._dict['ProvisionedThroughput']['WriteCapacityUnits'] except KeyError: return None def update_from_response(self, response): """ Update the state of the Table object based on the response data received from Amazon DynamoDB. 
""" # 'Table' is from a describe_table call. if 'Table' in response: self._dict.update(response['Table']) # 'TableDescription' is from a create_table call. elif 'TableDescription' in response: self._dict.update(response['TableDescription']) if 'KeySchema' in self._dict: self._schema = Schema(self._dict['KeySchema']) def refresh(self, wait_for_active=False, retry_seconds=5): """ Refresh all of the fields of the Table object by calling the underlying DescribeTable request. :type wait_for_active: bool :param wait_for_active: If True, this command will not return until the table status, as returned from Amazon DynamoDB, is 'ACTIVE'. :type retry_seconds: int :param retry_seconds: If wait_for_active is True, this parameter controls the number of seconds of delay between calls to update_table in Amazon DynamoDB. Default is 5 seconds. """ done = False while not done: response = self.layer2.describe_table(self.name) self.update_from_response(response) if wait_for_active: if self.status == 'ACTIVE': done = True else: time.sleep(retry_seconds) else: done = True def update_throughput(self, read_units, write_units): """ Update the ProvisionedThroughput for the Amazon DynamoDB Table. :type read_units: int :param read_units: The new value for ReadCapacityUnits. :type write_units: int :param write_units: The new value for WriteCapacityUnits. """ self.layer2.update_throughput(self, read_units, write_units) def delete(self): """ Delete this table and all items in it. After calling this the Table objects status attribute will be set to 'DELETING'. """ self.layer2.delete_table(self) def get_item(self, hash_key, range_key=None, attributes_to_get=None, consistent_read=False, item_class=Item): """ Retrieve an existing item from the table. :type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the requested item. The type of the value must match the type defined in the schema for the table. 
:type range_key: int|long|float|str|unicode|Binary :param range_key: The optional RangeKey of the requested item. The type of the value must match the type defined in the schema for the table. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` """ return self.layer2.get_item(self, hash_key, range_key, attributes_to_get, consistent_read, item_class) lookup = get_item def has_item(self, hash_key, range_key=None, consistent_read=False): """ Checks the table to see if the Item with the specified ``hash_key`` exists. This may save a tiny bit of time/bandwidth over a straight :py:meth:`get_item` if you have no intention to touch the data that is returned, since this method specifically tells Amazon not to return anything but the Item's key. :type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the requested item. The type of the value must match the type defined in the schema for the table. :type range_key: int|long|float|str|unicode|Binary :param range_key: The optional RangeKey of the requested item. The type of the value must match the type defined in the schema for the table. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :rtype: bool :returns: ``True`` if the Item exists, ``False`` if not. """ try: # Attempt to get the key. If it can't be found, it'll raise # an exception. self.get_item(hash_key, range_key=range_key, # This minimizes the size of the response body. 
attributes_to_get=[hash_key], consistent_read=consistent_read) except dynamodb_exceptions.DynamoDBKeyNotFoundError: # Key doesn't exist. return False return True def new_item(self, hash_key=None, range_key=None, attrs=None, item_class=Item): """ Return an new, unsaved Item which can later be PUT to Amazon DynamoDB. This method has explicit (but optional) parameters for the hash_key and range_key values of the item. You can use these explicit parameters when calling the method, such as:: >>> my_item = my_table.new_item(hash_key='a', range_key=1, attrs={'key1': 'val1', 'key2': 'val2'}) >>> my_item {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'} Or, if you prefer, you can simply put the hash_key and range_key in the attrs dictionary itself, like this:: >>> attrs = {'foo': 'a', 'bar': 1, 'key1': 'val1', 'key2': 'val2'} >>> my_item = my_table.new_item(attrs=attrs) >>> my_item {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'} The effect is the same. .. note: The explicit parameters take priority over the values in the attrs dict. So, if you have a hash_key or range_key in the attrs dict and you also supply either or both using the explicit parameters, the values in the attrs will be ignored. :type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the new item. The type of the value must match the type defined in the schema for the table. :type range_key: int|long|float|str|unicode|Binary :param range_key: The optional RangeKey of the new item. The type of the value must match the type defined in the schema for the table. :type attrs: dict :param attrs: A dictionary of key value pairs used to populate the new item. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` """ return item_class(self, hash_key, range_key, attrs) def query(self, hash_key, *args, **kw): """ Perform a query on the table. 
:type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the requested item. The type of the value must match the type defined in the schema for the table. :type range_key_condition: :class:`boto.dynamodb.condition.Condition` :param range_key_condition: A Condition object. Condition object can be one of the following types: EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN The only condition which expects or will accept two values is 'BETWEEN', otherwise a single value should be passed to the Condition constructor. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yeild 100 results max. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :type scan_index_forward: bool :param scan_index_forward: Specified forward or backward traversal of the index. Default is forward (True). :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. 
:type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Query operation, even if the operation has no matching items for the assigned filter. If count is True, the actual items are not returned and the count is accessible as the ``count`` attribute of the returned object. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` """ return self.layer2.query(self, hash_key, *args, **kw) def scan(self, *args, **kw): """ Scan through this table, this is a very long and expensive operation, and should be avoided if at all possible. :type scan_filter: A dict :param scan_filter: A dictionary where the key is the attribute name and the value is a :class:`boto.dynamodb.condition.Condition` object. Valid Condition objects include: * EQ - equal (1) * NE - not equal (1) * LE - less than or equal (1) * LT - less than (1) * GE - greater than or equal (1) * GT - greater than (1) * NOT_NULL - attribute exists (0, use None) * NULL - attribute does not exist (0, use None) * CONTAINS - substring or value in list (1) * NOT_CONTAINS - absence of substring or value in list (1) * BEGINS_WITH - substring prefix (1) * IN - exact match in list (N) * BETWEEN - >= first value, <= second value (2) :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. 
For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yeild 100 results max. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. If count is True, the actual items are not returned and the count is accessible as the ``count`` attribute of the returned object. :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` :return: A TableGenerator (generator) object which will iterate over all results :rtype: :class:`boto.dynamodb.layer2.TableGenerator` """ return self.layer2.scan(self, *args, **kw) def batch_get_item(self, keys, attributes_to_get=None): """ Return a set of attributes for a multiple items from a single table using their primary keys. This abstraction removes the 100 Items per batch limitations as well as the "UnprocessedKeys" logic. :type keys: list :param keys: A list of scalar or tuple values. Each element in the list represents one Item to retrieve. If the schema for the table has both a HashKey and a RangeKey, each element in the list should be a tuple consisting of (hash_key, range_key). If the schema for the table contains only a HashKey, each element in the list should be a scalar value of the appropriate type for the table schema. NOTE: The maximum number of items that can be retrieved for a single operation is 100. Also, the number of items retrieved is constrained by a 1 MB size limit. 
:type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :return: A TableBatchGenerator (generator) object which will iterate over all results :rtype: :class:`boto.dynamodb.table.TableBatchGenerator` """ return TableBatchGenerator(self, keys, attributes_to_get)
gpl-3.0
xiaohan2012/cotrain
util.py
1
1501
def sample_from_list_and_split(items, n, rng): """Sample n examples from items using random generator rng >>> from numpy.random import RandomState >>> rng = RandomState(1) >>> items = ['a', 'b', 'c', 'd', 'e', 'f'] >>> sample_from_list_and_split(items, 2, rng) (['b', 'c'], ['a', 'd', 'e', 'f']) """ indices = rng.permutation(len(items))[:n] indices = set(indices.tolist()) sampled = [r for i, r in enumerate(items) if i in indices] remained = [r for i, r in enumerate(items) if i not in indices] return sampled, remained def show_most_informative_features(vectorizer, clf, n=20, sep="|"): """ Copied from: http://stackoverflow.com/questions/11116697/how-to-get-most-informative-features-for-scikit-learn-classifiers """ feature_names = vectorizer.get_feature_names() coefs_with_fns = sorted(zip(clf.coef_[0], feature_names)) top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1]) print "{sep}{:5}{sep}{:15}{sep}{:5}{sep}{:15}{sep}".format( 'weight', 'neg term', 'weight', 'pos term', sep=sep) for (coef_1, fn_1), (coef_2, fn_2) in top: print "{sep}{:.4f}{sep}{:15}{sep}{:.4f}{sep}{:15}{sep}".format( coef_1, fn_1, coef_2, fn_2, sep=sep) def remove_certain_tokens(tokens, certain_tokens): """ >>> remove_certain_tokens(['a', 'b', 'c'], set(['d', 'b'])) ['a', 'c'] """ return [t for t in tokens if t not in certain_tokens]
mit
IONISx/edx-platform
openedx/core/djangoapps/theming/test_util.py
23
3004
""" Test helpers for Comprehensive Theming. """ from functools import wraps import os import os.path from mock import patch from django.conf import settings from django.test.utils import override_settings import edxmako from .core import comprehensive_theme_changes def with_comp_theme(theme_dir): """ A decorator to run a test with a particular comprehensive theme. Arguments: theme_dir (str): the full path to the theme directory to use. This will likely use `settings.REPO_ROOT` to get the full path. """ # This decorator gets the settings changes needed for a theme, and applies # them using the override_settings and edxmako.paths.add_lookup context # managers. changes = comprehensive_theme_changes(theme_dir) def _decorator(func): # pylint: disable=missing-docstring @wraps(func) def _decorated(*args, **kwargs): # pylint: disable=missing-docstring with override_settings(COMP_THEME_DIR=theme_dir, **changes['settings']): with edxmako.save_lookups(): for template_dir in changes['mako_paths']: edxmako.paths.add_lookup('main', template_dir, prepend=True) return func(*args, **kwargs) return _decorated return _decorator def with_is_edx_domain(is_edx_domain): """ A decorator to run a test as if IS_EDX_DOMAIN is true or false. We are transitioning away from IS_EDX_DOMAIN and are moving toward an edX theme. This decorator changes both settings to let tests stay isolated from the details. Arguments: is_edx_domain (bool): are we an edX domain or not? """ # This is weird, it's a decorator that conditionally applies other # decorators, which is confusing. def _decorator(func): # pylint: disable=missing-docstring if is_edx_domain: # This applies @with_comp_theme to the func. func = with_comp_theme(settings.REPO_ROOT / "themes" / "edx.org")(func) # This applies @patch.dict() to the func to set IS_EDX_DOMAIN. 
func = patch.dict('django.conf.settings.FEATURES', {"IS_EDX_DOMAIN": is_edx_domain})(func) return func return _decorator def dump_theming_info(): """Dump a bunch of theming information, for debugging.""" for namespace, lookup in edxmako.LOOKUP.items(): print "--- %s: %s" % (namespace, lookup.template_args['module_directory']) for directory in lookup.directories: print " %s" % (directory,) print "=" * 80 for dirname, __, filenames in os.walk(settings.MAKO_MODULE_DIR): print "%s ----------------" % (dir,) for filename in sorted(filenames): if filename.endswith(".pyc"): continue with open(os.path.join(dirname, filename)) as f: content = len(f.read()) print " %s: %d" % (filename, content)
agpl-3.0
apporc/cinder
cinder/tests/unit/zonemanager/test_brcd_fc_san_lookup_service.py
2
6588
# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for brcd fc san lookup service.""" import mock from oslo_concurrency import processutils as putils from oslo_config import cfg from cinder import exception from cinder import ssh_utils from cinder import test from cinder.volume import configuration as conf import cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service \ as brcd_lookup from cinder.zonemanager.drivers.brocade import fc_zone_constants parsed_switch_port_wwns = ['20:1a:00:05:1e:e8:e3:29', '10:00:00:90:fa:34:40:f6'] switch_data = (""" Type Pid COS PortName NodeName TTL(sec) N 011a00; 2,3; %(port_1)s; 20:1a:00:05:1e:e8:e3:29; na FC4s: FCP PortSymb: [26] "222222 - 1:1:1 - LPe12442" NodeSymb: [32] "SomeSym 7211" Fabric Port Name: 20:1a:00:05:1e:e8:e3:29 Permanent Port Name: 22:22:00:22:ac:00:bc:b0 Port Index: 0 Share Area: No Device Shared in Other AD: No Redirect: No Partial: No LSAN: No N 010100; 2,3; %(port_2)s; 20:00:00:00:af:00:00:af; na FC4s: FCP PortSymb: [26] "333333 - 1:1:1 - LPe12442" NodeSymb: [32] "SomeSym 2222" Fabric Port Name: 10:00:00:90:fa:34:40:f6 Permanent Port Name: 22:22:00:22:ac:00:bc:b0 Port Index: 0 Share Area: No Device Shared in Other AD: No Redirect: No Partial: No LSAN: No""" % {'port_1': parsed_switch_port_wwns[0], 'port_2': parsed_switch_port_wwns[1]}) _device_map_to_verify = { 'BRCD_FAB_2': { 
'initiator_port_wwn_list': [parsed_switch_port_wwns[1].replace(':', '')], 'target_port_wwn_list': [parsed_switch_port_wwns[0].replace(':', '')]}} class TestBrcdFCSanLookupService(brcd_lookup.BrcdFCSanLookupService, test.TestCase): def setUp(self): super(TestBrcdFCSanLookupService, self).setUp() self.configuration = conf.Configuration(None) self.configuration.set_default('fc_fabric_names', 'BRCD_FAB_2', 'fc-zone-manager') self.configuration.fc_fabric_names = 'BRCD_FAB_2' self.create_configuration() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) def create_configuration(self): fc_fabric_opts = [] fc_fabric_opts.append(cfg.StrOpt('fc_fabric_address', default='10.24.49.100', help='')) fc_fabric_opts.append(cfg.StrOpt('fc_fabric_user', default='admin', help='')) fc_fabric_opts.append(cfg.StrOpt('fc_fabric_password', default='password', help='', secret=True)) fc_fabric_opts.append(cfg.PortOpt('fc_fabric_port', default=22, help='')) fc_fabric_opts.append(cfg.StrOpt('principal_switch_wwn', default='100000051e55a100', help='')) config = conf.Configuration(fc_fabric_opts, 'BRCD_FAB_2') self.fabric_configs = {'BRCD_FAB_2': config} @mock.patch.object(brcd_lookup.BrcdFCSanLookupService, 'get_nameserver_info') @mock.patch('cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service' '.ssh_utils.SSHPool') def test_get_device_mapping_from_network(self, mock_ssh_pool, get_nameserver_info_mock): initiator_list = [parsed_switch_port_wwns[1]] target_list = [parsed_switch_port_wwns[0], '20240002ac000a40'] get_nameserver_info_mock.return_value = parsed_switch_port_wwns device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictMatch(_device_map_to_verify, device_map) @mock.patch.object(brcd_lookup.BrcdFCSanLookupService, '_get_switch_data') def test_get_nameserver_info(self, get_switch_data_mock): ns_info_list = [] get_switch_data_mock.return_value = (switch_data) # get_switch_data 
will be called twice with the results appended ns_info_list_expected = (parsed_switch_port_wwns + parsed_switch_port_wwns) ns_info_list = self.get_nameserver_info(None) self.assertEqual(ns_info_list_expected, ns_info_list) @mock.patch.object(putils, 'ssh_execute', return_value=(switch_data, '')) @mock.patch.object(ssh_utils.SSHPool, 'item') def test__get_switch_data(self, ssh_pool_mock, ssh_execute_mock): actual_switch_data = self._get_switch_data(ssh_pool_mock, fc_zone_constants.NS_SHOW) self.assertEqual(actual_switch_data, switch_data) ssh_execute_mock.side_effect = putils.ProcessExecutionError() self.assertRaises(exception.FCSanLookupServiceException, self._get_switch_data, ssh_pool_mock, fc_zone_constants.NS_SHOW) def test__parse_ns_output(self): invalid_switch_data = ' N 011a00;20:1a:00:05:1e:e8:e3:29' return_wwn_list = [] return_wwn_list = self._parse_ns_output(switch_data) self.assertEqual(parsed_switch_port_wwns, return_wwn_list) self.assertRaises(exception.InvalidParameterValue, self._parse_ns_output, invalid_switch_data) def test_get_formatted_wwn(self): wwn_list = ['10008c7cff523b01'] return_wwn_list = [] expected_wwn_list = ['10:00:8c:7c:ff:52:3b:01'] return_wwn_list.append(self.get_formatted_wwn(wwn_list[0])) self.assertEqual(expected_wwn_list, return_wwn_list)
apache-2.0
milinda/samza
samza-test/src/main/python/templates.py
25
1511
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# NOTE: ``nested`` is no longer used below (it is deprecated since
# Python 2.7); the import is retained so any external code importing it
# from this module keeps working.
from contextlib import nested
from jinja2 import Template


def render_config(template_location, rendered_location, properties):
    """
    A method for rendering simple key/value configs into a template. Uses
    Jinja2 style templating.

    param: template_location -- File path of the input Jinja2 template.
    param: rendered_location -- File path where rendered output should be saved.
    param: properties -- A dictionary of key/value pairs to be passed to the
                         template with the accessor name 'properties'.
    """
    # A single ``with`` statement with two context managers replaces the
    # deprecated ``contextlib.nested``: unlike ``nested``, it guarantees the
    # template file is closed even if opening the output file raises.
    # Local names also no longer shadow the ``input`` builtin.
    with open(template_location, 'r') as template_file, \
            open(rendered_location, 'w') as rendered_file:
        template = Template(template_file.read())
        rendered_file.write(template.render(properties=properties))
apache-2.0
piffey/ansible
lib/ansible/plugins/terminal/nxos.py
8
2005
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re from ansible.plugins.terminal import TerminalBase from ansible.errors import AnsibleConnectionFailure class TerminalModule(TerminalBase): terminal_stdout_re = [ re.compile(br'[\r\n](?!\s*<)?(\x1b\S+)*[a-zA-Z_]{1}[a-zA-Z0-9-_.]*[>|#|%](?:\s*)*(\x1b\S+)*$'), re.compile(br'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-_.]*\(.+\)#(?:\s*)$') ] terminal_stderr_re = [ re.compile(br"% ?Error"), re.compile(br"^% \w+", re.M), re.compile(br"% ?Bad secret"), re.compile(br"invalid input", re.I), re.compile(br"(?:incomplete|ambiguous) command", re.I), re.compile(br"connection timed out", re.I), re.compile(br"[^\r\n]+ not found", re.I), re.compile(br"'[^']' +returned error code: ?\d+"), re.compile(br"syntax error"), re.compile(br"unknown command"), re.compile(br"user not present"), re.compile(br"invalid (.+?)at '\^' marker", re.I) ] def on_open_shell(self): try: for cmd in (b'terminal length 0', b'terminal width 511'): self._exec_cli_command(cmd) except AnsibleConnectionFailure: raise AnsibleConnectionFailure('unable to set terminal parameters')
gpl-3.0
ASMfreaK/sensorika-robot
sensorika/locator.py
2
10172
import datetime try: import ujson as json except: #print('ujson not found, using json') import json import logging import signal import threading import time import plyvel import zmq from .connector import ConnectorAsync from .tools import getLocalIp PORT = 15701 """ class ThreadedConnector(threading.Thread): def __init__(self, ip, port, params=None, database=None, *args, **kwargs): threading.Thread.__init__(self, args=args, kwargs=args) try: if params: self.params = params else: import random self.params = dict(frequency=1.0, name='none_{0}'.format(random.randint(0, 1000))) self.Estop = threading.Event() self.connector = Connector(ip, port) self.data = [] self.db = database self.name = self.params['name'] self.db.statSession(self.name) self.thread = self.start() except Exception as e: print(e) def run(self): while not self.Estop.is_set(): t0 = time.time() data = self.connector.get() t, d = data # do not store equal data canAdd = False if self.data: if self.data[-1][0] != t: canAdd = True else: canAdd = True if canAdd: if isinstance(t, float): self.data.append(([time.time()] + [t], d)) else: self.data.append(([time.time()] + t, d)) self.db.add(self.name, self.data[-1]) if len(self.data) > 100: self.data = self.data[-100:] dt = time.time() - t0 time.sleep(1 / self.params['frequency']) def stop(self): self.Estop.set() """ class ConnectorAsyncDB(): def __init__(self, ip, port, params, database): self.db = database self.params = params self.data = [] self.name = params['name'] self.db.statSession(params['name']) print("NEW connector") self.connector = ConnectorAsync(ip, port, callback=self.add) def add(self, data): print("adding data", self.params['name']) t, d = data canAdd = False if self.data: if self.data[-1][0] != t: canAdd = True else: canAdd = True if canAdd: if isinstance(t, float): self.data.append(([time.time()] + [t], d)) else: self.data.append(([time.time()] + t, d)) self.db.add(self.name, self.data[-1]) if len(self.data) > 100: self.data = self.data[-100:] 
def stop(self): self.connector.stop() class DatabaserLEVELDB(): def __init__(self): self.db = plyvel.DB('./db', create_if_missing=True) def close(self): if not self.db.closed: self.db.close() def statSession(self, name): self.db.put("session|{1}|{0}".format(name, time.time()).encode('utf8'), b'ok') def add(self, name, data): if not self.db.closed: self.db.put("{0}-{1}".format(name, time.time()).encode('utf8'), json.dumps(data).encode("utf8")) def getSessions(self, datefrom=None, dateto=None): df = b"0" dt = str(time.mktime(datetime.datetime.now().timetuple())).encode("utf8") if datefrom: df = str(time.mktime(datefrom.timetuple())).encode("utf8") if dateto: dt = str(time.mktime(dateto.timetuple())).encode("utf8") d = [] for k, v in self.db.iterator(start=b'session|' + df, stop=b'session|' + dt, reverse=True): d.append(k.decode('utf8').split('|')[1:]) d[-1][0] = str(datetime.datetime.fromtimestamp(float(d[-1][0]))) return d def getdata(self, name, datefrom=None, dateto=None, limit=1000): df = b"0" dt = str(time.mktime(datetime.datetime.now().timetuple())).encode("utf8") if datefrom: df = str(time.mktime(datefrom.timetuple())).encode("utf8") if dateto: dt = str(time.mktime(dateto.timetuple())).encode("utf8") d = [] l = 0 for k, v in self.db.iterator(start=name.encode('utf8') + b"-" + df, stop=name.encode('utf8') + b"-" + dt, reverse=True): dbtime = float(k.decode('utf8').split("-")[1]) # d.append([k.decode('utf8'), v.decode('utf8')]) data = json.loads(v.decode('utf8')) # d[-1][0]=float(d[-1][0].split("-")[1]) # d[-1][1]=json.loads(d[-1][1]) d.append(data) d[-1][0] = [dbtime] + d[-1][0] l += 1 if l >= limit: break return d class Locator(threading.Thread): def __init__(self, *a, **k): threading.Thread.__init__(self, args=a, kwargs=k) self.programs = dict() self.EStop = threading.Event() self.db = DatabaserLEVELDB() def stop(self, *p1, **p2): self.EStop.set() for k, v in self.programs.items(): v['con'].stop() self.db.close() print("STOP!") def run(self): 
logging.debug("Starting nameserver on {0}:{1}".format(getLocalIp(), str(PORT))) time.sleep(1) context = zmq.Context() socket = context.socket(zmq.REP) try: socket.bind("tcp://*:" + str(PORT)) except: self.EStop.set() logging.error("port busy") self.programs = {} counter = 0 poller = zmq.Poller() poller.register(socket, zmq.POLLIN) while not self.EStop.is_set(): data = {} logging.debug('tick') try: socks = dict(poller.poll(1000)) except: pass if socket in socks: try: data = socket.recv_json() except Exception as e: print(e) continue else: continue logging.debug('Recive') try: data['action'] except: data['action'] = 'ping' print(data) try: answer = None if data['action'] == 'register': try: data['location'] except: data['location'] = 'local' if data['name'] in self.programs.keys(): if self.programs[data['name']]['port'] != data['port']: self.programs[data['name']]['params'] = data['params'] self.programs[data['name']]['con'].stop() self.programs[data['name']]['con'] = ConnectorAsyncDB(data['ip'], data['async_port'], data['params'], self.db) else: print("MKCON") try: self.programs[data['name']] = dict(time=time.time(), params=data['params']) self.programs[data['name']]['con'] = ConnectorAsyncDB(data['ip'], data['async_port'], data['params'], self.db) except Exception as e: print(e, "AAAAAA") answer = dict(status='ok') if data['action'] == 'list': d = [] for k, v in self.programs.items(): d.append(dict(name=k, data=v['params'])) # TODO: Fix, couse` ThreadedConnector is not json answer = d if data['action'] == 'listsessions': df = None dt = None if 'df' in data.keys(): df = data['df'] if 'dt' in data.keys(): dt = data['dt'] answer = self.db.getSessions(datefrom=df, dateto=dt) if data['action'] == 'getdata': df = None dt = None limit = 10 if 'df' in data.keys(): df = data['df'] if 'dt' in data.keys(): dt = data['dt'] if 'limit' in data.keys(): limit = data['limit'] answer = self.db.getdata(name=data['name'], datefrom=df, dateto=dt, limit=limit) if data['action'] == 
'get': if 'count' not in data.keys(): data['count'] = 1 if 'name' not in data.keys(): # get all data tmp = dict() print("asd", self.programs.items()) for k, v in self.programs.items(): tmp[k] = v['con'].data[-data['count']:] answer = dict(status='ok', data=tmp) else: d = self.programs[data['name']]['con'].data[-data['count']:] answer = dict(status='ok', data=d) if data['action'] == 'ping': answer = dict(status='ok') except Exception as e: answer = dict(status='fail', text=str(e)) if answer: socket.send_json(answer) else: socket.send_json(dict(status='fail', text='wrong action')) socket.close() context.term() time.sleep(0.01) logging.debug("closing") def serve(): l = Locator() signal.signal(signal.SIGINT, l.stop) signal.signal(signal.SIGTERM, l.stop) print('Serving at {0}'.format(PORT)) l.start() if __name__ == "__main__": serve()
lgpl-3.0
sachitanandpandey/sos_spandey
sos/plugins/qpid.py
5
2541
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

from sos.plugins import Plugin, RedHatPlugin


class Qpid(Plugin, RedHatPlugin):
    """Qpid messaging
    """

    plugin_name = 'qpid'
    profiles = ('services',)

    packages = ('qpidd', 'qpid-cpp-server', 'qpid-tools')

    def setup(self):
        """ performs data collection for qpid broker """
        # BUG FIX: commas were missing after "qpid-config exchanges" and
        # "qpid-config queues", so Python's implicit string-literal
        # concatenation merged each with the following entry, producing
        # bogus commands like "qpid-config exchangesqpid-config queues".
        self.add_cmd_output([
            "qpid-stat -g",  # applies since 0.18 version
            "qpid-stat -b",  # applies to pre-0.18 versions
            "qpid-stat -c",
            "qpid-stat -e",
            "qpid-stat -q",
            "qpid-stat -u",
            "qpid-stat -m",  # applies since 0.18 version
            "qpid-config exchanges",
            "qpid-config queues",
            "qpid-config exchanges -b",  # applies to pre-0.18 versions
            "qpid-config queues -b",  # applies to pre-0.18 versions
            "qpid-config exchanges -r",  # applies since 0.18 version
            "qpid-config queues -r",  # applies since 0.18 version
            "qpid-route link list",
            "qpid-route route list",
            "qpid-cluster",  # applies to pre-0.22 versions
            "qpid-ha query",  # applies since 0.22 version
            "ls -lanR /var/lib/qpidd"
        ])

        # Configuration files and logs for qpid and its companion services
        # (sesame, cumin, corosync/openais clustering).
        self.add_copy_spec([
            "/etc/qpidd.conf",  # applies to pre-0.22 versions
            "/etc/qpid/qpidd.conf",  # applies since 0.22 version
            "/var/lib/qpid/syslog",
            "/etc/ais/openais.conf",
            "/var/log/cumin.log",
            "/var/log/mint.log",
            "/etc/sasl2/qpidd.conf",
            "/etc/qpid/qpidc.conf",
            "/etc/sesame/sesame.conf",
            "/etc/cumin/cumin.conf",
            "/etc/corosync/corosync.conf",
            "/var/lib/sesame",
            "/var/log/qpidd.log",
            "/var/log/sesame",
            "/var/log/cumin"
        ])

# vim: et ts=4 sw=4
gpl-2.0
tchernomax/ansible
test/units/modules/network/nxos/test_nxos_evpn_global.py
53
2780
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_evpn_global
from .nxos_module import TestNxosModule, load_fixture, set_module_args


# Unit tests for the nxos_evpn_global module: the device connection is fully
# mocked (get_config / load_config / get_capabilities), so each test only
# checks which CLI commands the module would push for a given desired state.
class TestNxosEvpnGlobalModule(TestNxosModule):

    module = nxos_evpn_global

    def setUp(self):
        super(TestNxosEvpnGlobalModule, self).setUp()

        # Patch the module's config I/O so no real device is contacted.
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_evpn_global.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_evpn_global.load_config')
        self.load_config = self.mock_load_config.start()

        # Pretend the connection speaks CLI (as opposed to NX-API).
        self.mock_get_capabilities = patch('ansible.modules.network.nxos.nxos_evpn_global.get_capabilities')
        self.get_capabilities = self.mock_get_capabilities.start()
        self.get_capabilities.return_value = {'network_api': 'cliconf'}

    def tearDown(self):
        super(TestNxosEvpnGlobalModule, self).tearDown()
        # Stop every patch started in setUp to avoid leaking mocks.
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_get_capabilities.stop()

    def load_fixtures(self, commands=None, device=''):
        # Loading config onto the mocked device is a no-op.
        self.load_config.return_value = None

    def start_configured(self, *args, **kwargs):
        # Run the module against a device that already has EVPN configured.
        self.get_config.return_value = load_fixture('nxos_evpn_global', 'configured.cfg')
        return self.execute_module(*args, **kwargs)

    def start_unconfigured(self, *args, **kwargs):
        # Run the module against a device with no EVPN configuration.
        self.get_config.return_value = load_fixture('nxos_evpn_global', 'unconfigured.cfg')
        return self.execute_module(*args, **kwargs)

    def test_nxos_evpn_global_enable(self):
        # Enabling on an unconfigured device must push 'nv overlay evpn'.
        set_module_args(dict(nv_overlay_evpn=True))
        commands = ['nv overlay evpn']
        self.start_unconfigured(changed=True, commands=commands)

    def test_nxos_evpn_global_disable(self):
        # Disabling on a configured device must push the 'no' form.
        set_module_args(dict(nv_overlay_evpn=False))
        commands = ['no nv overlay evpn']
        self.start_configured(changed=True, commands=commands)
gpl-3.0
Fokko/incubator-airflow
tests/contrib/sensors/test_jira_sensor_test.py
1
2859
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import unittest from unittest.mock import Mock, patch from airflow import DAG from airflow.contrib.sensors.jira_sensor import JiraTicketSensor from airflow.models import Connection from airflow.utils import db, timezone DEFAULT_DATE = timezone.datetime(2017, 1, 1) jira_client_mock = Mock( name="jira_client_for_test" ) minimal_test_ticket = { "id": "911539", "self": "https://sandbox.localhost/jira/rest/api/2/issue/911539", "key": "TEST-1226", "fields": { "labels": [ "test-label-1", "test-label-2" ], "description": "this is a test description", } } class TestJiraSensor(unittest.TestCase): def setUp(self): args = { 'owner': 'airflow', 'start_date': DEFAULT_DATE } dag = DAG('test_dag_id', default_args=args) self.dag = dag db.merge_conn( Connection( conn_id='jira_default', conn_type='jira', host='https://localhost/jira/', port=443, extra='{"verify": "False", "project": "AIRFLOW"}')) @patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True, return_value=jira_client_mock) def test_issue_label_set(self, jira_mock): jira_mock.return_value.issue.return_value = minimal_test_ticket ticket_label_sensor = JiraTicketSensor( task_id='search-ticket-test', ticket_id='TEST-1226', 
field_checker_func=TestJiraSensor.field_checker_func, timeout=518400, poke_interval=10, dag=self.dag) ticket_label_sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) self.assertTrue(jira_mock.called) self.assertTrue(jira_mock.return_value.issue.called) @staticmethod def field_checker_func(context, issue): # pylint: disable=unused-argument return "test-label-1" in issue['fields']['labels'] if __name__ == '__main__': unittest.main()
apache-2.0
Azure/azure-documentdb-python
test/session_token_unit_tests.py
1
3972
import unittest import uuid import pytest import azure.cosmos.documents as documents from azure.cosmos.vector_session_token import VectorSessionToken from azure.cosmos.errors import CosmosError @pytest.mark.usefixtures("teardown") class SessionTokenUnitTest(unittest.TestCase): """Test to ensure escaping of non-ascii characters from partition key""" def test_validate_successful_session_token_parsing(self): #valid session token session_token = "1#100#1=20#2=5#3=30" self.assertEquals(VectorSessionToken.create(session_token).convert_to_string(), "1#100#1=20#2=5#3=30") def test_validate_session_token_parsing_with_invalid_version(self): session_token = "foo#100#1=20#2=5#3=30" self.assertIsNone(VectorSessionToken.create(session_token)) def test_validate_session_token_parsing_with_invalid_global_lsn(self): session_token = "1#foo#1=20#2=5#3=30" self.assertIsNone(VectorSessionToken.create(session_token)) def test_validate_session_token_parsing_with_invalid_region_progress(self): session_token = "1#100#1=20#2=x#3=30" self.assertIsNone(VectorSessionToken.create(session_token)) def test_validate_session_token_parsing_with_invalid_format(self): session_token = "1;100#1=20#2=40" self.assertIsNone(VectorSessionToken.create(session_token)) def test_validate_session_token_parsing_from_empty_string(self): session_token = "" self.assertIsNone(VectorSessionToken.create(session_token)) def test_validate_session_token_comparison(self): #valid session token session_token1 = VectorSessionToken.create("1#100#1=20#2=5#3=30") session_token2 = VectorSessionToken.create("2#105#4=10#2=5#3=30") self.assertIsNotNone(session_token1) self.assertIsNotNone(session_token2) self.assertFalse(session_token1.equals(session_token2)) self.assertFalse(session_token2.equals(session_token1)) session_token_merged = VectorSessionToken.create("2#105#2=5#3=30#4=10") self.assertIsNotNone(session_token_merged) self.assertTrue(session_token1.merge(session_token2).equals(session_token_merged)) session_token1 = 
VectorSessionToken.create("1#100#1=20#2=5#3=30") session_token2 = VectorSessionToken.create("1#100#1=10#2=8#3=30") self.assertIsNotNone(session_token1) self.assertIsNotNone(session_token2) self.assertFalse(session_token1.equals(session_token2)) self.assertFalse(session_token2.equals(session_token1)) session_token_merged = VectorSessionToken.create("1#100#1=20#2=8#3=30") self.assertIsNotNone(session_token_merged) self.assertTrue(session_token_merged.equals(session_token1.merge(session_token2))) session_token1 = VectorSessionToken.create("1#100#1=20#2=5#3=30") session_token2 = VectorSessionToken.create("1#102#1=100#2=8#3=30") self.assertIsNotNone(session_token1) self.assertIsNotNone(session_token1) self.assertFalse(session_token1.equals(session_token2)) self.assertFalse(session_token2.equals(session_token1)) session_token_merged = VectorSessionToken.create("1#102#2=8#3=30#1=100") self.assertIsNotNone(session_token_merged) self.assertTrue(session_token_merged.equals(session_token1.merge(session_token2))) session_token1 = VectorSessionToken.create("1#101#1=20#2=5#3=30") session_token2 = VectorSessionToken.create("1#100#1=20#2=5#3=30#4=40") self.assertIsNotNone(session_token1) self.assertIsNotNone(session_token2) try: session_token1.merge(session_token2) self.fail("Region progress can not be different when version is same") except CosmosError as e: self.assertEquals(str(e), "Status Code: 500. Compared session tokens '1#101#1=20#2=5#3=30' and '1#100#1=20#2=5#3=30#4=40' have unexpected regions.")
mit
clovertrail/cloudinit-bis
cloudinit/util.py
1
73447
# vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. # Copyright (C) 2012 Yahoo! Inc. # # Author: Scott Moser <scott.moser@canonical.com> # Author: Juerg Haefliger <juerg.haefliger@hp.com> # Author: Joshua Harlow <harlowja@yahoo-inc.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import contextlib import copy as obj_copy import ctypes import email import errno import glob import grp import gzip import hashlib import json import os import os.path import platform import pwd import random import re import shutil import socket import stat import string import subprocess import sys import tempfile import time from base64 import b64decode, b64encode from six.moves.urllib import parse as urlparse import six import yaml from cloudinit import importer from cloudinit import log as logging from cloudinit import mergers from cloudinit import safeyaml from cloudinit import type_utils from cloudinit import url_helper from cloudinit import version from cloudinit.settings import (CFG_BUILTIN) try: string_types = (basestring,) except NameError: string_types = (str,) _DNS_REDIRECT_IP = None LOG = logging.getLogger(__name__) # Helps cleanup filenames to ensure they aren't FS incompatible FN_REPLACEMENTS = { os.sep: '_', } FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters) TRUE_STRINGS = ('true', '1', 'on', 'yes') FALSE_STRINGS = ('off', '0', 'no', 'false') # Helper utils to see if 
running in a container CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], ['running-in-container'], ['lxc-is-container']) PROC_CMDLINE = None _LSB_RELEASE = {} def get_architecture(target=None): out, _ = subp(['dpkg', '--print-architecture'], capture=True, target=target) return out.strip() def _lsb_release(target=None): fmap = {'Codename': 'codename', 'Description': 'description', 'Distributor ID': 'id', 'Release': 'release'} data = {} try: out, _ = subp(['lsb_release', '--all'], capture=True, target=target) for line in out.splitlines(): fname, _, val = line.partition(":") if fname in fmap: data[fmap[fname]] = val.strip() missing = [k for k in fmap.values() if k not in data] if len(missing): LOG.warn("Missing fields in lsb_release --all output: %s", ','.join(missing)) except ProcessExecutionError as err: LOG.warn("Unable to get lsb_release --all: %s", err) data = dict((v, "UNAVAILABLE") for v in fmap.values()) return data def lsb_release(target=None): if target_path(target) != "/": # do not use or update cache if target is provided return _lsb_release(target) global _LSB_RELEASE if not _LSB_RELEASE: data = _lsb_release() _LSB_RELEASE.update(data) return _LSB_RELEASE def target_path(target, path=None): # return 'path' inside target, accepting target as None if target in (None, ""): target = "/" elif not isinstance(target, string_types): raise ValueError("Unexpected input for target: %s" % target) else: target = os.path.abspath(target) # abspath("//") returns "//" specifically for 2 slashes. if target.startswith("//"): target = target[1:] if not path: return target # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /. while len(path) and path[0] == "/": path = path[1:] return os.path.join(target, path) def decode_binary(blob, encoding='utf-8'): # Converts a binary type into a text type using given encoding. 
if isinstance(blob, six.string_types): return blob return blob.decode(encoding) def encode_text(text, encoding='utf-8'): # Converts a text string into a binary type using given encoding. if isinstance(text, six.binary_type): return text return text.encode(encoding) def b64d(source): # Base64 decode some data, accepting bytes or unicode/str, and returning # str/unicode if the result is utf-8 compatible, otherwise returning bytes. decoded = b64decode(source) try: return decoded.decode('utf-8') except UnicodeDecodeError: return decoded def b64e(source): # Base64 encode some data, accepting bytes or unicode/str, and returning # str/unicode if the result is utf-8 compatible, otherwise returning bytes. if not isinstance(source, bytes): source = source.encode('utf-8') return b64encode(source).decode('utf-8') def fully_decoded_payload(part): # In Python 3, decoding the payload will ironically hand us a bytes object. # 'decode' means to decode according to Content-Transfer-Encoding, not # according to any charset in the Content-Type. So, if we end up with # bytes, first try to decode to str via CT charset, and failing that, try # utf-8 using surrogate escapes. 
cte_payload = part.get_payload(decode=True) if (six.PY3 and part.get_content_maintype() == 'text' and isinstance(cte_payload, bytes)): charset = part.get_charset() if charset and charset.input_codec: encoding = charset.input_codec else: encoding = 'utf-8' return cte_payload.decode(encoding, 'surrogateescape') return cte_payload # Path for DMI Data DMI_SYS_PATH = "/sys/class/dmi/id" # dmidecode and /sys/class/dmi/id/* use different names for the same value, # this allows us to refer to them by one canonical name DMIDECODE_TO_DMI_SYS_MAPPING = { 'baseboard-asset-tag': 'board_asset_tag', 'baseboard-manufacturer': 'board_vendor', 'baseboard-product-name': 'board_name', 'baseboard-serial-number': 'board_serial', 'baseboard-version': 'board_version', 'bios-release-date': 'bios_date', 'bios-vendor': 'bios_vendor', 'bios-version': 'bios_version', 'chassis-asset-tag': 'chassis_asset_tag', 'chassis-manufacturer': 'chassis_vendor', 'chassis-serial-number': 'chassis_serial', 'chassis-version': 'chassis_version', 'system-manufacturer': 'sys_vendor', 'system-product-name': 'product_name', 'system-serial-number': 'product_serial', 'system-uuid': 'product_uuid', 'system-version': 'product_version', } class ProcessExecutionError(IOError): MESSAGE_TMPL = ('%(description)s\n' 'Command: %(cmd)s\n' 'Exit code: %(exit_code)s\n' 'Reason: %(reason)s\n' 'Stdout: %(stdout)s\n' 'Stderr: %(stderr)s') empty_attr = '-' def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None, reason=None, errno=None): if not cmd: self.cmd = self.empty_attr else: self.cmd = cmd if not description: self.description = 'Unexpected error while running command.' 
else: self.description = description if not isinstance(exit_code, six.integer_types): self.exit_code = self.empty_attr else: self.exit_code = exit_code if not stderr: self.stderr = self.empty_attr else: self.stderr = self._indent_text(stderr) if not stdout: self.stdout = self.empty_attr else: self.stdout = self._indent_text(stdout) if reason: self.reason = reason else: self.reason = self.empty_attr self.errno = errno message = self.MESSAGE_TMPL % { 'description': self._ensure_string(self.description), 'cmd': self._ensure_string(self.cmd), 'exit_code': self._ensure_string(self.exit_code), 'stdout': self._ensure_string(self.stdout), 'stderr': self._ensure_string(self.stderr), 'reason': self._ensure_string(self.reason), } IOError.__init__(self, message) def _ensure_string(self, text): """ if data is bytes object, decode """ return text.decode() if isinstance(text, six.binary_type) else text def _indent_text(self, text, indent_level=8): """ indent text on all but the first line, allowing for easy to read output """ cr = '\n' indent = ' ' * indent_level # if input is bytes, return bytes if isinstance(text, six.binary_type): cr = cr.encode() indent = indent.encode() # remove any newlines at end of text first to prevent unneeded blank # line in output return text.rstrip(cr).replace(cr, cr + indent) class SeLinuxGuard(object): def __init__(self, path, recursive=False): # Late import since it might not always # be possible to use this try: self.selinux = importer.import_module('selinux') except ImportError: self.selinux = None self.path = path self.recursive = recursive def __enter__(self): if self.selinux and self.selinux.is_selinux_enabled(): return True else: return False def __exit__(self, excp_type, excp_value, excp_traceback): if not self.selinux or not self.selinux.is_selinux_enabled(): return if not os.path.lexists(self.path): return path = os.path.realpath(self.path) # path should be a string, not unicode if six.PY2: path = str(path) try: stats = os.lstat(path) 
self.selinux.matchpathcon(path, stats[stat.ST_MODE]) except OSError: return LOG.debug("Restoring selinux mode for %s (recursive=%s)", path, self.recursive) self.selinux.restorecon(path, recursive=self.recursive) class MountFailedError(Exception): pass class DecompressionError(Exception): pass def ExtendedTemporaryFile(**kwargs): fh = tempfile.NamedTemporaryFile(**kwargs) # Replace its unlink with a quiet version # that does not raise errors when the # file to unlink has been unlinked elsewhere.. LOG.debug("Created temporary file %s", fh.name) fh.unlink = del_file # Add a new method that will unlink # right 'now' but still lets the exit # method attempt to remove it (which will # not throw due to our del file being quiet # about files that are not there) def unlink_now(): fh.unlink(fh.name) setattr(fh, 'unlink_now', unlink_now) return fh def fork_cb(child_cb, *args, **kwargs): fid = os.fork() if fid == 0: try: child_cb(*args, **kwargs) os._exit(0) except Exception: logexc(LOG, "Failed forking and calling callback %s", type_utils.obj_name(child_cb)) os._exit(1) else: LOG.debug("Forked child %s who will run callback %s", fid, type_utils.obj_name(child_cb)) def is_true(val, addons=None): if isinstance(val, (bool)): return val is True check_set = TRUE_STRINGS if addons: check_set = list(check_set) + addons if six.text_type(val).lower().strip() in check_set: return True return False def is_false(val, addons=None): if isinstance(val, (bool)): return val is False check_set = FALSE_STRINGS if addons: check_set = list(check_set) + addons if six.text_type(val).lower().strip() in check_set: return True return False def translate_bool(val, addons=None): if not val: # This handles empty lists and false and # other things that python believes are false return False # If its already a boolean skip if isinstance(val, (bool)): return val return is_true(val, addons) def rand_str(strlen=32, select_from=None): if not select_from: select_from = string.ascii_letters + string.digits 
return "".join([random.choice(select_from) for _x in range(0, strlen)]) def rand_dict_key(dictionary, postfix=None): if not postfix: postfix = "" while True: newkey = rand_str(strlen=8) + "_" + postfix if newkey not in dictionary: break return newkey def read_conf(fname): try: return load_yaml(load_file(fname), default={}) except IOError as e: if e.errno == errno.ENOENT: return {} else: raise # Merges X lists, and then keeps the # unique ones, but orders by sort order # instead of by the original order def uniq_merge_sorted(*lists): return sorted(uniq_merge(*lists)) # Merges X lists and then iterates over those # and only keeps the unique items (order preserving) # and returns that merged and uniqued list as the # final result. # # Note: if any entry is a string it will be # split on commas and empty entries will be # evicted and merged in accordingly. def uniq_merge(*lists): combined_list = [] for a_list in lists: if isinstance(a_list, six.string_types): a_list = a_list.strip().split(",") # Kickout the empty ones a_list = [a for a in a_list if len(a)] combined_list.extend(a_list) return uniq_list(combined_list) def clean_filename(fn): for (k, v) in FN_REPLACEMENTS.items(): fn = fn.replace(k, v) removals = [] for k in fn: if k not in FN_ALLOWED: removals.append(k) for k in removals: fn = fn.replace(k, '') fn = fn.strip() return fn def decomp_gzip(data, quiet=True, decode=True): try: buf = six.BytesIO(encode_text(data)) with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh: if decode: return decode_binary(gh.read()) else: return gh.read() except Exception as e: if quiet: return data else: raise DecompressionError(six.text_type(e)) def extract_usergroup(ug_pair): if not ug_pair: return (None, None) ug_parted = ug_pair.split(':', 1) u = ug_parted[0].strip() if len(ug_parted) == 2: g = ug_parted[1].strip() else: g = None if not u or u == "-1" or u.lower() == "none": u = None if not g or g == "-1" or g.lower() == "none": g = None return (u, g) def 
find_modules(root_dir): entries = dict() for fname in glob.glob(os.path.join(root_dir, "*.py")): if not os.path.isfile(fname): continue modname = os.path.basename(fname)[0:-3] modname = modname.strip() if modname and modname.find(".") == -1: entries[fname] = modname return entries def multi_log(text, console=True, stderr=True, log=None, log_level=logging.DEBUG): if stderr: sys.stderr.write(text) if console: conpath = "/dev/console" if os.path.exists(conpath): with open(conpath, 'w') as wfh: wfh.write(text) wfh.flush() else: # A container may lack /dev/console (arguably a container bug). If # it does not exist, then write output to stdout. this will result # in duplicate stderr and stdout messages if stderr was True. # # even though upstart or systemd might have set up output to go to # /dev/console, the user may have configured elsewhere via # cloud-config 'output'. If there is /dev/console, messages will # still get there. sys.stdout.write(text) if log: if text[-1] == "\n": log.log(log_level, text[:-1]) else: log.log(log_level, text) def load_json(text, root_types=(dict,)): decoded = json.loads(decode_binary(text)) if not isinstance(decoded, tuple(root_types)): expected_types = ", ".join([str(t) for t in root_types]) raise TypeError("(%s) root types expected, got %s instead" % (expected_types, type(decoded))) return decoded def is_ipv4(instr): """determine if input string is a ipv4 address. 
return boolean.""" toks = instr.split('.') if len(toks) != 4: return False try: toks = [x for x in toks if int(x) < 256 and int(x) >= 0] except Exception: return False return len(toks) == 4 def get_cfg_option_bool(yobj, key, default=False): if key not in yobj: return default return translate_bool(yobj[key]) def get_cfg_option_str(yobj, key, default=None): if key not in yobj: return default val = yobj[key] if not isinstance(val, six.string_types): val = str(val) return val def get_cfg_option_int(yobj, key, default=0): return int(get_cfg_option_str(yobj, key, default=default)) def system_info(): return { 'platform': platform.platform(), 'release': platform.release(), 'python': platform.python_version(), 'uname': platform.uname(), 'dist': platform.linux_distribution(), } def get_cfg_option_list(yobj, key, default=None): """ Gets the C{key} config option from C{yobj} as a list of strings. If the key is present as a single string it will be returned as a list with one string arg. @param yobj: The configuration object. @param key: The configuration key to get. @param default: The default to return if key is not found. @return: The configuration option as a list of strings or default if key is not found. """ if key not in yobj: return default if yobj[key] is None: return [] val = yobj[key] if isinstance(val, (list)): cval = [v for v in val] return cval if not isinstance(val, six.string_types): val = str(val) return [val] # get a cfg entry by its path array # for f['a']['b']: get_cfg_by_path(mycfg,('a','b')) def get_cfg_by_path(yobj, keyp, default=None): cur = yobj for tok in keyp: if tok not in cur: return default cur = cur[tok] return cur def fixup_output(cfg, mode): (outfmt, errfmt) = get_output_cfg(cfg, mode) redirect_output(outfmt, errfmt) return (outfmt, errfmt) # redirect_output(outfmt, errfmt, orig_out, orig_err) # replace orig_out and orig_err with filehandles specified in outfmt or errfmt # fmt can be: # > FILEPATH # >> FILEPATH # | program [ arg1 [ arg2 [ ... 
] ] ] # # with a '|', arguments are passed to shell, so one level of # shell escape is required. # # if _CLOUD_INIT_SAVE_STDOUT is set in environment to a non empty and true # value then output input will not be closed (useful for debugging). # def redirect_output(outfmt, errfmt, o_out=None, o_err=None): if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")): LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT") return if not o_out: o_out = sys.stdout if not o_err: o_err = sys.stderr if outfmt: LOG.debug("Redirecting %s to %s", o_out, outfmt) (mode, arg) = outfmt.split(" ", 1) if mode == ">" or mode == ">>": owith = "ab" if mode == ">": owith = "wb" new_fp = open(arg, owith) elif mode == "|": proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) new_fp = proc.stdin else: raise TypeError("Invalid type for output format: %s" % outfmt) if o_out: os.dup2(new_fp.fileno(), o_out.fileno()) if errfmt == outfmt: LOG.debug("Redirecting %s to %s", o_err, outfmt) os.dup2(new_fp.fileno(), o_err.fileno()) return if errfmt: LOG.debug("Redirecting %s to %s", o_err, errfmt) (mode, arg) = errfmt.split(" ", 1) if mode == ">" or mode == ">>": owith = "ab" if mode == ">": owith = "wb" new_fp = open(arg, owith) elif mode == "|": proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) new_fp = proc.stdin else: raise TypeError("Invalid type for error format: %s" % errfmt) if o_err: os.dup2(new_fp.fileno(), o_err.fileno()) def make_url(scheme, host, port=None, path='', params='', query='', fragment=''): pieces = [] pieces.append(scheme or '') netloc = '' if host: netloc = str(host) if port is not None: netloc += ":" + "%s" % (port) pieces.append(netloc or '') pieces.append(path or '') pieces.append(params or '') pieces.append(query or '') pieces.append(fragment or '') return urlparse.urlunparse(pieces) def mergemanydict(srcs, reverse=False): if reverse: srcs = reversed(srcs) merged_cfg = {} for cfg in srcs: if cfg: # Figure out which mergers to apply... 
mergers_to_apply = mergers.dict_extract_mergers(cfg) if not mergers_to_apply: mergers_to_apply = mergers.default_mergers() merger = mergers.construct(mergers_to_apply) merged_cfg = merger.merge(merged_cfg, cfg) return merged_cfg @contextlib.contextmanager def chdir(ndir): curr = os.getcwd() try: os.chdir(ndir) yield ndir finally: os.chdir(curr) @contextlib.contextmanager def umask(n_msk): old = os.umask(n_msk) try: yield old finally: os.umask(old) @contextlib.contextmanager def tempdir(**kwargs): # This seems like it was only added in python 3.2 # Make it since its useful... # See: http://bugs.python.org/file12970/tempdir.patch tdir = tempfile.mkdtemp(**kwargs) try: yield tdir finally: del_dir(tdir) def center(text, fill, max_len): return '{0:{fill}{align}{size}}'.format(text, fill=fill, align="^", size=max_len) def del_dir(path): LOG.debug("Recursively deleting %s", path) shutil.rmtree(path) def runparts(dirp, skip_no_exist=True, exe_prefix=None): if skip_no_exist and not os.path.isdir(dirp): return failed = [] attempted = [] if exe_prefix is None: prefix = [] elif isinstance(exe_prefix, str): prefix = [str(exe_prefix)] elif isinstance(exe_prefix, list): prefix = exe_prefix else: raise TypeError("exe_prefix must be None, str, or list") for exe_name in sorted(os.listdir(dirp)): exe_path = os.path.join(dirp, exe_name) if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): attempted.append(exe_path) try: subp(prefix + [exe_path], capture=False) except ProcessExecutionError as e: logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code) failed.append(e) if failed and attempted: raise RuntimeError('Runparts: %s failures in %s attempted commands' % (len(failed), len(attempted))) # read_optional_seed # returns boolean indicating success or failure (presense of files) # if files are present, populates 'fill' dictionary with 'user-data' and # 'meta-data' entries def read_optional_seed(fill, base="", ext="", timeout=5): try: (md, ud) = read_seeded(base, ext, 
timeout) fill['user-data'] = ud fill['meta-data'] = md return True except url_helper.UrlError as e: if e.code == url_helper.NOT_FOUND: return False raise def fetch_ssl_details(paths=None): ssl_details = {} # Lookup in these locations for ssl key/cert files ssl_cert_paths = [ '/var/lib/cloud/data/ssl', '/var/lib/cloud/instance/data/ssl', ] if paths: ssl_cert_paths.extend([ os.path.join(paths.get_ipath_cur('data'), 'ssl'), os.path.join(paths.get_cpath('data'), 'ssl'), ]) ssl_cert_paths = uniq_merge(ssl_cert_paths) ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)] cert_file = None for d in ssl_cert_paths: if os.path.isfile(os.path.join(d, 'cert.pem')): cert_file = os.path.join(d, 'cert.pem') break key_file = None for d in ssl_cert_paths: if os.path.isfile(os.path.join(d, 'key.pem')): key_file = os.path.join(d, 'key.pem') break if cert_file and key_file: ssl_details['cert_file'] = cert_file ssl_details['key_file'] = key_file elif cert_file: ssl_details['cert_file'] = cert_file return ssl_details def read_file_or_url(url, timeout=5, retries=10, headers=None, data=None, sec_between=1, ssl_details=None, headers_cb=None, exception_cb=None): url = url.lstrip() if url.startswith("/"): url = "file://%s" % url if url.lower().startswith("file://"): if data: LOG.warn("Unable to post data to file resource %s", url) file_path = url[len("file://"):] try: contents = load_file(file_path, decode=False) except IOError as e: code = e.errno if e.errno == errno.ENOENT: code = url_helper.NOT_FOUND raise url_helper.UrlError(cause=e, code=code, headers=None, url=url) return url_helper.FileResponse(file_path, contents=contents) else: return url_helper.readurl(url, timeout=timeout, retries=retries, headers=headers, headers_cb=headers_cb, data=data, sec_between=sec_between, ssl_details=ssl_details, exception_cb=exception_cb) def load_yaml(blob, default=None, allowed=(dict,)): loaded = default blob = decode_binary(blob) try: LOG.debug("Attempting to load yaml from string " 
"of length %s with allowed root types %s", len(blob), allowed) converted = safeyaml.load(blob) if not isinstance(converted, allowed): # Yes this will just be caught, but thats ok for now... raise TypeError(("Yaml load allows %s root types," " but got %s instead") % (allowed, type_utils.obj_name(converted))) loaded = converted except (yaml.YAMLError, TypeError, ValueError): if len(blob) == 0: LOG.debug("load_yaml given empty string, returning default") else: logexc(LOG, "Failed loading yaml blob") return loaded def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): if base.startswith("/"): base = "file://%s" % base # default retries for file is 0. for network is 10 if base.startswith("file://"): retries = file_retries if base.find("%s") >= 0: ud_url = base % ("user-data" + ext) md_url = base % ("meta-data" + ext) else: ud_url = "%s%s%s" % (base, "user-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) md_resp = read_file_or_url(md_url, timeout, retries, file_retries) md = None if md_resp.ok(): md = load_yaml(decode_binary(md_resp.contents), default={}) ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) ud = None if ud_resp.ok(): ud = ud_resp.contents return (md, ud) def read_conf_d(confd): # Get reverse sorted list (later trumps newer) confs = sorted(os.listdir(confd), reverse=True) # Remove anything not ending in '.cfg' confs = [f for f in confs if f.endswith(".cfg")] # Remove anything not a file confs = [f for f in confs if os.path.isfile(os.path.join(confd, f))] # Load them all so that they can be merged cfgs = [] for fn in confs: cfgs.append(read_conf(os.path.join(confd, fn))) return mergemanydict(cfgs) def read_conf_with_confd(cfgfile): cfg = read_conf(cfgfile) confd = False if "conf_d" in cfg: confd = cfg['conf_d'] if confd: if not isinstance(confd, six.string_types): raise TypeError(("Config file %s contains 'conf_d' " "with non-string type %s") % (cfgfile, type_utils.obj_name(confd))) else: confd = str(confd).strip() 
elif os.path.isdir("%s.d" % cfgfile): confd = "%s.d" % cfgfile if not confd or not os.path.isdir(confd): return cfg # Conf.d settings override input configuration confd_cfg = read_conf_d(confd) return mergemanydict([confd_cfg, cfg]) def read_cc_from_cmdline(cmdline=None): # this should support reading cloud-config information from # the kernel command line. It is intended to support content of the # format: # cc: <yaml content here> [end_cc] # this would include: # cc: ssh_import_id: [smoser, kirkland]\\n # cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc # cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc if cmdline is None: cmdline = get_cmdline() tag_begin = "cc:" tag_end = "end_cc" begin_l = len(tag_begin) end_l = len(tag_end) clen = len(cmdline) tokens = [] begin = cmdline.find(tag_begin) while begin >= 0: end = cmdline.find(tag_end, begin + begin_l) if end < 0: end = clen tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n", "\n")) begin = cmdline.find(tag_begin, end + end_l) return '\n'.join(tokens) def dos2unix(contents): # find first end of line pos = contents.find('\n') if pos <= 0 or contents[pos - 1] != '\r': return contents return contents.replace('\r\n', '\n') def get_hostname_fqdn(cfg, cloud): # return the hostname and fqdn from 'cfg'. If not found in cfg, # then fall back to data from cloud if "fqdn" in cfg: # user specified a fqdn. Default hostname then is based off that fqdn = cfg['fqdn'] hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0]) else: if "hostname" in cfg and cfg['hostname'].find('.') > 0: # user specified hostname, and it had '.' in it # be nice to them. set fqdn and hostname from that fqdn = cfg['hostname'] hostname = cfg['hostname'][:fqdn.find('.')] else: # no fqdn set, get fqdn from cloud. 
# get hostname from cfg if available otherwise cloud fqdn = cloud.get_hostname(fqdn=True) if "hostname" in cfg: hostname = cfg['hostname'] else: hostname = cloud.get_hostname() return (hostname, fqdn) def get_fqdn_from_hosts(hostname, filename="/etc/hosts"): """ For each host a single line should be present with the following information: IP_address canonical_hostname [aliases...] Fields of the entry are separated by any number of blanks and/or tab characters. Text from a "#" character until the end of the line is a comment, and is ignored. Host names may contain only alphanumeric characters, minus signs ("-"), and periods ("."). They must begin with an alphabetic character and end with an alphanumeric character. Optional aliases provide for name changes, alternate spellings, shorter hostnames, or generic hostnames (for example, localhost). """ fqdn = None try: for line in load_file(filename).splitlines(): hashpos = line.find("#") if hashpos >= 0: line = line[0:hashpos] line = line.strip() if not line: continue # If there there is less than 3 entries # (IP_address, canonical_hostname, alias) # then ignore this line toks = line.split() if len(toks) < 3: continue if hostname in toks[2:]: fqdn = toks[1] break except IOError: pass return fqdn def get_cmdline_url(names=('cloud-config-url', 'url'), starts=b"#cloud-config", cmdline=None): if cmdline is None: cmdline = get_cmdline() data = keyval_str_to_dict(cmdline) url = None key = None for key in names: if key in data: url = data[key] break if not url: return (None, None, None) resp = read_file_or_url(url) # allow callers to pass starts as text when comparing to bytes contents starts = encode_text(starts) if resp.ok() and resp.contents.startswith(starts): return (key, url, resp.contents) return (key, url, None) def is_resolvable(name): """determine if a url is resolvable, return a boolean This also attempts to be resilent against dns redirection. Note, that normal nsswitch resolution is used here. 
So in order to avoid any utilization of 'search' entries in /etc/resolv.conf we have to append '.'. The top level 'invalid' domain is invalid per RFC. And example.com should also not exist. The random entry will be resolved inside the search list. """ global _DNS_REDIRECT_IP if _DNS_REDIRECT_IP is None: badips = set() badnames = ("does-not-exist.example.com.", "example.invalid.", rand_str()) badresults = {} for iname in badnames: try: result = socket.getaddrinfo(iname, None, 0, 0, socket.SOCK_STREAM, socket.AI_CANONNAME) badresults[iname] = [] for (_fam, _stype, _proto, cname, sockaddr) in result: badresults[iname].append("%s: %s" % (cname, sockaddr[0])) badips.add(sockaddr[0]) except (socket.gaierror, socket.error): pass _DNS_REDIRECT_IP = badips if badresults: LOG.debug("detected dns redirection: %s", badresults) try: result = socket.getaddrinfo(name, None) # check first result's sockaddr field addr = result[0][4][0] if addr in _DNS_REDIRECT_IP: return False return True except (socket.gaierror, socket.error): return False def get_hostname(): hostname = socket.gethostname() return hostname def gethostbyaddr(ip): try: return socket.gethostbyaddr(ip)[0] except socket.herror: return None def is_resolvable_url(url): """determine if this url is resolvable (existing or ip).""" return is_resolvable(urlparse.urlparse(url).hostname) def search_for_mirror(candidates): """ Search through a list of mirror urls for one that works This needs to return quickly. """ for cand in candidates: try: if is_resolvable_url(cand): return cand except Exception: pass return None def close_stdin(): """ reopen stdin as /dev/null so even subprocesses or other os level things get /dev/null as input. if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true value then input will not be closed (useful for debugging). 
""" if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")): return with open(os.devnull) as fp: os.dup2(fp.fileno(), sys.stdin.fileno()) def find_devs_with(criteria=None, oformat='device', tag=None, no_cache=False, path=None): """ find devices matching given criteria (via blkid) criteria can be *one* of: TYPE=<filesystem> LABEL=<label> UUID=<uuid> """ blk_id_cmd = ['blkid'] options = [] if criteria: # Search for block devices with tokens named NAME that # have the value 'value' and display any devices which are found. # Common values for NAME include TYPE, LABEL, and UUID. # If there are no devices specified on the command line, # all block devices will be searched; otherwise, # only search the devices specified by the user. options.append("-t%s" % (criteria)) if tag: # For each (specified) device, show only the tags that match tag. options.append("-s%s" % (tag)) if no_cache: # If you want to start with a clean cache # (i.e. don't report devices previously scanned # but not necessarily available at this time), specify /dev/null. options.extend(["-c", "/dev/null"]) if oformat: # Display blkid's output using the specified format. # The format parameter may be: # full, value, list, device, udev, export options.append('-o%s' % (oformat)) if path: options.append(path) cmd = blk_id_cmd + options # See man blkid for why 2 is added try: (out, _err) = subp(cmd, rcs=[0, 2]) except ProcessExecutionError as e: if e.errno == errno.ENOENT: # blkid not found... 
out = "" else: raise entries = [] for line in out.splitlines(): line = line.strip() if line: entries.append(line) return entries def peek_file(fname, max_bytes): LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes) with open(fname, 'rb') as ifh: return ifh.read(max_bytes) def uniq_list(in_list): out_list = [] for i in in_list: if i in out_list: continue else: out_list.append(i) return out_list def load_file(fname, read_cb=None, quiet=False, decode=True): LOG.debug("Reading from %s (quiet=%s)", fname, quiet) ofh = six.BytesIO() try: with open(fname, 'rb') as ifh: pipe_in_out(ifh, ofh, chunk_cb=read_cb) except IOError as e: if not quiet: raise if e.errno != errno.ENOENT: raise contents = ofh.getvalue() LOG.debug("Read %s bytes from %s", len(contents), fname) if decode: return decode_binary(contents) else: return contents def get_cmdline(): if 'DEBUG_PROC_CMDLINE' in os.environ: return os.environ["DEBUG_PROC_CMDLINE"] global PROC_CMDLINE if PROC_CMDLINE is not None: return PROC_CMDLINE if is_container(): try: contents = load_file("/proc/1/cmdline") # replace nulls with space and drop trailing null cmdline = contents.replace("\x00", " ")[:-1] except Exception as e: LOG.warn("failed reading /proc/1/cmdline: %s", e) cmdline = "" else: try: cmdline = load_file("/proc/cmdline").strip() except Exception: cmdline = "" PROC_CMDLINE = cmdline return cmdline def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None): bytes_piped = 0 while True: data = in_fh.read(chunk_size) if len(data) == 0: break else: out_fh.write(data) bytes_piped += len(data) if chunk_cb: chunk_cb(bytes_piped) out_fh.flush() return bytes_piped def chownbyid(fname, uid=None, gid=None): if uid in [None, -1] and gid in [None, -1]: # Nothing to do return LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid) os.chown(fname, uid, gid) def chownbyname(fname, user=None, group=None): uid = -1 gid = -1 try: if user: uid = pwd.getpwnam(user).pw_uid if group: gid = grp.getgrnam(group).gr_gid 
except KeyError as e: raise OSError("Unknown user or group: %s" % (e)) chownbyid(fname, uid, gid) # Always returns well formated values # cfg is expected to have an entry 'output' in it, which is a dictionary # that includes entries for 'init', 'config', 'final' or 'all' # init: /var/log/cloud.out # config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ] # final: # output: "| logger -p" # error: "> /dev/null" # this returns the specific 'mode' entry, cleanly formatted, with value def get_output_cfg(cfg, mode): ret = [None, None] if not cfg or 'output' not in cfg: return ret outcfg = cfg['output'] if mode in outcfg: modecfg = outcfg[mode] else: if 'all' not in outcfg: return ret # if there is a 'all' item in the output list # then it applies to all users of this (init, config, final) modecfg = outcfg['all'] # if value is a string, it specifies stdout and stderr if isinstance(modecfg, str): ret = [modecfg, modecfg] # if its a list, then we expect (stdout, stderr) if isinstance(modecfg, list): if len(modecfg) > 0: ret[0] = modecfg[0] if len(modecfg) > 1: ret[1] = modecfg[1] # if it is a dictionary, expect 'out' and 'error' # items, which indicate out and error if isinstance(modecfg, dict): if 'output' in modecfg: ret[0] = modecfg['output'] if 'error' in modecfg: ret[1] = modecfg['error'] # if err's entry == "&1", then make it same as stdout # as in shell syntax of "echo foo >/dev/null 2>&1" if ret[1] == "&1": ret[1] = ret[0] swlist = [">>", ">", "|"] for i in range(len(ret)): if not ret[i]: continue val = ret[i].lstrip() found = False for s in swlist: if val.startswith(s): val = "%s %s" % (s, val[len(s):].strip()) found = True break if not found: # default behavior is append val = "%s %s" % (">>", val.strip()) ret[i] = val return ret def logexc(log, msg, *args): # Setting this here allows this to change # levels easily (not always error level) # or even desirable to have that much junk # coming out to a non-debug stream if msg: log.warn(msg, *args) # 
Debug gets the full trace. However, nose has a bug whereby its # logcapture plugin doesn't properly handle the case where there is no # actual exception. To avoid tracebacks during the test suite then, we'll # do the actual exc_info extraction here, and if there is no exception in # flight, we'll just pass in None. exc_info = sys.exc_info() if exc_info == (None, None, None): exc_info = None log.debug(msg, exc_info=exc_info, *args) def hash_blob(blob, routine, mlen=None): hasher = hashlib.new(routine) hasher.update(encode_text(blob)) digest = hasher.hexdigest() # Don't get to long now if mlen is not None: return digest[0:mlen] else: return digest def is_user(name): try: if pwd.getpwnam(name): return True except KeyError: return False def is_group(name): try: if grp.getgrnam(name): return True except KeyError: return False def rename(src, dest): LOG.debug("Renaming %s to %s", src, dest) # TODO(harlowja) use a se guard here?? os.rename(src, dest) def ensure_dirs(dirlist, mode=0o755): for d in dirlist: ensure_dir(d, mode) def read_write_cmdline_url(target_fn): if not os.path.exists(target_fn): try: (key, url, content) = get_cmdline_url() except Exception: logexc(LOG, "Failed fetching command line url") return try: if key and content: write_file(target_fn, content, mode=0o600) LOG.debug(("Wrote to %s with contents of command line" " url %s (len=%s)"), target_fn, url, len(content)) elif key and not content: LOG.debug(("Command line key %s with url" " %s had no contents"), key, url) except Exception: logexc(LOG, "Failed writing url content to %s", target_fn) def yaml_dumps(obj, explicit_start=True, explicit_end=True): return yaml.safe_dump(obj, line_break="\n", indent=4, explicit_start=explicit_start, explicit_end=explicit_end, default_flow_style=False) def ensure_dir(path, mode=None): if not os.path.isdir(path): # Make the dir and adjust the mode with SeLinuxGuard(os.path.dirname(path), recursive=True): os.makedirs(path) chmod(path, mode) else: # Just adjust the mode 
chmod(path, mode) @contextlib.contextmanager def unmounter(umount): try: yield umount finally: if umount: umount_cmd = ["umount", umount] subp(umount_cmd) def mounts(): mounted = {} try: # Go through mounts to see what is already mounted if os.path.exists("/proc/mounts"): mount_locs = load_file("/proc/mounts").splitlines() method = 'proc' else: (mountoutput, _err) = subp("mount") mount_locs = mountoutput.splitlines() method = 'mount' mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$' for mpline in mount_locs: # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered) # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates) try: if method == 'proc': (dev, mp, fstype, opts, _freq, _passno) = mpline.split() else: m = re.search(mountre, mpline) dev = m.group(1) mp = m.group(2) fstype = m.group(3) opts = m.group(4) except Exception: continue # If the name of the mount point contains spaces these # can be escaped as '\040', so undo that.. mp = mp.replace("\\040", " ") mounted[dev] = { 'fstype': fstype, 'mountpoint': mp, 'opts': opts, } LOG.debug("Fetched %s mounts from %s", mounted, method) except (IOError, OSError): logexc(LOG, "Failed fetching mount points") return mounted def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): """ Mount the device, call method 'callback' passing the directory in which it was mounted, then unmount. Return whatever 'callback' returned. If data != None, also pass data to callback. mtype is a filesystem type. it may be a list, string (a single fsname) or a list of fsnames. """ if isinstance(mtype, str): mtypes = [mtype] elif isinstance(mtype, (list, tuple)): mtypes = list(mtype) elif mtype is None: mtypes = None # clean up 'mtype' input a bit based on platform. 
platsys = platform.system().lower() if platsys == "linux": if mtypes is None: mtypes = ["auto"] elif platsys.endswith("bsd"): if mtypes is None: mtypes = ['ufs', 'cd9660', 'vfat'] for index, mtype in enumerate(mtypes): if mtype == "iso9660": mtypes[index] = "cd9660" else: # we cannot do a smart "auto", so just call 'mount' once with no -t mtypes = [''] mounted = mounts() with tempdir() as tmpd: umount = False if os.path.realpath(device) in mounted: mountpoint = mounted[os.path.realpath(device)]['mountpoint'] else: failure_reason = None for mtype in mtypes: mountpoint = None try: mountcmd = ['mount'] mountopts = [] if rw: mountopts.append('rw') else: mountopts.append('ro') if sync: # This seems like the safe approach to do # (ie where this is on by default) mountopts.append("sync") if mountopts: mountcmd.extend(["-o", ",".join(mountopts)]) if mtype: mountcmd.extend(['-t', mtype]) mountcmd.append(device) mountcmd.append(tmpd) subp(mountcmd) umount = tmpd # This forces it to be unmounted (when set) mountpoint = tmpd break except (IOError, OSError) as exc: LOG.debug("Failed mount of '%s' as '%s': %s", device, mtype, exc) failure_reason = exc if not mountpoint: raise MountFailedError("Failed mounting %s to %s due to: %s" % (device, tmpd, failure_reason)) # Be nice and ensure it ends with a slash if not mountpoint.endswith("/"): mountpoint += "/" with unmounter(umount): if data is None: ret = callback(mountpoint) else: ret = callback(mountpoint, data) return ret def get_builtin_cfg(): # Deep copy so that others can't modify return obj_copy.deepcopy(CFG_BUILTIN) def is_link(path): LOG.debug("Testing if a link exists for %s", path) return os.path.islink(path) def sym_link(source, link, force=False): LOG.debug("Creating symbolic link from %r => %r", link, source) if force and os.path.exists(link): del_file(link) os.symlink(source, link) def del_file(path): LOG.debug("Attempting to remove %s", path) try: os.unlink(path) except OSError as e: if e.errno != errno.ENOENT: raise 
e def copy(src, dest): LOG.debug("Copying %s to %s", src, dest) shutil.copy(src, dest) def time_rfc2822(): try: ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime()) except Exception: ts = "??" return ts def uptime(): uptime_str = '??' method = 'unknown' try: if os.path.exists("/proc/uptime"): method = '/proc/uptime' contents = load_file("/proc/uptime") if contents: uptime_str = contents.split()[0] else: method = 'ctypes' libc = ctypes.CDLL('/lib/libc.so.7') size = ctypes.c_size_t() buf = ctypes.c_int() size.value = ctypes.sizeof(buf) libc.sysctlbyname("kern.boottime", ctypes.byref(buf), ctypes.byref(size), None, 0) now = time.time() bootup = buf.value uptime_str = now - bootup except Exception: logexc(LOG, "Unable to read uptime using method: %s" % method) return uptime_str def append_file(path, content): write_file(path, content, omode="ab", mode=None) def ensure_file(path, mode=0o644): write_file(path, content='', omode="ab", mode=mode) def safe_int(possible_int): try: return int(possible_int) except (ValueError, TypeError): return None def chmod(path, mode): real_mode = safe_int(mode) if path and real_mode: with SeLinuxGuard(path): os.chmod(path, real_mode) def write_file(filename, content, mode=0o644, omode="wb"): """ Writes a file with the given content and sets the file mode as specified. Resotres the SELinux context if possible. @param filename: The full path of the file to write. @param content: The content to write to the file. @param mode: The filesystem mode to set on the file. @param omode: The open mode used when opening the file (w, wb, a, etc.) 
""" ensure_dir(os.path.dirname(filename)) if 'b' in omode.lower(): content = encode_text(content) write_type = 'bytes' else: content = decode_binary(content) write_type = 'characters' LOG.debug("Writing to %s - %s: [%s] %s %s", filename, omode, mode, len(content), write_type) with SeLinuxGuard(path=filename): with open(filename, omode) as fh: fh.write(content) fh.flush() chmod(filename, mode) def delete_dir_contents(dirname): """ Deletes all contents of a directory without deleting the directory itself. @param dirname: The directory whose contents should be deleted. """ for node in os.listdir(dirname): node_fullpath = os.path.join(dirname, node) if os.path.isdir(node_fullpath): del_dir(node_fullpath) else: del_file(node_fullpath) def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, logstring=False, decode="replace", target=None, update_env=None): # not supported in cloud-init (yet), for now kept in the call signature # to ease maintaining code shared between cloud-init and curtin if target is not None: raise ValueError("target arg not supported by cloud-init") if rcs is None: rcs = [0] devnull_fp = None if update_env: if env is None: env = os.environ env = env.copy() env.update(update_env) try: if target_path(target) != "/": args = ['chroot', target] + list(args) if not logstring: LOG.debug(("Running command %s with allowed return codes %s" " (shell=%s, capture=%s)"), args, rcs, shell, capture) else: LOG.debug(("Running hidden command to protect sensitive " "input/output logstring: %s"), logstring) stdin = None stdout = None stderr = None if capture: stdout = subprocess.PIPE stderr = subprocess.PIPE if data is None: # using devnull assures any reads get null, rather # than possibly waiting on input. 
devnull_fp = open(os.devnull) stdin = devnull_fp else: stdin = subprocess.PIPE if not isinstance(data, bytes): data = data.encode() sp = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, env=env, shell=shell) (out, err) = sp.communicate(data) # Just ensure blank instead of none. if not out and capture: out = b'' if not err and capture: err = b'' if decode: def ldecode(data, m='utf-8'): if not isinstance(data, bytes): return data return data.decode(m, decode) out = ldecode(out) err = ldecode(err) except OSError as e: raise ProcessExecutionError(cmd=args, reason=e, errno=e.errno) finally: if devnull_fp: devnull_fp.close() rc = sp.returncode if rc not in rcs: raise ProcessExecutionError(stdout=out, stderr=err, exit_code=rc, cmd=args) return (out, err) def make_header(comment_char="#", base='created'): ci_ver = version.version_string() header = str(comment_char) header += " %s by cloud-init v. %s" % (base.title(), ci_ver) header += " on %s" % time_rfc2822() return header def abs_join(*paths): return os.path.abspath(os.path.join(*paths)) # shellify, takes a list of commands # for each entry in the list # if it is an array, shell protect it (with single ticks) # if it is a string, do nothing def shellify(cmdlist, add_header=True): content = '' if add_header: content += "#!/bin/sh\n" escaped = "%s%s%s%s" % ("'", '\\', "'", "'") cmds_made = 0 for args in cmdlist: # If the item is a list, wrap all items in single tick. # If its not, then just write it directly. 
if isinstance(args, list): fixed = [] for f in args: fixed.append("'%s'" % (six.text_type(f).replace("'", escaped))) content = "%s%s\n" % (content, ' '.join(fixed)) cmds_made += 1 elif isinstance(args, six.string_types): content = "%s%s\n" % (content, args) cmds_made += 1 else: raise RuntimeError(("Unable to shellify type %s" " which is not a list or string") % (type_utils.obj_name(args))) LOG.debug("Shellified %s commands.", cmds_made) return content def strip_prefix_suffix(line, prefix=None, suffix=None): if prefix and line.startswith(prefix): line = line[len(prefix):] if suffix and line.endswith(suffix): line = line[:-len(suffix)] return line def is_container(): """ Checks to see if this code running in a container of some sort """ for helper in CONTAINER_TESTS: try: # try to run a helper program. if it returns true/zero # then we're inside a container. otherwise, no subp(helper) return True except (IOError, OSError): pass # this code is largely from the logic in # ubuntu's /etc/init/container-detect.conf try: # Detect old-style libvirt # Detect OpenVZ containers pid1env = get_proc_env(1) if "container" in pid1env: return True if "LIBVIRT_LXC_UUID" in pid1env: return True except (IOError, OSError): pass # Detect OpenVZ containers if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"): return True try: # Detect Vserver containers lines = load_file("/proc/self/status").splitlines() for line in lines: if line.startswith("VxID:"): (_key, val) = line.strip().split(":", 1) if val != "0": return True except (IOError, OSError): pass return False def get_proc_env(pid): """ Return the environment in a dict that a given process id was started with. 
""" env = {} fn = os.path.join("/proc/", str(pid), "environ") try: contents = load_file(fn) toks = contents.split("\x00") for tok in toks: if tok == "": continue (name, val) = tok.split("=", 1) if name: env[name] = val except (IOError, OSError): pass return env def keyval_str_to_dict(kvstring): ret = {} for tok in kvstring.split(): try: (key, val) = tok.split("=", 1) except ValueError: key = tok val = True ret[key] = val return ret def is_partition(device): if device.startswith("/dev/"): device = device[5:] return os.path.isfile("/sys/class/block/%s/partition" % device) def expand_package_list(version_fmt, pkgs): # we will accept tuples, lists of tuples, or just plain lists if not isinstance(pkgs, list): pkgs = [pkgs] pkglist = [] for pkg in pkgs: if isinstance(pkg, six.string_types): pkglist.append(pkg) continue if isinstance(pkg, (tuple, list)): if len(pkg) < 1 or len(pkg) > 2: raise RuntimeError("Invalid package & version tuple.") if len(pkg) == 2 and pkg[1]: pkglist.append(version_fmt % tuple(pkg)) continue pkglist.append(pkg[0]) else: raise RuntimeError("Invalid package type.") return pkglist def parse_mount_info(path, mountinfo_lines, log=LOG): """Return the mount information for PATH given the lines from /proc/$$/mountinfo.""" path_elements = [e for e in path.split('/') if e] devpth = None fs_type = None match_mount_point = None match_mount_point_elements = None for i, line in enumerate(mountinfo_lines): parts = line.split() # Completely fail if there is anything in any line that is # unexpected, as continuing to parse past a bad line could # cause an incorrect result to be returned, so it's better # return nothing than an incorrect result. # The minimum number of elements in a valid line is 10. if len(parts) < 10: log.debug("Line %d has two few columns (%d): %s", i + 1, len(parts), line) return None mount_point = parts[4] mount_point_elements = [e for e in mount_point.split('/') if e] # Ignore mounts deeper than the path in question. 
if len(mount_point_elements) > len(path_elements): continue # Ignore mounts where the common path is not the same. x = min(len(mount_point_elements), len(path_elements)) if mount_point_elements[0:x] != path_elements[0:x]: continue # Ignore mount points higher than an already seen mount # point. if (match_mount_point_elements is not None and len(match_mount_point_elements) > len(mount_point_elements)): continue # Find the '-' which terminates a list of optional columns to # find the filesystem type and the path to the device. See # man 5 proc for the format of this file. try: i = parts.index('-') except ValueError: log.debug("Did not find column named '-' in line %d: %s", i + 1, line) return None # Get the path to the device. try: fs_type = parts[i + 1] devpth = parts[i + 2] except IndexError: log.debug("Too few columns after '-' column in line %d: %s", i + 1, line) return None match_mount_point = mount_point match_mount_point_elements = mount_point_elements if devpth and fs_type and match_mount_point: return (devpth, fs_type, match_mount_point) else: return None def parse_mtab(path): """On older kernels there's no /proc/$$/mountinfo, so use mtab.""" for line in load_file("/etc/mtab").splitlines(): devpth, mount_point, fs_type = line.split()[:3] if mount_point == path: return devpth, fs_type, mount_point return None def parse_mount(path): (mountoutput, _err) = subp("mount") mount_locs = mountoutput.splitlines() for line in mount_locs: m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line) devpth = m.group(1) mount_point = m.group(2) fs_type = m.group(3) if mount_point == path: return devpth, fs_type, mount_point return None def get_mount_info(path, log=LOG): # Use /proc/$$/mountinfo to find the device where path is mounted. # This is done because with a btrfs filesystem using os.stat(path) # does not return the ID of the device. # # Here, / has a device of 18 (decimal). 
# # $ stat / # File: '/' # Size: 234 Blocks: 0 IO Block: 4096 directory # Device: 12h/18d Inode: 256 Links: 1 # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) # Access: 2013-01-13 07:31:04.358011255 +0000 # Modify: 2013-01-13 18:48:25.930011255 +0000 # Change: 2013-01-13 18:48:25.930011255 +0000 # Birth: - # # Find where / is mounted: # # $ mount | grep ' / ' # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) # # And the device ID for /dev/vda1 is not 18: # # $ ls -l /dev/vda1 # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 # # So use /proc/$$/mountinfo to find the device underlying the # input path. mountinfo_path = '/proc/%s/mountinfo' % os.getpid() if os.path.exists(mountinfo_path): lines = load_file(mountinfo_path).splitlines() return parse_mount_info(path, lines, log) elif os.path.exists("/etc/mtab"): return parse_mtab(path) else: return parse_mount(path) def which(program): # Return path of program for execution if found in path def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) _fpath, _ = os.path.split(program) if _fpath: if is_exe(program): return program else: for path in os.environ.get("PATH", "").split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False): if args is None: args = [] if kwargs is None: kwargs = {} start = time.time() ustart = None if get_uptime: try: ustart = float(uptime()) except ValueError: pass try: ret = func(*args, **kwargs) finally: delta = time.time() - start udelta = None if ustart is not None: try: udelta = float(uptime()) - ustart except ValueError: pass tmsg = " took %0.3f seconds" % delta if get_uptime: if isinstance(udelta, (float)): tmsg += " (%0.2f)" % udelta else: tmsg += " (N/A)" try: logfunc(msg + tmsg) except Exception: pass return ret def expand_dotted_devname(dotted): toks = dotted.rsplit(".", 1) if len(toks) > 1: return toks 
else: return (dotted, None) def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep): # return a dictionary populated with keys in 'required' and 'optional' # by reading files in prefix + delim + entry if required is None: required = [] if optional is None: optional = [] missing = [] ret = {} for f in required + optional: try: ret[f] = load_file(base + delim + f, quiet=False, decode=False) except IOError as e: if e.errno != errno.ENOENT: raise if f in required: missing.append(f) if len(missing): raise ValueError("Missing required files: %s", ','.join(missing)) return ret def read_meminfo(meminfo="/proc/meminfo", raw=False): # read a /proc/meminfo style file and return # a dict with 'total', 'free', and 'available' mpliers = {'kB': 2 ** 10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30} kmap = {'MemTotal:': 'total', 'MemFree:': 'free', 'MemAvailable:': 'available'} ret = {} for line in load_file(meminfo).splitlines(): try: key, value, unit = line.split() except ValueError: key, value = line.split() unit = 'B' if raw: ret[key] = int(value) * mpliers[unit] elif key in kmap: ret[kmap[key]] = int(value) * mpliers[unit] return ret def human2bytes(size): """Convert human string or integer to size in bytes 10M => 10485760 .5G => 536870912 """ size_in = size if size.endswith("B"): size = size[:-1] mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40} num = size mplier = 'B' for m in mpliers: if size.endswith(m): mplier = m num = size[0:-len(m)] try: num = float(num) except ValueError: raise ValueError("'%s' is not valid input." 
% size_in) if num < 0: raise ValueError("'%s': cannot be negative" % size_in) return int(num * mpliers[mplier]) def _read_dmi_syspath(key): """ Reads dmi data with from /sys/class/dmi/id """ if key not in DMIDECODE_TO_DMI_SYS_MAPPING: return None mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key] dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key) LOG.debug("querying dmi data %s", dmi_key_path) try: if not os.path.exists(dmi_key_path): LOG.debug("did not find %s", dmi_key_path) return None key_data = load_file(dmi_key_path, decode=False) if not key_data: LOG.debug("%s did not return any data", dmi_key_path) return None # uninitialized dmi values show as all \xff and /sys appends a '\n'. # in that event, return a string of '.' in the same length. if key_data == b'\xff' * (len(key_data) - 1) + b'\n': key_data = b"" str_data = key_data.decode('utf8').strip() LOG.debug("dmi data %s returned %s", dmi_key_path, str_data) return str_data except Exception: logexc(LOG, "failed read of %s", dmi_key_path) return None def _call_dmidecode(key, dmidecode_path): """ Calls out to dmidecode to get the data out. This is mostly for supporting OS's without /sys/class/dmi/id support. """ try: cmd = [dmidecode_path, "--string", key] (result, _err) = subp(cmd) LOG.debug("dmidecode returned '%s' for '%s'", result, key) result = result.strip() if result.replace(".", "") == "": return "" return result except (IOError, OSError) as _err: LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err) return None def read_dmi_data(key): """ Wrapper for reading DMI data. This will do the following (returning the first that produces a result): 1) Use a mapping to translate `key` from dmidecode naming to sysfs naming and look in /sys/class/dmi/... for a value. 2) Use `key` as a sysfs key directly and look in /sys/class/dmi/... 3) Fall-back to passing `key` to `dmidecode --string`. If all of the above fail to find a value, None will be returned. 
""" syspath_value = _read_dmi_syspath(key) if syspath_value is not None: return syspath_value # running dmidecode can be problematic on some arches (LP: #1243287) uname_arch = os.uname()[4] if not (uname_arch == "x86_64" or (uname_arch.startswith("i") and uname_arch[2:] == "86") or uname_arch == 'aarch64'): LOG.debug("dmidata is not supported on %s", uname_arch) return None dmidecode_path = which('dmidecode') if dmidecode_path: return _call_dmidecode(key, dmidecode_path) LOG.warn("did not find either path %s or dmidecode command", DMI_SYS_PATH) return None def message_from_string(string): if sys.version_info[:2] < (2, 7): return email.message_from_file(six.StringIO(string)) return email.message_from_string(string) def get_installed_packages(target=None): (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True) pkgs_inst = set() for line in out.splitlines(): try: (state, pkg, _) = line.split(None, 2) except ValueError: continue if state.startswith("hi") or state.startswith("ii"): pkgs_inst.add(re.sub(":.*", "", pkg)) return pkgs_inst def system_is_snappy(): # channel.ini is configparser loadable. # snappy will move to using /etc/system-image/config.d/*.ini # this is certainly not a perfect test, but good enough for now. content = load_file("/etc/system-image/channel.ini", quiet=True) if 'ubuntu-core' in content.lower(): return True if os.path.isdir("/etc/system-image/config.d/"): return True return False
gpl-3.0
rockyzhang/zhangyanhit-python-for-android-mips
python-modules/twisted/twisted/conch/scripts/conch.py
59
18094
# -*- test-case-name: twisted.conch.test.test_conch -*- # # Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. # # $Id: conch.py,v 1.65 2004/03/11 00:29:14 z3p Exp $ #""" Implementation module for the `conch` command. #""" from twisted.conch.client import connect, default, options from twisted.conch.error import ConchError from twisted.conch.ssh import connection, common from twisted.conch.ssh import session, forwarding, channel from twisted.internet import reactor, stdio, task from twisted.python import log, usage import os, sys, getpass, struct, tty, fcntl, signal class ClientOptions(options.ConchOptions): synopsis = """Usage: conch [options] host [command] """ longdesc = ("conch is a SSHv2 client that allows logging into a remote " "machine and executing commands.") optParameters = [['escape', 'e', '~'], ['localforward', 'L', None, 'listen-port:host:port Forward local port to remote address'], ['remoteforward', 'R', None, 'listen-port:host:port Forward remote port to local address'], ] optFlags = [['null', 'n', 'Redirect input from /dev/null.'], ['fork', 'f', 'Fork to background after authentication.'], ['tty', 't', 'Tty; allocate a tty even if command is given.'], ['notty', 'T', 'Do not allocate a tty.'], ['noshell', 'N', 'Do not execute a shell or command.'], ['subsystem', 's', 'Invoke command (mandatory) as SSH2 subsystem.'], ] #zsh_altArgDescr = {"foo":"use this description for foo instead"} #zsh_multiUse = ["foo", "bar"] #zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")] #zsh_actions = {"foo":'_files -g "*.foo"', "bar":"(one two three)"} zsh_actionDescr = {"localforward":"listen-port:host:port", "remoteforward":"listen-port:host:port"} zsh_extras = ["*:command: "] localForwards = [] remoteForwards = [] def opt_escape(self, esc): "Set escape character; ``none'' = disable" if esc == 'none': self['escape'] = None elif esc[0] == '^' and len(esc) == 2: self['escape'] = chr(ord(esc[1])-64) elif len(esc) == 1: self['escape'] = 
esc else: sys.exit("Bad escape character '%s'." % esc) def opt_localforward(self, f): "Forward local port to remote address (lport:host:port)" localPort, remoteHost, remotePort = f.split(':') # doesn't do v6 yet localPort = int(localPort) remotePort = int(remotePort) self.localForwards.append((localPort, (remoteHost, remotePort))) def opt_remoteforward(self, f): """Forward remote port to local address (rport:host:port)""" remotePort, connHost, connPort = f.split(':') # doesn't do v6 yet remotePort = int(remotePort) connPort = int(connPort) self.remoteForwards.append((remotePort, (connHost, connPort))) def parseArgs(self, host, *command): self['host'] = host self['command'] = ' '.join(command) # Rest of code in "run" options = None conn = None exitStatus = 0 old = None _inRawMode = 0 _savedRawMode = None def run(): global options, old args = sys.argv[1:] if '-l' in args: # cvs is an idiot i = args.index('-l') args = args[i:i+2]+args del args[i+2:i+4] for arg in args[:]: try: i = args.index(arg) if arg[:2] == '-o' and args[i+1][0]!='-': args[i:i+2] = [] # suck on it scp except ValueError: pass options = ClientOptions() try: options.parseOptions(args) except usage.UsageError, u: print 'ERROR: %s' % u options.opt_help() sys.exit(1) if options['log']: if options['logfile']: if options['logfile'] == '-': f = sys.stdout else: f = file(options['logfile'], 'a+') else: f = sys.stderr realout = sys.stdout log.startLogging(f) sys.stdout = realout else: log.discardLogs() doConnect() fd = sys.stdin.fileno() try: old = tty.tcgetattr(fd) except: old = None try: oldUSR1 = signal.signal(signal.SIGUSR1, lambda *a: reactor.callLater(0, reConnect)) except: oldUSR1 = None try: reactor.run() finally: if old: tty.tcsetattr(fd, tty.TCSANOW, old) if oldUSR1: signal.signal(signal.SIGUSR1, oldUSR1) if (options['command'] and options['tty']) or not options['notty']: signal.signal(signal.SIGWINCH, signal.SIG_DFL) if sys.stdout.isatty() and not options['command']: print 'Connection to %s 
closed.' % options['host'] sys.exit(exitStatus) def handleError(): from twisted.python import failure global exitStatus exitStatus = 2 reactor.callLater(0.01, _stopReactor) log.err(failure.Failure()) raise def _stopReactor(): try: reactor.stop() except: pass def doConnect(): # log.deferr = handleError # HACK if '@' in options['host']: options['user'], options['host'] = options['host'].split('@',1) if not options.identitys: options.identitys = ['~/.ssh/id_rsa', '~/.ssh/id_dsa'] host = options['host'] if not options['user']: options['user'] = getpass.getuser() if not options['port']: options['port'] = 22 else: options['port'] = int(options['port']) host = options['host'] port = options['port'] vhk = default.verifyHostKey uao = default.SSHUserAuthClient(options['user'], options, SSHConnection()) connect.connect(host, port, options, vhk, uao).addErrback(_ebExit) def _ebExit(f): global exitStatus if hasattr(f.value, 'value'): s = f.value.value else: s = str(f) exitStatus = "conch: exiting with error %s" % f reactor.callLater(0.1, _stopReactor) def onConnect(): # if keyAgent and options['agent']: # cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal, conn) # cc.connectUNIX(os.environ['SSH_AUTH_SOCK']) if hasattr(conn.transport, 'sendIgnore'): _KeepAlive(conn) if options.localForwards: for localPort, hostport in options.localForwards: s = reactor.listenTCP(localPort, forwarding.SSHListenForwardingFactory(conn, hostport, SSHListenClientForwardingChannel)) conn.localForwards.append(s) if options.remoteForwards: for remotePort, hostport in options.remoteForwards: log.msg('asking for remote forwarding for %s:%s' % (remotePort, hostport)) conn.requestRemoteForwarding(remotePort, hostport) reactor.addSystemEventTrigger('before', 'shutdown', beforeShutdown) if not options['noshell'] or options['agent']: conn.openChannel(SSHSession()) if options['fork']: if os.fork(): os._exit(0) os.setsid() for i in range(3): try: os.close(i) except OSError, e: import errno if e.errno 
!= errno.EBADF: raise def reConnect(): beforeShutdown() conn.transport.transport.loseConnection() def beforeShutdown(): remoteForwards = options.remoteForwards for remotePort, hostport in remoteForwards: log.msg('cancelling %s:%s' % (remotePort, hostport)) conn.cancelRemoteForwarding(remotePort) def stopConnection(): if not options['reconnect']: reactor.callLater(0.1, _stopReactor) class _KeepAlive: def __init__(self, conn): self.conn = conn self.globalTimeout = None self.lc = task.LoopingCall(self.sendGlobal) self.lc.start(300) def sendGlobal(self): d = self.conn.sendGlobalRequest("conch-keep-alive@twistedmatrix.com", "", wantReply = 1) d.addBoth(self._cbGlobal) self.globalTimeout = reactor.callLater(30, self._ebGlobal) def _cbGlobal(self, res): if self.globalTimeout: self.globalTimeout.cancel() self.globalTimeout = None def _ebGlobal(self): if self.globalTimeout: self.globalTimeout = None self.conn.transport.loseConnection() class SSHConnection(connection.SSHConnection): def serviceStarted(self): global conn conn = self self.localForwards = [] self.remoteForwards = {} if not isinstance(self, connection.SSHConnection): # make these fall through del self.__class__.requestRemoteForwarding del self.__class__.cancelRemoteForwarding onConnect() def serviceStopped(self): lf = self.localForwards self.localForwards = [] for s in lf: s.loseConnection() stopConnection() def requestRemoteForwarding(self, remotePort, hostport): data = forwarding.packGlobal_tcpip_forward(('0.0.0.0', remotePort)) d = self.sendGlobalRequest('tcpip-forward', data, wantReply=1) log.msg('requesting remote forwarding %s:%s' %(remotePort, hostport)) d.addCallback(self._cbRemoteForwarding, remotePort, hostport) d.addErrback(self._ebRemoteForwarding, remotePort, hostport) def _cbRemoteForwarding(self, result, remotePort, hostport): log.msg('accepted remote forwarding %s:%s' % (remotePort, hostport)) self.remoteForwards[remotePort] = hostport log.msg(repr(self.remoteForwards)) def 
_ebRemoteForwarding(self, f, remotePort, hostport): log.msg('remote forwarding %s:%s failed' % (remotePort, hostport)) log.msg(f) def cancelRemoteForwarding(self, remotePort): data = forwarding.packGlobal_tcpip_forward(('0.0.0.0', remotePort)) self.sendGlobalRequest('cancel-tcpip-forward', data) log.msg('cancelling remote forwarding %s' % remotePort) try: del self.remoteForwards[remotePort] except: pass log.msg(repr(self.remoteForwards)) def channel_forwarded_tcpip(self, windowSize, maxPacket, data): log.msg('%s %s' % ('FTCP', repr(data))) remoteHP, origHP = forwarding.unpackOpen_forwarded_tcpip(data) log.msg(self.remoteForwards) log.msg(remoteHP) if self.remoteForwards.has_key(remoteHP[1]): connectHP = self.remoteForwards[remoteHP[1]] log.msg('connect forwarding %s' % (connectHP,)) return SSHConnectForwardingChannel(connectHP, remoteWindow = windowSize, remoteMaxPacket = maxPacket, conn = self) else: raise ConchError(connection.OPEN_CONNECT_FAILED, "don't know about that port") # def channel_auth_agent_openssh_com(self, windowSize, maxPacket, data): # if options['agent'] and keyAgent: # return agent.SSHAgentForwardingChannel(remoteWindow = windowSize, # remoteMaxPacket = maxPacket, # conn = self) # else: # return connection.OPEN_CONNECT_FAILED, "don't have an agent" def channelClosed(self, channel): log.msg('connection closing %s' % channel) log.msg(self.channels) if len(self.channels) == 1: # just us left log.msg('stopping connection') stopConnection() else: # because of the unix thing self.__class__.__bases__[0].channelClosed(self, channel) class SSHSession(channel.SSHChannel): name = 'session' def channelOpen(self, foo): log.msg('session %s open' % self.id) if options['agent']: d = self.conn.sendRequest(self, 'auth-agent-req@openssh.com', '', wantReply=1) d.addBoth(lambda x:log.msg(x)) if options['noshell']: return if (options['command'] and options['tty']) or not options['notty']: _enterRawMode() c = session.SSHSessionClient() if options['escape'] and not 
options['notty']: self.escapeMode = 1 c.dataReceived = self.handleInput else: c.dataReceived = self.write c.connectionLost = lambda x=None,s=self:s.sendEOF() self.stdio = stdio.StandardIO(c) fd = 0 if options['subsystem']: self.conn.sendRequest(self, 'subsystem', \ common.NS(options['command'])) elif options['command']: if options['tty']: term = os.environ['TERM'] winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678') winSize = struct.unpack('4H', winsz) ptyReqData = session.packRequest_pty_req(term, winSize, '') self.conn.sendRequest(self, 'pty-req', ptyReqData) signal.signal(signal.SIGWINCH, self._windowResized) self.conn.sendRequest(self, 'exec', \ common.NS(options['command'])) else: if not options['notty']: term = os.environ['TERM'] winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678') winSize = struct.unpack('4H', winsz) ptyReqData = session.packRequest_pty_req(term, winSize, '') self.conn.sendRequest(self, 'pty-req', ptyReqData) signal.signal(signal.SIGWINCH, self._windowResized) self.conn.sendRequest(self, 'shell', '') #if hasattr(conn.transport, 'transport'): # conn.transport.transport.setTcpNoDelay(1) def handleInput(self, char): #log.msg('handling %s' % repr(char)) if char in ('\n', '\r'): self.escapeMode = 1 self.write(char) elif self.escapeMode == 1 and char == options['escape']: self.escapeMode = 2 elif self.escapeMode == 2: self.escapeMode = 1 # so we can chain escapes together if char == '.': # disconnect log.msg('disconnecting from escape') stopConnection() return elif char == '\x1a': # ^Z, suspend def _(): _leaveRawMode() sys.stdout.flush() sys.stdin.flush() os.kill(os.getpid(), signal.SIGTSTP) _enterRawMode() reactor.callLater(0, _) return elif char == 'R': # rekey connection log.msg('rekeying connection') self.conn.transport.sendKexInit() return elif char == '#': # display connections self.stdio.write('\r\nThe following connections are open:\r\n') channels = self.conn.channels.keys() channels.sort() for channelId in channels: self.stdio.write(' #%i 
%s\r\n' % (channelId, str(self.conn.channels[channelId]))) return self.write('~' + char) else: self.escapeMode = 0 self.write(char) def dataReceived(self, data): self.stdio.write(data) def extReceived(self, t, data): if t==connection.EXTENDED_DATA_STDERR: log.msg('got %s stderr data' % len(data)) sys.stderr.write(data) def eofReceived(self): log.msg('got eof') self.stdio.loseWriteConnection() def closeReceived(self): log.msg('remote side closed %s' % self) self.conn.sendClose(self) def closed(self): global old log.msg('closed %s' % self) log.msg(repr(self.conn.channels)) def request_exit_status(self, data): global exitStatus exitStatus = int(struct.unpack('>L', data)[0]) log.msg('exit status: %s' % exitStatus) def sendEOF(self): self.conn.sendEOF(self) def stopWriting(self): self.stdio.pauseProducing() def startWriting(self): self.stdio.resumeProducing() def _windowResized(self, *args): winsz = fcntl.ioctl(0, tty.TIOCGWINSZ, '12345678') winSize = struct.unpack('4H', winsz) newSize = winSize[1], winSize[0], winSize[2], winSize[3] self.conn.sendRequest(self, 'window-change', struct.pack('!4L', *newSize)) class SSHListenClientForwardingChannel(forwarding.SSHListenClientForwardingChannel): pass class SSHConnectForwardingChannel(forwarding.SSHConnectForwardingChannel): pass def _leaveRawMode(): global _inRawMode if not _inRawMode: return fd = sys.stdin.fileno() tty.tcsetattr(fd, tty.TCSANOW, _savedMode) _inRawMode = 0 def _enterRawMode(): global _inRawMode, _savedMode if _inRawMode: return fd = sys.stdin.fileno() try: old = tty.tcgetattr(fd) new = old[:] except: log.msg('not a typewriter!') else: # iflage new[0] = new[0] | tty.IGNPAR new[0] = new[0] & ~(tty.ISTRIP | tty.INLCR | tty.IGNCR | tty.ICRNL | tty.IXON | tty.IXANY | tty.IXOFF) if hasattr(tty, 'IUCLC'): new[0] = new[0] & ~tty.IUCLC # lflag new[3] = new[3] & ~(tty.ISIG | tty.ICANON | tty.ECHO | tty.ECHO | tty.ECHOE | tty.ECHOK | tty.ECHONL) if hasattr(tty, 'IEXTEN'): new[3] = new[3] & ~tty.IEXTEN #oflag new[1] = 
new[1] & ~tty.OPOST new[6][tty.VMIN] = 1 new[6][tty.VTIME] = 0 _savedMode = old tty.tcsetattr(fd, tty.TCSANOW, new) #tty.setraw(fd) _inRawMode = 1 if __name__ == '__main__': run()
apache-2.0
preete-dixit-ck/incubator-airflow
airflow/contrib/operators/databricks_operator.py
15
11115
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import six import time from airflow.exceptions import AirflowException from airflow.contrib.hooks.databricks_hook import DatabricksHook from airflow.models import BaseOperator LINE_BREAK = ('-' * 80) class DatabricksSubmitRunOperator(BaseOperator): """ Submits an Spark job run to Databricks using the `api/2.0/jobs/runs/submit <https://docs.databricks.com/api/latest/jobs.html#runs-submit>`_ API endpoint. There are two ways to instantiate this operator. In the first way, you can take the JSON payload that you typically use to call the ``api/2.0/jobs/runs/submit`` endpoint and pass it directly to our ``DatabricksSubmitRunOperator`` through the ``json`` parameter. For example :: json = { 'new_cluster': { 'spark_version': '2.1.0-db3-scala2.11', 'num_workers': 2 }, 'notebook_task': { 'notebook_path': '/Users/airflow@example.com/PrepareData', }, } notebook_run = DatabricksSubmitRunOperator(task_id='notebook_run', json=json) Another way to accomplish the same thing is to use the named parameters of the ``DatabricksSubmitRunOperator`` directly. Note that there is exactly one named parameter for each top level parameter in the ``runs/submit`` endpoint. 
In this method, your code would look like this: :: new_cluster = { 'spark_version': '2.1.0-db3-scala2.11', 'num_workers': 2 } notebook_task = { 'notebook_path': '/Users/airflow@example.com/PrepareData', } notebook_run = DatabricksSubmitRunOperator( task_id='notebook_run', new_cluster=new_cluster, notebook_task=notebook_task) In the case where both the json parameter **AND** the named parameters are provided, they will be merged together. If there are conflicts during the merge, the named parameters will take precedence and override the top level ``json`` keys. Currently the named parameters that ``DatabricksSubmitRunOperator`` supports are - ``spark_jar_task`` - ``notebook_task`` - ``new_cluster`` - ``existing_cluster_id`` - ``libraries`` - ``run_name`` - ``timeout_seconds`` :param json: A JSON object containing API parameters which will be passed directly to the ``api/2.0/jobs/runs/submit`` endpoint. The other named parameters (i.e. ``spark_jar_task``, ``notebook_task``..) to this operator will be merged with this json dictionary if they are provided. If there are conflicts during the merge, the named parameters will take precedence and override the top level json keys. This field will be templated. .. seealso:: For more information about templating see :ref:`jinja-templating`. https://docs.databricks.com/api/latest/jobs.html#runs-submit :type json: dict :param spark_jar_task: The main class and parameters for the JAR task. Note that the actual JAR is specified in the ``libraries``. *EITHER* ``spark_jar_task`` *OR* ``notebook_task`` should be specified. This field will be templated. .. seealso:: https://docs.databricks.com/api/latest/jobs.html#jobssparkjartask :type spark_jar_task: dict :param notebook_task: The notebook path and parameters for the notebook task. *EITHER* ``spark_jar_task`` *OR* ``notebook_task`` should be specified. This field will be templated. .. 
seealso:: https://docs.databricks.com/api/latest/jobs.html#jobsnotebooktask :type notebook_task: dict :param new_cluster: Specs for a new cluster on which this task will be run. *EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified. This field will be templated. .. seealso:: https://docs.databricks.com/api/latest/jobs.html#jobsclusterspecnewcluster :type new_cluster: dict :param existing_cluster_id: ID for existing cluster on which to run this task. *EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified. This field will be templated. :type existing_cluster_id: string :param libraries: Libraries which this run will use. This field will be templated. .. seealso:: https://docs.databricks.com/api/latest/libraries.html#managedlibrarieslibrary :type libraries: list of dicts :param run_name: The run name used for this task. By default this will be set to the Airflow ``task_id``. This ``task_id`` is a required parameter of the superclass ``BaseOperator``. This field will be templated. :type run_name: string :param timeout_seconds: The timeout for this run. By default a value of 0 is used which means to have no timeout. This field will be templated. :type timeout_seconds: int32 :param databricks_conn_id: The name of the Airflow connection to use. By default and in the common case this will be ``databricks_default``. :type databricks_conn_id: string :param polling_period_seconds: Controls the rate which we poll for the result of this run. By default the operator will poll every 30 seconds. :type polling_period_seconds: int :param databricks_retry_limit: Amount of times retry if the Databricks backend is unreachable. Its value must be greater than or equal to 1. 
:type databricks_retry_limit: int """ # Used in airflow.models.BaseOperator template_fields = ('json',) # Databricks brand color (blue) under white text ui_color = '#1CB1C2' ui_fgcolor = '#fff' def __init__( self, json=None, spark_jar_task=None, notebook_task=None, new_cluster=None, existing_cluster_id=None, libraries=None, run_name=None, timeout_seconds=None, databricks_conn_id='databricks_default', polling_period_seconds=30, databricks_retry_limit=3, **kwargs): """ Creates a new ``DatabricksSubmitRunOperator``. """ super(DatabricksSubmitRunOperator, self).__init__(**kwargs) self.json = json or {} self.databricks_conn_id = databricks_conn_id self.polling_period_seconds = polling_period_seconds self.databricks_retry_limit = databricks_retry_limit if spark_jar_task is not None: self.json['spark_jar_task'] = spark_jar_task if notebook_task is not None: self.json['notebook_task'] = notebook_task if new_cluster is not None: self.json['new_cluster'] = new_cluster if existing_cluster_id is not None: self.json['existing_cluster_id'] = existing_cluster_id if libraries is not None: self.json['libraries'] = libraries if run_name is not None: self.json['run_name'] = run_name if timeout_seconds is not None: self.json['timeout_seconds'] = timeout_seconds if 'run_name' not in self.json: self.json['run_name'] = run_name or kwargs['task_id'] self.json = self._deep_string_coerce(self.json) # This variable will be used in case our task gets killed. self.run_id = None def _deep_string_coerce(self, content, json_path='json'): """ Coerces content or all values of content if it is a dict to a string. The function will throw if content contains non-string or non-numeric types. The reason why we have this function is because the ``self.json`` field must be a dict with only string values. This is because ``render_template`` will fail for numerical values. 
""" c = self._deep_string_coerce if isinstance(content, six.string_types): return content elif isinstance(content, six.integer_types+(float,)): # Databricks can tolerate either numeric or string types in the API backend. return str(content) elif isinstance(content, (list, tuple)): return [c(e, '{0}[{1}]'.format(json_path, i)) for i, e in enumerate(content)] elif isinstance(content, dict): return {k: c(v, '{0}[{1}]'.format(json_path, k)) for k, v in list(content.items())} else: param_type = type(content) msg = 'Type {0} used for parameter {1} is not a number or a string' \ .format(param_type, json_path) raise AirflowException(msg) def _log_run_page_url(self, url): logging.info('View run status, Spark UI, and logs at {}'.format(url)) def get_hook(self): return DatabricksHook( self.databricks_conn_id, retry_limit=self.databricks_retry_limit) def execute(self, context): hook = self.get_hook() self.run_id = hook.submit_run(self.json) run_page_url = hook.get_run_page_url(self.run_id) logging.info(LINE_BREAK) logging.info('Run submitted with run_id: {}'.format(self.run_id)) self._log_run_page_url(run_page_url) logging.info(LINE_BREAK) while True: run_state = hook.get_run_state(self.run_id) if run_state.is_terminal: if run_state.is_successful: logging.info('{} completed successfully.'.format( self.task_id)) self._log_run_page_url(run_page_url) return else: error_message = '{t} failed with terminal state: {s}'.format( t=self.task_id, s=run_state) raise AirflowException(error_message) else: logging.info('{t} in run state: {s}'.format(t=self.task_id, s=run_state)) self._log_run_page_url(run_page_url) logging.info('Sleeping for {} seconds.'.format( self.polling_period_seconds)) time.sleep(self.polling_period_seconds) def on_kill(self): hook = self.get_hook() hook.cancel_run(self.run_id) logging.info('Task: {t} with run_id: {r} was requested to be cancelled.'.format( t=self.task_id, r=self.run_id))
apache-2.0
moacirnq/camdroid_server_usp
config.py
1
1175
import os basedir = os.path.abspath(os.path.dirname(__file__)) class Config: SECRET_KEY = os.environ.get('SECRET_KEY') or '123456' SQLALCHEMY_COMMIT_ON_TEARDOWN = True FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]' FLASKY_MAIL_SENDER = 'Flasky Admin <camdroid_usp@gmail.com>' FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN') @staticmethod def init_app(app): pass class DevelopmentConfig(Config): DEBUG = True MAIL_SERVER = 'smtp.googlemail.com' MAIL_PORT = 587 MAIL_USE_TLS = True MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or 'camdroid.usp@gmail.com' MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or 'cam123456' SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \ 'postgres://testuser:test@127.0.0.1/test' class TestingConfig(Config): TESTING = True SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') class ProductionConfig(Config): SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') config = { 'developmet': DevelopmentConfig, 'testing': TestingConfig, 'production': ProductionConfig, 'default': DevelopmentConfig }
gpl-2.0
ProfessionalIT/professionalit-webiste
sdk/google_appengine/lib/django-1.3/django/views/generic/base.py
56
6110
from django import http from django.core.exceptions import ImproperlyConfigured from django.template import RequestContext, loader from django.template.response import TemplateResponse from django.utils.functional import update_wrapper from django.utils.log import getLogger from django.utils.decorators import classonlymethod logger = getLogger('django.request') class View(object): """ Intentionally simple parent class for all views. Only implements dispatch-by-method and simple sanity checking. """ http_method_names = ['get', 'post', 'put', 'delete', 'head', 'options', 'trace'] def __init__(self, **kwargs): """ Constructor. Called in the URLconf; can contain helpful extra keyword arguments, and other things. """ # Go through keyword arguments, and either save their values to our # instance, or raise an error. for key, value in kwargs.iteritems(): setattr(self, key, value) @classonlymethod def as_view(cls, **initkwargs): """ Main entry point for a request-response process. """ # sanitize keyword arguments for key in initkwargs: if key in cls.http_method_names: raise TypeError(u"You tried to pass in the %s method name as a " u"keyword argument to %s(). Don't do that." % (key, cls.__name__)) if not hasattr(cls, key): raise TypeError(u"%s() received an invalid keyword %r" % ( cls.__name__, key)) def view(request, *args, **kwargs): self = cls(**initkwargs) return self.dispatch(request, *args, **kwargs) # take name and docstring from class update_wrapper(view, cls, updated=()) # and possible attributes set by decorators # like csrf_exempt from dispatch update_wrapper(view, cls.dispatch, assigned=()) return view def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. Also defer to the error handler if the # request method isn't on the approved list. 
if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed self.request = request self.args = args self.kwargs = kwargs return handler(request, *args, **kwargs) def http_method_not_allowed(self, request, *args, **kwargs): allowed_methods = [m for m in self.http_method_names if hasattr(self, m)] logger.warning('Method Not Allowed (%s): %s' % (request.method, request.path), extra={ 'status_code': 405, 'request': self.request } ) return http.HttpResponseNotAllowed(allowed_methods) class TemplateResponseMixin(object): """ A mixin that can be used to render a template. """ template_name = None response_class = TemplateResponse def render_to_response(self, context, **response_kwargs): """ Returns a response with a template rendered with the given context. """ return self.response_class( request = self.request, template = self.get_template_names(), context = context, **response_kwargs ) def get_template_names(self): """ Returns a list of template names to be used for the request. Must return a list. May not be called if render_to_response is overridden. """ if self.template_name is None: raise ImproperlyConfigured( "TemplateResponseMixin requires either a definition of " "'template_name' or an implementation of 'get_template_names()'") else: return [self.template_name] class TemplateView(TemplateResponseMixin, View): """ A view that renders a template. """ def get_context_data(self, **kwargs): return { 'params': kwargs } def get(self, request, *args, **kwargs): context = self.get_context_data(**kwargs) return self.render_to_response(context) class RedirectView(View): """ A view that provides a redirect on any GET request. """ permanent = True url = None query_string = False def get_redirect_url(self, **kwargs): """ Return the URL redirect to. Keyword arguments from the URL pattern match generating the redirect request are provided as kwargs to this method. 
""" if self.url: args = self.request.META["QUERY_STRING"] if args and self.query_string: url = "%s?%s" % (self.url, args) else: url = self.url return url % kwargs else: return None def get(self, request, *args, **kwargs): url = self.get_redirect_url(**kwargs) if url: if self.permanent: return http.HttpResponsePermanentRedirect(url) else: return http.HttpResponseRedirect(url) else: logger.warning('Gone: %s' % self.request.path, extra={ 'status_code': 410, 'request': self.request }) return http.HttpResponseGone() def head(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def post(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def options(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def delete(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def put(self, request, *args, **kwargs): return self.get(request, *args, **kwargs)
lgpl-3.0
oli-kester/advanced-av-examples
amp-osc-lv2/.waf-1.8.5-3556be08f33a5066528395b11fed89fa/waflib/ConfigSet.py
2
3763
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file import copy,re,os from waflib import Logs,Utils re_imp=re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$',re.M) class ConfigSet(object): __slots__=('table','parent') def __init__(self,filename=None): self.table={} if filename: self.load(filename) def __contains__(self,key): if key in self.table:return True try:return self.parent.__contains__(key) except AttributeError:return False def keys(self): keys=set() cur=self while cur: keys.update(cur.table.keys()) cur=getattr(cur,'parent',None) keys=list(keys) keys.sort() return keys def __str__(self): return"\n".join(["%r %r"%(x,self.__getitem__(x))for x in self.keys()]) def __getitem__(self,key): try: while 1: x=self.table.get(key,None) if not x is None: return x self=self.parent except AttributeError: return[] def __setitem__(self,key,value): self.table[key]=value def __delitem__(self,key): self[key]=[] def __getattr__(self,name): if name in self.__slots__: return object.__getattr__(self,name) else: return self[name] def __setattr__(self,name,value): if name in self.__slots__: object.__setattr__(self,name,value) else: self[name]=value def __delattr__(self,name): if name in self.__slots__: object.__delattr__(self,name) else: del self[name] def derive(self): newenv=ConfigSet() newenv.parent=self return newenv def detach(self): tbl=self.get_merged_dict() try: delattr(self,'parent') except AttributeError: pass else: keys=tbl.keys() for x in keys: tbl[x]=copy.deepcopy(tbl[x]) self.table=tbl def get_flat(self,key): s=self[key] if isinstance(s,str):return s return' '.join(s) def _get_list_value_for_modification(self,key): try: value=self.table[key] except KeyError: try:value=self.parent[key] except AttributeError:value=[] if isinstance(value,list): value=value[:] else: value=[value] else: if not isinstance(value,list): value=[value] self.table[key]=value return value def 
append_value(self,var,val): if isinstance(val,str): val=[val] current_value=self._get_list_value_for_modification(var) current_value.extend(val) def prepend_value(self,var,val): if isinstance(val,str): val=[val] self.table[var]=val+self._get_list_value_for_modification(var) def append_unique(self,var,val): if isinstance(val,str): val=[val] current_value=self._get_list_value_for_modification(var) for x in val: if x not in current_value: current_value.append(x) def get_merged_dict(self): table_list=[] env=self while 1: table_list.insert(0,env.table) try:env=env.parent except AttributeError:break merged_table={} for table in table_list: merged_table.update(table) return merged_table def store(self,filename): try: os.makedirs(os.path.split(filename)[0]) except OSError: pass buf=[] merged_table=self.get_merged_dict() keys=list(merged_table.keys()) keys.sort() try: fun=ascii except NameError: fun=repr for k in keys: if k!='undo_stack': buf.append('%s = %s\n'%(k,fun(merged_table[k]))) Utils.writef(filename,''.join(buf)) def load(self,filename): tbl=self.table code=Utils.readf(filename,m='rU') for m in re_imp.finditer(code): g=m.group tbl[g(2)]=eval(g(3)) Logs.debug('env: %s'%str(self.table)) def update(self,d): for k,v in d.items(): self[k]=v def stash(self): orig=self.table tbl=self.table=self.table.copy() for x in tbl.keys(): tbl[x]=copy.deepcopy(tbl[x]) self.undo_stack=self.undo_stack+[orig] def revert(self): self.table=self.undo_stack.pop(-1)
gpl-2.0
selvakarthik21/newspaper
newspaperdemo/source/source.py
1
15524
# -*- coding: utf-8 -*- """ Source objects abstract online news source websites & domains. www.cnn.com would be its own source. """ __title__ = 'newspaper' __author__ = 'Lucas Ou-Yang' __license__ = 'MIT' __copyright__ = 'Copyright 2014, Lucas Ou-Yang' import logging from urllib.parse import urljoin, urlsplit, urlunsplit from tldextract import tldextract from . import network from . import urls from . import utils from .article import Article from .configuration import Configuration from .extractors import ContentExtractor from .settings import ANCHOR_DIRECTORY log = logging.getLogger(__name__) class Category(object): def __init__(self, url): self.url = url self.html = None self.doc = None class Feed(object): def __init__(self, url): self.url = url self.rss = None # TODO self.dom = None, speed up Feedparser class Source(object): """Sources are abstractions of online news vendors like huffpost or cnn. domain = 'www.cnn.com' scheme = 'http' categories = ['http://cnn.com/world', 'http://money.cnn.com'] feeds = ['http://cnn.com/rss.atom', ..] articles = [<article obj>, <article obj>, ..] brand = 'cnn' """ def __init__(self, url, config=None, **kwargs): """The config object for this source will be passed into all of this source's children articles unless specified otherwise or re-set. """ if (url is None) or ('://' not in url) or (url[:4] != 'http'): raise Exception('Input url is bad!') self.config = config or Configuration() self.config = utils.extend_config(self.config, kwargs) self.extractor = ContentExtractor(self.config) self.url = url self.url = urls.prepare_url(url) self.domain = urls.get_domain(self.url) self.scheme = urls.get_scheme(self.url) self.categories = [] self.feeds = [] self.articles = [] self.html = '' self.doc = None self.logo_url = '' self.favicon = '' self.brand = tldextract.extract(self.url).domain self.description = '' self.is_parsed = False self.is_downloaded = False def build(self): """Encapsulates download and basic parsing with lxml. 
May be a good idea to split this into download() and parse() methods. """ self.download() self.parse() self.set_categories() self.download_categories() # mthread self.parse_categories() self.set_feeds() self.download_feeds() # mthread # self.parse_feeds() self.generate_articles() def purge_articles(self, reason, articles): """Delete rejected articles, if there is an articles param, purge from there, otherwise purge from source instance. Reference this StackOverflow post for some of the wonky syntax below: http://stackoverflow.com/questions/1207406/remove-items-from-a- list-while-iterating-in-python """ if reason == 'url': articles[:] = [a for a in articles if a.is_valid_url()] elif reason == 'body': articles[:] = [a for a in articles if a.is_valid_body()] return articles @utils.cache_disk(seconds=(86400 * 1), cache_folder=ANCHOR_DIRECTORY) def _get_category_urls(self, domain): """The domain param is **necessary**, see .utils.cache_disk for reasons. the boilerplate method is so we can use this decorator right. We are caching categories for 1 day. 
""" return self.extractor.get_category_urls(self.url, self.doc) def set_categories(self): urls = self._get_category_urls(self.domain) self.categories = [Category(url=url) for url in urls] def set_feeds(self): """Don't need to cache getting feed urls, it's almost instant with xpath """ common_feed_urls = ['/feed', '/feeds', '/rss'] common_feed_urls = [urljoin(self.url, url) for url in common_feed_urls] split = urlsplit(self.url) if split.netloc in ('medium.com', 'www.medium.com'): # should handle URL to user or user's post if split.path.startswith('/@'): new_path = '/feed/' + split.path.split('/')[1] new_parts = split.scheme, split.netloc, new_path, '', '' common_feed_urls.append(urlunsplit(new_parts)) common_feed_urls_as_categories = [Category(url=url) for url in common_feed_urls] category_urls = [c.url for c in common_feed_urls_as_categories] requests = network.multithread_request(category_urls, self.config) for index, _ in enumerate(common_feed_urls_as_categories): response = requests[index].resp if response and response.ok: common_feed_urls_as_categories[index].html = network.get_html( response.url, response=response) common_feed_urls_as_categories = [c for c in common_feed_urls_as_categories if c.html] for _ in common_feed_urls_as_categories: doc = self.config.get_parser().fromstring(_.html) _.doc = doc common_feed_urls_as_categories = [c for c in common_feed_urls_as_categories if c.doc is not None] categories_and_common_feed_urls = self.categories + common_feed_urls_as_categories urls = self.extractor.get_feed_urls(self.url, categories_and_common_feed_urls) self.feeds = [Feed(url=url) for url in urls] def set_description(self): """Sets a blurb for this source, for now we just query the desc html attribute """ desc = self.extractor.get_meta_description(self.doc) self.description = desc def download(self): """Downloads html of source """ self.html = network.get_html(self.url, self.config) def download_categories(self): """Download all category html, can use 
mthreading """ category_urls = [c.url for c in self.categories] requests = network.multithread_request(category_urls, self.config) for index, _ in enumerate(self.categories): req = requests[index] if req.resp is not None: self.categories[index].html = network.get_html( req.url, response=req.resp) else: if self.config.verbose: print(('deleting category', self.categories[index].url, 'due to download err')) self.categories = [c for c in self.categories if c.html] def download_feeds(self): """Download all feed html, can use mthreading """ feed_urls = [f.url for f in self.feeds] requests = network.multithread_request(feed_urls, self.config) for index, _ in enumerate(self.feeds): req = requests[index] if req.resp is not None: self.feeds[index].rss = network.get_html( req.url, response=req.resp) else: if self.config.verbose: print(('deleting feed', self.categories[index].url, 'due to download err')) self.feeds = [f for f in self.feeds if f.rss] def parse(self): """Sets the lxml root, also sets lxml roots of all children links, also sets description """ # TODO: This is a terrible idea, ill try to fix it when i'm more rested self.doc = self.config.get_parser().fromstring(self.html) if self.doc is None: if self.config.verbose: print('[Source parse ERR]', self.url) return self.set_description() def parse_categories(self): """Parse out the lxml root in each category """ log.debug('We are extracting from %d categories' % len(self.categories)) for category in self.categories: doc = self.config.get_parser().fromstring(category.html) category.doc = doc self.categories = [c for c in self.categories if c.doc is not None] def _map_title_to_feed(self, feed): doc = self.config.get_parser().fromstring(feed.rss) if doc is None: # http://stackoverflow.com/a/24893800 return None elements = self.config.get_parser().getElementsByTag(doc, tag='title') feed.title = next((element.text for element in elements if element.text), self.brand) return feed def parse_feeds(self): """Add titles to feeds 
""" log.debug('We are parsing %d feeds' % len(self.feeds)) self.feeds = [self._map_title_to_feed(f) for f in self.feeds] def feeds_to_articles(self): """Returns articles given the url of a feed """ articles = [] for feed in self.feeds: urls = self.extractor.get_urls(feed.rss, regex=True) cur_articles = [] before_purge = len(urls) for url in urls: article = Article( url=url, source_url=feed.url, config=self.config) cur_articles.append(article) cur_articles = self.purge_articles('url', cur_articles) after_purge = len(cur_articles) if self.config.memoize_articles: cur_articles = utils.memoize_articles(self, cur_articles) after_memo = len(cur_articles) articles.extend(cur_articles) if self.config.verbose: print(('%d->%d->%d for %s' % (before_purge, after_purge, after_memo, feed.url))) log.debug('%d->%d->%d for %s' % (before_purge, after_purge, after_memo, feed.url)) return articles def categories_to_articles(self): """Takes the categories, splays them into a big list of urls and churns the articles out of each url with the url_to_article method """ articles = [] for category in self.categories: cur_articles = [] url_title_tups = self.extractor.get_urls(category.doc, titles=True) before_purge = len(url_title_tups) for tup in url_title_tups: indiv_url = tup[0] indiv_title = tup[1] _article = Article( url=indiv_url, source_url=category.url, title=indiv_title, config=self.config ) cur_articles.append(_article) cur_articles = self.purge_articles('url', cur_articles) after_purge = len(cur_articles) if self.config.memoize_articles: cur_articles = utils.memoize_articles(self, cur_articles) after_memo = len(cur_articles) articles.extend(cur_articles) if self.config.verbose: print(('%d->%d->%d for %s' % (before_purge, after_purge, after_memo, category.url))) log.debug('%d->%d->%d for %s' % (before_purge, after_purge, after_memo, category.url)) return articles def _generate_articles(self): """Returns a list of all articles, from both categories and feeds """ category_articles = 
self.categories_to_articles() feed_articles = self.feeds_to_articles() articles = feed_articles + category_articles uniq = {article.url: article for article in articles} return list(uniq.values()) def generate_articles(self, limit=5000): """Saves all current articles of news source, filter out bad urls """ articles = self._generate_articles() self.articles = articles[:limit] log.debug('%d articles generated and cutoff at %d', len(articles), limit) def download_articles(self, threads=1): """Downloads all articles attached to self """ # TODO fix how the article's is_downloaded is not set! urls = [a.url for a in self.articles] failed_articles = [] if threads == 1: for index, article in enumerate(self.articles): url = urls[index] html = network.get_html(url, config=self.config) self.articles[index].set_html(html) if not html: failed_articles.append(self.articles[index]) self.articles = [a for a in self.articles if a.html] else: if threads > 5: print(('Using 5+ threads on a single source ' 'may get you rate limited!')) filled_requests = network.multithread_request(urls, self.config) # Note that the responses are returned in original order for index, req in enumerate(filled_requests): html = network.get_html(req.url, response=req.resp) self.articles[index].set_html(html) if not req.resp: failed_articles.append(self.articles[index]) self.articles = [a for a in self.articles if a.html] self.is_downloaded = True if len(failed_articles) > 0: if self.config.verbose: print('[ERROR], these article urls failed the download:', [a.url for a in failed_articles]) def parse_articles(self): """Parse all articles, delete if too small """ for index, article in enumerate(self.articles): article.parse() self.articles = self.purge_articles('body', self.articles) self.is_parsed = True def size(self): """Number of articles linked to this news source """ if self.articles is None: return 0 return len(self.articles) def clean_memo_cache(self): """Clears the memoization cache for this specific 
news domain """ utils.clear_memo_cache(self) def feed_urls(self): """Returns a list of feed urls """ return [feed.url for feed in self.feeds] def category_urls(self): """Returns a list of category urls """ return [category.url for category in self.categories] def article_urls(self): """Returns a list of article urls """ return [article.url for article in self.articles] def print_summary(self): """Prints out a summary of the data in our source instance """ print('[source url]:', self.url) print('[source brand]:', self.brand) print('[source domain]:', self.domain) print('[source len(articles)]:', len(self.articles)) print('[source description[:50]]:', self.description[:50]) print('printing out 10 sample articles...') for a in self.articles[:10]: print('\t', '[url]:', a.url) print('\t[title]:', a.title) print('\t[len of text]:', len(a.text)) print('\t[keywords]:', a.keywords) print('\t[len of html]:', len(a.html)) print('\t==============') print('feed_urls:', self.feed_urls()) print('\r\n') print('category_urls:', self.category_urls())
mit
alexteodor/odoo
addons/membership/membership.py
128
27626
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp from openerp.tools.translate import _ STATE = [ ('none', 'Non Member'), ('canceled', 'Cancelled Member'), ('old', 'Old Member'), ('waiting', 'Waiting Member'), ('invoiced', 'Invoiced Member'), ('free', 'Free Member'), ('paid', 'Paid Member'), ] STATE_PRIOR = { 'none': 0, 'canceled': 1, 'old': 2, 'waiting': 3, 'invoiced': 4, 'free': 6, 'paid': 7 } class membership_line(osv.osv): '''Member line''' def _get_partners(self, cr, uid, ids, context=None): list_membership_line = [] member_line_obj = self.pool.get('membership.membership_line') for partner in self.pool.get('res.partner').browse(cr, uid, ids, context=context): if partner.member_lines: list_membership_line += member_line_obj.search(cr, uid, [('id', 'in', [ l.id for l in partner.member_lines])], context=context) return list_membership_line def _get_membership_lines(self, cr, uid, ids, context=None): list_membership_line = [] member_line_obj = self.pool.get('membership.membership_line') for invoice 
in self.pool.get('account.invoice').browse(cr, uid, ids, context=context): if invoice.invoice_line: list_membership_line += member_line_obj.search(cr, uid, [('account_invoice_line', 'in', [ l.id for l in invoice.invoice_line])], context=context) return list_membership_line def _check_membership_date(self, cr, uid, ids, context=None): """Check if membership product is not in the past @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of Membership Line IDs @param context: A standard dictionary for contextual values """ cr.execute(''' SELECT MIN(ml.date_to - ai.date_invoice) FROM membership_membership_line ml JOIN account_invoice_line ail ON ( ml.account_invoice_line = ail.id ) JOIN account_invoice ai ON ( ai.id = ail.invoice_id) WHERE ml.id IN %s''', (tuple(ids),)) res = cr.fetchall() for r in res: if r[0] and r[0] < 0: return False return True def _state(self, cr, uid, ids, name, args, context=None): """Compute the state lines @param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of Membership Line IDs @param name: Field Name @param context: A standard dictionary for contextual values @param return: Dictionary of state Value """ res = {} inv_obj = self.pool.get('account.invoice') for line in self.browse(cr, uid, ids, context=context): cr.execute(''' SELECT i.state, i.id FROM account_invoice i WHERE i.id = ( SELECT l.invoice_id FROM account_invoice_line l WHERE l.id = ( SELECT ml.account_invoice_line FROM membership_membership_line ml WHERE ml.id = %s ) ) ''', (line.id,)) fetched = cr.fetchone() if not fetched: res[line.id] = 'canceled' continue istate = fetched[0] state = 'none' if (istate == 'draft') | (istate == 'proforma'): state = 'waiting' elif istate == 'open': state = 'invoiced' elif istate == 'paid': state = 'paid' inv = inv_obj.browse(cr, uid, 
fetched[1], context=context) for payment in inv.payment_ids: if payment.invoice and payment.invoice.type == 'out_refund': state = 'canceled' elif istate == 'cancel': state = 'canceled' res[line.id] = state return res _description = __doc__ _name = 'membership.membership_line' _columns = { 'partner': fields.many2one('res.partner', 'Partner', ondelete='cascade', select=1), 'membership_id': fields.many2one('product.product', string="Membership", required=True), 'date_from': fields.date('From', readonly=True), 'date_to': fields.date('To', readonly=True), 'date_cancel': fields.date('Cancel date'), 'date': fields.date('Join Date', help="Date on which member has joined the membership"), 'member_price': fields.float('Membership Fee', digits_compute= dp.get_precision('Product Price'), required=True, help='Amount for the membership'), 'account_invoice_line': fields.many2one('account.invoice.line', 'Account Invoice line', readonly=True), 'account_invoice_id': fields.related('account_invoice_line', 'invoice_id', type='many2one', relation='account.invoice', string='Invoice', readonly=True), 'state': fields.function(_state, string='Membership Status', type='selection', selection=STATE, store = { 'account.invoice': (_get_membership_lines, ['state'], 10), 'res.partner': (_get_partners, ['membership_state'], 12), }, help="""It indicates the membership status. -Non Member: A member who has not applied for any membership. -Cancelled Member: A member who has cancelled his membership. -Old Member: A member whose membership date has expired. -Waiting Member: A member who has applied for the membership and whose invoice is going to be created. -Invoiced Member: A member whose invoice has been created. 
-Paid Member: A member who has paid the membership amount."""), 'company_id': fields.related('account_invoice_line', 'invoice_id', 'company_id', type="many2one", relation="res.company", string="Company", readonly=True, store=True) } _rec_name = 'partner' _order = 'id desc' _constraints = [ (_check_membership_date, 'Error, this membership product is out of date', []) ] class Partner(osv.osv): '''Partner''' _inherit = 'res.partner' def _get_partner_id(self, cr, uid, ids, context=None): member_line_obj = self.pool.get('membership.membership_line') res_obj = self.pool.get('res.partner') data_inv = member_line_obj.browse(cr, uid, ids, context=context) list_partner = [] for data in data_inv: list_partner.append(data.partner.id) ids2 = list_partner while ids2: ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context) list_partner += ids2 return list_partner def _get_invoice_partner(self, cr, uid, ids, context=None): inv_obj = self.pool.get('account.invoice') res_obj = self.pool.get('res.partner') data_inv = inv_obj.browse(cr, uid, ids, context=context) list_partner = [] for data in data_inv: list_partner.append(data.partner_id.id) ids2 = list_partner while ids2: ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context) list_partner += ids2 return list_partner def _membership_state(self, cr, uid, ids, name, args, context=None): """This Function return Membership State For Given Partner. 
@param self: The object pointer @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of Partner IDs @param name: Field Name @param context: A standard dictionary for contextual values @param return: Dictionary of Membership state Value """ res = {} for id in ids: res[id] = 'none' today = time.strftime('%Y-%m-%d') for id in ids: partner_data = self.browse(cr, uid, id, context=context) if partner_data.membership_cancel and today > partner_data.membership_cancel: res[id] = 'canceled' continue if partner_data.membership_stop and today > partner_data.membership_stop: res[id] = 'old' continue s = 4 if partner_data.member_lines: for mline in partner_data.member_lines: if mline.date_to >= today: if mline.account_invoice_line and mline.account_invoice_line.invoice_id: mstate = mline.account_invoice_line.invoice_id.state if mstate == 'paid': s = 0 inv = mline.account_invoice_line.invoice_id for payment in inv.payment_ids: if payment.invoice.type == 'out_refund': s = 2 break elif mstate == 'open' and s!=0: s = 1 elif mstate == 'cancel' and s!=0 and s!=1: s = 2 elif (mstate == 'draft' or mstate == 'proforma') and s!=0 and s!=1: s = 3 if s==4: for mline in partner_data.member_lines: if mline.date_from < today and mline.date_to < today and mline.date_from <= mline.date_to and (mline.account_invoice_line and mline.account_invoice_line.invoice_id.state) == 'paid': s = 5 else: s = 6 if s==0: res[id] = 'paid' elif s==1: res[id] = 'invoiced' elif s==2: res[id] = 'canceled' elif s==3: res[id] = 'waiting' elif s==5: res[id] = 'old' elif s==6: res[id] = 'none' if partner_data.free_member and s!=0: res[id] = 'free' if partner_data.associate_member: res_state = self._membership_state(cr, uid, [partner_data.associate_member.id], name, args, context=context) res[id] = res_state[partner_data.associate_member.id] return res def _membership_date(self, cr, uid, ids, name, args, context=None): """Return date of 
membership""" name = name[0] res = {} member_line_obj = self.pool.get('membership.membership_line') for partner in self.browse(cr, uid, ids, context=context): if partner.associate_member: partner_id = partner.associate_member.id else: partner_id = partner.id res[partner.id] = { 'membership_start': False, 'membership_stop': False, 'membership_cancel': False } if name == 'membership_start': line_id = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)], limit=1, order='date_from', context=context) if line_id: res[partner.id]['membership_start'] = member_line_obj.read(cr, uid, [line_id[0]], ['date_from'], context=context)[0]['date_from'] if name == 'membership_stop': line_id1 = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)], limit=1, order='date_to desc', context=context) if line_id1: res[partner.id]['membership_stop'] = member_line_obj.read(cr, uid, [line_id1[0]], ['date_to'], context=context)[0]['date_to'] if name == 'membership_cancel': if partner.membership_state == 'canceled': line_id2 = member_line_obj.search(cr, uid, [('partner', '=', partner.id)], limit=1, order='date_cancel', context=context) if line_id2: res[partner.id]['membership_cancel'] = member_line_obj.read(cr, uid, [line_id2[0]], ['date_cancel'], context=context)[0]['date_cancel'] return res def _get_partners(self, cr, uid, ids, context=None): ids2 = ids while ids2: ids2 = self.search(cr, uid, [('associate_member', 'in', ids2)], context=context) ids += ids2 return ids def __get_membership_state(self, *args, **kwargs): return self._membership_state(*args, **kwargs) _columns = { 'associate_member': fields.many2one('res.partner', 'Associate Member',help="A member with whom you want to associate your membership.It will consider the membership state of the associated member."), 'member_lines': fields.one2many('membership.membership_line', 'partner', 'Membership'), 'free_member': fields.boolean('Free Member', help = "Select if you 
want to give free membership."), 'membership_amount': fields.float( 'Membership Amount', digits=(16, 2), help = 'The price negotiated by the partner'), 'membership_state': fields.function( __get_membership_state, string = 'Current Membership Status', type = 'selection', selection = STATE, store = { 'account.invoice': (_get_invoice_partner, ['state'], 10), 'membership.membership_line': (_get_partner_id, ['state'], 10), 'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10) }, help='It indicates the membership state.\n' '-Non Member: A partner who has not applied for any membership.\n' '-Cancelled Member: A member who has cancelled his membership.\n' '-Old Member: A member whose membership date has expired.\n' '-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.\n' '-Invoiced Member: A member whose invoice has been created.\n' '-Paying member: A member who has paid the membership fee.'), 'membership_start': fields.function( _membership_date, multi = 'membeship_start', string = 'Membership Start Date', type = 'date', store = { 'account.invoice': (_get_invoice_partner, ['state'], 10), 'membership.membership_line': (_get_partner_id, ['state'], 10, ), 'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['free_member'], 10) }, help="Date from which membership becomes active."), 'membership_stop': fields.function( _membership_date, string = 'Membership End Date', type='date', multi='membership_stop', store = { 'account.invoice': (_get_invoice_partner, ['state'], 10), 'membership.membership_line': (_get_partner_id, ['state'], 10), 'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['free_member'], 10) }, help="Date until which membership remains active."), 'membership_cancel': fields.function( _membership_date, string = 'Cancel Membership Date', type='date', multi='membership_cancel', store = { 'account.invoice': (_get_invoice_partner, ['state'], 11), 'membership.membership_line': 
(_get_partner_id, ['state'], 10), 'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['free_member'], 10) }, help="Date on which membership has been cancelled"), } _defaults = { 'free_member': False, 'membership_cancel': False, } def _check_recursion(self, cr, uid, ids, context=None): """Check Recursive for Associated Members. """ level = 100 while len(ids): cr.execute('SELECT DISTINCT associate_member FROM res_partner WHERE id IN %s', (tuple(ids),)) ids = filter(None, map(lambda x:x[0], cr.fetchall())) if not level: return False level -= 1 return True _constraints = [ (_check_recursion, 'Error ! You cannot create recursive associated members.', ['associate_member']) ] def create_membership_invoice(self, cr, uid, ids, product_id=None, datas=None, context=None): """ Create Customer Invoice of Membership for partners. @param datas: datas has dictionary value which consist Id of Membership product and Cost Amount of Membership. datas = {'membership_product_id': None, 'amount': None} """ invoice_obj = self.pool.get('account.invoice') invoice_line_obj = self.pool.get('account.invoice.line') invoice_tax_obj = self.pool.get('account.invoice.tax') product_id = product_id or datas.get('membership_product_id', False) amount = datas.get('amount', 0.0) invoice_list = [] if type(ids) in (int, long,): ids = [ids] for partner in self.browse(cr, uid, ids, context=context): account_id = partner.property_account_receivable and partner.property_account_receivable.id or False fpos_id = partner.property_account_position and partner.property_account_position.id or False addr = self.address_get(cr, uid, [partner.id], ['invoice']) if partner.free_member: raise osv.except_osv(_('Error!'), _("Partner is a free Member.")) if not addr.get('invoice', False): raise osv.except_osv(_('Error!'), _("Partner doesn't have an address to make the invoice.")) quantity = 1 line_value = { 'product_id': product_id, } line_dict = invoice_line_obj.product_id_change(cr, uid, {}, product_id, False, 
quantity, '', 'out_invoice', partner.id, fpos_id, price_unit=amount, context=context) line_value.update(line_dict['value']) line_value['price_unit'] = amount if line_value.get('invoice_line_tax_id', False): tax_tab = [(6, 0, line_value['invoice_line_tax_id'])] line_value['invoice_line_tax_id'] = tax_tab invoice_id = invoice_obj.create(cr, uid, { 'partner_id': partner.id, 'account_id': account_id, 'fiscal_position': fpos_id or False }, context=context) line_value['invoice_id'] = invoice_id invoice_line_id = invoice_line_obj.create(cr, uid, line_value, context=context) invoice_obj.write(cr, uid, invoice_id, {'invoice_line': [(6, 0, [invoice_line_id])]}, context=context) invoice_list.append(invoice_id) if line_value['invoice_line_tax_id']: tax_value = invoice_tax_obj.compute(cr, uid, invoice_id).values() for tax in tax_value: invoice_tax_obj.create(cr, uid, tax, context=context) #recompute the membership_state of those partners self.pool.get('res.partner').write(cr, uid, ids, {}) return invoice_list class Product(osv.osv): def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): model_obj = self.pool.get('ir.model.data') if context is None: context = {} if ('product' in context) and (context['product']=='membership_product'): model_data_ids_form = model_obj.search(cr, user, [('model','=','ir.ui.view'), ('name', 'in', ['membership_products_form', 'membership_products_tree'])], context=context) resource_id_form = model_obj.read(cr, user, model_data_ids_form, fields=['res_id', 'name'], context=context) dict_model = {} for i in resource_id_form: dict_model[i['name']] = i['res_id'] if view_type == 'form': view_id = dict_model['membership_products_form'] else: view_id = dict_model['membership_products_tree'] return super(Product,self).fields_view_get(cr, user, view_id, view_type, context, toolbar, submenu) '''Product''' _inherit = 'product.template' _columns = { 'membership': fields.boolean('Membership', help='Check if 
the product is eligible for membership.'), 'membership_date_from': fields.date('Membership Start Date', help='Date from which membership becomes active.'), 'membership_date_to': fields.date('Membership End Date', help='Date until which membership remains active.'), } _sql_constraints = [('membership_date_greater','check(membership_date_to >= membership_date_from)','Error ! Ending Date cannot be set before Beginning Date.')] _defaults = { 'membership': False, } class Invoice(osv.osv): '''Invoice''' _inherit = 'account.invoice' def action_cancel(self, cr, uid, ids, context=None): '''Create a 'date_cancel' on the membership_line object''' member_line_obj = self.pool.get('membership.membership_line') today = time.strftime('%Y-%m-%d') for invoice in self.browse(cr, uid, ids, context=context): mlines = member_line_obj.search(cr, uid, [('account_invoice_line', 'in', [l.id for l in invoice.invoice_line])]) member_line_obj.write(cr, uid, mlines, {'date_cancel': today}) return super(Invoice, self).action_cancel(cr, uid, ids, context=context) class account_invoice_line(osv.osv): _inherit='account.invoice.line' def write(self, cr, uid, ids, vals, context=None): """Overrides orm write method """ member_line_obj = self.pool.get('membership.membership_line') res = super(account_invoice_line, self).write(cr, uid, ids, vals, context=context) for line in self.browse(cr, uid, ids, context=context): if line.invoice_id.type == 'out_invoice': ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context) if line.product_id and line.product_id.membership and not ml_ids: # Product line has changed to a membership product date_from = line.product_id.membership_date_from date_to = line.product_id.membership_date_to if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to: date_from = line.invoice_id.date_invoice member_line_obj.create(cr, uid, { 'partner': line.invoice_id.partner_id.id, 'membership_id': line.product_id.id, 
'member_price': line.price_unit, 'date': time.strftime('%Y-%m-%d'), 'date_from': date_from, 'date_to': date_to, 'account_invoice_line': line.id, }, context=context) if line.product_id and not line.product_id.membership and ml_ids: # Product line has changed to a non membership product member_line_obj.unlink(cr, uid, ml_ids, context=context) return res def unlink(self, cr, uid, ids, context=None): """Remove Membership Line Record for Account Invoice Line """ member_line_obj = self.pool.get('membership.membership_line') for id in ids: ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', id)], context=context) member_line_obj.unlink(cr, uid, ml_ids, context=context) return super(account_invoice_line, self).unlink(cr, uid, ids, context=context) def create(self, cr, uid, vals, context=None): """Overrides orm create method """ member_line_obj = self.pool.get('membership.membership_line') result = super(account_invoice_line, self).create(cr, uid, vals, context=context) line = self.browse(cr, uid, result, context=context) if line.invoice_id.type == 'out_invoice': ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context) if line.product_id and line.product_id.membership and not ml_ids: # Product line is a membership product date_from = line.product_id.membership_date_from date_to = line.product_id.membership_date_to if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to: date_from = line.invoice_id.date_invoice member_line_obj.create(cr, uid, { 'partner': line.invoice_id.partner_id and line.invoice_id.partner_id.id or False, 'membership_id': line.product_id.id, 'member_price': line.price_unit, 'date': time.strftime('%Y-%m-%d'), 'date_from': date_from, 'date_to': date_to, 'account_invoice_line': line.id, }, context=context) return result # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Donkyhotay/MoonPy
twisted/internet/wxsupport.py
61
1455
# Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. # """Old method of wxPython support for Twisted. twisted.internet.wxreactor is probably a better choice. To use:: | # given a wxApp instance called myWxAppInstance: | from twisted.internet import wxsupport | wxsupport.install(myWxAppInstance) Use Twisted's APIs for running and stopping the event loop, don't use wxPython's methods. On Windows the Twisted event loop might block when dialogs are open or menus are selected. Maintainer: Itamar Shtull-Trauring """ import warnings warnings.warn("wxsupport is not fully functional on Windows, wxreactor is better.") # wxPython imports from wxPython.wx import wxApp # twisted imports from twisted.internet import reactor from twisted.python.runtime import platformType class wxRunner: """Make sure GUI events are handled.""" def __init__(self, app): self.app = app def run(self): """ Execute pending WX events followed by WX idle events and reschedule. """ # run wx events while self.app.Pending(): self.app.Dispatch() # run wx idle events self.app.ProcessIdle() reactor.callLater(0.02, self.run) def install(app): """Install the wxPython support, given a wxApp instance""" runner = wxRunner(app) reactor.callLater(0.02, runner.run) __all__ = ["install"]
gpl-3.0
fengsp/plan
plan/testsuite/output.py
5
1778
# -*- coding: utf-8 -*- """ plan.testsuite.output ~~~~~~~~~~~~~~~~~~~~~ Tests the output for Plan. :copyright: (c) 2014 by Shipeng Feng. :license: BSD, see LICENSE for more details. """ import unittest from plan.output import Output from plan.testsuite import BaseTestCase class OutputTestCase(BaseTestCase): def test_from_none(self): output = str(Output()) self.assert_equal(output, '') def test_from_string(self): output = str(Output('null')) self.assert_equal(output, '> /dev/null 2>&1') output = str(Output('> /tmp/out.log 2> /tmp/error.log')) self.assert_equal(output, '> /tmp/out.log 2> /tmp/error.log') def test_from_dict(self): output = str(Output(dict())) self.assert_equal(output, '') output = str(Output(dict(key='value'))) self.assert_equal(output, '> /dev/null 2>&1') output = str(Output(dict(stdout='/dev/null', stderr='/dev/null'))) self.assert_equal(output, '> /dev/null 2>&1') output = str(Output(dict(stdout=None, stderr=None))) self.assert_equal(output, '') output = str(Output(dict(stdout='/tmp/test_out.log', stderr=None))) self.assert_equal(output, '>> /tmp/test_out.log') output = str(Output(dict(stdout='', stderr='/tmp/test_error.log'))) self.assert_equal(output, '2>> /tmp/test_error.log') output = str(Output(dict(stdout='/t/out.log', stderr='/t/err.log'))) self.assert_equal(output, '>> /t/out.log 2>> /t/err.log') def test_from_illegal(self): output = Output(1) self.assert_raises(TypeError, output.__str__) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(OutputTestCase)) return suite
bsd-3-clause
shaistaansari/django
django/contrib/gis/forms/widgets.py
422
3659
from __future__ import unicode_literals import logging from django.conf import settings from django.contrib.gis import gdal from django.contrib.gis.geos import GEOSException, GEOSGeometry from django.forms.widgets import Widget from django.template import loader from django.utils import six, translation logger = logging.getLogger('django.contrib.gis') class BaseGeometryWidget(Widget): """ The base class for rich geometry widgets. Renders a map using the WKT of the geometry. """ geom_type = 'GEOMETRY' map_srid = 4326 map_width = 600 map_height = 400 display_raw = False supports_3d = False template_name = '' # set on subclasses def __init__(self, attrs=None): self.attrs = {} for key in ('geom_type', 'map_srid', 'map_width', 'map_height', 'display_raw'): self.attrs[key] = getattr(self, key) if attrs: self.attrs.update(attrs) def serialize(self, value): return value.wkt if value else '' def deserialize(self, value): try: return GEOSGeometry(value, self.map_srid) except (GEOSException, ValueError) as err: logger.error( "Error creating geometry from value '%s' (%s)" % ( value, err) ) return None def render(self, name, value, attrs=None): # If a string reaches here (via a validation error on another # field) then just reconstruct the Geometry. 
if isinstance(value, six.string_types): value = self.deserialize(value) if value: # Check that srid of value and map match if value.srid != self.map_srid: try: ogr = value.ogr ogr.transform(self.map_srid) value = ogr except gdal.GDALException as err: logger.error( "Error transforming geometry from srid '%s' to srid '%s' (%s)" % ( value.srid, self.map_srid, err) ) context = self.build_attrs( attrs, name=name, module='geodjango_%s' % name.replace('-', '_'), # JS-safe serialized=self.serialize(value), geom_type=gdal.OGRGeomType(self.attrs['geom_type']), STATIC_URL=settings.STATIC_URL, LANGUAGE_BIDI=translation.get_language_bidi(), ) return loader.render_to_string(self.template_name, context) class OpenLayersWidget(BaseGeometryWidget): template_name = 'gis/openlayers.html' class Media: js = ( 'http://openlayers.org/api/2.13/OpenLayers.js', 'gis/js/OLMapWidget.js', ) class OSMWidget(BaseGeometryWidget): """ An OpenLayers/OpenStreetMap-based widget. """ template_name = 'gis/openlayers-osm.html' default_lon = 5 default_lat = 47 class Media: js = ( 'http://openlayers.org/api/2.13/OpenLayers.js', 'http://www.openstreetmap.org/openlayers/OpenStreetMap.js', 'gis/js/OLMapWidget.js', ) def __init__(self, attrs=None): super(OSMWidget, self).__init__() for key in ('default_lon', 'default_lat'): self.attrs[key] = getattr(self, key) if attrs: self.attrs.update(attrs) @property def map_srid(self): # Use the official spherical mercator projection SRID when GDAL is # available; otherwise, fallback to 900913. if gdal.HAS_GDAL: return 3857 else: return 900913
bsd-3-clause
mr-niels-christensen/environment-scotland-dot-rural
src/main/rdflib/__init__.py
4
3049
"""\ A pure Python package providing the core RDF constructs. The packages is intended to provide the core RDF types and interfaces for working with RDF. The package defines a plugin interface for parsers, stores, and serializers that other packages can use to implement parsers, stores, and serializers that will plug into the rdflib package. The primary interface `rdflib` exposes to work with RDF is `rdflib.graph.Graph`. A tiny example: >>> import rdflib >>> g = rdflib.Graph() >>> result = g.parse("http://www.w3.org/2000/10/swap/test/meet/blue.rdf") >>> print("graph has %s statements." % len(g)) graph has 9 statements. >>> >>> for s, p, o in g: ... if (s, p, o) not in g: ... raise Exception("It better be!") >>> s = g.serialize(format='n3') """ __docformat__ = "restructuredtext en" # The format of the __version__ line is matched by a regex in setup.py __version__ = "4.1.2" __date__ = "2013/12/31" __all__ = [ 'URIRef', 'BNode', 'Literal', 'Variable', 'Namespace', 'Dataset', 'Graph', 'ConjunctiveGraph', 'RDF', 'RDFS', 'OWL', 'XSD', 'util', ] import sys assert sys.version_info >= (2, 5, 0), "rdflib requires Python 2.5 or higher" del sys import logging _LOGGER = logging.getLogger("rdflib") _LOGGER.info("RDFLib Version: %s" % __version__) NORMALIZE_LITERALS = True """ If True - Literals lexical forms are normalized when created. I.e. the lexical forms is parsed according to data-type, then the stored lexical form is the re-serialized value that was parsed. Illegal values for a datatype are simply kept. The normalized keyword for Literal.__new__ can override this. For example: >>> from rdflib import Literal,XSD >>> Literal("01", datatype=XSD.int) rdflib.term.Literal(u'1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer')) This flag may be changed at any time, but will only affect literals created after that time, previously created literals will remain (un)normalized. 
""" DAWG_LITERAL_COLLATION = False """ DAWG_LITERAL_COLLATION determines how literals are ordered or compared to each other. In SPARQL, applying the >,<,>=,<= operators to literals of incompatible data-types is an error, i.e: Literal(2)>Literal('cake') is neither true nor false, but an error. This is a problem in PY3, where lists of Literals of incompatible types can no longer be sorted. Setting this flag to True gives you strict DAWG/SPARQL compliance, setting it to False will order Literals with incompatible datatypes by datatype URI In particular, this determines how the rich comparison operators for Literal work, eq, __neq__, __lt__, etc. """ from rdflib.term import ( URIRef, BNode, Literal, Variable) from rdflib.namespace import Namespace from rdflib.graph import Dataset, Graph, ConjunctiveGraph from rdflib.namespace import RDF, RDFS, OWL, XSD from rdflib import plugin from rdflib import query # tedious sop to flake8 assert plugin assert query from rdflib import util
apache-2.0
luci/luci-py
client/third_party/colorama/ansi.py
640
2524
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. ''' This module generates ANSI character codes to printing colors to terminals. See: http://en.wikipedia.org/wiki/ANSI_escape_code ''' CSI = '\033[' OSC = '\033]' BEL = '\007' def code_to_chars(code): return CSI + str(code) + 'm' def set_title(title): return OSC + '2;' + title + BEL def clear_screen(mode=2): return CSI + str(mode) + 'J' def clear_line(mode=2): return CSI + str(mode) + 'K' class AnsiCodes(object): def __init__(self): # the subclasses declare class attributes which are numbers. # Upon instantiation we define instance attributes, which are the same # as the class attributes but wrapped with the ANSI escape sequence for name in dir(self): if not name.startswith('_'): value = getattr(self, name) setattr(self, name, code_to_chars(value)) class AnsiCursor(object): def UP(self, n=1): return CSI + str(n) + 'A' def DOWN(self, n=1): return CSI + str(n) + 'B' def FORWARD(self, n=1): return CSI + str(n) + 'C' def BACK(self, n=1): return CSI + str(n) + 'D' def POS(self, x=1, y=1): return CSI + str(y) + ';' + str(x) + 'H' class AnsiFore(AnsiCodes): BLACK = 30 RED = 31 GREEN = 32 YELLOW = 33 BLUE = 34 MAGENTA = 35 CYAN = 36 WHITE = 37 RESET = 39 # These are fairly well supported, but not part of the standard. LIGHTBLACK_EX = 90 LIGHTRED_EX = 91 LIGHTGREEN_EX = 92 LIGHTYELLOW_EX = 93 LIGHTBLUE_EX = 94 LIGHTMAGENTA_EX = 95 LIGHTCYAN_EX = 96 LIGHTWHITE_EX = 97 class AnsiBack(AnsiCodes): BLACK = 40 RED = 41 GREEN = 42 YELLOW = 43 BLUE = 44 MAGENTA = 45 CYAN = 46 WHITE = 47 RESET = 49 # These are fairly well supported, but not part of the standard. LIGHTBLACK_EX = 100 LIGHTRED_EX = 101 LIGHTGREEN_EX = 102 LIGHTYELLOW_EX = 103 LIGHTBLUE_EX = 104 LIGHTMAGENTA_EX = 105 LIGHTCYAN_EX = 106 LIGHTWHITE_EX = 107 class AnsiStyle(AnsiCodes): BRIGHT = 1 DIM = 2 NORMAL = 22 RESET_ALL = 0 Fore = AnsiFore() Back = AnsiBack() Style = AnsiStyle() Cursor = AnsiCursor()
apache-2.0
robbi/pyload
module/plugins/hoster/ShareonlineBiz.py
7
5866
# -*- coding: utf-8 -*- import re import time from module.network.RequestFactory import getURL as get_url from ..captcha.ReCaptcha import ReCaptcha from ..internal.SimpleHoster import SimpleHoster class ShareonlineBiz(SimpleHoster): __name__ = "ShareonlineBiz" __type__ = "hoster" __version__ = "0.66" __status__ = "testing" __pattern__ = r'https?://(?:www\.)?(share-online\.biz|egoshare\.com)/(download\.php\?id=|dl/)(?P<ID>\w+)' __config__ = [("activated", "bool", "Activated", True), ("use_premium", "bool", "Use premium account if available", True), ("fallback", "bool", "Fallback to free download if premium fails", True), ("chk_filesize", "bool", "Check file size", True), ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)] __description__ = """Shareonline.biz hoster plugin""" __license__ = "GPLv3" __authors__ = [("spoob", "spoob@pyload.org"), ("mkaay", "mkaay@mkaay.de"), ("zoidberg", "zoidberg@mujmail.cz"), ("Walter Purcaro", "vuolter@gmail.com")] URL_REPLACEMENTS = [ (__pattern__ + ".*", "http://www.share-online.biz/dl/\g<ID>")] CHECK_TRAFFIC = True RECAPTCHA_KEY = "6LdatrsSAAAAAHZrB70txiV5p-8Iv8BtVxlTtjKX" ERROR_PATTERN = r'<p class="b">Information:</p>\s*<div>\s*<strong>(.*?)</strong>' @classmethod def api_info(cls, url): info = {} field = get_url("http://api.share-online.biz/linkcheck.php", get={'md5': "1", 'links': re.match(cls.__pattern__, url).group("ID")}).split(";") try: if field[1] == "OK": info['fileid'] = field[0] info['status'] = 2 info['name'] = field[2] info['size'] = field[3] #: In bytes info['md5'] = field[4].strip().lower( ).replace("\n\n", "") #: md5 elif field[1] in ("DELETED", "NOTFOUND"): info['status'] = 1 except IndexError: pass return info def setup(self): self.resume_download = self.premium self.multiDL = False def handle_captcha(self): self.captcha = ReCaptcha(self.pyfile) response, challenge = self.captcha.challenge(self.RECAPTCHA_KEY) m = re.search(r'var wait=(\d+);', self.data) self.set_wait(int(m.group(1)) if m 
else 30) res = self.load("%s/free/captcha/%d" % (self.pyfile.url, int(time.time() * 1000)), post={'dl_free': "1", 'recaptcha_challenge_field': challenge, 'recaptcha_response_field': response}) if res != "0": self.captcha.correct() return res else: self.retry_captcha() def handle_free(self, pyfile): self.wait(3) self.data = self.load("%s/free/" % pyfile.url, post={'dl_free': "1", 'choice': "free"}) self.check_errors() res = self.handle_captcha() self.link = res.decode('base64') if not self.link.startswith("http://"): self.error(_("Invalid url")) self.wait() def check_download(self): check = self.scan_download({'cookie': re.compile(r'<div id="dl_failure"'), 'fail': re.compile(r'<title>Share-Online')}) if check == "cookie": self.retry_captcha(5, 60, _("Cookie failure")) elif check == "fail": self.retry_captcha(5, 5 * 60, _("Download failed")) return SimpleHoster.check_download(self) #: Should be working better loading (account) api internally def handle_premium(self, pyfile): self.api_data = dlinfo = {} html = self.load("https://api.share-online.biz/account.php", get={'username': self.account.user, 'password': self.account.get_login('password'), 'act': "download", 'lid': self.info['fileid']}) self.log_debug(html) for line in html.splitlines(): try: key, value = line.split(": ") dlinfo[key.lower()] = value except ValueError: pass if dlinfo['status'] != "online": self.offline() else: pyfile.name = dlinfo['name'] pyfile.size = int(dlinfo['size']) self.link = dlinfo['url'] if self.link == "server_under_maintenance": self.temp_offline() else: self.multiDL = True def check_errors(self): m = re.search(r'/failure/(.*?)/', self.req.lastEffectiveURL) if m is None: self.info.pop('error', None) return errmsg = m.group(1).lower() try: self.log_error( errmsg, re.search( self.ERROR_PATTERN, self.data).group(1)) except Exception: self.log_error(_("Unknown error occurred"), errmsg) if errmsg == "invalid": self.fail(_("File not available")) elif errmsg in ("freelimit", "size", 
"proxy"): self.fail(_("Premium account needed")) elif errmsg in ("expired", "server"): self.retry(wait=600, msg=errmsg) elif errmsg == "full": self.fail(_("Server is full")) elif 'slot' in errmsg: self.wait(3600, reconnect=True) self.restart(errmsg) else: self.wait(60, reconnect=True) self.restart(errmsg)
gpl-3.0
goldeneye-source/ges-python
lib/encodings/mac_turkish.py
272
13513
""" Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='mac-turkish', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> CONTROL CHARACTER '\x01' # 0x01 -> CONTROL CHARACTER '\x02' # 0x02 -> CONTROL CHARACTER '\x03' # 0x03 -> CONTROL CHARACTER '\x04' # 0x04 -> CONTROL CHARACTER '\x05' # 0x05 -> CONTROL CHARACTER '\x06' # 0x06 -> CONTROL CHARACTER '\x07' # 0x07 -> CONTROL CHARACTER '\x08' # 0x08 -> CONTROL CHARACTER '\t' # 0x09 -> CONTROL CHARACTER '\n' # 0x0A -> CONTROL CHARACTER '\x0b' # 0x0B -> CONTROL CHARACTER '\x0c' # 0x0C -> CONTROL CHARACTER '\r' # 0x0D -> CONTROL CHARACTER '\x0e' # 0x0E -> CONTROL CHARACTER '\x0f' # 0x0F -> CONTROL CHARACTER '\x10' # 0x10 -> CONTROL CHARACTER '\x11' # 0x11 -> CONTROL CHARACTER '\x12' # 0x12 -> CONTROL CHARACTER '\x13' # 0x13 -> CONTROL CHARACTER '\x14' # 0x14 -> CONTROL CHARACTER '\x15' # 0x15 -> CONTROL CHARACTER '\x16' # 0x16 -> CONTROL CHARACTER '\x17' # 0x17 -> CONTROL CHARACTER '\x18' # 0x18 -> CONTROL CHARACTER '\x19' # 0x19 -> CONTROL 
CHARACTER '\x1a' # 0x1A -> CONTROL CHARACTER '\x1b' # 0x1B -> CONTROL CHARACTER '\x1c' # 0x1C -> CONTROL CHARACTER '\x1d' # 0x1D -> CONTROL CHARACTER '\x1e' # 0x1E -> CONTROL CHARACTER '\x1f' # 0x1F -> CONTROL CHARACTER ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER 
W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> CONTROL CHARACTER '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS '\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE '\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE '\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE '\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS '\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS '\u2020' # 0xA0 -> DAGGER '\xb0' # 0xA1 -> DEGREE SIGN '\xa2' # 0xA2 -> CENT SIGN '\xa3' 
# 0xA3 -> POUND SIGN '\xa7' # 0xA4 -> SECTION SIGN '\u2022' # 0xA5 -> BULLET '\xb6' # 0xA6 -> PILCROW SIGN '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S '\xae' # 0xA8 -> REGISTERED SIGN '\xa9' # 0xA9 -> COPYRIGHT SIGN '\u2122' # 0xAA -> TRADE MARK SIGN '\xb4' # 0xAB -> ACUTE ACCENT '\xa8' # 0xAC -> DIAERESIS '\u2260' # 0xAD -> NOT EQUAL TO '\xc6' # 0xAE -> LATIN CAPITAL LETTER AE '\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE '\u221e' # 0xB0 -> INFINITY '\xb1' # 0xB1 -> PLUS-MINUS SIGN '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO '\xa5' # 0xB4 -> YEN SIGN '\xb5' # 0xB5 -> MICRO SIGN '\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL '\u2211' # 0xB7 -> N-ARY SUMMATION '\u220f' # 0xB8 -> N-ARY PRODUCT '\u03c0' # 0xB9 -> GREEK SMALL LETTER PI '\u222b' # 0xBA -> INTEGRAL '\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR '\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR '\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA '\xe6' # 0xBE -> LATIN SMALL LETTER AE '\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE '\xbf' # 0xC0 -> INVERTED QUESTION MARK '\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK '\xac' # 0xC2 -> NOT SIGN '\u221a' # 0xC3 -> SQUARE ROOT '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK '\u2248' # 0xC5 -> ALMOST EQUAL TO '\u2206' # 0xC6 -> INCREMENT '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS '\xa0' # 0xCA -> NO-BREAK SPACE '\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE '\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE '\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE '\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE '\u0153' # 0xCF -> LATIN SMALL LIGATURE OE '\u2013' # 0xD0 -> EN DASH '\u2014' # 0xD1 -> EM DASH '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK '\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK '\xf7' # 0xD6 -> DIVISION SIGN '\u25ca' 
# 0xD7 -> LOZENGE '\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS '\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS '\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE '\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE '\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE '\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I '\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA '\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA '\u2021' # 0xE0 -> DOUBLE DAGGER '\xb7' # 0xE1 -> MIDDLE DOT '\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK '\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK '\u2030' # 0xE4 -> PER MILLE SIGN '\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE '\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE '\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE '\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\uf8ff' # 0xF0 -> Apple logo '\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE '\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE '\uf8a0' # 0xF5 -> undefined1 '\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT '\u02dc' # 0xF7 -> SMALL TILDE '\xaf' # 0xF8 -> MACRON '\u02d8' # 0xF9 -> BREVE '\u02d9' # 0xFA -> DOT ABOVE '\u02da' # 0xFB -> RING ABOVE '\xb8' # 0xFC -> CEDILLA '\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT '\u02db' # 0xFE -> OGONEK '\u02c7' # 0xFF -> CARON ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
gpl-3.0
crosswalk-project/blink-crosswalk-efl
LayoutTests/http/tests/websocket/echo-requested-extensions_wsh.py
5
1778
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from mod_pywebsocket import common, msgutil


def web_socket_do_extra_handshake(request):
    # Accept every handshake as-is; this test server needs no extra checks.
    pass


def web_socket_transfer_data(request):
    # Echo the Sec-WebSocket-Extensions header from the opening handshake
    # back to the client, so the test can inspect what it offered.
    requested_extensions = request.headers_in.get(
        common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
    msgutil.send_message(request, requested_extensions)
bsd-3-clause
steynovich/ansible-modules-extras
network/f5/bigip_monitor_tcp.py
32
15166
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: bigip_monitor_tcp
short_description: "Manages F5 BIG-IP LTM tcp monitors"
description:
    - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
version_added: "1.4"
author:
    - Serge van Ginderachter (@srvg)
    - Tim Rupp (@caphrim007)
notes:
    - "Requires BIG-IP software version >= 11"
    - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
    - "Best run as a local_action in your playbook"
    - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
    - bigsuds
options:
    state:
        description:
            - Monitor state
        required: false
        default: 'present'
        choices:
            - present
            - absent
    name:
        description:
            - Monitor name
        required: true
        default: null
        aliases:
            - monitor
    partition:
        description:
            - Partition for the monitor
        required: false
        default: 'Common'
    type:
        description:
            - The template type of this monitor template
        required: false
        default: 'tcp'
        choices:
            - TTYPE_TCP
            - TTYPE_TCP_ECHO
            - TTYPE_TCP_HALF_OPEN
    parent:
        description:
            - The parent template of this monitor template
        required: false
        default: 'tcp'
        choices:
            - tcp
            - tcp_echo
            - tcp_half_open
    parent_partition:
        description:
            - Partition for the parent monitor
        required: false
        default: 'Common'
    send:
        description:
            - The send string for the monitor call
        required: true
        default: none
    receive:
        description:
            - The receive string for the monitor call
        required: true
        default: none
    ip:
        description:
            - IP address part of the ipport definition. The default API
              setting is "0.0.0.0".
        required: false
        default: none
    port:
        description:
            - Port address part op the ipport definition. The default API
              setting is 0.
        required: false
        default: none
    interval:
        description:
            - The interval specifying how frequently the monitor instance
              of this template will run. By default, this interval is used
              for up and down states. The default API setting is 5.
        required: false
        default: none
    timeout:
        description:
            - The number of seconds in which the node or service must
              respond to the monitor request. If the target responds
              within the set time period, it is considered up. If the
              target does not respond within the set time period, it is
              considered down. You can change this number to any number
              you want, however, it should be 3 times the interval number
              of seconds plus 1 second. The default API setting is 16.
        required: false
        default: none
    time_until_up:
        description:
            - Specifies the amount of time in seconds after the first
              successful response before a node will be marked up. A value
              of 0 will cause a node to be marked up immediately after a
              valid response is received from the node. The default API
              setting is 0.
        required: false
        default: none
extends_documentation_fragment: f5
'''

EXAMPLES = '''
- name: Create TCP Monitor
  bigip_monitor_tcp:
      state: "present"
      server: "lb.mydomain.com"
      user: "admin"
      password: "secret"
      name: "my_tcp_monitor"
      type: "tcp"
      send: "tcp string to send"
      receive: "tcp string to receive"
  delegate_to: localhost

- name: Create TCP half open Monitor
  bigip_monitor_tcp:
      state: "present"
      server: "lb.mydomain.com"
      user: "admin"
      password: "secret"
      name: "my_tcp_monitor"
      type: "tcp"
      send: "tcp string to send"
      receive: "http string to receive"
  delegate_to: localhost

- name: Remove TCP Monitor
  bigip_monitor_tcp:
      state: "absent"
      server: "lb.mydomain.com"
      user: "admin"
      password: "secret"
      name: "my_tcp_monitor"
'''

# iControl template type actually used for API calls; main() overrides this
# (via a global statement) once the user's 'type' parameter is known.
TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open']
DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower()


def check_monitor_exists(module, api, monitor, parent):
    """Return True if `monitor` exists with the expected type and parent.

    Fails the module if a monitor of the same name exists but with a
    different template type or parent template.
    """
    # hack to determine if monitor exists
    result = False
    try:
        ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
        parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
        if ttype == TEMPLATE_TYPE and parent == parent2:
            result = True
        else:
            module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
    except bigsuds.OperationFailed as e:
        # "was not found" means the monitor simply doesn't exist yet.
        if "was not found" in str(e):
            result = False
        else:
            # genuine exception
            raise
    return result


def create_monitor(api, monitor, template_attributes):
    """Create the monitor template; return False if it already existed."""
    try:
        api.LocalLB.Monitor.create_template(
            templates=[{
                'template_name': monitor,
                'template_type': TEMPLATE_TYPE
            }],
            template_attributes=[template_attributes]
        )
    except bigsuds.OperationFailed as e:
        # Tolerate a concurrent create by another task/node.
        if "already exists" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True


def delete_monitor(api, monitor):
    """Delete the monitor template; return False if it was already gone."""
    try:
        api.LocalLB.Monitor.delete_template(template_names=[monitor])
    except bigsuds.OperationFailed as e:
        # maybe it was deleted since we checked
        if "was not found" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True


def check_string_property(api, monitor, str_property):
    """Return True if the monitor's string property already matches."""
    try:
        template_prop = api.LocalLB.Monitor.get_template_string_property(
            [monitor], [str_property['type']]
        )[0]
        return str_property == template_prop
    except bigsuds.OperationFailed as e:
        # happens in check mode if not created yet; report "no change needed"
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise


def set_string_property(api, monitor, str_property):
    """Set one string property (e.g. send/receive string) on the monitor."""
    api.LocalLB.Monitor.set_template_string_property(
        template_names=[monitor],
        values=[str_property]
    )


def check_integer_property(api, monitor, int_property):
    """Return True if the monitor's integer property already matches."""
    try:
        return int_property == api.LocalLB.Monitor.get_template_integer_property(
            [monitor], [int_property['type']]
        )[0]
    except bigsuds.OperationFailed as e:
        # happens in check mode if not created yet; report "no change needed"
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise


def set_integer_property(api, monitor, int_property):
    """Set one integer property (e.g. interval/timeout) on the monitor."""
    api.LocalLB.Monitor.set_template_integer_property(
        template_names=[monitor],
        values=[int_property]
    )


def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
    """Bring all monitor properties in line with the requested values.

    Skips properties whose requested value is None (i.e. not supplied by
    the user) and honors check mode. Returns True if anything changed.
    """
    changed = False
    for str_property in template_string_properties:
        if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
            if not module.check_mode:
                set_string_property(api, monitor, str_property)
            changed = True
    for int_property in template_integer_properties:
        if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
            if not module.check_mode:
                set_integer_property(api, monitor, int_property)
            changed = True
    return changed


def get_ipport(api, monitor):
    """Return the monitor's current destination (address type + ip/port)."""
    return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]


def set_ipport(api, monitor, ipport):
    """Set the monitor destination; returns (ok, error_message)."""
    try:
        api.LocalLB.Monitor.set_template_destination(
            template_names=[monitor], destinations=[ipport]
        )
        return True, ""
    except bigsuds.OperationFailed as e:
        # The address type is immutable once the monitor is in use by a pool.
        if "Cannot modify the address type of monitor" in str(e):
            return False, "Cannot modify the address type of monitor if already assigned to a pool."
        else:
            # genuine exception
            raise


def main():
    """Module entry point: reconcile the requested monitor state with F5."""
    argument_spec = f5_argument_spec()

    meta_args = dict(
        name=dict(required=True),
        type=dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
        parent=dict(default=DEFAULT_PARENT),
        parent_partition=dict(default='Common'),
        send=dict(required=False),
        receive=dict(required=False),
        ip=dict(required=False),
        port=dict(required=False, type='int'),
        interval=dict(required=False, type='int'),
        timeout=dict(required=False, type='int'),
        time_until_up=dict(required=False, type='int', default=0)
    )
    argument_spec.update(meta_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    if module.params['validate_certs']:
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')

    server = module.params['server']
    server_port = module.params['server_port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    partition = module.params['partition']
    validate_certs = module.params['validate_certs']
    parent_partition = module.params['parent_partition']
    name = module.params['name']
    type = 'TTYPE_' + module.params['type'].upper()
    parent = fq_name(parent_partition, module.params['parent'])
    monitor = fq_name(partition, name)
    send = module.params['send']
    receive = module.params['receive']
    ip = module.params['ip']
    port = module.params['port']
    interval = module.params['interval']
    timeout = module.params['timeout']
    time_until_up = module.params['time_until_up']

    # tcp monitor has multiple types, so overrule global TEMPLATE_TYPE.
    # FIX: without the global declaration the assignment below only created a
    # function-local variable, so check_monitor_exists()/create_monitor()
    # kept using the module-level default 'TTYPE_TCP' even when the user
    # requested tcp_echo or tcp_half_open.
    global TEMPLATE_TYPE
    TEMPLATE_TYPE = type

    # end monitor specific stuff

    api = bigip_api(server, user, password, validate_certs, port=server_port)
    monitor_exists = check_monitor_exists(module, api, monitor, parent)

    # ipport is a special setting
    if monitor_exists:
        # make sure to not update current settings if not asked
        cur_ipport = get_ipport(api, monitor)
        if ip is None:
            ip = cur_ipport['ipport']['address']
        if port is None:
            port = cur_ipport['ipport']['port']
    else:
        # use API defaults if not defined to create it
        if interval is None:
            interval = 5
        if timeout is None:
            timeout = 16
        if ip is None:
            ip = '0.0.0.0'
        if port is None:
            port = 0
        if send is None:
            send = ''
        if receive is None:
            receive = ''

    # define and set address type
    if ip == '0.0.0.0' and port == 0:
        address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
    elif ip == '0.0.0.0' and port != 0:
        address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
    elif ip != '0.0.0.0' and port != 0:
        address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
    else:
        address_type = 'ATYPE_UNSET'

    ipport = {
        'address_type': address_type,
        'ipport': {
            'address': ip,
            'port': port
        }
    }

    template_attributes = {
        'parent_template': parent,
        'interval': interval,
        'timeout': timeout,
        'dest_ipport': ipport,
        'is_read_only': False,
        'is_directly_usable': True
    }

    # monitor specific stuff: only plain TCP monitors take send/receive strings
    if type == 'TTYPE_TCP':
        template_string_properties = [
            {
                'type': 'STYPE_SEND',
                'value': send
            },
            {
                'type': 'STYPE_RECEIVE',
                'value': receive
            }
        ]
    else:
        template_string_properties = []

    template_integer_properties = [
        {
            'type': 'ITYPE_INTERVAL',
            'value': interval
        },
        {
            'type': 'ITYPE_TIMEOUT',
            'value': timeout
        },
        {
            'type': 'ITYPE_TIME_UNTIL_UP',
            'value': time_until_up
        }
    ]

    # main logic, monitor generic
    try:
        result = {'changed': False}  # default

        if state == 'absent':
            if monitor_exists:
                if not module.check_mode:
                    # possible race condition if same task
                    # on other node deleted it first
                    result['changed'] |= delete_monitor(api, monitor)
                else:
                    result['changed'] |= True
        else:
            # check for monitor itself
            if not monitor_exists:
                if not module.check_mode:
                    # again, check changed status here b/c race conditions
                    # if other task already created it
                    result['changed'] |= create_monitor(api, monitor, template_attributes)
                else:
                    result['changed'] |= True

            # check for monitor parameters
            # whether it already existed, or was just created, now update
            # the update functions need to check for check mode but
            # cannot update settings if it doesn't exist which happens in check mode
            result['changed'] |= update_monitor_properties(api, module, monitor,
                                                           template_string_properties,
                                                           template_integer_properties)

            # we just have to update the ipport if monitor already exists and it's different
            if monitor_exists and cur_ipport != ipport:
                set_ipport(api, monitor, ipport)
                result['changed'] |= True
            # else: monitor doesn't exist (check mode) or ipport is already ok
    except Exception as e:
        module.fail_json(msg="received exception: %s" % e)

    module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *

if __name__ == '__main__':
    main()
gpl-3.0
mdsafwan/Deal-My-Stuff
Lib/site-packages/django/contrib/admin/utils.py
21
17140
from __future__ import unicode_literals

import datetime
import decimal
from collections import defaultdict

from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext


def lookup_needs_distinct(opts, lookup_path):
    """
    Returns True if 'distinct()' should be used to query the given lookup path.
    """
    # Only the first component of the lookup path matters: traversing any
    # many-to-many relation can yield duplicate rows in the result set.
    field_name = lookup_path.split('__', 1)[0]
    field = opts.get_field(field_name)
    if hasattr(field, 'get_path_info') and any(path.m2m for path in field.get_path_info()):
        return True
    return False


def prepare_lookup_value(key, value):
    """
    Returns a lookup value prepared to be used in queryset filtering.
    """
    # if key ends with __in, split parameter into separate values
    if key.endswith('__in'):
        value = value.split(',')
    # if key ends with __isnull, special case '' and the string literals 'false' and '0'
    if key.endswith('__isnull'):
        if value.lower() in ('', 'false', '0'):
            value = False
        else:
            value = True
    return value


def quote(s):
    """
    Ensure that primary key values do not confuse the admin URLs by escaping
    any '/', '_' and ':' and similarly problematic characters.
    Similar to urllib.quote, except that the quoting is slightly different so
    that it doesn't get automatically unquoted by the Web browser.
    """
    # Non-string primary keys (e.g. integers) are returned unchanged.
    if not isinstance(s, six.string_types):
        return s
    res = list(s)
    for i in range(len(res)):
        c = res[i]
        # '_' is the escape character itself, so it must also be encoded.
        if c in """:/_#?;@&=+$,"[]<>%\\""":
            res[i] = '_%02X' % ord(c)
    return ''.join(res)


def unquote(s):
    """
    Undo the effects of quote(). Based heavily on urllib.unquote().
    """
    # Bind frequently-used callables to locals (micro-optimization kept
    # from the urllib original).
    mychr = chr
    myatoi = int
    # NOTE: 'list' deliberately shadows the builtin here, as in the
    # urllib code this was adapted from.
    list = s.split('_')
    res = [list[0]]
    myappend = res.append
    del list[0]
    for item in list:
        if item[1:2]:
            try:
                # First two chars after '_' are a hex byte from quote().
                myappend(mychr(myatoi(item[:2], 16)) + item[2:])
            except ValueError:
                # Not a valid escape; restore the literal underscore.
                myappend('_' + item)
        else:
            myappend('_' + item)
    return "".join(res)


def flatten(fields):
    """Returns a list which is a single level of flattening of the
    original list."""
    flat = []
    for field in fields:
        if isinstance(field, (list, tuple)):
            flat.extend(field)
        else:
            flat.append(field)
    return flat


def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure."""
    field_names = []
    for name, opts in fieldsets:
        # 'fields' entries may be grouped in tuples for same-line display.
        field_names.extend(
            flatten(opts['fields'])
        )
    return field_names


def get_deleted_objects(objs, opts, user, admin_site, using):
    """
    Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).

    Returns a nested list of strings suitable for display in the
    template with the ``unordered_list`` filter.
    """
    collector = NestedObjects(using=using)
    collector.collect(objs)
    perms_needed = set()

    def format_callback(obj):
        # Render one collected object, as a change-page link when the model
        # is registered with this admin site and the user may edit it.
        has_admin = obj.__class__ in admin_site._registry
        opts = obj._meta

        no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
                                   force_text(obj))

        if has_admin:
            try:
                admin_url = reverse('%s:%s_%s_change'
                                    % (admin_site.name,
                                       opts.app_label,
                                       opts.model_name),
                                    None, (quote(obj._get_pk_val()),))
            except NoReverseMatch:
                # Change url doesn't exist -- don't display link to edit
                return no_edit_link

            p = '%s.%s' % (opts.app_label,
                           get_permission_codename('delete', opts))
            if not user.has_perm(p):
                perms_needed.add(opts.verbose_name)
            # Display a link to the admin page.
            return format_html('{}: <a href="{}">{}</a>',
                               capfirst(opts.verbose_name),
                               admin_url,
                               obj)
        else:
            # Don't display link to edit, because it either has no
            # admin or is edited inline.
            return no_edit_link

    to_delete = collector.nested(format_callback)

    protected = [format_callback(obj) for obj in collector.protected]

    return to_delete, collector.model_count, perms_needed, protected


class NestedObjects(Collector):
    """Deletion collector that also records the parent/child structure so
    the admin can show what will be deleted as a nested list."""

    def __init__(self, *args, **kwargs):
        super(NestedObjects, self).__init__(*args, **kwargs)
        self.edges = {}  # {from_instance: [to_instances]}
        self.protected = set()
        # verbose_name_plural -> number of instances that will be deleted
        self.model_count = defaultdict(int)

    def add_edge(self, source, target):
        # Record that deleting ``source`` cascades to ``target``.
        # ``source`` is None for root objects.
        self.edges.setdefault(source, []).append(target)

    def collect(self, objs, source=None, source_attr=None, **kwargs):
        for obj in objs:
            if source_attr and not source_attr.endswith('+'):
                related_name = source_attr % {
                    'class': source._meta.model_name,
                    'app_label': source._meta.app_label,
                }
                self.add_edge(getattr(obj, related_name), obj)
            else:
                self.add_edge(None, obj)
            self.model_count[obj._meta.verbose_name_plural] += 1
        try:
            return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
        except models.ProtectedError as e:
            # Protected objects can't be deleted; remember them for display
            # instead of propagating the error.
            self.protected.update(e.protected_objects)

    def related_objects(self, related, objs):
        qs = super(NestedObjects, self).related_objects(related, objs)
        # Avoid one query per object when format_callback accesses the FK.
        return qs.select_related(related.field.name)

    def _nested(self, obj, seen, format_callback):
        # Depth-first expansion of one object; ``seen`` breaks cycles.
        if obj in seen:
            return []
        seen.add(obj)
        children = []
        for child in self.edges.get(obj, ()):
            children.extend(self._nested(child, seen, format_callback))
        if format_callback:
            ret = [format_callback(obj)]
        else:
            ret = [obj]
        if children:
            ret.append(children)
        return ret

    def nested(self, format_callback=None):
        """
        Return the graph as a nested list.
        """
        seen = set()
        roots = []
        for root in self.edges.get(None, ()):
            roots.extend(self._nested(root, seen, format_callback))
        return roots

    def can_fast_delete(self, *args, **kwargs):
        """
        We always want to load the objects into memory so that we can display
        them to the user in confirm page.
        """
        return False


def model_format_dict(obj):
    """
    Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
    typically for use with string formatting.

    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    """
    if isinstance(obj, (models.Model, models.base.ModelBase)):
        opts = obj._meta
    elif isinstance(obj, models.query.QuerySet):
        opts = obj.model._meta
    else:
        # Assume ``obj`` is already an Options instance.
        opts = obj
    return {
        'verbose_name': force_text(opts.verbose_name),
        'verbose_name_plural': force_text(opts.verbose_name_plural)
    }


def model_ngettext(obj, n=None):
    """
    Return the appropriate `verbose_name` or `verbose_name_plural` value for
    `obj` depending on the count `n`.

    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    If `obj` is a `QuerySet` instance, `n` is optional and the length of the
    `QuerySet` is used.
    """
    if isinstance(obj, models.query.QuerySet):
        if n is None:
            n = obj.count()
        obj = obj.model
    d = model_format_dict(obj)
    singular, plural = d["verbose_name"], d["verbose_name_plural"]
    return ungettext(singular, plural, n or 0)


def lookup_field(name, obj, model_admin=None):
    """Resolve ``name`` against ``obj`` (and optionally ``model_admin``).

    Returns a ``(field, attr, value)`` tuple where exactly one of ``field``
    and ``attr`` is None, depending on whether ``name`` resolved to a model
    field or to a callable/attribute.
    """
    opts = obj._meta
    try:
        f = _get_non_gfk_field(opts, name)
    except FieldDoesNotExist:
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and
                hasattr(model_admin, name) and
                not name == '__str__' and
                not name == '__unicode__'):
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value


def _get_non_gfk_field(opts, name):
    """
    For historical reasons, the admin app relies on GenericForeignKeys as being
    "not found" by get_field(). This could likely be cleaned up.

    Reverse relations should also be excluded as these aren't attributes of the
    model (rather something like `foo_set`).
    """
    field = opts.get_field(name)
    if (field.is_relation and
            # Generic foreign keys OR reverse relations
            ((field.many_to_one and not field.related_model) or field.one_to_many)):
        raise FieldDoesNotExist()
    return field


def label_for_field(name, model, model_admin=None, return_attr=False):
    """
    Returns a sensible label for a field name. The name can be a callable,
    property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine fields. If return_attr is
    True, the resolved attribute (which could be a callable) is also returned.
    This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        field = _get_non_gfk_field(model._meta, name)
        try:
            label = field.verbose_name
        except AttributeError:
            # field is likely a ForeignObjectRel
            label = field.related_model._meta.verbose_name
    except FieldDoesNotExist:
        # Special-case the default string representations of the model.
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            attr = bytes
        else:
            # Resolution order: callable, then admin attribute, then
            # model attribute; anything else is an error.
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    message += " or %s" % (model_admin.__class__.__name__,)
                raise AttributeError(message)

            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (isinstance(attr, property) and
                    hasattr(attr, "fget") and
                    hasattr(attr.fget, "short_description")):
                label = attr.fget.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label


def help_text_for_field(name, model):
    """Return the ``help_text`` of the named model field, or '' when the
    name does not resolve to a field (or the field has no help text)."""
    help_text = ""
    try:
        field = _get_non_gfk_field(model._meta, name)
    except FieldDoesNotExist:
        pass
    else:
        if hasattr(field, 'help_text'):
            help_text = field.help_text
    return smart_text(help_text)


def display_for_field(value, field):
    """Format ``value`` for change-list display according to its field type."""
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE

    if field.flatchoices:
        return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
    # NullBooleanField needs special-case null-handling, so it comes
    # before the general null test.
    elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
        return _boolean_icon(value)
    elif value is None:
        return EMPTY_CHANGELIST_VALUE
    elif isinstance(field, models.DateTimeField):
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(field, (models.DateField, models.TimeField)):
        return formats.localize(value)
    elif isinstance(field, models.DecimalField):
        return formats.number_format(value, field.decimal_places)
    elif isinstance(field, models.FloatField):
        return formats.number_format(value)
    elif isinstance(field, models.FileField) and value:
        return format_html('<a href="{}">{}</a>', value.url, value)
    else:
        return smart_text(value)


def display_for_value(value, boolean=False):
    """Format an arbitrary (non-field) value for change-list display."""
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE

    if boolean:
        return _boolean_icon(value)
    elif value is None:
        return EMPTY_CHANGELIST_VALUE
    elif isinstance(value, datetime.datetime):
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(value, (datetime.date, datetime.time)):
        return formats.localize(value)
    elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
        return formats.number_format(value)
    else:
        return smart_text(value)


class NotRelationField(Exception):
    # Raised by get_model_from_relation() for non-relational fields.
    pass


def get_model_from_relation(field):
    """Return the model a relational ``field`` points to, or raise
    NotRelationField if the field is not a relation."""
    if hasattr(field, 'get_path_info'):
        return field.get_path_info()[-1].to_opts.model
    else:
        raise NotRelationField


def reverse_field_path(model, path):
    """ Create a reversed field path.

    E.g. Given (Order, "user__groups"),
    return (Group, "user__order").

    Final field must be a related model, not a data field.
    """
    reversed_path = []
    parent = model
    pieces = path.split(LOOKUP_SEP)
    for piece in pieces:
        field = parent._meta.get_field(piece)
        # skip trailing data field if extant:
        if len(reversed_path) == len(pieces) - 1:  # final iteration
            try:
                get_model_from_relation(field)
            except NotRelationField:
                break

        # Field should point to another model
        if field.is_relation and not (field.auto_created and not field.concrete):
            # Forward relation: reverse it via the related query name.
            related_name = field.related_query_name()
            parent = field.rel.to
        else:
            # Reverse relation: reverse it via the originating field.
            related_name = field.field.name
            parent = field.related_model
        reversed_path.insert(0, related_name)
    return (parent, LOOKUP_SEP.join(reversed_path))


def get_fields_from_path(model, path):
    """ Return list of Fields given path relative to model.

    e.g. (ModelX, "user__groups__name") -> [
        <django.db.models.fields.related.ForeignKey object at 0x...>,
        <django.db.models.fields.related.ManyToManyField object at 0x...>,
        <django.db.models.fields.CharField object at 0x...>,
    ]
    """
    pieces = path.split(LOOKUP_SEP)
    fields = []
    for piece in pieces:
        # Each segment is resolved against the model the previous
        # (relational) field points to.
        if fields:
            parent = get_model_from_relation(fields[-1])
        else:
            parent = model
        fields.append(parent._meta.get_field(piece))
    return fields


def remove_trailing_data_field(fields):
    """ Discard trailing non-relation field if extant. """
    try:
        get_model_from_relation(fields[-1])
    except NotRelationField:
        fields = fields[:-1]
    return fields


def get_limit_choices_to_from_path(model, path):
    """ Return Q object for limiting choices if applicable.

    If final model in path is linked via a ForeignKey or ManyToManyField which
    has a ``limit_choices_to`` attribute, return it as a Q object.
    """
    fields = get_fields_from_path(model, path)
    fields = remove_trailing_data_field(fields)
    get_limit_choices_to = (
        fields and hasattr(fields[-1], 'rel') and
        getattr(fields[-1].rel, 'get_limit_choices_to', None))
    if not get_limit_choices_to:
        return models.Q()  # empty Q
    limit_choices_to = get_limit_choices_to()
    if isinstance(limit_choices_to, models.Q):
        return limit_choices_to  # already a Q
    else:
        return models.Q(**limit_choices_to)  # convert dict to Q
apache-2.0
atsaki/libcloud
libcloud/dns/drivers/vultr.py
3
11986
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Vultr DNS Driver
"""

from libcloud.utils.py3 import urlencode

from libcloud.common.vultr import VultrConnection, VultrResponse
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.types import ZoneAlreadyExistsError, RecordAlreadyExistsError
from libcloud.dns.types import Provider, RecordType

__all__ = [
    'ZoneRequiredException',
    'VultrDNSResponse',
    'VultrDNSConnection',
    'VultrDNSDriver',
]


class ZoneRequiredException(Exception):
    # Raised when an API that needs a Zone instance receives something else.
    pass


class VultrDNSResponse(VultrResponse):
    pass


class VultrDNSConnection(VultrConnection):
    responseCls = VultrDNSResponse


class VultrDNSDriver(DNSDriver):
    """DNS driver for the Vultr v1 API."""

    type = Provider.VULTR
    name = 'Vultr DNS'
    website = 'http://www.vultr.com/'
    connectionCls = VultrDNSConnection

    # Maps libcloud RecordType constants to the strings the Vultr API uses.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.TXT: 'TXT',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.SRV: 'SRV',
    }

    def list_zones(self):
        """
        Return a list of all zones (domains) for this account.

        Note: the previous docstring was copy-pasted from ``list_records``;
        this method takes no arguments and lists zones, not records.

        :return: ``list`` of :class:`Zone`
        """
        action = '/v1/dns/list'
        params = {'api_key': self.key}
        response = self.connection.request(action=action, params=params)
        zones = self._to_zones(response.objects[0])

        return zones

    def list_records(self, zone):
        """
        Returns a list of records for the provided zone.

        :param zone: zone to list records for
        :type zone: :class:`Zone`

        :raises ZoneRequiredException: if ``zone`` is not a :class:`Zone`.
        :raises ZoneDoesNotExistError: if the zone is unknown to Vultr.

        :rtype: ``list`` of :class:`Record`
        """
        if not isinstance(zone, Zone):
            raise ZoneRequiredException('zone should be of type Zone')

        zones = self.list_zones()
        if not self.ex_zone_exists(zone.domain, zones):
            raise ZoneDoesNotExistError(value='', driver=self,
                                        zone_id=zone.domain)

        action = '/v1/dns/records'
        params = {'domain': zone.domain}
        response = self.connection.request(action=action, params=params)
        records = self._to_records(response.objects[0], zone=zone)

        return records

    def get_zone(self, zone_id):
        """
        Returns a `Zone` instance.

        :param zone_id: name of the zone user wants to get.
        :type zone_id: ``str``

        :raises ZoneDoesNotExistError: if no zone with that name exists.

        :rtype: :class:`Zone`
        """
        ret_zone = None

        action = '/v1/dns/list'
        params = {'api_key': self.key}
        response = self.connection.request(action=action, params=params)
        zones = self._to_zones(response.objects[0])

        if not self.ex_zone_exists(zone_id, zones):
            raise ZoneDoesNotExistError(value=None, zone_id=zone_id,
                                        driver=self)

        for zone in zones:
            if zone_id == zone.domain:
                ret_zone = zone

        return ret_zone

    def get_record(self, zone_id, record_id):
        """
        Returns a Record instance.

        :param zone_id: name of the required zone
        :type zone_id: ``str``

        :param record_id: ID of the required record
        :type record_id: ``str``

        :raises RecordDoesNotExistError: if the record is not found.

        :rtype: :class:`Record`
        """
        ret_record = None
        zone = self.get_zone(zone_id=zone_id)
        records = self.list_records(zone=zone)

        if not self.ex_record_exists(record_id, records):
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

        for record in records:
            if record_id == record.id:
                ret_record = record

        return ret_record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone and return a `Zone` object.

        :param domain: Zone domain name, (e.g. example.com).
        :type domain: ``str``

        :param type: Zone type (master / slave).
        :type type: ``str``

        :param ttl: TTL for new records. (optional)
        :type ttl: ``int``

        :param extra: Extra attributes (driver specific). Vultr requires the
            IP of the server the domain should point to, passed as
            ``{'serverip': '127.0.0.1'}``.
        :type extra: ``dict``

        :raises ValueError: if ``extra['serverip']`` is missing.
        :raises ZoneAlreadyExistsError: if the domain already exists.

        :rtype: :class:`Zone`
        """
        extra = extra or {}
        # The dns/create_domain endpoint requires SERVERIP. The previous
        # implementation only assigned ``serverip`` inside an ``if`` and
        # crashed with NameError when the key was absent; fail clearly
        # instead.
        serverip = extra.get('serverip')
        if not serverip:
            raise ValueError("create_zone requires extra={'serverip': ...}")

        params = {'api_key': self.key}
        data = urlencode({'domain': domain, 'serverip': serverip})
        action = '/v1/dns/create_domain'
        zones = self.list_zones()
        if self.ex_zone_exists(domain, zones):
            raise ZoneAlreadyExistsError(value='', driver=self,
                                         zone_id=domain)

        self.connection.request(params=params, action=action, data=data,
                                method='POST')
        zone = Zone(id=domain, domain=domain, type=type, ttl=ttl, driver=self,
                    extra=extra)

        return zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param zone: Zone where the requested record is created.
        :type zone: :class:`Zone`

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: Extra attributes (driver specific). For MX and SRV
            records ``{'priority': <int>}`` is required. (optional)
        :type extra: ``dict``

        :raises RecordAlreadyExistsError: if a record with the same name and
            data already exists in the zone.
        :raises ValueError: if an MX/SRV record is requested without a
            priority.

        :rtype: :class:`Record`
        """
        extra = extra or {}
        ret_record = None
        old_records_list = self.list_records(zone=zone)

        # Reject an exact duplicate (same name and data) up front.
        for record in old_records_list:
            if record.name == name and record.data == data:
                raise RecordAlreadyExistsError(value='', driver=self,
                                               record_id=record.id)

        # MX and SRV records carry a priority. Use ``is not None`` so that a
        # legitimate priority of 0 is not silently dropped (the old truthiness
        # test discarded it).
        priority = None
        if extra.get('priority') is not None:
            priority = int(extra['priority'])

        post_data = {'domain': zone.domain, 'name': name,
                     'type': self.RECORD_TYPE_MAP.get(type), 'data': data}

        if type in (RecordType.MX, RecordType.SRV):
            if priority is None:
                # Previously this path raised an unrelated NameError because
                # ``priority`` was never assigned.
                raise ValueError("MX and SRV records require "
                                 "extra={'priority': ...}")
            post_data['priority'] = priority

        encoded_data = urlencode(post_data)
        params = {'api_key': self.key}
        action = '/v1/dns/create_record'

        self.connection.request(action=action, params=params,
                                data=encoded_data, method='POST')
        updated_zone_records = zone.list_records()

        for record in updated_zone_records:
            if record.name == name and record.data == data:
                ret_record = record

        return ret_record

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will delete all the records belonging to this zone.

        :param zone: Zone to delete.
        :type zone: :class:`Zone`

        :raises ZoneDoesNotExistError: if the zone is unknown to Vultr.

        :rtype: ``bool``
        """
        action = '/v1/dns/delete_domain'
        params = {'api_key': self.key}
        data = urlencode({'domain': zone.domain})
        zones = self.list_zones()
        if not self.ex_zone_exists(zone.domain, zones):
            raise ZoneDoesNotExistError(value='', driver=self,
                                        zone_id=zone.domain)

        response = self.connection.request(params=params, action=action,
                                           data=data, method='POST')

        return response.status == 200

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type record: :class:`Record`

        :raises RecordDoesNotExistError: if the record is unknown to Vultr.

        :rtype: ``bool``
        """
        action = '/v1/dns/delete_record'
        params = {'api_key': self.key}
        data = urlencode({'RECORDID': record.id, 'domain': record.zone.domain})

        zone_records = self.list_records(record.zone)
        if not self.ex_record_exists(record.id, zone_records):
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record.id)

        response = self.connection.request(action=action, params=params,
                                           data=data, method='POST')

        return response.status == 200

    def ex_zone_exists(self, zone_id, zones_list):
        """
        Function to check if a `Zone` object exists.

        :param zone_id: Name of the `Zone` object.
        :type zone_id: ``str``

        :param zones_list: A list containing `Zone` objects
        :type zones_list: ``list``

        :rtype: Returns `True` or `False`
        """
        zone_ids = []
        for zone in zones_list:
            zone_ids.append(zone.domain)

        return zone_id in zone_ids

    def ex_record_exists(self, record_id, records_list):
        """
        :param record_id: Name of the `Record` object.
        :type record_id: ``str``

        :param records_list: A list containing `Record` objects
        :type records_list: ``list``

        :rtype: ``bool``
        """
        record_ids = []
        for record in records_list:
            record_ids.append(record.id)

        return record_id in record_ids

    def _to_zone(self, item):
        """
        Build an object `Zone` from the item dictionary

        :param item: item to build the zone from
        :type item: `dictionary`

        :rtype: :instance: `Zone`
        """
        # Vultr does not report zone type or TTL; assume a master zone.
        type = 'master'
        extra = {'date_created': item['date_created']}

        zone = Zone(id=item['domain'], domain=item['domain'], driver=self,
                    type=type, ttl=None, extra=extra)

        return zone

    def _to_zones(self, items):
        """
        Returns a list of `Zone` objects.

        :param: items: a list that contains dictionary objects to be passed
        to the _to_zone function.
        :type items: ``list``
        """
        zones = []
        for item in items:
            zones.append(self._to_zone(item))

        return zones

    def _to_record(self, item, zone):
        # Build a Record from one entry of the dns/records response.
        extra = {}

        if item.get('priority'):
            extra['priority'] = item['priority']

        type = self._string_to_record_type(item['type'])
        record = Record(id=item['RECORDID'], name=item['name'], type=type,
                        data=item['data'], zone=zone, driver=self, extra=extra)

        return record

    def _to_records(self, items, zone):
        # Build Record objects for every entry of the dns/records response.
        records = []
        for item in items:
            records.append(self._to_record(item, zone=zone))

        return records
apache-2.0
liukaijv/XlsxWriter
xlsxwriter/test/comparison/test_button10.py
8
1309
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#

from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook


class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Compare an XlsxWriter-generated workbook against a reference file
    produced by Excel.

    """

    def setUp(self):
        self.maxDiff = None

        filename = 'button10.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'

        # Output written by the test vs. the Excel-generated reference copy.
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        # Nothing is excluded from the comparison for this test case.
        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        # Three sheets, created in order; only the middle one gets a button.
        sheet1, sheet2, sheet3 = (workbook.add_worksheet() for _ in range(3))

        sheet1.write_comment('A1', 'Some text')
        sheet2.insert_button('B2', {})
        sheet3.write_comment('C2', 'More text')

        # Comment author metadata is set on the commented sheets only.
        for sheet in (sheet1, sheet3):
            sheet.set_comments_author('John')

        workbook.close()

        self.assertExcelEqual()
bsd-2-clause
jwhui/openthread
tests/scripts/thread-cert/Cert_9_2_02_MGMTCommissionerSet.py
3
16733
#!/usr/bin/env python3 # # Copyright (c) 2019, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#

import unittest

import command
import mesh_cop
import thread_cert
from pktverify.consts import MLE_DATA_RESPONSE, LEAD_PET_URI, LEAD_KA_URI, MGMT_COMMISSIONER_SET_URI, NM_CHANNEL_TLV, NM_COMMISSIONER_ID_TLV, NM_COMMISSIONER_SESSION_ID_TLV, NM_STATE_TLV, NM_STEERING_DATA_TLV, NM_BORDER_AGENT_LOCATOR_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, ACTIVE_TIMESTAMP_TLV, SOURCE_ADDRESS_TLV, NWD_COMMISSIONING_DATA_TLV, MESHCOP_ACCEPT, MESHCOP_REJECT, LEADER_ALOC
from pktverify.packet_verifier import PacketVerifier
from pktverify.bytes import Bytes

# Node ids used in the simulated topology.
COMMISSIONER = 1
LEADER = 2

# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to verify Leader's and active Commissioner's behavior via
# MGMT_COMMISSIONER_SET request and response
#
# Test Topology:
# -------------
#  Commissioner
#       |
#     Leader
#
# DUT Types:
# ----------
#  Leader
#  Commissioner


class Cert_9_2_02_MGMTCommissionerSet(thread_cert.TestCase):
    SUPPORT_NCP = False

    TOPOLOGY = {
        COMMISSIONER: {
            'name': 'COMMISSIONER',
            'mode': 'rdn',
            'allowlist': [LEADER]
        },
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'allowlist': [COMMISSIONER]
        },
    }

    def test(self):
        # Form the topology: Leader first, then Commissioner attaches as router.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[COMMISSIONER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')

        # Drain queued Leader messages so only petition traffic is inspected below.
        self.simulator.get_messages_sent_by(LEADER)
        self.collect_rlocs()
        self.collect_rloc16s()
        self.nodes[COMMISSIONER].commissioner_start()
        self.simulator.go(3)
        leader_messages = self.simulator.get_messages_sent_by(LEADER)
        # Extract the session id the Leader assigned in its LEAD_PET response;
        # it is reused for the valid MGMT_COMMISSIONER_SET requests below.
        msg = leader_messages.next_coap_message('2.04', assert_enabled=True)
        commissioner_session_id_tlv = command.get_sub_tlv(msg.coap.payload, mesh_cop.CommissionerSessionId)

        # Invalid request: Steering Data without a Commissioner Session ID TLV.
        steering_data_tlv = mesh_cop.SteeringData(bytes([0xff]))
        self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs([steering_data_tlv])
        self.simulator.go(5)

        # Valid request: Session ID + Steering Data.
        self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs([steering_data_tlv, commissioner_session_id_tlv])
        self.simulator.go(5)

        # Invalid request: Border Agent Locator TLV is not allowed in the set.
        border_agent_locator_tlv = mesh_cop.BorderAgentLocator(0x0400)
        self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs(
            [commissioner_session_id_tlv, border_agent_locator_tlv])
        self.simulator.go(5)

        # Invalid request: valid TLVs plus the disallowed Border Agent Locator.
        self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs([
            steering_data_tlv,
            commissioner_session_id_tlv,
            border_agent_locator_tlv,
        ])
        self.simulator.go(5)

        # Invalid request: wrong (0xffff) Commissioner Session ID value.
        self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs(
            [mesh_cop.CommissionerSessionId(0xffff), steering_data_tlv])
        self.simulator.go(5)

        # Request with an unexpected Channel TLV; per the test plan the Leader
        # still accepts it (see verify() step 14).
        self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs([
            commissioner_session_id_tlv,
            steering_data_tlv,
            mesh_cop.Channel(0x0, 0x0),
        ])
        self.simulator.go(5)

        # Final connectivity check in both directions.
        leader_rloc = self.nodes[LEADER].get_rloc()
        commissioner_rloc = self.nodes[COMMISSIONER].get_rloc()
        self.assertTrue(self.nodes[COMMISSIONER].ping(leader_rloc))
        self.simulator.go(1)
        self.assertTrue(self.nodes[LEADER].ping(commissioner_rloc))

    def verify(self, pv):
        pkts = pv.pkts
        pv.summary.show()

        LEADER = pv.vars['LEADER']
        LEADER_RLOC = pv.vars['LEADER_RLOC']
        # NOTE(review): LEADER_RLOC16 is looked up but not referenced below.
        LEADER_RLOC16 = pv.vars['LEADER_RLOC16']
        COMMISSIONER = pv.vars['COMMISSIONER']
        COMMISSIONER_RLOC = pv.vars['COMMISSIONER_RLOC']

        # Step 1: Ensure topology is formed correctly
        pv.verify_attached('COMMISSIONER', 'LEADER')

        # Step 2: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
        #         to Leader Anycast or Routing Locator:
        #         CoAP Request URI
        #             CON POST coap://<L>:MM/c/cs
        #         CoAP Payload
        #             (missing Commissioner Session ID TLV)
        #             Steering Data TLV (0xFF)
        _mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
            filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
            filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: [NM_STEERING_DATA_TLV] == p.coap.tlv.type and\
                   p.thread_meshcop.tlv.steering_data == Bytes('ff')
                   ).\
            must_next()

        # Step 3: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
        #         Commissioner:
        #         CoAP Response Code
        #             2.04 Changed
        #         CoAP Payload
        #             State TLV (value = Reject)
        pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
            filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: [NM_STATE_TLV] == p.coap.tlv.type and\
                   p.thread_meshcop.tlv.state == MESHCOP_REJECT
                   ).\
            must_next()

        # Step 4: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
        #         to Leader Anycast or Routing Locator:
        #         CoAP Request URI
        #             CON POST coap://<L>:MM/c/cs
        #         CoAP Payload
        #             Commissioner Session ID TLV
        #             Steering Data TLV (0xFF)
        _mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
            filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
            filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: {
                              NM_COMMISSIONER_SESSION_ID_TLV,
                              NM_STEERING_DATA_TLV
                             } == set(p.thread_meshcop.tlv.type) and\
                   p.thread_meshcop.tlv.steering_data == Bytes('ff')
                   ).\
            must_next()

        # Step 5: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
        #         Commissioner:
        #         CoAP Response Code
        #             2.04 Changed
        #         CoAP Payload
        #             State TLV (value = Accept)
        pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
            filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: [NM_STATE_TLV] == p.coap.tlv.type and\
                   p.thread_meshcop.tlv.state == MESHCOP_ACCEPT
                   ).\
            must_next()

        # Step 6: Leader sends a MLE Data Response to the network with the
        #         following TLVs:
        #             - Active Timestamp TLV
        #             - Leader Data TLV
        #             - Network Data TLV
        #             - Source Address TLV
        pkts.filter_wpan_src64(LEADER).\
            filter_LLANMA().\
            filter_mle_cmd(MLE_DATA_RESPONSE).\
            filter(lambda p: {
                              NETWORK_DATA_TLV,
                              SOURCE_ADDRESS_TLV,
                              ACTIVE_TIMESTAMP_TLV,
                              LEADER_DATA_TLV
                             } == set(p.mle.tlv.type) and\
                   {
                    NWD_COMMISSIONING_DATA_TLV
                   } == set(p.thread_nwd.tlv.type) and\
                   {
                    NM_BORDER_AGENT_LOCATOR_TLV,
                    NM_COMMISSIONER_SESSION_ID_TLV,
                    NM_STEERING_DATA_TLV
                   } == set(p.thread_meshcop.tlv.type) and\
                   p.thread_nwd.tlv.stable == [0]
                   ).\
            must_next()

        # Step 7: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
        #         to Leader Anycast or Routing Locator:
        #         CoAP Request URI
        #             CON POST coap://<L>:MM/c/cs
        #         CoAP Payload
        #             Commissioner Session ID TLV
        #             Border Agent Locator TLV (0x0400) (not allowed TLV)
        _mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
            filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
            filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: {
                              NM_COMMISSIONER_SESSION_ID_TLV,
                              NM_BORDER_AGENT_LOCATOR_TLV
                             } == set(p.thread_meshcop.tlv.type) and\
                   p.thread_meshcop.tlv.ba_locator == 0x0400
                   ).\
            must_next()

        # Step 8: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
        #         Commissioner:
        #         CoAP Response Code
        #             2.04 Changed
        #         CoAP Payload
        #             State TLV (value = Reject)
        pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
            filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: [NM_STATE_TLV] == p.coap.tlv.type and\
                   p.thread_meshcop.tlv.state == MESHCOP_REJECT
                   ).\
            must_next()

        # Step 9: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
        #         to Leader Anycast or Routing Locator:
        #         CoAP Request URI
        #             CON POST coap://<L>:MM/c/cs
        #         CoAP Payload
        #             Commissioner Session ID TLV
        #             Steering Data TLV (0xFF)
        #             Border Agent Locator TLV (0x0400) (not allowed TLV)
        _mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
            filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
            filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: {
                              NM_COMMISSIONER_SESSION_ID_TLV,
                              NM_STEERING_DATA_TLV,
                              NM_BORDER_AGENT_LOCATOR_TLV
                             } == set(p.thread_meshcop.tlv.type) and\
                   p.thread_meshcop.tlv.ba_locator == 0x0400 and\
                   p.thread_meshcop.tlv.steering_data == Bytes('ff')
                   ).\
            must_next()

        # Step 10: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
        #          Commissioner:
        #          CoAP Response Code
        #              2.04 Changed
        #          CoAP Payload
        #              State TLV (value = Reject)
        pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
            filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: [NM_STATE_TLV] == p.coap.tlv.type and\
                   p.thread_meshcop.tlv.state == MESHCOP_REJECT
                   ).\
            must_next()

        # Step 11: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
        #          to Leader Anycast or Routing Locator:
        #          CoAP Request URI
        #              CON POST coap://<L>:MM/c/cs
        #          CoAP Payload
        #              Commissioner Session ID TLV (0xFFFF) (invalid value)
        #              Steering Data TLV (0xFF)
        _mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
            filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
            filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: {
                              NM_COMMISSIONER_SESSION_ID_TLV,
                              NM_STEERING_DATA_TLV
                             } == set(p.thread_meshcop.tlv.type) and\
                   p.thread_meshcop.tlv.commissioner_sess_id == 0xFFFF and\
                   p.thread_meshcop.tlv.steering_data == Bytes('ff')
                   ).\
            must_next()

        # Step 12: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
        #          Commissioner:
        #          CoAP Response Code
        #              2.04 Changed
        #          CoAP Payload
        #              State TLV (value = Reject)
        pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
            filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: [NM_STATE_TLV] == p.coap.tlv.type and\
                   p.thread_meshcop.tlv.state == MESHCOP_REJECT
                   ).\
            must_next()

        # Step 13: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
        #          to Leader Anycast or Routing Locator:
        #          CoAP Request URI
        #              CON POST coap://<L>:MM/c/cs
        #          CoAP Payload
        #              Commissioner Session ID TLV
        #              Steering Data TLV (0xFF)
        #              Channel TLV (not allowed TLV)
        _mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
            filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
            filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: {
                              NM_COMMISSIONER_SESSION_ID_TLV,
                              NM_STEERING_DATA_TLV,
                              NM_CHANNEL_TLV
                             } == set(p.thread_meshcop.tlv.type) and\
                   p.thread_meshcop.tlv.steering_data == Bytes('ff')
                   ).\
            must_next()

        # Step 14: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
        #          Commissioner:
        #          CoAP Response Code
        #              2.04 Changed
        #          CoAP Payload
        #              State TLV (value = Accept)
        pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
            filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
            filter(lambda p: [NM_STATE_TLV] == p.coap.tlv.type and\
                   p.thread_meshcop.tlv.state == MESHCOP_ACCEPT
                   ).\
            must_next()

        # Step 15: Verify connectivity by sending an ICMPv6 Echo Request to the DUT mesh local address
        _pkt = pkts.filter_ping_request().\
            filter_ipv6_src_dst(COMMISSIONER_RLOC, LEADER_RLOC).\
            must_next()
        pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
            filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
            must_next()

        _pkt = pkts.filter_ping_request().\
            filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
            must_next()
        pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
            filter_ipv6_src_dst(COMMISSIONER_RLOC, LEADER_RLOC).\
            must_next()


if __name__ == '__main__':
    unittest.main()
bsd-3-clause
jennyzhang0215/incubator-mxnet
example/bi-lstm-sort/lstm.py
19
8237
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint:skip-file import sys import mxnet as mx import numpy as np from collections import namedtuple import time import math LSTMState = namedtuple("LSTMState", ["c", "h"]) LSTMParam = namedtuple("LSTMParam", ["i2h_weight", "i2h_bias", "h2h_weight", "h2h_bias"]) LSTMModel = namedtuple("LSTMModel", ["rnn_exec", "symbol", "init_states", "last_states", "forward_state", "backward_state", "seq_data", "seq_labels", "seq_outputs", "param_blocks"]) def lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0.): """LSTM Cell symbol""" if dropout > 0.: indata = mx.sym.Dropout(data=indata, p=dropout) i2h = mx.sym.FullyConnected(data=indata, weight=param.i2h_weight, bias=param.i2h_bias, num_hidden=num_hidden * 4, name="t%d_l%d_i2h" % (seqidx, layeridx)) h2h = mx.sym.FullyConnected(data=prev_state.h, weight=param.h2h_weight, bias=param.h2h_bias, num_hidden=num_hidden * 4, name="t%d_l%d_h2h" % (seqidx, layeridx)) gates = i2h + h2h slice_gates = mx.sym.SliceChannel(gates, num_outputs=4, name="t%d_l%d_slice" % (seqidx, layeridx)) in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid") in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh") forget_gate = 
mx.sym.Activation(slice_gates[2], act_type="sigmoid") out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid") next_c = (forget_gate * prev_state.c) + (in_gate * in_transform) next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh") return LSTMState(c=next_c, h=next_h) def bi_lstm_unroll(seq_len, input_size, num_hidden, num_embed, num_label, dropout=0.): embed_weight = mx.sym.Variable("embed_weight") cls_weight = mx.sym.Variable("cls_weight") cls_bias = mx.sym.Variable("cls_bias") last_states = [] last_states.append(LSTMState(c = mx.sym.Variable("l0_init_c"), h = mx.sym.Variable("l0_init_h"))) last_states.append(LSTMState(c = mx.sym.Variable("l1_init_c"), h = mx.sym.Variable("l1_init_h"))) forward_param = LSTMParam(i2h_weight=mx.sym.Variable("l0_i2h_weight"), i2h_bias=mx.sym.Variable("l0_i2h_bias"), h2h_weight=mx.sym.Variable("l0_h2h_weight"), h2h_bias=mx.sym.Variable("l0_h2h_bias")) backward_param = LSTMParam(i2h_weight=mx.sym.Variable("l1_i2h_weight"), i2h_bias=mx.sym.Variable("l1_i2h_bias"), h2h_weight=mx.sym.Variable("l1_h2h_weight"), h2h_bias=mx.sym.Variable("l1_h2h_bias")) # embeding layer data = mx.sym.Variable('data') label = mx.sym.Variable('softmax_label') embed = mx.sym.Embedding(data=data, input_dim=input_size, weight=embed_weight, output_dim=num_embed, name='embed') wordvec = mx.sym.SliceChannel(data=embed, num_outputs=seq_len, squeeze_axis=1) forward_hidden = [] for seqidx in range(seq_len): hidden = wordvec[seqidx] next_state = lstm(num_hidden, indata=hidden, prev_state=last_states[0], param=forward_param, seqidx=seqidx, layeridx=0, dropout=dropout) hidden = next_state.h last_states[0] = next_state forward_hidden.append(hidden) backward_hidden = [] for seqidx in range(seq_len): k = seq_len - seqidx - 1 hidden = wordvec[k] next_state = lstm(num_hidden, indata=hidden, prev_state=last_states[1], param=backward_param, seqidx=k, layeridx=1,dropout=dropout) hidden = next_state.h last_states[1] = next_state backward_hidden.insert(0, hidden) 
hidden_all = [] for i in range(seq_len): hidden_all.append(mx.sym.Concat(*[forward_hidden[i], backward_hidden[i]], dim=1)) hidden_concat = mx.sym.Concat(*hidden_all, dim=0) pred = mx.sym.FullyConnected(data=hidden_concat, num_hidden=num_label, weight=cls_weight, bias=cls_bias, name='pred') label = mx.sym.transpose(data=label) label = mx.sym.Reshape(data=label, target_shape=(0,)) sm = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax') return sm def bi_lstm_inference_symbol(input_size, seq_len, num_hidden, num_embed, num_label, dropout=0.): seqidx = 0 embed_weight=mx.sym.Variable("embed_weight") cls_weight = mx.sym.Variable("cls_weight") cls_bias = mx.sym.Variable("cls_bias") last_states = [LSTMState(c = mx.sym.Variable("l0_init_c"), h = mx.sym.Variable("l0_init_h")), LSTMState(c = mx.sym.Variable("l1_init_c"), h = mx.sym.Variable("l1_init_h"))] forward_param = LSTMParam(i2h_weight=mx.sym.Variable("l0_i2h_weight"), i2h_bias=mx.sym.Variable("l0_i2h_bias"), h2h_weight=mx.sym.Variable("l0_h2h_weight"), h2h_bias=mx.sym.Variable("l0_h2h_bias")) backward_param = LSTMParam(i2h_weight=mx.sym.Variable("l1_i2h_weight"), i2h_bias=mx.sym.Variable("l1_i2h_bias"), h2h_weight=mx.sym.Variable("l1_h2h_weight"), h2h_bias=mx.sym.Variable("l1_h2h_bias")) data = mx.sym.Variable("data") embed = mx.sym.Embedding(data=data, input_dim=input_size, weight=embed_weight, output_dim=num_embed, name='embed') wordvec = mx.sym.SliceChannel(data=embed, num_outputs=seq_len, squeeze_axis=1) forward_hidden = [] for seqidx in range(seq_len): next_state = lstm(num_hidden, indata=wordvec[seqidx], prev_state=last_states[0], param=forward_param, seqidx=seqidx, layeridx=0, dropout=0.0) hidden = next_state.h last_states[0] = next_state forward_hidden.append(hidden) backward_hidden = [] for seqidx in range(seq_len): k = seq_len - seqidx - 1 next_state = lstm(num_hidden, indata=wordvec[k], prev_state=last_states[1], param=backward_param, seqidx=k, layeridx=1, dropout=0.0) hidden = next_state.h 
last_states[1] = next_state backward_hidden.insert(0, hidden) hidden_all = [] for i in range(seq_len): hidden_all.append(mx.sym.Concat(*[forward_hidden[i], backward_hidden[i]], dim=1)) hidden_concat = mx.sym.Concat(*hidden_all, dim=0) fc = mx.sym.FullyConnected(data=hidden_concat, num_hidden=num_label, weight=cls_weight, bias=cls_bias, name='pred') sm = mx.sym.SoftmaxOutput(data=fc, name='softmax') output = [sm] for state in last_states: output.append(state.c) output.append(state.h) return mx.sym.Group(output)
apache-2.0
adam111316/SickGear
lib/unidecode/x010.py
252
4110
data = ( 'k', # 0x00 'kh', # 0x01 'g', # 0x02 'gh', # 0x03 'ng', # 0x04 'c', # 0x05 'ch', # 0x06 'j', # 0x07 'jh', # 0x08 'ny', # 0x09 'nny', # 0x0a 'tt', # 0x0b 'tth', # 0x0c 'dd', # 0x0d 'ddh', # 0x0e 'nn', # 0x0f 'tt', # 0x10 'th', # 0x11 'd', # 0x12 'dh', # 0x13 'n', # 0x14 'p', # 0x15 'ph', # 0x16 'b', # 0x17 'bh', # 0x18 'm', # 0x19 'y', # 0x1a 'r', # 0x1b 'l', # 0x1c 'w', # 0x1d 's', # 0x1e 'h', # 0x1f 'll', # 0x20 'a', # 0x21 '[?]', # 0x22 'i', # 0x23 'ii', # 0x24 'u', # 0x25 'uu', # 0x26 'e', # 0x27 '[?]', # 0x28 'o', # 0x29 'au', # 0x2a '[?]', # 0x2b 'aa', # 0x2c 'i', # 0x2d 'ii', # 0x2e 'u', # 0x2f 'uu', # 0x30 'e', # 0x31 'ai', # 0x32 '[?]', # 0x33 '[?]', # 0x34 '[?]', # 0x35 'N', # 0x36 '\'', # 0x37 ':', # 0x38 '', # 0x39 '[?]', # 0x3a '[?]', # 0x3b '[?]', # 0x3c '[?]', # 0x3d '[?]', # 0x3e '[?]', # 0x3f '0', # 0x40 '1', # 0x41 '2', # 0x42 '3', # 0x43 '4', # 0x44 '5', # 0x45 '6', # 0x46 '7', # 0x47 '8', # 0x48 '9', # 0x49 ' / ', # 0x4a ' // ', # 0x4b 'n*', # 0x4c 'r*', # 0x4d 'l*', # 0x4e 'e*', # 0x4f 'sh', # 0x50 'ss', # 0x51 'R', # 0x52 'RR', # 0x53 'L', # 0x54 'LL', # 0x55 'R', # 0x56 'RR', # 0x57 'L', # 0x58 'LL', # 0x59 '[?]', # 0x5a '[?]', # 0x5b '[?]', # 0x5c '[?]', # 0x5d '[?]', # 0x5e '[?]', # 0x5f '[?]', # 0x60 '[?]', # 0x61 '[?]', # 0x62 '[?]', # 0x63 '[?]', # 0x64 '[?]', # 0x65 '[?]', # 0x66 '[?]', # 0x67 '[?]', # 0x68 '[?]', # 0x69 '[?]', # 0x6a '[?]', # 0x6b '[?]', # 0x6c '[?]', # 0x6d '[?]', # 0x6e '[?]', # 0x6f '[?]', # 0x70 '[?]', # 0x71 '[?]', # 0x72 '[?]', # 0x73 '[?]', # 0x74 '[?]', # 0x75 '[?]', # 0x76 '[?]', # 0x77 '[?]', # 0x78 '[?]', # 0x79 '[?]', # 0x7a '[?]', # 0x7b '[?]', # 0x7c '[?]', # 0x7d '[?]', # 0x7e '[?]', # 0x7f '[?]', # 0x80 '[?]', # 0x81 '[?]', # 0x82 '[?]', # 0x83 '[?]', # 0x84 '[?]', # 0x85 '[?]', # 0x86 '[?]', # 0x87 '[?]', # 0x88 '[?]', # 0x89 '[?]', # 0x8a '[?]', # 0x8b '[?]', # 0x8c '[?]', # 0x8d '[?]', # 0x8e '[?]', # 0x8f '[?]', # 0x90 '[?]', # 0x91 '[?]', # 0x92 '[?]', # 0x93 '[?]', # 0x94 '[?]', # 0x95 
'[?]', # 0x96 '[?]', # 0x97 '[?]', # 0x98 '[?]', # 0x99 '[?]', # 0x9a '[?]', # 0x9b '[?]', # 0x9c '[?]', # 0x9d '[?]', # 0x9e '[?]', # 0x9f 'A', # 0xa0 'B', # 0xa1 'G', # 0xa2 'D', # 0xa3 'E', # 0xa4 'V', # 0xa5 'Z', # 0xa6 'T`', # 0xa7 'I', # 0xa8 'K', # 0xa9 'L', # 0xaa 'M', # 0xab 'N', # 0xac 'O', # 0xad 'P', # 0xae 'Zh', # 0xaf 'R', # 0xb0 'S', # 0xb1 'T', # 0xb2 'U', # 0xb3 'P`', # 0xb4 'K`', # 0xb5 'G\'', # 0xb6 'Q', # 0xb7 'Sh', # 0xb8 'Ch`', # 0xb9 'C`', # 0xba 'Z\'', # 0xbb 'C', # 0xbc 'Ch', # 0xbd 'X', # 0xbe 'J', # 0xbf 'H', # 0xc0 'E', # 0xc1 'Y', # 0xc2 'W', # 0xc3 'Xh', # 0xc4 'OE', # 0xc5 '[?]', # 0xc6 '[?]', # 0xc7 '[?]', # 0xc8 '[?]', # 0xc9 '[?]', # 0xca '[?]', # 0xcb '[?]', # 0xcc '[?]', # 0xcd '[?]', # 0xce '[?]', # 0xcf 'a', # 0xd0 'b', # 0xd1 'g', # 0xd2 'd', # 0xd3 'e', # 0xd4 'v', # 0xd5 'z', # 0xd6 't`', # 0xd7 'i', # 0xd8 'k', # 0xd9 'l', # 0xda 'm', # 0xdb 'n', # 0xdc 'o', # 0xdd 'p', # 0xde 'zh', # 0xdf 'r', # 0xe0 's', # 0xe1 't', # 0xe2 'u', # 0xe3 'p`', # 0xe4 'k`', # 0xe5 'g\'', # 0xe6 'q', # 0xe7 'sh', # 0xe8 'ch`', # 0xe9 'c`', # 0xea 'z\'', # 0xeb 'c', # 0xec 'ch', # 0xed 'x', # 0xee 'j', # 0xef 'h', # 0xf0 'e', # 0xf1 'y', # 0xf2 'w', # 0xf3 'xh', # 0xf4 'oe', # 0xf5 'f', # 0xf6 '[?]', # 0xf7 '[?]', # 0xf8 '[?]', # 0xf9 '[?]', # 0xfa ' // ', # 0xfb '[?]', # 0xfc '[?]', # 0xfd '[?]', # 0xfe )
gpl-3.0
HomeRad/TorCleaner
wc/filter/rules/Rule.py
1
9905
# -*- coding: iso-8859-1 -*- # Copyright (C) 2000-2009 Bastian Kleineidam """ Basic rule class and routines. """ import re from wc.XmlUtils import xmlquote, xmlquoteattr, xmlunquote from . import register_rule def compileRegex(obj, attr, fullmatch=False, flags=0): """ Regex-compile object attribute into <attr>_ro. """ if hasattr(obj, attr): val = getattr(obj, attr) if val: if fullmatch: val = "^(%s)$" % val setattr(obj, attr+"_ro", re.compile(val, flags)) class LangDict(dict): """ Dictionary with a fallback algorithm, getting a default key entry if the requested key is not already mapped. Keys are usually languages like 'en' or 'de'. """ def __getitem__(self, key): """ Accessing an entry with unknown translation returns the default translated entry or if not found a random one or if the dict is empty an empty string. """ if key not in self: # default is english if 'en' in self: return self['en'] for val in self.values(): return val self[key] = "" return super(LangDict, self).__getitem__(key) class Rule(object): """ Basic rule class for filtering. After loading from XML (and having called compile_data), a rule has: titles - mapping of {lang -> translated titles} descriptions - mapping of {lang -> translated description} - sid - identification string (unique among all rules) - oid - dynamic sorting number (unique only for sorting in one level) - disable - flag to disable this rule - urlre - regular expression that matches urls applicable for this rule. leave empty to apply to all urls. - parent - the parent folder (if any); look at FolderRule class """ # Attributes that cannot be used for custom data. reserved_attrnames = [ 'sid', 'name', 'titles', 'descriptions', 'disable', 'parent', 'attrnames', 'intattrs', 'listattrs', ] def __init__(self, sid=None, titles=None, descriptions=None, disable=0, parent=None): """ Initialize basic rule data. 
""" self.sid = sid # class name without "Rule" suffix, in lowercase self.name = self.__class__.__name__[:-4].lower() self.titles = LangDict() if titles is not None: self.titles.update(titles) self.descriptions = LangDict() if descriptions is not None: self.descriptions.update(descriptions) self.disable = disable self.parent = parent self.attrnames = ['disable', 'sid'] self.intattrs = ['disable'] self.listattrs = [] def _reset_parsed_data(self): """ Reset parsed rule data. """ self._data = u"" self._lang = u"en" def update(self, rule, dryrun=False, log=None): """ Update title and description with given rule data. """ assert self.sid == rule.sid, "updating %s with invalid rule %s" % \ (self, rule) assert self.sid.startswith('wc'), "updating invalid id %s" % self.sid l = [a for a in self.attrnames if a not in self.reserved_attrnames] chg = self.update_attrs(l, rule, dryrun, log) chg = self.update_titledesc(rule, dryrun, log) or chg return chg def update_titledesc(self, rule, dryrun, log): """ Update rule title and description with given rule data. """ chg = False for key, value in rule.titles.iteritems(): if key not in self.titles: oldvalue = "" elif self.titles[key] != value: oldvalue = self.titles[key] else: oldvalue = None if oldvalue is not None: chg = True print >> log, " ", _("updating rule title for " \ "language %s:") % key print >> log, " ", repr(oldvalue), "==>", repr(value) if not dryrun: self.titles[key] = value for key, value in rule.descriptions.iteritems(): if key not in self.descriptions: oldvalue = "" elif self.descriptions[key] != value: oldvalue = self.descriptions[key] else: oldvalue = None if oldvalue is not None: chg = True print >> log, " ", _("updating rule description for" \ " language %s:") % key print >> log, " ", repr(oldvalue), "==>", repr(value) if not dryrun: self.descriptions[key] = value return chg def update_attrs(self, attrs, rule, dryrun, log): """ Update rule attributes (specified by attrs) with given rule data. 
""" chg = False for attr in attrs: oldval = getattr(self, attr) newval = getattr(rule, attr) if oldval != newval: print >> log, " ", \ _("updating rule %s:") % self.tiptext() print >> log, " ", attr, repr(oldval), "==>", repr(newval) chg = True if not dryrun: setattr(self, attr, newval) return chg def __lt__(self, other): """ True iff self < other according to oid. """ return self.oid < other.oid def __le__(self, other): """ True iff self <= other according to oid. """ return self.oid <= other.oid def __eq__(self, other): """ True iff self == other according to oid. """ return self.oid == other.oid def __ne__(self, other): """ True iff self != other according to oid. """ return self.oid != other.oid def __gt__(self, other): """ True iff self > other according to oid. """ return self.oid > other.oid def __ge__(self, other): """ True iff self >= other according to oid. """ return self.oid >= other.oid def __hash__(self): """ Hash value. """ return self.sid def fill_attrs(self, attrs, name): """ Initialize rule attributes with given attrs. """ self._reset_parsed_data() if name in ('title', 'description'): self._lang = attrs['lang'] return for attr in self.attrnames: if attr in attrs: setattr(self, attr, attrs[attr]) for attr in self.intattrs: val = getattr(self, attr) if val: setattr(self, attr, int(val)) else: setattr(self, attr, 0) for attr in self.listattrs: val = getattr(self, attr) if val: setattr(self, attr, val.split(",")) else: setattr(self, attr, []) def fill_data(self, data, name): """ Called when XML character data was parsed to fill rule values. """ self._data += data def end_data(self, name): """ Called when XML end element was reached. """ if name == self.name: self._data = u"" else: self._data = xmlunquote(self._data) if name == 'title': self.titles[self._lang] = self._data elif name == 'description': self.descriptions[self._lang] = self._data def compile_data(self): """ Register this rule. Called when all XML parsing of rule is finished. 
""" register_rule(self) # some internal checks for name in self.intattrs: assert name in self.attrnames for name in self.listattrs: assert name in self.attrnames for name in self.attrnames: if name not in ('sid', 'disable'): assert name not in self.reserved_attrnames def toxml(self): """ Rule data as XML for storing, must be overridden in subclass. """ s = u'<%s'% self.name if self.sid is not None: s += u'sid="%s"' % xmlquoteattr(self.sid) if self.disable: s += u' disable="%d"' % self.disable return s def title_desc_toxml(self, prefix=""): """ XML for rule title and description. """ t = [u'%s<title lang="%s">%s</title>' % \ (prefix, xmlquoteattr(key), xmlquote(value)) \ for key,value in self.titles.iteritems() if value] d = [u'%s<description lang="%s">%s</description>' % \ (prefix, xmlquoteattr(key), xmlquote(value)) \ for key,value in self.descriptions.iteritems() if value] return u"\n".join(t+d) def __str__(self): """ Basic rule data as ISO-8859-1 encoded string. """ s = "Rule %s\n" % self.name if self.sid is not None: s += "sid %s\n" % self.sid.encode("iso-8859-1") s += "disable %d\n" % self.disable s += "title %s\n" % self.titles['en'].encode("iso-8859-1") return s def tiptext(self): """ Short info for gui display. """ s = u"%s" % self.name.capitalize() if self.sid is not None: s += u'#%s' % self.sid return s def applies_to_url(self, url): """ Wether rule applies to given URL. """ return True def applies_to_mime(self, mime): """ Wether rule applies to given MIME type. """ return True
gpl-2.0
kanagasabapathi/python-for-android
python3-alpha/python3-src/Lib/tkinter/tix.py
45
77690
# -*-mode: python; fill-column: 75; tab-width: 8 -*- # # $Id$ # # Tix.py -- Tix widget wrappers. # # For Tix, see http://tix.sourceforge.net # # - Sudhir Shenoy (sshenoy@gol.com), Dec. 1995. # based on an idea of Jean-Marc Lugrin (lugrin@ms.com) # # NOTE: In order to minimize changes to Tkinter.py, some of the code here # (TixWidget.__init__) has been taken from Tkinter (Widget.__init__) # and will break if there are major changes in Tkinter. # # The Tix widgets are represented by a class hierarchy in python with proper # inheritance of base classes. # # As a result after creating a 'w = StdButtonBox', I can write # w.ok['text'] = 'Who Cares' # or w.ok['bg'] = w['bg'] # or even w.ok.invoke() # etc. # # Compare the demo tixwidgets.py to the original Tcl program and you will # appreciate the advantages. # from tkinter import * from tkinter import _flatten, _cnfmerge, _default_root # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: raise ImportError("This version of Tix.py requires Tk 4.0 or higher") import _tkinter # If this fails your Python may not be configured for Tk # Some more constants (for consistency with Tkinter) WINDOW = 'window' TEXT = 'text' STATUS = 'status' IMMEDIATE = 'immediate' IMAGE = 'image' IMAGETEXT = 'imagetext' BALLOON = 'balloon' AUTO = 'auto' ACROSSTOP = 'acrosstop' # A few useful constants for the Grid widget ASCII = 'ascii' CELL = 'cell' COLUMN = 'column' DECREASING = 'decreasing' INCREASING = 'increasing' INTEGER = 'integer' MAIN = 'main' MAX = 'max' REAL = 'real' ROW = 'row' S_REGION = 's-region' X_REGION = 'x-region' Y_REGION = 'y-region' # Some constants used by Tkinter dooneevent() TCL_DONT_WAIT = 1 << 1 TCL_WINDOW_EVENTS = 1 << 2 TCL_FILE_EVENTS = 1 << 3 TCL_TIMER_EVENTS = 1 << 4 TCL_IDLE_EVENTS = 1 << 5 TCL_ALL_EVENTS = 0 # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. 
import tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: """The tix commands provide access to miscellaneous elements of Tix's internal state and the Tix application context. Most of the information manipulated by these commands pertains to the application as a whole, or to a screen or display, rather than to a particular window. This is a mixin class, assumed to be mixed to Tkinter.Tk that supports the self.tk.call method. """ def tix_addbitmapdir(self, directory): """Tix maintains a list of directories under which the tix_getimage and tix_getbitmap commands will search for image files. The standard bitmap directory is $TIX_LIBRARY/bitmaps. The addbitmapdir command adds directory into this list. By using this command, the image files of an applications can also be located using the tix_getimage or tix_getbitmap command. """ return self.tk.call('tix', 'addbitmapdir', directory) def tix_cget(self, option): """Returns the current value of the configuration option given by option. Option may be any of the options described in the CONFIGURATION OPTIONS section. """ return self.tk.call('tix', 'cget', option) def tix_configure(self, cnf=None, **kw): """Query or modify the configuration options of the Tix application context. If no option is specified, returns a dictionary all of the available options. If option is specified with no value, then the command returns a list describing the one named option (this list will be identical to the corresponding sublist of the value returned if no option is specified). If one or more option-value pairs are specified, then the command modifies the given option(s) to have the given value(s); in this case the command returns an empty string. Option may be any of the configuration options. 
""" # Copied from Tkinter.py if kw: cnf = _cnfmerge((cnf, kw)) elif cnf: cnf = _cnfmerge(cnf) if cnf is None: cnf = {} for x in self.tk.split(self.tk.call('tix', 'configure')): cnf[x[0][1:]] = (x[0][1:],) + x[1:] return cnf if isinstance(cnf, StringType): x = self.tk.split(self.tk.call('tix', 'configure', '-'+cnf)) return (x[0][1:],) + x[1:] return self.tk.call(('tix', 'configure') + self._options(cnf)) def tix_filedialog(self, dlgclass=None): """Returns the file selection dialog that may be shared among different calls from this application. This command will create a file selection dialog widget when it is called the first time. This dialog will be returned by all subsequent calls to tix_filedialog. An optional dlgclass parameter can be passed to specified what type of file selection dialog widget is desired. Possible options are tix FileSelectDialog or tixExFileSelectDialog. """ if dlgclass is not None: return self.tk.call('tix', 'filedialog', dlgclass) else: return self.tk.call('tix', 'filedialog') def tix_getbitmap(self, name): """Locates a bitmap file of the name name.xpm or name in one of the bitmap directories (see the tix_addbitmapdir command above). By using tix_getbitmap, you can avoid hard coding the pathnames of the bitmap files in your application. When successful, it returns the complete pathname of the bitmap file, prefixed with the character '@'. The returned value can be used to configure the -bitmap option of the TK and Tix widgets. """ return self.tk.call('tix', 'getbitmap', name) def tix_getimage(self, name): """Locates an image file of the name name.xpm, name.xbm or name.ppm in one of the bitmap directories (see the addbitmapdir command above). If more than one file with the same name (but different extensions) exist, then the image type is chosen according to the depth of the X display: xbm images are chosen on monochrome displays and color images are chosen on color displays. 
By using tix_ getimage, you can avoid hard coding the pathnames of the image files in your application. When successful, this command returns the name of the newly created image, which can be used to configure the -image option of the Tk and Tix widgets. """ return self.tk.call('tix', 'getimage', name) def tix_option_get(self, name): """Gets the options maintained by the Tix scheme mechanism. Available options include: active_bg active_fg bg bold_font dark1_bg dark1_fg dark2_bg dark2_fg disabled_fg fg fixed_font font inactive_bg inactive_fg input1_bg input2_bg italic_font light1_bg light1_fg light2_bg light2_fg menu_font output1_bg output2_bg select_bg select_fg selector """ # could use self.tk.globalgetvar('tixOption', name) return self.tk.call('tix', 'option', 'get', name) def tix_resetoptions(self, newScheme, newFontSet, newScmPrio=None): """Resets the scheme and fontset of the Tix application to newScheme and newFontSet, respectively. This affects only those widgets created after this call. Therefore, it is best to call the resetoptions command before the creation of any widgets in a Tix application. The optional parameter newScmPrio can be given to reset the priority level of the Tk options set by the Tix schemes. Because of the way Tk handles the X option database, after Tix has been has imported and inited, it is not possible to reset the color schemes and font sets using the tix config command. Instead, the tix_resetoptions command must be used. """ if newScmPrio is not None: return self.tk.call('tix', 'resetoptions', newScheme, newFontSet, newScmPrio) else: return self.tk.call('tix', 'resetoptions', newScheme, newFontSet) class Tk(tkinter.Tk, tixCommand): """Toplevel widget of Tix which represents mostly the main window of an application. 
It has an associated Tcl interpreter.""" def __init__(self, screenName=None, baseName=None, className='Tix'): tkinter.Tk.__init__(self, screenName, baseName, className) tixlib = os.environ.get('TIX_LIBRARY') self.tk.eval('global auto_path; lappend auto_path [file dir [info nameof]]') if tixlib is not None: self.tk.eval('global auto_path; lappend auto_path {%s}' % tixlib) self.tk.eval('global tcl_pkgPath; lappend tcl_pkgPath {%s}' % tixlib) # Load Tix - this should work dynamically or statically # If it's static, tcl/tix8.1/pkgIndex.tcl should have # 'load {} Tix' # If it's dynamic under Unix, tcl/tix8.1/pkgIndex.tcl should have # 'load libtix8.1.8.3.so Tix' self.tk.eval('package require Tix') def destroy(self): # For safety, remove an delete_window binding before destroy self.protocol("WM_DELETE_WINDOW", "") tkinter.Tk.destroy(self) # The Tix 'tixForm' geometry manager class Form: """The Tix Form geometry manager Widgets can be arranged by specifying attachments to other widgets. See Tix documentation for complete details""" def config(self, cnf={}, **kw): self.tk.call('tixForm', self._w, *self._options(cnf, kw)) form = config def __setitem__(self, key, value): Form.form(self, {key: value}) def check(self): return self.tk.call('tixForm', 'check', self._w) def forget(self): self.tk.call('tixForm', 'forget', self._w) def grid(self, xsize=0, ysize=0): if (not xsize) and (not ysize): x = self.tk.call('tixForm', 'grid', self._w) y = self.tk.splitlist(x) z = () for x in y: z = z + (self.tk.getint(x),) return z return self.tk.call('tixForm', 'grid', self._w, xsize, ysize) def info(self, option=None): if not option: return self.tk.call('tixForm', 'info', self._w) if option[0] != '-': option = '-' + option return self.tk.call('tixForm', 'info', self._w, option) def slaves(self): return [self._nametowidget(x) for x in self.tk.splitlist( self.tk.call( 'tixForm', 'slaves', self._w))] tkinter.Widget.__bases__ = tkinter.Widget.__bases__ + (Form,) class TixWidget(tkinter.Widget): 
"""A TixWidget class is used to package all (or most) Tix widgets. Widget initialization is extended in two ways: 1) It is possible to give a list of options which must be part of the creation command (so called Tix 'static' options). These cannot be given as a 'config' command later. 2) It is possible to give the name of an existing TK widget. These are child widgets created automatically by a Tix mega-widget. The Tk call to create these widgets is therefore bypassed in TixWidget.__init__ Both options are for use by subclasses only. """ def __init__ (self, master=None, widgetName=None, static_options=None, cnf={}, kw={}): # Merge keywords and dictionary arguments if kw: cnf = _cnfmerge((cnf, kw)) else: cnf = _cnfmerge(cnf) # Move static options into extra. static_options must be # a list of keywords (or None). extra=() # 'options' is always a static option if static_options: static_options.append('options') else: static_options = ['options'] for k,v in list(cnf.items()): if k in static_options: extra = extra + ('-' + k, v) del cnf[k] self.widgetName = widgetName Widget._setup(self, master, cnf) # If widgetName is None, this is a dummy creation call where the # corresponding Tk widget has already been created by Tix if widgetName: self.tk.call(widgetName, self._w, *extra) # Non-static options - to be done via a 'config' command if cnf: Widget.config(self, cnf) # Dictionary to hold subwidget names for easier access. We can't # use the children list because the public Tix names may not be the # same as the pathname component self.subwidget_list = {} # We set up an attribute access function so that it is possible to # do w.ok['text'] = 'Hello' rather than w.subwidget('ok')['text'] = 'Hello' # when w is a StdButtonBox. 
# We can even do w.ok.invoke() because w.ok is subclassed from the # Button class if you go through the proper constructors def __getattr__(self, name): if name in self.subwidget_list: return self.subwidget_list[name] raise AttributeError(name) def set_silent(self, value): """Set a variable without calling its action routine""" self.tk.call('tixSetSilent', self._w, value) def subwidget(self, name): """Return the named subwidget (which must have been created by the sub-class).""" n = self._subwidget_name(name) if not n: raise TclError("Subwidget " + name + " not child of " + self._name) # Remove header of name and leading dot n = n[len(self._w)+1:] return self._nametowidget(n) def subwidgets_all(self): """Return all subwidgets.""" names = self._subwidget_names() if not names: return [] retlist = [] for name in names: name = name[len(self._w)+1:] try: retlist.append(self._nametowidget(name)) except: # some of the widgets are unknown e.g. border in LabelFrame pass return retlist def _subwidget_name(self,name): """Get a subwidget name (returns a String, not a Widget !)""" try: return self.tk.call(self._w, 'subwidget', name) except TclError: return None def _subwidget_names(self): """Return the name of all subwidgets.""" try: x = self.tk.call(self._w, 'subwidgets', '-all') return self.tk.split(x) except TclError: return None def config_all(self, option, value): """Set configuration options for all subwidgets (and self).""" if option == '': return elif not isinstance(option, StringType): option = repr(option) if not isinstance(value, StringType): value = repr(value) names = self._subwidget_names() for name in names: self.tk.call(name, 'configure', '-' + option, value) # These are missing from Tkinter def image_create(self, imgtype, cnf={}, master=None, **kw): if not master: master = tkinter._default_root if not master: raise RuntimeError('Too early to create image') if kw and cnf: cnf = _cnfmerge((cnf, kw)) elif kw: cnf = kw options = () for k, v in cnf.items(): if 
hasattr(v, '__call__'): v = self._register(v) options = options + ('-'+k, v) return master.tk.call(('image', 'create', imgtype,) + options) def image_delete(self, imgname): try: self.tk.call('image', 'delete', imgname) except TclError: # May happen if the root was destroyed pass # Subwidgets are child widgets created automatically by mega-widgets. # In python, we have to create these subwidgets manually to mirror their # existence in Tk/Tix. class TixSubWidget(TixWidget): """Subwidget class. This is used to mirror child widgets automatically created by Tix/Tk as part of a mega-widget in Python (which is not informed of this)""" def __init__(self, master, name, destroy_physically=1, check_intermediate=1): if check_intermediate: path = master._subwidget_name(name) try: path = path[len(master._w)+1:] plist = path.split('.') except: plist = [] if not check_intermediate: # immediate descendant TixWidget.__init__(self, master, None, None, {'name' : name}) else: # Ensure that the intermediate widgets exist parent = master for i in range(len(plist) - 1): n = '.'.join(plist[:i+1]) try: w = master._nametowidget(n) parent = w except KeyError: # Create the intermediate widget parent = TixSubWidget(parent, plist[i], destroy_physically=0, check_intermediate=0) # The Tk widget name is in plist, not in name if plist: name = plist[-1] TixWidget.__init__(self, parent, None, None, {'name' : name}) self.destroy_physically = destroy_physically def destroy(self): # For some widgets e.g., a NoteBook, when we call destructors, # we must be careful not to destroy the frame widget since this # also destroys the parent NoteBook thus leading to an exception # in Tkinter when it finally calls Tcl to destroy the NoteBook for c in list(self.children.values()): c.destroy() if self._name in self.master.children: del self.master.children[self._name] if self._name in self.master.subwidget_list: del self.master.subwidget_list[self._name] if self.destroy_physically: # This is bypassed only for a few 
widgets self.tk.call('destroy', self._w) # Useful func. to split Tcl lists and return as a dict. From Tkinter.py def _lst2dict(lst): dict = {} for x in lst: dict[x[0][1:]] = (x[0][1:],) + x[1:] return dict # Useful class to create a display style - later shared by many items. # Contributed by Steffen Kremser class DisplayStyle: """DisplayStyle - handle configuration options shared by (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): master = _default_root # global from Tkinter if not master and 'refwindow' in cnf: master=cnf['refwindow'] elif not master and 'refwindow' in kw: master= kw['refwindow'] elif not master: raise RuntimeError("Too early to create display style: no root window") self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) def __str__(self): return self.stylename def _options(self, cnf, kw): if kw and cnf: cnf = _cnfmerge((cnf, kw)) elif kw: cnf = kw opts = () for k, v in cnf.items(): opts = opts + ('-'+k, v) return opts def delete(self): self.tk.call(self.stylename, 'delete') def __setitem__(self,key,value): self.tk.call(self.stylename, 'configure', '-%s'%key, value) def config(self, cnf={}, **kw): return _lst2dict( self.tk.split( self.tk.call( self.stylename, 'configure', *self._options(cnf,kw)))) def __getitem__(self,key): return self.tk.call(self.stylename, 'cget', '-%s'%key) ###################################################### ### The Tix Widget classes - in alphabetical order ### ###################################################### class Balloon(TixWidget): """Balloon help widget. 
Subwidget Class --------- ----- label Label message Message""" # FIXME: It should inherit -superclass tixShell def __init__(self, master=None, cnf={}, **kw): # static seem to be -installcolormap -initwait -statusbar -cursor static = ['options', 'installcolormap', 'initwait', 'statusbar', 'cursor'] TixWidget.__init__(self, master, 'tixBalloon', static, cnf, kw) self.subwidget_list['label'] = _dummyLabel(self, 'label', destroy_physically=0) self.subwidget_list['message'] = _dummyLabel(self, 'message', destroy_physically=0) def bind_widget(self, widget, cnf={}, **kw): """Bind balloon widget to another. One balloon widget may be bound to several widgets at the same time""" self.tk.call(self._w, 'bind', widget._w, *self._options(cnf, kw)) def unbind_widget(self, widget): self.tk.call(self._w, 'unbind', widget._w) class ButtonBox(TixWidget): """ButtonBox - A container for pushbuttons. Subwidgets are the buttons added with the add method. """ def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixButtonBox', ['orientation', 'options'], cnf, kw) def add(self, name, cnf={}, **kw): """Add a button with given name to box.""" btn = self.tk.call(self._w, 'add', name, *self._options(cnf, kw)) self.subwidget_list[name] = _dummyButton(self, name) return btn def invoke(self, name): if name in self.subwidget_list: self.tk.call(self._w, 'invoke', name) class ComboBox(TixWidget): """ComboBox - an Entry field with a dropdown menu. The user can select a choice by either typing in the entry subwidget or selecting from the listbox subwidget. 
Subwidget Class --------- ----- entry Entry arrow Button slistbox ScrolledListBox tick Button cross Button : present if created with the fancy option""" # FIXME: It should inherit -superclass tixLabelWidget def __init__ (self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixComboBox', ['editable', 'dropdown', 'fancy', 'options'], cnf, kw) self.subwidget_list['label'] = _dummyLabel(self, 'label') self.subwidget_list['entry'] = _dummyEntry(self, 'entry') self.subwidget_list['arrow'] = _dummyButton(self, 'arrow') self.subwidget_list['slistbox'] = _dummyScrolledListBox(self, 'slistbox') try: self.subwidget_list['tick'] = _dummyButton(self, 'tick') self.subwidget_list['cross'] = _dummyButton(self, 'cross') except TypeError: # unavailable when -fancy not specified pass # align def add_history(self, str): self.tk.call(self._w, 'addhistory', str) def append_history(self, str): self.tk.call(self._w, 'appendhistory', str) def insert(self, index, str): self.tk.call(self._w, 'insert', index, str) def pick(self, index): self.tk.call(self._w, 'pick', index) class Control(TixWidget): """Control - An entry field with value change arrows. The user can adjust the value by pressing the two arrow buttons or by entering the value directly into the entry. The new value will be checked against the user-defined upper and lower limits. 
Subwidget Class --------- ----- incr Button decr Button entry Entry label Label""" # FIXME: It should inherit -superclass tixLabelWidget def __init__ (self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixControl', ['options'], cnf, kw) self.subwidget_list['incr'] = _dummyButton(self, 'incr') self.subwidget_list['decr'] = _dummyButton(self, 'decr') self.subwidget_list['label'] = _dummyLabel(self, 'label') self.subwidget_list['entry'] = _dummyEntry(self, 'entry') def decrement(self): self.tk.call(self._w, 'decr') def increment(self): self.tk.call(self._w, 'incr') def invoke(self): self.tk.call(self._w, 'invoke') def update(self): self.tk.call(self._w, 'update') class DirList(TixWidget): """DirList - displays a list view of a directory, its previous directories and its sub-directories. The user can choose one of the directories displayed in the list or change to another directory. Subwidget Class --------- ----- hlist HList hsb Scrollbar vsb Scrollbar""" # FIXME: It should inherit -superclass tixScrolledHList def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixDirList', ['options'], cnf, kw) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') def chdir(self, dir): self.tk.call(self._w, 'chdir', dir) class DirTree(TixWidget): """DirTree - Directory Listing in a hierarchical view. Displays a tree view of a directory, its previous directories and its sub-directories. The user can choose one of the directories displayed in the list or change to another directory. 
Subwidget Class --------- ----- hlist HList hsb Scrollbar vsb Scrollbar""" # FIXME: It should inherit -superclass tixScrolledHList def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixDirTree', ['options'], cnf, kw) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') def chdir(self, dir): self.tk.call(self._w, 'chdir', dir) class DirSelectBox(TixWidget): """DirSelectBox - Motif style file select box. It is generally used for the user to choose a file. FileSelectBox stores the files mostly recently selected into a ComboBox widget so that they can be quickly selected again. Subwidget Class --------- ----- selection ComboBox filter ComboBox dirlist ScrolledListBox filelist ScrolledListBox""" def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixDirSelectBox', ['options'], cnf, kw) self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist') self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx') class ExFileSelectBox(TixWidget): """ExFileSelectBox - MS Windows style file select box. It provides an convenient method for the user to select files. 
Subwidget Class --------- ----- cancel Button ok Button hidden Checkbutton types ComboBox dir ComboBox file ComboBox dirlist ScrolledListBox filelist ScrolledListBox""" def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixExFileSelectBox', ['options'], cnf, kw) self.subwidget_list['cancel'] = _dummyButton(self, 'cancel') self.subwidget_list['ok'] = _dummyButton(self, 'ok') self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden') self.subwidget_list['types'] = _dummyComboBox(self, 'types') self.subwidget_list['dir'] = _dummyComboBox(self, 'dir') self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist') self.subwidget_list['file'] = _dummyComboBox(self, 'file') self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist') def filter(self): self.tk.call(self._w, 'filter') def invoke(self): self.tk.call(self._w, 'invoke') # Should inherit from a Dialog class class DirSelectDialog(TixWidget): """The DirSelectDialog widget presents the directories in the file system in a dialog window. The user can use this dialog window to navigate through the file system to select the desired directory. Subwidgets Class ---------- ----- dirbox DirSelectDialog""" # FIXME: It should inherit -superclass tixDialogShell def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixDirSelectDialog', ['options'], cnf, kw) self.subwidget_list['dirbox'] = _dummyDirSelectBox(self, 'dirbox') # cancel and ok buttons are missing def popup(self): self.tk.call(self._w, 'popup') def popdown(self): self.tk.call(self._w, 'popdown') # Should inherit from a Dialog class class ExFileSelectDialog(TixWidget): """ExFileSelectDialog - MS Windows style file select dialog. It provides an convenient method for the user to select files. 
Subwidgets Class ---------- ----- fsbox ExFileSelectBox""" # FIXME: It should inherit -superclass tixDialogShell def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixExFileSelectDialog', ['options'], cnf, kw) self.subwidget_list['fsbox'] = _dummyExFileSelectBox(self, 'fsbox') def popup(self): self.tk.call(self._w, 'popup') def popdown(self): self.tk.call(self._w, 'popdown') class FileSelectBox(TixWidget): """ExFileSelectBox - Motif style file select box. It is generally used for the user to choose a file. FileSelectBox stores the files mostly recently selected into a ComboBox widget so that they can be quickly selected again. Subwidget Class --------- ----- selection ComboBox filter ComboBox dirlist ScrolledListBox filelist ScrolledListBox""" def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixFileSelectBox', ['options'], cnf, kw) self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist') self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist') self.subwidget_list['filter'] = _dummyComboBox(self, 'filter') self.subwidget_list['selection'] = _dummyComboBox(self, 'selection') def apply_filter(self): # name of subwidget is same as command self.tk.call(self._w, 'filter') def invoke(self): self.tk.call(self._w, 'invoke') # Should inherit from a Dialog class class FileSelectDialog(TixWidget): """FileSelectDialog - Motif style file select dialog. 
Subwidgets Class ---------- ----- btns StdButtonBox fsbox FileSelectBox""" # FIXME: It should inherit -superclass tixStdDialogShell def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixFileSelectDialog', ['options'], cnf, kw) self.subwidget_list['btns'] = _dummyStdButtonBox(self, 'btns') self.subwidget_list['fsbox'] = _dummyFileSelectBox(self, 'fsbox') def popup(self): self.tk.call(self._w, 'popup') def popdown(self): self.tk.call(self._w, 'popdown') class FileEntry(TixWidget): """FileEntry - Entry field with button that invokes a FileSelectDialog. The user can type in the filename manually. Alternatively, the user can press the button widget that sits next to the entry, which will bring up a file selection dialog. Subwidgets Class ---------- ----- button Button entry Entry""" # FIXME: It should inherit -superclass tixLabelWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixFileEntry', ['dialogtype', 'options'], cnf, kw) self.subwidget_list['button'] = _dummyButton(self, 'button') self.subwidget_list['entry'] = _dummyEntry(self, 'entry') def invoke(self): self.tk.call(self._w, 'invoke') def file_dialog(self): # FIXME: return python object pass class HList(TixWidget, XView, YView): """HList - Hierarchy display widget can be used to display any data that have a hierarchical structure, for example, file system directory trees. The list entries are indented and connected by branch lines according to their places in the hierarchy. 
Subwidgets - None""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixHList', ['columns', 'options'], cnf, kw) def add(self, entry, cnf={}, **kw): return self.tk.call(self._w, 'add', entry, *self._options(cnf, kw)) def add_child(self, parent=None, cnf={}, **kw): if not parent: parent = '' return self.tk.call( self._w, 'addchild', parent, *self._options(cnf, kw)) def anchor_set(self, entry): self.tk.call(self._w, 'anchor', 'set', entry) def anchor_clear(self): self.tk.call(self._w, 'anchor', 'clear') def column_width(self, col=0, width=None, chars=None): if not chars: return self.tk.call(self._w, 'column', 'width', col, width) else: return self.tk.call(self._w, 'column', 'width', col, '-char', chars) def delete_all(self): self.tk.call(self._w, 'delete', 'all') def delete_entry(self, entry): self.tk.call(self._w, 'delete', 'entry', entry) def delete_offsprings(self, entry): self.tk.call(self._w, 'delete', 'offsprings', entry) def delete_siblings(self, entry): self.tk.call(self._w, 'delete', 'siblings', entry) def dragsite_set(self, index): self.tk.call(self._w, 'dragsite', 'set', index) def dragsite_clear(self): self.tk.call(self._w, 'dragsite', 'clear') def dropsite_set(self, index): self.tk.call(self._w, 'dropsite', 'set', index) def dropsite_clear(self): self.tk.call(self._w, 'dropsite', 'clear') def header_create(self, col, cnf={}, **kw): self.tk.call(self._w, 'header', 'create', col, *self._options(cnf, kw)) def header_configure(self, col, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'header', 'configure', col))) self.tk.call(self._w, 'header', 'configure', col, *self._options(cnf, kw)) def header_cget(self, col, opt): return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): return self.tk.call(self._w, 'header', 'exists', col) def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) def header_size(self, col): return self.tk.call(self._w, 
'header', 'size', col) def hide_entry(self, entry): self.tk.call(self._w, 'hide', 'entry', entry) def indicator_create(self, entry, cnf={}, **kw): self.tk.call( self._w, 'indicator', 'create', entry, *self._options(cnf, kw)) def indicator_configure(self, entry, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'indicator', 'configure', entry))) self.tk.call( self._w, 'indicator', 'configure', entry, *self._options(cnf, kw)) def indicator_cget(self, entry, opt): return self.tk.call(self._w, 'indicator', 'cget', entry, opt) def indicator_exists(self, entry): return self.tk.call (self._w, 'indicator', 'exists', entry) def indicator_delete(self, entry): self.tk.call(self._w, 'indicator', 'delete', entry) def indicator_size(self, entry): return self.tk.call(self._w, 'indicator', 'size', entry) def info_anchor(self): return self.tk.call(self._w, 'info', 'anchor') def info_bbox(self, entry): return self._getints( self.tk.call(self._w, 'info', 'bbox', entry)) or None def info_children(self, entry=None): c = self.tk.call(self._w, 'info', 'children', entry) return self.tk.splitlist(c) def info_data(self, entry): return self.tk.call(self._w, 'info', 'data', entry) def info_dragsite(self): return self.tk.call(self._w, 'info', 'dragsite') def info_dropsite(self): return self.tk.call(self._w, 'info', 'dropsite') def info_exists(self, entry): return self.tk.call(self._w, 'info', 'exists', entry) def info_hidden(self, entry): return self.tk.call(self._w, 'info', 'hidden', entry) def info_next(self, entry): return self.tk.call(self._w, 'info', 'next', entry) def info_parent(self, entry): return self.tk.call(self._w, 'info', 'parent', entry) def info_prev(self, entry): return self.tk.call(self._w, 'info', 'prev', entry) def info_selection(self): c = self.tk.call(self._w, 'info', 'selection') return self.tk.splitlist(c) def item_cget(self, entry, col, opt): return self.tk.call(self._w, 'item', 'cget', entry, col, opt) def item_configure(self, 
entry, col, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'item', 'configure', entry, col))) self.tk.call(self._w, 'item', 'configure', entry, col, *self._options(cnf, kw)) def item_create(self, entry, col, cnf={}, **kw): self.tk.call( self._w, 'item', 'create', entry, col, *self._options(cnf, kw)) def item_exists(self, entry, col): return self.tk.call(self._w, 'item', 'exists', entry, col) def item_delete(self, entry, col): self.tk.call(self._w, 'item', 'delete', entry, col) def entrycget(self, entry, opt): return self.tk.call(self._w, 'entrycget', entry, opt) def entryconfigure(self, entry, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'entryconfigure', entry))) self.tk.call(self._w, 'entryconfigure', entry, *self._options(cnf, kw)) def nearest(self, y): return self.tk.call(self._w, 'nearest', y) def see(self, entry): self.tk.call(self._w, 'see', entry) def selection_clear(self, cnf={}, **kw): self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw)) def selection_includes(self, entry): return self.tk.call(self._w, 'selection', 'includes', entry) def selection_set(self, first, last=None): self.tk.call(self._w, 'selection', 'set', first, last) def show_entry(self, entry): return self.tk.call(self._w, 'show', 'entry', entry) class InputOnly(TixWidget): """InputOnly - Invisible widget. Unix only. Subwidgets - None""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixInputOnly', None, cnf, kw) class LabelEntry(TixWidget): """LabelEntry - Entry field with label. Packages an entry widget and a label into one mega widget. It can beused be used to simplify the creation of ``entry-form'' type of interface. 
Subwidgets Class ---------- ----- label Label entry Entry""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixLabelEntry', ['labelside','options'], cnf, kw) self.subwidget_list['label'] = _dummyLabel(self, 'label') self.subwidget_list['entry'] = _dummyEntry(self, 'entry') class LabelFrame(TixWidget): """LabelFrame - Labelled Frame container. Packages a frame widget and a label into one mega widget. To create widgets inside a LabelFrame widget, one creates the new widgets relative to the frame subwidget and manage them inside the frame subwidget. Subwidgets Class ---------- ----- label Label frame Frame""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixLabelFrame', ['labelside','options'], cnf, kw) self.subwidget_list['label'] = _dummyLabel(self, 'label') self.subwidget_list['frame'] = _dummyFrame(self, 'frame') class ListNoteBook(TixWidget): """A ListNoteBook widget is very similar to the TixNoteBook widget: it can be used to display many windows in a limited space using a notebook metaphor. The notebook is divided into a stack of pages (windows). At one time only one of these pages can be shown. The user can navigate through these pages by choosing the name of the desired page in the hlist subwidget.""" def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixListNoteBook', ['options'], cnf, kw) # Is this necessary? It's not an exposed subwidget in Tix. 
self.subwidget_list['pane'] = _dummyPanedWindow(self, 'pane', destroy_physically=0) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['shlist'] = _dummyScrolledHList(self, 'shlist') def add(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', name, *self._options(cnf, kw)) self.subwidget_list[name] = TixSubWidget(self, name) return self.subwidget_list[name] def page(self, name): return self.subwidget(name) def pages(self): # Can't call subwidgets_all directly because we don't want .nbframe names = self.tk.split(self.tk.call(self._w, 'pages')) ret = [] for x in names: ret.append(self.subwidget(x)) return ret def raise_page(self, name): # raise is a python keyword self.tk.call(self._w, 'raise', name) class Meter(TixWidget): """The Meter widget can be used to show the progress of a background job which may take a long time to execute. """ def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixMeter', ['options'], cnf, kw) class NoteBook(TixWidget): """NoteBook - Multi-page container widget (tabbed notebook metaphor). 
Subwidgets Class ---------- ----- nbframe NoteBookFrame <pages> page widgets added dynamically with the add method""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self,master,'tixNoteBook', ['options'], cnf, kw) self.subwidget_list['nbframe'] = TixSubWidget(self, 'nbframe', destroy_physically=0) def add(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', name, *self._options(cnf, kw)) self.subwidget_list[name] = TixSubWidget(self, name) return self.subwidget_list[name] def delete(self, name): self.tk.call(self._w, 'delete', name) self.subwidget_list[name].destroy() del self.subwidget_list[name] def page(self, name): return self.subwidget(name) def pages(self): # Can't call subwidgets_all directly because we don't want .nbframe names = self.tk.split(self.tk.call(self._w, 'pages')) ret = [] for x in names: ret.append(self.subwidget(x)) return ret def raise_page(self, name): # raise is a python keyword self.tk.call(self._w, 'raise', name) def raised(self): return self.tk.call(self._w, 'raised') class NoteBookFrame(TixWidget): # FIXME: This is dangerous to expose to be called on its own. pass class OptionMenu(TixWidget): """OptionMenu - creates a menu button of options. 
Subwidget Class --------- ----- menubutton Menubutton menu Menu""" def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixOptionMenu', ['options'], cnf, kw) self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton') self.subwidget_list['menu'] = _dummyMenu(self, 'menu') def add_command(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', 'command', name, *self._options(cnf, kw)) def add_separator(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', 'separator', name, *self._options(cnf, kw)) def delete(self, name): self.tk.call(self._w, 'delete', name) def disable(self, name): self.tk.call(self._w, 'disable', name) def enable(self, name): self.tk.call(self._w, 'enable', name) class PanedWindow(TixWidget): """PanedWindow - Multi-pane container widget allows the user to interactively manipulate the sizes of several panes. The panes can be arranged either vertically or horizontally.The user changes the sizes of the panes by dragging the resize handle between two panes. 
Subwidgets Class ---------- ----- <panes> g/p widgets added dynamically with the add method.""" def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixPanedWindow', ['orientation', 'options'], cnf, kw) # add delete forget panecget paneconfigure panes setsize def add(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', name, *self._options(cnf, kw)) self.subwidget_list[name] = TixSubWidget(self, name, check_intermediate=0) return self.subwidget_list[name] def delete(self, name): self.tk.call(self._w, 'delete', name) self.subwidget_list[name].destroy() del self.subwidget_list[name] def forget(self, name): self.tk.call(self._w, 'forget', name) def panecget(self, entry, opt): return self.tk.call(self._w, 'panecget', entry, opt) def paneconfigure(self, entry, cnf={}, **kw): if cnf is None: return _lst2dict( self.tk.split( self.tk.call(self._w, 'paneconfigure', entry))) self.tk.call(self._w, 'paneconfigure', entry, *self._options(cnf, kw)) def panes(self): names = self.tk.splitlist(self.tk.call(self._w, 'panes')) return [self.subwidget(x) for x in names] class PopupMenu(TixWidget): """PopupMenu widget can be used as a replacement of the tk_popup command. The advantage of the Tix PopupMenu widget is it requires less application code to manipulate. 
Subwidgets Class ---------- ----- menubutton Menubutton menu Menu""" # FIXME: It should inherit -superclass tixShell def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixPopupMenu', ['options'], cnf, kw) self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton') self.subwidget_list['menu'] = _dummyMenu(self, 'menu') def bind_widget(self, widget): self.tk.call(self._w, 'bind', widget._w) def unbind_widget(self, widget): self.tk.call(self._w, 'unbind', widget._w) def post_widget(self, widget, x, y): self.tk.call(self._w, 'post', widget._w, x, y) class ResizeHandle(TixWidget): """Internal widget to draw resize handles on Scrolled widgets.""" def __init__(self, master, cnf={}, **kw): # There seems to be a Tix bug rejecting the configure method # Let's try making the flags -static flags = ['options', 'command', 'cursorfg', 'cursorbg', 'handlesize', 'hintcolor', 'hintwidth', 'x', 'y'] # In fact, x y height width are configurable TixWidget.__init__(self, master, 'tixResizeHandle', flags, cnf, kw) def attach_widget(self, widget): self.tk.call(self._w, 'attachwidget', widget._w) def detach_widget(self, widget): self.tk.call(self._w, 'detachwidget', widget._w) def hide(self, widget): self.tk.call(self._w, 'hide', widget._w) def show(self, widget): self.tk.call(self._w, 'show', widget._w) class ScrolledHList(TixWidget): """ScrolledHList - HList with automatic scrollbars.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixScrolledHList', ['options'], cnf, kw) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class ScrolledListBox(TixWidget): """ScrolledListBox - Listbox with automatic scrollbars.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 
'tixScrolledListBox', ['options'], cnf, kw) self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class ScrolledText(TixWidget): """ScrolledText - Text with automatic scrollbars.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixScrolledText', ['options'], cnf, kw) self.subwidget_list['text'] = _dummyText(self, 'text') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class ScrolledTList(TixWidget): """ScrolledTList - TList with automatic scrollbars.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixScrolledTList', ['options'], cnf, kw) self.subwidget_list['tlist'] = _dummyTList(self, 'tlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class ScrolledWindow(TixWidget): """ScrolledWindow - Window with automatic scrollbars.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixScrolledWindow', ['options'], cnf, kw) self.subwidget_list['window'] = _dummyFrame(self, 'window') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class Select(TixWidget): """Select - Container of button subwidgets. It can be used to provide radio-box or check-box style of selection options for the user. 
Subwidgets are buttons added dynamically using the add method.""" # FIXME: It should inherit -superclass tixLabelWidget def __init__(self, master, cnf={}, **kw): TixWidget.__init__(self, master, 'tixSelect', ['allowzero', 'radio', 'orientation', 'labelside', 'options'], cnf, kw) self.subwidget_list['label'] = _dummyLabel(self, 'label') def add(self, name, cnf={}, **kw): self.tk.call(self._w, 'add', name, *self._options(cnf, kw)) self.subwidget_list[name] = _dummyButton(self, name) return self.subwidget_list[name] def invoke(self, name): self.tk.call(self._w, 'invoke', name) class Shell(TixWidget): """Toplevel window. Subwidgets - None""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixShell', ['options', 'title'], cnf, kw) class DialogShell(TixWidget): """Toplevel window, with popup popdown and center methods. It tells the window manager that it is a dialog window and should be treated specially. The exact treatment depends on the treatment of the window manager. 
Subwidgets - None""" # FIXME: It should inherit from Shell def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixDialogShell', ['options', 'title', 'mapped', 'minheight', 'minwidth', 'parent', 'transient'], cnf, kw) def popdown(self): self.tk.call(self._w, 'popdown') def popup(self): self.tk.call(self._w, 'popup') def center(self): self.tk.call(self._w, 'center') class StdButtonBox(TixWidget): """StdButtonBox - Standard Button Box (OK, Apply, Cancel and Help) """ def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixStdButtonBox', ['orientation', 'options'], cnf, kw) self.subwidget_list['ok'] = _dummyButton(self, 'ok') self.subwidget_list['apply'] = _dummyButton(self, 'apply') self.subwidget_list['cancel'] = _dummyButton(self, 'cancel') self.subwidget_list['help'] = _dummyButton(self, 'help') def invoke(self, name): if name in self.subwidget_list: self.tk.call(self._w, 'invoke', name) class TList(TixWidget, XView, YView): """TList - Hierarchy display widget which can be used to display data in a tabular format. The list entries of a TList widget are similar to the entries in the Tk listbox widget. The main differences are (1) the TList widget can display the list entries in a two dimensional format and (2) you can use graphical images as well as multiple colors and fonts for the list entries. 
Subwidgets - None""" def __init__ (self,master=None,cnf={}, **kw): TixWidget.__init__(self, master, 'tixTList', ['options'], cnf, kw) def active_set(self, index): self.tk.call(self._w, 'active', 'set', index) def active_clear(self): self.tk.call(self._w, 'active', 'clear') def anchor_set(self, index): self.tk.call(self._w, 'anchor', 'set', index) def anchor_clear(self): self.tk.call(self._w, 'anchor', 'clear') def delete(self, from_, to=None): self.tk.call(self._w, 'delete', from_, to) def dragsite_set(self, index): self.tk.call(self._w, 'dragsite', 'set', index) def dragsite_clear(self): self.tk.call(self._w, 'dragsite', 'clear') def dropsite_set(self, index): self.tk.call(self._w, 'dropsite', 'set', index) def dropsite_clear(self): self.tk.call(self._w, 'dropsite', 'clear') def insert(self, index, cnf={}, **kw): self.tk.call(self._w, 'insert', index, *self._options(cnf, kw)) def info_active(self): return self.tk.call(self._w, 'info', 'active') def info_anchor(self): return self.tk.call(self._w, 'info', 'anchor') def info_down(self, index): return self.tk.call(self._w, 'info', 'down', index) def info_left(self, index): return self.tk.call(self._w, 'info', 'left', index) def info_right(self, index): return self.tk.call(self._w, 'info', 'right', index) def info_selection(self): c = self.tk.call(self._w, 'info', 'selection') return self.tk.splitlist(c) def info_size(self): return self.tk.call(self._w, 'info', 'size') def info_up(self, index): return self.tk.call(self._w, 'info', 'up', index) def nearest(self, x, y): return self.tk.call(self._w, 'nearest', x, y) def see(self, index): self.tk.call(self._w, 'see', index) def selection_clear(self, cnf={}, **kw): self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw)) def selection_includes(self, index): return self.tk.call(self._w, 'selection', 'includes', index) def selection_set(self, first, last=None): self.tk.call(self._w, 'selection', 'set', first, last) class Tree(TixWidget): """Tree - The tixTree 
widget can be used to display hierarchical data in a tree form. The user can adjust the view of the tree by opening or closing parts of the tree.""" # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixTree', ['options'], cnf, kw) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') def autosetmode(self): '''This command calls the setmode method for all the entries in this Tree widget: if an entry has no child entries, its mode is set to none. Otherwise, if the entry has any hidden child entries, its mode is set to open; otherwise its mode is set to close.''' self.tk.call(self._w, 'autosetmode') def close(self, entrypath): '''Close the entry given by entryPath if its mode is close.''' self.tk.call(self._w, 'close', entrypath) def getmode(self, entrypath): '''Returns the current mode of the entry given by entryPath.''' return self.tk.call(self._w, 'getmode', entrypath) def open(self, entrypath): '''Open the entry given by entryPath if its mode is open.''' self.tk.call(self._w, 'open', entrypath) def setmode(self, entrypath, mode='none'): '''This command is used to indicate whether the entry given by entryPath has children entries and whether the children are visible. mode must be one of open, close or none. If mode is set to open, a (+) indicator is drawn next the the entry. If mode is set to close, a (-) indicator is drawn next the the entry. If mode is set to none, no indicators will be drawn for this entry. The default mode is none. The open mode indicates the entry has hidden children and this entry can be opened by the user. 
The close mode indicates that all the children of the entry are now visible and the entry can be closed by the user.''' self.tk.call(self._w, 'setmode', entrypath, mode) # Could try subclassing Tree for CheckList - would need another arg to init class CheckList(TixWidget): """The CheckList widget displays a list of items to be selected by the user. CheckList acts similarly to the Tk checkbutton or radiobutton widgets, except it is capable of handling many more items than checkbuttons or radiobuttons. """ # FIXME: It should inherit -superclass tixTree def __init__(self, master=None, cnf={}, **kw): TixWidget.__init__(self, master, 'tixCheckList', ['options', 'radio'], cnf, kw) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') def autosetmode(self): '''This command calls the setmode method for all the entries in this Tree widget: if an entry has no child entries, its mode is set to none. Otherwise, if the entry has any hidden child entries, its mode is set to open; otherwise its mode is set to close.''' self.tk.call(self._w, 'autosetmode') def close(self, entrypath): '''Close the entry given by entryPath if its mode is close.''' self.tk.call(self._w, 'close', entrypath) def getmode(self, entrypath): '''Returns the current mode of the entry given by entryPath.''' return self.tk.call(self._w, 'getmode', entrypath) def open(self, entrypath): '''Open the entry given by entryPath if its mode is open.''' self.tk.call(self._w, 'open', entrypath) def getselection(self, mode='on'): '''Returns a list of items whose status matches status. If status is not specified, the list of items in the "on" status will be returned. 
Mode can be on, off, default''' c = self.tk.split(self.tk.call(self._w, 'getselection', mode)) return self.tk.splitlist(c) def getstatus(self, entrypath): '''Returns the current status of entryPath.''' return self.tk.call(self._w, 'getstatus', entrypath) def setstatus(self, entrypath, mode='on'): '''Sets the status of entryPath to be status. A bitmap will be displayed next to the entry its status is on, off or default.''' self.tk.call(self._w, 'setstatus', entrypath, mode) ########################################################################### ### The subclassing below is used to instantiate the subwidgets in each ### ### mega widget. This allows us to access their methods directly. ### ########################################################################### class _dummyButton(Button, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyCheckbutton(Checkbutton, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyEntry(Entry, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyFrame(Frame, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyLabel(Label, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyListbox(Listbox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyMenu(Menu, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyMenubutton(Menubutton, TixSubWidget): def __init__(self, master, name, 
destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyScrollbar(Scrollbar, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyText(Text, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyScrolledListBox(ScrolledListBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class _dummyHList(HList, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyScrolledHList(ScrolledHList, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class _dummyTList(TList, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyComboBox(ComboBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, ['fancy',destroy_physically]) self.subwidget_list['label'] = _dummyLabel(self, 'label') self.subwidget_list['entry'] = _dummyEntry(self, 'entry') self.subwidget_list['arrow'] = _dummyButton(self, 'arrow') self.subwidget_list['slistbox'] = _dummyScrolledListBox(self, 'slistbox') try: self.subwidget_list['tick'] = _dummyButton(self, 'tick') #cross Button : present if created with the fancy option self.subwidget_list['cross'] 
= _dummyButton(self, 'cross') except TypeError: # unavailable when -fancy not specified pass class _dummyDirList(DirList, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['hlist'] = _dummyHList(self, 'hlist') self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb') self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb') class _dummyDirSelectBox(DirSelectBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist') self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx') class _dummyExFileSelectBox(ExFileSelectBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['cancel'] = _dummyButton(self, 'cancel') self.subwidget_list['ok'] = _dummyButton(self, 'ok') self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden') self.subwidget_list['types'] = _dummyComboBox(self, 'types') self.subwidget_list['dir'] = _dummyComboBox(self, 'dir') self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist') self.subwidget_list['file'] = _dummyComboBox(self, 'file') self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist') class _dummyFileSelectBox(FileSelectBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist') self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist') self.subwidget_list['filter'] = _dummyComboBox(self, 'filter') self.subwidget_list['selection'] = _dummyComboBox(self, 'selection') class _dummyFileComboBox(ComboBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, 
master, name, destroy_physically) self.subwidget_list['dircbx'] = _dummyComboBox(self, 'dircbx') class _dummyStdButtonBox(StdButtonBox, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) self.subwidget_list['ok'] = _dummyButton(self, 'ok') self.subwidget_list['apply'] = _dummyButton(self, 'apply') self.subwidget_list['cancel'] = _dummyButton(self, 'cancel') self.subwidget_list['help'] = _dummyButton(self, 'help') class _dummyNoteBookFrame(NoteBookFrame, TixSubWidget): def __init__(self, master, name, destroy_physically=0): TixSubWidget.__init__(self, master, name, destroy_physically) class _dummyPanedWindow(PanedWindow, TixSubWidget): def __init__(self, master, name, destroy_physically=1): TixSubWidget.__init__(self, master, name, destroy_physically) ######################## ### Utility Routines ### ######################## #mike Should tixDestroy be exposed as a wrapper? - but not for widgets. def OptionName(widget): '''Returns the qualified path name for the widget. Normally used to set default options for subwidgets. See tixwidgets.py''' return widget.tk.call('tixOptionName', widget._w) # Called with a dictionary argument of the form # {'*.c':'C source files', '*.txt':'Text Files', '*':'All files'} # returns a string which can be used to configure the fsbox file types # in an ExFileSelectBox. i.e., # '{{*} {* - All files}} {{*.c} {*.c - C source files}} {{*.txt} {*.txt - Text Files}}' def FileTypeList(dict): s = '' for type in dict.keys(): s = s + '{{' + type + '} {' + type + ' - ' + dict[type] + '}} ' return s # Still to be done: # tixIconView class CObjView(TixWidget): """This file implements the Canvas Object View widget. This is a base class of IconView. It implements automatic placement/adjustment of the scrollbars according to the canvas objects inside the canvas subwidget. The scrollbars are adjusted so that the canvas is just large enough to see all the objects. 
""" # FIXME: It should inherit -superclass tixScrolledWidget pass class Grid(TixWidget, XView, YView): '''The Tix Grid command creates a new window and makes it into a tixGrid widget. Additional options, may be specified on the command line or in the option database to configure aspects such as its cursor and relief. A Grid widget displays its contents in a two dimensional grid of cells. Each cell may contain one Tix display item, which may be in text, graphics or other formats. See the DisplayStyle class for more information about Tix display items. Individual cells, or groups of cells, can be formatted with a wide range of attributes, such as its color, relief and border. Subwidgets - None''' # valid specific resources as of Tk 8.4 # editdonecmd, editnotifycmd, floatingcols, floatingrows, formatcmd, # highlightbackground, highlightcolor, leftmargin, itemtype, selectmode, # selectunit, topmargin, def __init__(self, master=None, cnf={}, **kw): static= [] self.cnf= cnf TixWidget.__init__(self, master, 'tixGrid', static, cnf, kw) # valid options as of Tk 8.4 # anchor, bdtype, cget, configure, delete, dragsite, dropsite, entrycget, # edit, entryconfigure, format, geometryinfo, info, index, move, nearest, # selection, set, size, unset, xview, yview def anchor_clear(self): """Removes the selection anchor.""" self.tk.call(self, 'anchor', 'clear') def anchor_get(self): "Get the (x,y) coordinate of the current anchor cell" return self._getints(self.tk.call(self, 'anchor', 'get')) def anchor_set(self, x, y): """Set the selection anchor to the cell at (x, y).""" self.tk.call(self, 'anchor', 'set', x, y) def delete_row(self, from_, to=None): """Delete rows between from_ and to inclusive. If to is not provided, delete only row at from_""" if to is None: self.tk.call(self, 'delete', 'row', from_) else: self.tk.call(self, 'delete', 'row', from_, to) def delete_column(self, from_, to=None): """Delete columns between from_ and to inclusive. 
If to is not provided, delete only column at from_""" if to is None: self.tk.call(self, 'delete', 'column', from_) else: self.tk.call(self, 'delete', 'column', from_, to) def edit_apply(self): """If any cell is being edited, de-highlight the cell and applies the changes.""" self.tk.call(self, 'edit', 'apply') def edit_set(self, x, y): """Highlights the cell at (x, y) for editing, if the -editnotify command returns True for this cell.""" self.tk.call(self, 'edit', 'set', x, y) def entrycget(self, x, y, option): "Get the option value for cell at (x,y)" if option and option[0] != '-': option = '-' + option return self.tk.call(self, 'entrycget', x, y, option) def entryconfigure(self, x, y, cnf=None, **kw): return self._configure(('entryconfigure', x, y), cnf, kw) # def format # def index def info_exists(self, x, y): "Return True if display item exists at (x,y)" return self._getboolean(self.tk.call(self, 'info', 'exists', x, y)) def info_bbox(self, x, y): # This seems to always return '', at least for 'text' displayitems return self.tk.call(self, 'info', 'bbox', x, y) def move_column(self, from_, to, offset): """Moves the the range of columns from position FROM through TO by the distance indicated by OFFSET. For example, move_column(2, 4, 1) moves the columns 2,3,4 to columns 3,4,5.""" self.tk.call(self, 'move', 'column', from_, to, offset) def move_row(self, from_, to, offset): """Moves the the range of rows from position FROM through TO by the distance indicated by OFFSET. 
For example, move_row(2, 4, 1) moves the rows 2,3,4 to rows 3,4,5.""" self.tk.call(self, 'move', 'row', from_, to, offset) def nearest(self, x, y): "Return coordinate of cell nearest pixel coordinate (x,y)" return self._getints(self.tk.call(self, 'nearest', x, y)) # def selection adjust # def selection clear # def selection includes # def selection set # def selection toggle def set(self, x, y, itemtype=None, **kw): args= self._options(self.cnf, kw) if itemtype is not None: args= ('-itemtype', itemtype) + args self.tk.call(self, 'set', x, y, *args) def size_column(self, index, **kw): """Queries or sets the size of the column given by INDEX. INDEX may be any non-negative integer that gives the position of a given column. INDEX can also be the string "default"; in this case, this command queries or sets the default size of all columns. When no option-value pair is given, this command returns a tuple containing the current size setting of the given column. When option-value pairs are given, the corresponding options of the size setting of the given column are changed. Options may be one of the follwing: pad0 pixels Specifies the paddings to the left of a column. pad1 pixels Specifies the paddings to the right of a column. size val Specifies the width of a column . Val may be: "auto" -- the width of the column is set the the widest cell in the column; a valid Tk screen distance unit; or a real number following by the word chars (e.g. 3.4chars) that sets the width of the column to the given number of characters.""" return self.tk.split(self.tk.call(self._w, 'size', 'column', index, *self._options({}, kw))) def size_row(self, index, **kw): """Queries or sets the size of the row given by INDEX. INDEX may be any non-negative integer that gives the position of a given row . INDEX can also be the string "default"; in this case, this command queries or sets the default size of all rows. 
When no option-value pair is given, this command returns a list con- taining the current size setting of the given row . When option-value pairs are given, the corresponding options of the size setting of the given row are changed. Options may be one of the follwing: pad0 pixels Specifies the paddings to the top of a row. pad1 pixels Specifies the paddings to the the bottom of a row. size val Specifies the height of a row. Val may be: "auto" -- the height of the row is set the the highest cell in the row; a valid Tk screen distance unit; or a real number following by the word chars (e.g. 3.4chars) that sets the height of the row to the given number of characters.""" return self.tk.split(self.tk.call( self, 'size', 'row', index, *self._options({}, kw))) def unset(self, x, y): """Clears the cell at (x, y) by removing its display item.""" self.tk.call(self._w, 'unset', x, y) class ScrolledGrid(Grid): '''Scrolled Grid widgets''' # FIXME: It should inherit -superclass tixScrolledWidget def __init__(self, master=None, cnf={}, **kw): static= [] self.cnf= cnf TixWidget.__init__(self, master, 'tixScrolledGrid', static, cnf, kw)
apache-2.0
xiaoyaozi5566/GEM5_DRAMSim2
src/mem/MemObject.py
4
1786
# Copyright (c) 2006-2007 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Ron Dreslinski from m5.params import * from ClockedObject import ClockedObject class MemObject(ClockedObject): type = 'MemObject' abstract = True save_trace = Param.Bool(False, "save trace for this object")
bsd-3-clause
fxfitz/ansible
lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py
7
13252
#!/usr/bin/python # # Copyright (c) 2016 Matt Davis, <mdavis@ansible.com> # Chris Houseknecht, <house@redhat.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = ''' --- module: azure_rm_virtualnetwork version_added: "2.1" short_description: Manage Azure virtual networks. description: - Create, update or delete a virtual networks. Allows setting and updating the available IPv4 address ranges and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network. options: resource_group: description: - name of resource group. required: true address_prefixes_cidr: description: - List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating a new virtual network or using purge_address_prefixes. aliases: - address_prefixes dns_servers: description: - Custom list of DNS servers. Maximum length of two. The first server in the list will be treated as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to default Azure servers. location: description: - Valid azure location. Defaults to location of the resource group. name: description: - name of the virtual network. required: true purge_address_prefixes: description: - Use with state present to remove any existing address_prefixes. type: bool default: 'no' aliases: - purge purge_dns_servers: description: - Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually exclusive with dns_servers. type: bool default: 'no' state: description: - Assert the state of the virtual network. Use 'present' to create or update and 'absent' to delete. 
default: present choices: - absent - present extends_documentation_fragment: - azure - azure_tags author: - "Chris Houseknecht (@chouseknecht)" - "Matt Davis (@nitzmahone)" ''' EXAMPLES = ''' - name: Create a virtual network azure_rm_virtualnetwork: name: foobar resource_group: Testing address_prefixes_cidr: - "10.1.0.0/16" - "172.100.0.0/16" dns_servers: - "127.0.0.1" - "127.0.0.2" tags: testing: testing delete: on-exit - name: Delete a virtual network azure_rm_virtualnetwork: name: foobar resource_group: Testing state: absent ''' RETURN = ''' state: description: Current state of the virtual network. returned: always type: dict sample: { "address_prefixes": [ "10.1.0.0/16", "172.100.0.0/16" ], "dns_servers": [ "127.0.0.1", "127.0.0.3" ], "etag": 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"', "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network", "location": "eastus", "name": "my_test_network", "provisioning_state": "Succeeded", "tags": null, "type": "Microsoft.Network/virtualNetworks" } ''' try: from msrestazure.azure_exceptions import CloudError except ImportError: # This is handled in azure_rm_common pass from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN def virtual_network_to_dict(vnet): ''' Convert a virtual network object to a dict. 
:param vnet: VirtualNet object :return: dict ''' results = dict( id=vnet.id, name=vnet.name, location=vnet.location, type=vnet.type, tags=vnet.tags, provisioning_state=vnet.provisioning_state, etag=vnet.etag ) if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0: results['dns_servers'] = [] for server in vnet.dhcp_options.dns_servers: results['dns_servers'].append(server) if vnet.address_space and len(vnet.address_space.address_prefixes) > 0: results['address_prefixes'] = [] for space in vnet.address_space.address_prefixes: results['address_prefixes'].append(space) return results class AzureRMVirtualNetwork(AzureRMModuleBase): def __init__(self): self.module_arg_spec = dict( resource_group=dict(type='str', required=True), name=dict(type='str', required=True), state=dict(type='str', default='present', choices=['present', 'absent']), location=dict(type='str'), address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']), dns_servers=dict(type='list',), purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']), purge_dns_servers=dict(type='bool', default=False), ) mutually_exclusive = [ ('dns_servers', 'purge_dns_servers') ] required_if = [ ('purge_address_prefixes', True, ['address_prefixes_cidr']) ] self.resource_group = None self.name = None self.state = None self.location = None self.address_prefixes_cidr = None self.purge_address_prefixes = None self.dns_servers = None self.purge_dns_servers = None self.results = dict( changed=False, state=dict() ) super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec, mutually_exclusive=mutually_exclusive, required_if=required_if, supports_check_mode=True) def exec_module(self, **kwargs): for key in list(self.module_arg_spec.keys()) + ['tags']: setattr(self, key, kwargs[key]) self.results['check_mode'] = self.check_mode resource_group = self.get_resource_group(self.resource_group) if not self.location: # Set default location self.location = resource_group.location if self.state == 
'present' and self.purge_address_prefixes: for prefix in self.address_prefixes_cidr: if not CIDR_PATTERN.match(prefix): self.fail("Parameter error: invalid address prefix value {0}".format(prefix)) if self.dns_servers and len(self.dns_servers) > 2: self.fail("Parameter error: You can provide a maximum of 2 DNS servers.") changed = False results = dict() try: self.log('Fetching vnet {0}'.format(self.name)) vnet = self.network_client.virtual_networks.get(self.resource_group, self.name) results = virtual_network_to_dict(vnet) self.log('Vnet exists {0}'.format(self.name)) self.log(results, pretty_print=True) self.check_provisioning_state(vnet, self.state) if self.state == 'present': if self.address_prefixes_cidr: existing_address_prefix_set = set(vnet.address_space.address_prefixes) requested_address_prefix_set = set(self.address_prefixes_cidr) missing_prefixes = requested_address_prefix_set - existing_address_prefix_set extra_prefixes = existing_address_prefix_set - requested_address_prefix_set if len(missing_prefixes) > 0: self.log('CHANGED: there are missing address_prefixes') changed = True if not self.purge_address_prefixes: # add the missing prefixes for prefix in missing_prefixes: results['address_prefixes'].append(prefix) if len(extra_prefixes) > 0 and self.purge_address_prefixes: self.log('CHANGED: there are address_prefixes to purge') changed = True # replace existing address prefixes with requested set results['address_prefixes'] = self.address_prefixes_cidr update_tags, results['tags'] = self.update_tags(results['tags']) if update_tags: changed = True if self.dns_servers: existing_dns_set = set(vnet.dhcp_options.dns_servers) if vnet.dhcp_options else set([]) requested_dns_set = set(self.dns_servers) if existing_dns_set != requested_dns_set: self.log('CHANGED: replacing DNS servers') changed = True results['dns_servers'] = self.dns_servers if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0: self.log('CHANGED: purging 
existing DNS servers') changed = True results['dns_servers'] = [] elif self.state == 'absent': self.log("CHANGED: vnet exists but requested state is 'absent'") changed = True except CloudError: self.log('Vnet {0} does not exist'.format(self.name)) if self.state == 'present': self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name)) changed = True self.results['changed'] = changed self.results['state'] = results if self.check_mode: return self.results if changed: if self.state == 'present': if not results: # create a new virtual network self.log("Create virtual network {0}".format(self.name)) if not self.address_prefixes_cidr: self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network') vnet = self.network_models.VirtualNetwork( location=self.location, address_space=self.network_models.AddressSpace( address_prefixes=self.address_prefixes_cidr ) ) if self.dns_servers: vnet.dhcp_options = self.network_models.DhcpOptions( dns_servers=self.dns_servers ) if self.tags: vnet.tags = self.tags self.results['state'] = self.create_or_update_vnet(vnet) else: # update existing virtual network self.log("Update virtual network {0}".format(self.name)) vnet = self.network_models.VirtualNetwork( location=results['location'], address_space=self.network_models.AddressSpace( address_prefixes=results['address_prefixes'] ), tags=results['tags'] ) if results.get('dns_servers'): vnet.dhcp_options = self.network_models.DhcpOptions( dns_servers=results['dns_servers'] ) self.results['state'] = self.create_or_update_vnet(vnet) elif self.state == 'absent': self.delete_virtual_network() self.results['state']['status'] = 'Deleted' return self.results def create_or_update_vnet(self, vnet): try: poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet) new_vnet = self.get_poller_result(poller) except Exception as exc: self.fail("Error creating or updating virtual network {0} - 
{1}".format(self.name, str(exc))) return virtual_network_to_dict(new_vnet) def delete_virtual_network(self): try: poller = self.network_client.virtual_networks.delete(self.resource_group, self.name) result = self.get_poller_result(poller) except Exception as exc: self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc))) return result def main(): AzureRMVirtualNetwork() if __name__ == '__main__': main()
gpl-3.0
devs1991/test_edx_docmode
common/lib/symmath/symmath/formula.py
58
22439
#!/usr/bin/python # -*- coding: utf-8 -*- """ Flexible python representation of a symbolic mathematical formula. Acceptes Presentation MathML, Content MathML (and could also do OpenMath). Provides sympy representation. """ # # File: formula.py # Date: 04-May-12 (creation) # Author: I. Chuang <ichuang@mit.edu> # import os import string import re import logging import operator import requests import sympy from sympy.printing.latex import LatexPrinter from sympy.printing.str import StrPrinter from sympy import latex, sympify from sympy.physics.quantum.qubit import Qubit from sympy.physics.quantum.state import Ket from xml.sax.saxutils import unescape import unicodedata from lxml import etree #import subprocess from copy import deepcopy log = logging.getLogger(__name__) log.warning("Dark code. Needs review before enabling in prod.") os.environ['PYTHONIOENCODING'] = 'utf-8' #----------------------------------------------------------------------------- class dot(sympy.operations.LatticeOp): # pylint: disable=invalid-name, no-member """my dot product""" zero = sympy.Symbol('dotzero') identity = sympy.Symbol('dotidentity') def _print_dot(_self, expr): """Print statement used for LatexPrinter""" return r'{((%s) \cdot (%s))}' % (expr.args[0], expr.args[1]) LatexPrinter._print_dot = _print_dot # pylint: disable=protected-access #----------------------------------------------------------------------------- # unit vectors (for 8.02) def _print_hat(_self, expr): """Print statement used for LatexPrinter""" return '\\hat{%s}' % str(expr.args[0]).lower() LatexPrinter._print_hat = _print_hat # pylint: disable=protected-access StrPrinter._print_hat = _print_hat # pylint: disable=protected-access #----------------------------------------------------------------------------- # helper routines def to_latex(expr): """ Convert expression to latex mathjax format """ if expr is None: return '' expr_s = latex(expr) expr_s = expr_s.replace(r'\XI', 'XI') # workaround for strange greek # 
substitute back into latex form for scripts # literally something of the form # 'scriptN' becomes '\\mathcal{N}' # note: can't use something akin to the _print_hat method above because we # sometimes get 'script(N)__B' or more complicated terms expr_s = re.sub( r'script([a-zA-Z0-9]+)', '\\mathcal{\\1}', expr_s ) #return '<math>%s{}{}</math>' % (xs[1:-1]) if expr_s[0] == '$': return '[mathjax]%s[/mathjax]<br>' % (expr_s[1:-1]) # for sympy v6 return '[mathjax]%s[/mathjax]<br>' % (expr_s) # for sympy v7 def my_evalf(expr, chop=False): """ Enhanced sympy evalf to handle lists of expressions and catch eval failures without dropping out. """ if isinstance(expr, list): try: return [x.evalf(chop=chop) for x in expr] except Exception: # pylint: disable=broad-except return expr try: return expr.evalf(chop=chop) except Exception: # pylint: disable=broad-except return expr def my_sympify(expr, normphase=False, matrix=False, abcsym=False, do_qubit=False, symtab=None): """ Version of sympify to import expression into sympy """ # make all lowercase real? if symtab: varset = symtab else: varset = { 'p': sympy.Symbol('p'), 'g': sympy.Symbol('g'), 'e': sympy.E, # for exp 'i': sympy.I, # lowercase i is also sqrt(-1) 'Q': sympy.Symbol('Q'), # otherwise it is a sympy "ask key" 'I': sympy.Symbol('I'), # otherwise it is sqrt(-1) 'N': sympy.Symbol('N'), # or it is some kind of sympy function 'ZZ': sympy.Symbol('ZZ'), # otherwise it is the PythonIntegerRing 'XI': sympy.Symbol('XI'), # otherwise it is the capital \XI 'hat': sympy.Function('hat'), # for unit vectors (8.02) } if do_qubit: # turn qubit(...) 
into Qubit instance varset.update({ 'qubit': Qubit, 'Ket': Ket, 'dot': dot, 'bit': sympy.Function('bit'), }) if abcsym: # consider all lowercase letters as real symbols, in the parsing for letter in string.lowercase: if letter in varset: # exclude those already done continue varset.update({letter: sympy.Symbol(letter, real=True)}) sexpr = sympify(expr, locals=varset) if normphase: # remove overall phase if sexpr is a list if isinstance(sexpr, list): if sexpr[0].is_number: ophase = sympy.sympify('exp(-I*arg(%s))' % sexpr[0]) sexpr = [sympy.Mul(x, ophase) for x in sexpr] def to_matrix(expr): """ Convert a list, or list of lists to a matrix. """ # if expr is a list of lists, and is rectangular, then return Matrix(expr) if not isinstance(expr, list): return expr for row in expr: if not isinstance(row, list): return expr rdim = len(expr[0]) for row in expr: if not len(row) == rdim: return expr return sympy.Matrix(expr) if matrix: sexpr = to_matrix(sexpr) return sexpr #----------------------------------------------------------------------------- # class for symbolic mathematical formulas class formula(object): """ Representation of a mathematical formula object. Accepts mathml math expression for constructing, and can produce sympy translation. The formula may or may not include an assignment (=). """ def __init__(self, expr, asciimath='', options=None): self.expr = expr.strip() self.asciimath = asciimath self.the_cmathml = None self.the_sympy = None self.options = options def is_presentation_mathml(self): """ Check if formula is in mathml presentation format. """ return '<mstyle' in self.expr def is_mathml(self): """ Check if formula is in mathml format. """ return '<math ' in self.expr def fix_greek_in_mathml(self, xml): """ Recursively fix greek letters in passed in xml. 
""" def gettag(expr): return re.sub('{http://[^}]+}', '', expr.tag) for k in xml: tag = gettag(k) if tag == 'mi' or tag == 'ci': usym = unicode(k.text) try: udata = unicodedata.name(usym) except Exception: # pylint: disable=broad-except udata = None # print "usym = %s, udata=%s" % (usym,udata) if udata: # eg "GREEK SMALL LETTER BETA" if 'GREEK' in udata: usym = udata.split(' ')[-1] if 'SMALL' in udata: usym = usym.lower() #print "greek: ",usym k.text = usym self.fix_greek_in_mathml(k) return xml def preprocess_pmathml(self, xml): r""" Pre-process presentation MathML from ASCIIMathML to make it more acceptable for SnuggleTeX, and also to accomodate some sympy conventions (eg hat(i) for \hat{i}). This method would be a good spot to look for an integral and convert it, if possible... """ if isinstance(xml, (str, unicode)): xml = etree.fromstring(xml) # TODO: wrap in try xml = self.fix_greek_in_mathml(xml) # convert greek utf letters to greek spelled out in ascii def gettag(expr): return re.sub('{http://[^}]+}', '', expr.tag) def fix_pmathml(xml): """ f and g are processed as functions by asciimathml, eg "f-2" turns into "<mrow><mi>f</mi><mo>-</mo></mrow><mn>2</mn>" this is really terrible for turning into cmathml. undo this here. 
""" for k in xml: tag = gettag(k) if tag == 'mrow': if len(k) == 2: if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo': idx = xml.index(k) xml.insert(idx, deepcopy(k[0])) # drop the <mrow> container xml.insert(idx + 1, deepcopy(k[1])) xml.remove(k) fix_pmathml(k) fix_pmathml(xml) def fix_hat(xml): """ hat i is turned into <mover><mi>i</mi><mo>^</mo></mover> ; mangle this into <mi>hat(f)</mi> hat i also somtimes turned into <mover><mrow> <mi>j</mi> </mrow><mo>^</mo></mover> """ for k in xml: tag = gettag(k) if tag == 'mover': if len(k) == 2: if gettag(k[0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^': newk = etree.Element('mi') newk.text = 'hat(%s)' % k[0].text xml.replace(k, newk) if gettag(k[0]) == 'mrow' and gettag(k[0][0]) == 'mi' and \ gettag(k[1]) == 'mo' and str(k[1].text) == '^': newk = etree.Element('mi') newk.text = 'hat(%s)' % k[0][0].text xml.replace(k, newk) fix_hat(k) fix_hat(xml) def flatten_pmathml(xml): """ Give the text version of certain PMathML elements Sometimes MathML will be given with each letter separated (it doesn't know if its implicit multiplication or what). From an xml node, find the (text only) variable name it represents. So it takes <mrow> <mi>m</mi> <mi>a</mi> <mi>x</mi> </mrow> and returns 'max', for easier use later on. """ tag = gettag(xml) if tag == 'mn': return xml.text elif tag == 'mi': return xml.text elif tag == 'mrow': return ''.join([flatten_pmathml(y) for y in xml]) raise Exception('[flatten_pmathml] unknown tag %s' % tag) def fix_mathvariant(parent): """ Fix certain kinds of math variants Literally replace <mstyle mathvariant="script"><mi>N</mi></mstyle> with 'scriptN'. 
There have been problems using script_N or script(N) """ for child in parent: if gettag(child) == 'mstyle' and child.get('mathvariant') == 'script': newchild = etree.Element('mi') newchild.text = 'script%s' % flatten_pmathml(child[0]) parent.replace(child, newchild) fix_mathvariant(child) fix_mathvariant(xml) # find "tagged" superscripts # they have the character \u200b in the superscript # replace them with a__b so snuggle doesn't get confused def fix_superscripts(xml): """ Look for and replace sup elements with 'X__Y' or 'X_Y__Z' In the javascript, variables with '__X' in them had an invisible character inserted into the sup (to distinguish from powers) E.g. normal: <msubsup> <mi>a</mi> <mi>b</mi> <mi>c</mi> </msubsup> to be interpreted '(a_b)^c' (nothing done by this method) And modified: <msubsup> <mi>b</mi> <mi>x</mi> <mrow> <mo>&#x200B;</mo> <mi>d</mi> </mrow> </msubsup> to be interpreted 'a_b__c' also: <msup> <mi>x</mi> <mrow> <mo>&#x200B;</mo> <mi>B</mi> </mrow> </msup> to be 'x__B' """ for k in xml: tag = gettag(k) # match things like the last example-- # the second item in msub is an mrow with the first # character equal to \u200b if ( tag == 'msup' and len(k) == 2 and gettag(k[1]) == 'mrow' and gettag(k[1][0]) == 'mo' and k[1][0].text == u'\u200b' # whew ): # replace the msup with 'X__Y' k[1].remove(k[1][0]) newk = etree.Element('mi') newk.text = '%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1])) xml.replace(k, newk) # match things like the middle example- # the third item in msubsup is an mrow with the first # character equal to \u200b if ( tag == 'msubsup' and len(k) == 3 and gettag(k[2]) == 'mrow' and gettag(k[2][0]) == 'mo' and k[2][0].text == u'\u200b' # whew ): # replace the msubsup with 'X_Y__Z' k[2].remove(k[2][0]) newk = etree.Element('mi') newk.text = '%s_%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]), flatten_pmathml(k[2])) xml.replace(k, newk) fix_superscripts(k) fix_superscripts(xml) def fix_msubsup(parent): """ Snuggle 
returns an error when it sees an <msubsup> replace such elements with an <msup>, except the first element is of the form a_b. I.e. map a_b^c => (a_b)^c """ for child in parent: # fix msubsup if gettag(child) == 'msubsup' and len(child) == 3: newchild = etree.Element('msup') newbase = etree.Element('mi') newbase.text = '%s_%s' % (flatten_pmathml(child[0]), flatten_pmathml(child[1])) newexp = child[2] newchild.append(newbase) newchild.append(newexp) parent.replace(child, newchild) fix_msubsup(child) fix_msubsup(xml) self.xml = xml # pylint: disable=attribute-defined-outside-init return self.xml def get_content_mathml(self): if self.the_cmathml: return self.the_cmathml # pre-process the presentation mathml before sending it to snuggletex to convert to content mathml try: xml = self.preprocess_pmathml(self.expr) except Exception as err: # pylint: disable=broad-except log.warning('Err %s while preprocessing; expr=%s', err, self.expr) return "<html>Error! Cannot process pmathml</html>" pmathml = etree.tostring(xml, pretty_print=True) self.the_pmathml = pmathml # pylint: disable=attribute-defined-outside-init # convert to cmathml self.the_cmathml = self.GetContentMathML(self.asciimath, pmathml) return self.the_cmathml cmathml = property(get_content_mathml, None, None, 'content MathML representation') def make_sympy(self, xml=None): """ Return sympy expression for the math formula. The math formula is converted to Content MathML then that is parsed. This is a recursive function, called on every CMML node. 
Support for more functions can be added by modifying opdict, abould halfway down """ if self.the_sympy: return self.the_sympy if xml is None: # root if not self.is_mathml(): return my_sympify(self.expr) if self.is_presentation_mathml(): cmml = None try: cmml = self.cmathml xml = etree.fromstring(str(cmml)) except Exception, err: if 'conversion from Presentation MathML to Content MathML was not successful' in cmml: msg = "Illegal math expression" else: msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml) raise Exception(msg) xml = self.fix_greek_in_mathml(xml) self.the_sympy = self.make_sympy(xml[0]) else: xml = etree.fromstring(self.expr) xml = self.fix_greek_in_mathml(xml) self.the_sympy = self.make_sympy(xml[0]) return self.the_sympy def gettag(expr): return re.sub('{http://[^}]+}', '', expr.tag) def op_plus(*args): return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1] def op_times(*args): return reduce(operator.mul, args) def op_minus(*args): if len(args) == 1: return -args[0] if not len(args) == 2: raise Exception('minus given wrong number of arguments!') #return sympy.Add(args[0],-args[1]) return args[0] - args[1] opdict = { 'plus': op_plus, 'divide': operator.div, 'times': op_times, 'minus': op_minus, 'root': sympy.sqrt, 'power': sympy.Pow, 'sin': sympy.sin, 'cos': sympy.cos, 'tan': sympy.tan, 'cot': sympy.cot, 'sinh': sympy.sinh, 'cosh': sympy.cosh, 'coth': sympy.coth, 'tanh': sympy.tanh, 'asin': sympy.asin, 'acos': sympy.acos, 'atan': sympy.atan, 'atan2': sympy.atan2, 'acot': sympy.acot, 'asinh': sympy.asinh, 'acosh': sympy.acosh, 'atanh': sympy.atanh, 'acoth': sympy.acoth, 'exp': sympy.exp, 'log': sympy.log, 'ln': sympy.ln, } def parse_presentation_symbol(xml): """ Parse <msub>, <msup>, <mi>, and <mn> """ tag = gettag(xml) if tag == 'mn': return xml.text elif tag == 'mi': return xml.text elif tag == 'msub': return '_'.join([parse_presentation_symbol(y) for y in xml]) elif tag == 'msup': return 
'^'.join([parse_presentation_symbol(y) for y in xml]) raise Exception('[parse_presentation_symbol] unknown tag %s' % tag) # parser tree for Content MathML tag = gettag(xml) # first do compound objects if tag == 'apply': # apply operator opstr = gettag(xml[0]) if opstr in opdict: op = opdict[opstr] # pylint: disable=invalid-name args = [self.make_sympy(expr) for expr in xml[1:]] try: res = op(*args) except Exception, err: self.args = args # pylint: disable=attribute-defined-outside-init self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args)) return res else: raise Exception('[formula]: unknown operator tag %s' % (opstr)) elif tag == 'list': # square bracket list if gettag(xml[0]) == 'matrix': return self.make_sympy(xml[0]) else: return [self.make_sympy(expr) for expr in xml] elif tag == 'matrix': return sympy.Matrix([self.make_sympy(expr) for expr in xml]) elif tag == 'vector': return [self.make_sympy(expr) for expr in xml] # atoms are below elif tag == 'cn': # number return sympy.sympify(xml.text) elif tag == 'ci': # variable (symbol) if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'): # subscript or superscript usym = parse_presentation_symbol(xml[0]) sym = sympy.Symbol(str(usym)) else: usym = unicode(xml.text) if 'hat' in usym: sym = my_sympify(usym) else: if usym == 'i' and self.options is not None and 'imaginary' in self.options: # i = sqrt(-1) sym = sympy.I else: sym = sympy.Symbol(str(usym)) return sym else: # unknown tag raise Exception('[formula] unknown tag %s' % tag) sympy = property(make_sympy, None, None, 'sympy representation') def GetContentMathML(self, asciimath, mathml): # pylint: disable=invalid-name """ Handle requests to snuggletex API to convert the Ascii math to MathML """ url = 'https://math-xserver.mitx.mit.edu/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo' payload = { 'asciiMathInput': asciimath, 
'asciiMathML': mathml, } headers = { 'User-Agent': "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080311 Firefox/2.0.0.13" } request = requests.post(url, data=payload, headers=headers, verify=False) request.encoding = 'utf-8' ret = request.text mode = 0 cmathml = [] for k in ret.split('\n'): if 'conversion to Content MathML' in k: mode = 1 continue if mode == 1: if '<h3>Maxima Input Form</h3>' in k: mode = 0 continue cmathml.append(k) cmathml = '\n'.join(cmathml[2:]) cmathml = '<math xmlns="http://www.w3.org/1998/Math/MathML">\n' + unescape(cmathml) + '\n</math>' return cmathml
agpl-3.0
picca/hkl
tests/bindings/polarisation.py
1
13010
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import math import unittest from collections import namedtuple from gi.repository import GLib from gi.repository import Hkl from numpy import (array, cross, dot, empty, hstack, reshape, vstack) from numpy.linalg import inv, norm ######### # Types # ######### Detector = namedtuple('Detector', ['type']) Diffractometer = namedtuple('Diffractometer', ['dtype', 'sample', 'detector', 'source', 'engine']) Engine = namedtuple('Engine', ['name', 'mode']) Lattice = namedtuple('Lattice', ['a', 'b', 'c', 'alpha', 'beta', 'gamma']) Reflection = namedtuple('Reflection', ['hkl', 'values']) Sample = namedtuple('Sample', ['lattice', 'or0', 'or1', 'ux', 'uy', 'uz']) Source = namedtuple('Source', ['wavelength', 'energy']) HklDiffractometer = namedtuple('HklDiffractometer', ['sample', 'detector', 'geometry', 'engines']) ################## # Helper methods # ################## def hkl_matrix_to_numpy(m): M = empty((3, 3)) for i in range(3): for j in range(3): M[i, j] = m.get(i, j) return M def from_numpy_to_hkl_vector(v): V = Hkl.Vector() V.init(v[0], v[1], v[2]) return V ######################### # Hkl Type constructors # ######################### def new_hkl_sample(config): # sample sample = Hkl.Sample.new("test") lattice = Hkl.Lattice.new(config.lattice.a, config.lattice.b, config.lattice.c, math.radians(config.lattice.alpha), math.radians(config.lattice.beta), math.radians(config.lattice.gamma)) sample.lattice_set(lattice) parameter = sample.ux_get() parameter.value_set(config.ux, Hkl.UnitEnum.USER) sample.ux_set(parameter) parameter = sample.uy_get() parameter.value_set(config.uy, Hkl.UnitEnum.USER) sample.uy_set(parameter) parameter = sample.uz_get() parameter.value_set(config.uz, Hkl.UnitEnum.USER) sample.uz_set(parameter) return sample def new_hkl_detector(config): return Hkl.Detector.factory_new(config.type) def new_hkl_geometry(dtype, wavelength, init_values): factory = Hkl.factories()[dtype] geometry = 
factory.create_new_geometry() geometry.axis_values_set(init_values, Hkl.UnitEnum.USER) geometry.wavelength_set(wavelength, Hkl.UnitEnum.USER) return geometry def new_hkl_engines(dtype): factory = Hkl.factories()[dtype] engines = factory.create_new_engine_list() return engines def new_hkl_diffractometer(config): sample = new_hkl_sample(config.sample) detector = new_hkl_detector(config.detector) # add reflection or0 geometry = new_hkl_geometry(config.dtype, config.source.wavelength, config.sample.or0.values) or0 = sample.add_reflection(geometry, detector, config.sample.or0.hkl[0], config.sample.or0.hkl[1], config.sample.or0.hkl[2]) # add reflection or1 geometry.axis_values_set(config.sample.or1.values, Hkl.UnitEnum.USER) or1 = sample.add_reflection(geometry, detector, config.sample.or1.hkl[0], config.sample.or1.hkl[1], config.sample.or1.hkl[2]) # compute UB with or0 and or1 # sample.compute_UB_busing_levy(or0, or1) # UB = hkl_matrix_to_numpy(sample.UB_get()) # compute angles for reciprocal lattice vector h, k, l engines = new_hkl_engines(config.dtype) # set the engine mode engine = engines.engine_get_by_name(config.engine.name) engine.current_mode_set(config.engine.mode) return HklDiffractometer(sample, geometry, detector, engines) ################# # Config parser # ################# def find(filename): if os.path.exists(filename): return filename else: datadir = os.getenv('DATADIR') if datadir: filename = os.path.join(datadir, filename) if os.path.exists(filename): return filename else: raise Exception("Can not find: " + filename) else: raise Exception("Cannot find: " + filename) def parse_reflection(line): ref = line.split() hkl = [float(x) for x in ref[2:5]] angles = [float(x) for x in ref[7:13]] return Reflection(hkl, angles) def parse(filename, dtype): with open(filename, 'r') as f: for line in f: if str(line).find('Wavelength') != -1: wavelength = float(line.split()[1]) energy = 12.39842 / wavelength if str(line).find('A') != -1 and str(line).find('B') != -1 
and str(line).find('C') != -1: abc = line.split() a = float(abc[1]) b = float(abc[3]) c = float(abc[5]) if str(line).find('Alpha') != -1 and str(line).find('Beta') != -1 and str(line).find('Gamma') != -1: abg = line.split() alpha = float(abg[1]) beta = float(abg[3]) gamma = float(abg[5]) if str(line).find('R0') != -1: or0 = parse_reflection(line) if str(line).find('R1') != -1: or1 = parse_reflection(line) if str(line).find('Ux') != -1 and str(line).find('Uy') != -1 and str(line).find('Uz') != -1: uxuyuz = line.split() ux = float(uxuyuz[1]) uy = float(uxuyuz[3]) uz = float(uxuyuz[5]) if str(line).find('Engine') != -1: engine_name = line.split()[1] if str(line).find('Mode') != -1: mode_name = line.split()[1] lattice = Lattice(a, b, c, alpha, beta, gamma) sample = Sample(lattice, or0, or1, ux, uy, uz) detector = Detector(0) source = Source(wavelength, energy) engine = Engine(engine_name, mode_name) return Diffractometer(dtype, sample, detector, source, engine) ###################### # hight level method # ###################### def ca(config, hkl, engine_name='hkl'): sample, geometry, detector, engines = new_hkl_diffractometer(config) engines.init(geometry, detector, sample) engine = engines.engine_get_by_name(engine_name) solutions = engine.pseudo_axis_values_set(hkl, Hkl.UnitEnum.USER) first_solution = solutions.items()[0] values = first_solution.geometry_get().axis_values_get(Hkl.UnitEnum.USER) return Reflection(hkl, values) def get_UB(config): sample, geometry, detector, engines = new_hkl_diffractometer(config) return hkl_matrix_to_numpy(sample.UB_get()) def get_R_and_P(config, values): sample, geometry, detector, engines = new_hkl_diffractometer(config) geometry.axis_values_set(values, Hkl.UnitEnum.USER) R = hkl_matrix_to_numpy(geometry.sample_rotation_get(sample).to_matrix()) P = hkl_matrix_to_numpy(geometry.detector_rotation_get(detector).to_matrix()) return R, P ############## # Unit tests # ############## class Polarisation(unittest.TestCase): def 
test_petraIII(self): dtype = "E6C" # RUBh = kf - ki = (P ki - ki) = (P - I) ki config = parse(find('crystal.ini'), dtype) print config gaga = ca(config, [0.5, 14.5, 0.43]) print gaga UB = get_UB(config) print "UB: " print UB # the hkl vectors a*, b*, c* expressed in the laboratory basis for the current Q # transformation matrix T reciprocal space --> laboratory # values_w = [0, 34.16414, 79.52420, 0, 0, 38.29633] # values_w = [0, 35.06068, 80.78517, 0, 0, 43.91934] # values_w = [0, 36.45961, 81.75533, 0, 0, 49.7139] # values_w = [0, 38.24551, 82.52447, 0, 0, 55.68957] # values_w = [0, 40.34955, 83.14900, 0, 0, 61.86603] # values_w = [0, 42.73321, 83.66607, 0, 0, 68.27333] # values_w = [0, 45.37981, 84.10117, 0, 0, 74.95312] # values_w = [0, 48.29079, 84.47230, 0, 0, 81.96226] values_w = [0, 51.48568, 84.79259, 0, 0, 89.37964] # mu, omega, chi, phi, gamma, delta R, P = get_R_and_P(config, values_w) print "R: " print R print "P: " print P RUB = dot(R, UB) print "RUB: " print RUB astar = dot(RUB, [1, 0, 0]) bstar = dot(RUB, [0, 1, 0]) cstar = dot(RUB, [0, 0, 1]) # transformation matrix: reciprocal space --> laboratory T = hstack((reshape(astar, (3, 1)), reshape(bstar, (3, 1)), reshape(cstar, (3, 1)))) Tbis = vstack((reshape(astar, (1, 3)), reshape(bstar, (1, 3)), reshape(cstar, (1, 3)))) # transformation matrix: laboratory --> reciprocal space Tinv = inv(T) print '' # print 'cstar in laboratory frame :',cstar # print 'cstar in laboratory frame from T:',dot(T, hkl) # print 'cstar in rec. 
space from Tinv :',dot(Tinv, dot(T, hkl)) # compute kf ki = array([1, 0, 0]) * math.pi * 2 / config.source.wavelength kf = dot(P, ki) # compute Q Q = kf - ki print '' print 'Energy (keV):', config.source print 'Lattice parameters:', config.sample.lattice print '1st orienting reflection:', config.sample.or0.hkl, 'with angles: ', config.sample.or0.values print '2nd orienting reflection:', config.sample.or1.hkl, 'with angles: ', config.sample.or1.values print '' print 'UB matrix:' print UB print '' print 'Transformation matrix T(reciprocal space)--> laboratory frame:' print T, print '' print '' print 'Transformation matrix T(laboratory frame)--> reciprocal space :' print Tinv # compute Q # hkl = [0.5, 6.5, 0.43] # hkl = [0.5, 7.5, 0.43] # hkl = [0.5, 8.5, 0.43] # hkl = [0.5, 9.5, 0.43] # hkl = [0.5, 10.5, 0.43] # hkl = [0.5, 11.5, 0.43] # hkl = [0.5, 12.5, 0.43] # hkl = [0.5, 13.5, 0.43] hkl = [0.5, 14.5, 0.43] ''' print '' print 'Q in lab. frame from code :', dot(dot(R, UB), hkl), ', normalized:',(dot(dot(R, UB), hkl))/norm(dot(dot(R, UB), hkl)), ', norm:', norm(dot(dot(R, UB), hkl)) print 'Q in lab. frame from T :', dot(T, hkl), ', normalized:', (dot(T, hkl))/norm(dot(T, hkl)), ', norm:', norm(dot(T, hkl)) print 'Q in lab. frame from ki, kf :', Q, ', normalized:', Q/norm(Q),', norm:', norm(Q) print '' print 'Q in rec. space from Tinv of T :', dot(Tinv, dot(T, hkl)) print '' print 'difference factor:',(norm(dot(T, hkl)))/(norm(Q)) print '' print 'kf',kf,', norm:',norm(kf) print 'ki',ki,', norm:',norm(ki) ''' print '' print 'Q in rec. 
space from Tinv of T :', dot(Tinv, dot(T, hkl)) print '' # # compute u1, u2, u3 in reciprocal space coordinates # u1,u2,u3 in laboratory frame u1xyz = ki+kf u2xyz = cross(ki, kf) u3xyz = ki-kf # print '(u1,u2,u3) in laboratory frame:',u1xyz,u2xyz,u3xyz # u1,u2,u3 in reciprocal space u1 = dot(Tinv, u1xyz) / norm(dot(Tinv, u1xyz)) u2 = dot(Tinv, u2xyz) / norm(dot(Tinv, u2xyz)) u3 = dot(Tinv, u3xyz) / norm(dot(Tinv, u3xyz)) u1 = dot(Tinv, u1xyz) u2 = dot(Tinv, u2xyz) u3 = dot(Tinv, u3xyz) print '(u1,u2,u3) in reciprocal space from Tinv, unnormalized:',\ u1, u2, u3 print '(u1,u2,u3) in reciprocal space from Tinv, normalized to 1:',\ dot(Tinv, u1xyz) / norm(dot(Tinv, u1xyz)), \ dot(Tinv, u2xyz) / norm(dot(Tinv, u2xyz)), \ dot(Tinv, u3xyz) / norm(dot(Tinv, u3xyz)) # print '(u1,u2,u3) in reciprocal space from Tinv:', \ # dot(Tinv, u1xyz), dot(Tinv, u2xyz), dot(Tinv, u3xyz) print '' # TRANSFORMATION MATRIX reciprocal lattice basis to u1 u2 u3 basis ABC = hstack((reshape([1, 0, 0], (3, 1)), reshape([0, 1, 0], (3, 1)), reshape([0, 0, 1], (3, 1)))) U = hstack((reshape(u1, (3, 1)), reshape(u2, (3, 1)), reshape(u3, (3, 1)))) M = dot(ABC, inv(U)) print 'Transformation matrix reciprocal lattice basis to u1 u2 u3 basis:' print M # keep in order to pass the test self.assertTrue(True) if __name__ == '__main__': unittest.main(verbosity=2)
gpl-3.0
jlegendary/nupic
examples/tp/tp_overlapping_sequences.py
8
31874
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import numpy import sys from optparse import OptionParser import pprint import random import unittest2 as unittest from nupic.research.TP import TP from nupic.research.TP10X2 import TP10X2 from nupic.research import fdrutilities as fdrutils from nupic.support.unittesthelpers import testcasebase """ Overlapping sequences test =========================== Test learning of sequences with shared (overlapping) subsequences. Test 1 - Test with fast learning, make sure PAM allows us to train with fewer repeats of the training data. Test 2 - Test with slow learning, make sure PAM allows us to train with fewer repeats of the training data. Test 3 - Test with slow learning, some overlap in the patterns, and TP thresholds of 80% of newSynapseCount Test 4 - Test with "Forbes-like" data. A bunch of sequences of lengths between 2 and 10 elements long. """ VERBOSITY = 0 # how chatty the unit tests should be SEED = 35 # the random seed used throughout # Whether to only run the short tests. 
SHORT = True # If set to 0 the CPP TP will not be tested INCLUDE_CPP_TP = 1 # Also test with CPP TP def printOneTrainingVector(x): "Print a single vector succinctly." print ''.join('1' if k != 0 else '.' for k in x) def printAllTrainingSequences(trainingSequences, upTo = 99999): for i,trainingSequence in enumerate(trainingSequences): print "============= Sequence",i,"=================" for j,pattern in enumerate(trainingSequence): printOneTrainingVector(pattern) def getSimplePatterns(numOnes, numPatterns, patternOverlap=0): """Very simple patterns. Each pattern has numOnes consecutive bits on. The amount of overlap between consecutive patterns is configurable, via the patternOverlap parameter. Parameters: ----------------------------------------------------------------------- numOnes: Number of bits ON in each pattern numPatterns: Number of unique patterns to generate patternOverlap: Number of bits of overlap between each successive pattern retval: patterns """ assert (patternOverlap < numOnes) # How many new bits are introduced in each successive pattern? numNewBitsInEachPattern = numOnes - patternOverlap numCols = numNewBitsInEachPattern * numPatterns + patternOverlap p = [] for i in xrange(numPatterns): x = numpy.zeros(numCols, dtype='float32') startBit = i*numNewBitsInEachPattern nextStartBit = startBit + numOnes x[startBit:nextStartBit] = 1 p.append(x) return p def buildOverlappedSequences( numSequences = 2, seqLen = 5, sharedElements = [3,4], numOnBitsPerPattern = 3, patternOverlap = 0, seqOverlap = 0, **kwargs ): """ Create training sequences that share some elements in the middle. Parameters: ----------------------------------------------------- numSequences: Number of unique training sequences to generate seqLen: Overall length of each sequence sharedElements: Which element indices of each sequence are shared. 
These will be in the range between 0 and seqLen-1 numOnBitsPerPattern: Number of ON bits in each TP input pattern patternOverlap: Max number of bits of overlap between any 2 patterns retval: (numCols, trainingSequences) numCols - width of the patterns trainingSequences - a list of training sequences """ # Total number of patterns used to build the sequences numSharedElements = len(sharedElements) numUniqueElements = seqLen - numSharedElements numPatterns = numSharedElements + numUniqueElements * numSequences # Create the table of patterns patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap) # Total number of columns required numCols = len(patterns[0]) # ----------------------------------------------------------------------- # Create the training sequences trainingSequences = [] uniquePatternIndices = range(numSharedElements, numPatterns) for _ in xrange(numSequences): sequence = [] # pattern indices [0 ... numSharedElements-1] are reserved for the shared # middle sharedPatternIndices = range(numSharedElements) # Build up the sequence for j in xrange(seqLen): if j in sharedElements: patIdx = sharedPatternIndices.pop(0) else: patIdx = uniquePatternIndices.pop(0) sequence.append(patterns[patIdx]) trainingSequences.append(sequence) if VERBOSITY >= 3: print "\nTraining sequences" printAllTrainingSequences(trainingSequences) return (numCols, trainingSequences) def buildSequencePool(numSequences = 10, seqLen = [2,3,4], numPatterns = 5, numOnBitsPerPattern = 3, patternOverlap = 0, **kwargs ): """ Create a bunch of sequences of various lengths, all built from a fixed set of patterns. 
Parameters: ----------------------------------------------------- numSequences: Number of training sequences to generate seqLen: List of possible sequence lengths numPatterns: How many possible patterns there are to use within sequences numOnBitsPerPattern: Number of ON bits in each TP input pattern patternOverlap: Max number of bits of overlap between any 2 patterns retval: (numCols, trainingSequences) numCols - width of the patterns trainingSequences - a list of training sequences """ # Create the table of patterns patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap) # Total number of columns required numCols = len(patterns[0]) # ----------------------------------------------------------------------- # Create the training sequences trainingSequences = [] for _ in xrange(numSequences): # Build it up from patterns sequence = [] length = random.choice(seqLen) for _ in xrange(length): patIdx = random.choice(xrange(numPatterns)) sequence.append(patterns[patIdx]) # Put it in trainingSequences.append(sequence) if VERBOSITY >= 3: print "\nTraining sequences" printAllTrainingSequences(trainingSequences) return (numCols, trainingSequences) def createTPs(includeCPP = True, includePy = True, numCols = 100, cellsPerCol = 4, activationThreshold = 3, minThreshold = 3, newSynapseCount = 3, initialPerm = 0.6, permanenceInc = 0.1, permanenceDec = 0.0, globalDecay = 0.0, pamLength = 0, checkSynapseConsistency = True, maxInfBacktrack = 0, maxLrnBacktrack = 0, **kwargs ): """Create one or more TP instances, placing each into a dict keyed by name. 
Parameters: ------------------------------------------------------------------ retval: tps - dict of TP instances """ # Keep these fixed: connectedPerm = 0.5 tps = dict() if includeCPP: if VERBOSITY >= 2: print "Creating TP10X2 instance" cpp_tp = TP10X2(numberOfCols = numCols, cellsPerColumn = cellsPerCol, initialPerm = initialPerm, connectedPerm = connectedPerm, minThreshold = minThreshold, newSynapseCount = newSynapseCount, permanenceInc = permanenceInc, permanenceDec = permanenceDec, activationThreshold = activationThreshold, globalDecay = globalDecay, burnIn = 1, seed=SEED, verbosity=VERBOSITY, checkSynapseConsistency = checkSynapseConsistency, collectStats = True, pamLength = pamLength, maxInfBacktrack = maxInfBacktrack, maxLrnBacktrack = maxLrnBacktrack, ) # Ensure we are copying over learning states for TPDiff cpp_tp.retrieveLearningStates = True tps['CPP'] = cpp_tp if includePy: if VERBOSITY >= 2: print "Creating PY TP instance" py_tp = TP(numberOfCols = numCols, cellsPerColumn = cellsPerCol, initialPerm = initialPerm, connectedPerm = connectedPerm, minThreshold = minThreshold, newSynapseCount = newSynapseCount, permanenceInc = permanenceInc, permanenceDec = permanenceDec, activationThreshold = activationThreshold, globalDecay = globalDecay, burnIn = 1, seed=SEED, verbosity=VERBOSITY, collectStats = True, pamLength = pamLength, maxInfBacktrack = maxInfBacktrack, maxLrnBacktrack = maxLrnBacktrack, ) tps['PY '] = py_tp return tps def assertNoTPDiffs(tps): """ Check for diffs among the TP instances in the passed in tps dict and raise an assert if any are detected Parameters: --------------------------------------------------------------------- tps: dict of TP instances """ if len(tps) == 1: return if len(tps) > 2: raise "Not implemented for more than 2 TPs" same = fdrutils.tpDiff2(tps.values(), verbosity=VERBOSITY) assert(same) return def evalSequences(tps, trainingSequences, testSequences = None, nTrainRepetitions = 1, doResets = True, **kwargs): """Train the 
TPs on the entire training set for nTrainRepetitions in a row. Then run the test set through inference once and return the inference stats. Parameters: --------------------------------------------------------------------- tps: dict of TP instances trainingSequences: list of training sequences. Each sequence is a list of TP input patterns testSequences: list of test sequences. If None, we will test against the trainingSequences nTrainRepetitions: Number of times to run the training set through the TP doResets: If true, send a reset to the TP between each sequence """ # If no test sequence is specified, use the first training sequence if testSequences == None: testSequences = trainingSequences # First TP instance is used by default for verbose printing of input values, # etc. firstTP = tps.values()[0] assertNoTPDiffs(tps) # ===================================================================== # Loop through the training set nTrainRepetitions times # ========================================================================== for trainingNum in xrange(nTrainRepetitions): if VERBOSITY >= 2: print "\n##############################################################" print "################# Training round #%d of %d #################" \ % (trainingNum, nTrainRepetitions) for (name,tp) in tps.iteritems(): print "TP parameters for %s: " % (name) print "---------------------" tp.printParameters() print # ====================================================================== # Loop through the sequences in the training set numSequences = len(testSequences) for sequenceNum, trainingSequence in enumerate(trainingSequences): numTimeSteps = len(trainingSequence) if VERBOSITY >= 2: print "\n================= Sequence #%d of %d ================" \ % (sequenceNum, numSequences) if doResets: for tp in tps.itervalues(): tp.reset() # -------------------------------------------------------------------- # Train each element of the sequence for t, x in enumerate(trainingSequence): # Print 
Verbose info about this element if VERBOSITY >= 2: print if VERBOSITY >= 3: print "------------------------------------------------------------" print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \ % (sequenceNum, numSequences, t, numTimeSteps) firstTP.printInput(x) print "input nzs:", x.nonzero() # Train in this element x = numpy.array(x).astype('float32') for tp in tps.itervalues(): tp.learn(x, computeInfOutput=True) # Print the input and output states if VERBOSITY >= 3: for (name,tp) in tps.iteritems(): print "I/O states of %s TP:" % (name) print "-------------------------------------", tp.printStates(printPrevious = (VERBOSITY >= 5)) print assertNoTPDiffs(tps) # Print out number of columns that weren't predicted if VERBOSITY >= 2: for (name,tp) in tps.iteritems(): stats = tp.getStats() print "# of unpredicted columns for %s TP: %d of %d" \ % (name, stats['curMissing'], x.sum()) numBurstingCols = tp.infActiveState['t'].min(axis=1).sum() print "# of bursting columns for %s TP: %d of %d" \ % (name, numBurstingCols, x.sum()) # Print the trained cells if VERBOSITY >= 4: print "Sequence %d finished." 
% (sequenceNum) for (name,tp) in tps.iteritems(): print "All cells of %s TP:" % (name) print "-------------------------------------", tp.printCells() print # -------------------------------------------------------------------- # Done training all sequences in this round, print the total number of # missing, extra columns and make sure it's the same among the TPs if VERBOSITY >= 2: print prevResult = None for (name,tp) in tps.iteritems(): stats = tp.getStats() if VERBOSITY >= 1: print "Stats for %s TP over all sequences for training round #%d of %d:" \ % (name, trainingNum, nTrainRepetitions) print " total missing:", stats['totalMissing'] print " total extra:", stats['totalExtra'] if prevResult is None: prevResult = (stats['totalMissing'], stats['totalExtra']) else: assert (stats['totalMissing'] == prevResult[0]) assert (stats['totalExtra'] == prevResult[1]) tp.resetStats() # ===================================================================== # Finish up learning if VERBOSITY >= 3: print "Calling trim segments" prevResult = None for tp in tps.itervalues(): nSegsRemoved, nSynsRemoved = tp.trimSegments() if prevResult is None: prevResult = (nSegsRemoved, nSynsRemoved) else: assert (nSegsRemoved == prevResult[0]) assert (nSynsRemoved == prevResult[1]) assertNoTPDiffs(tps) if VERBOSITY >= 4: print "Training completed. 
Complete state:" for (name,tp) in tps.iteritems(): print "%s:" % (name) tp.printCells() print # ========================================================================== # Infer # ========================================================================== if VERBOSITY >= 2: print "\n##############################################################" print "########################## Inference #########################" # Reset stats in all TPs for tp in tps.itervalues(): tp.resetStats() # ------------------------------------------------------------------- # Loop through the test sequences numSequences = len(testSequences) for sequenceNum, testSequence in enumerate(testSequences): numTimeSteps = len(testSequence) # Identify this sequence if VERBOSITY >= 2: print "\n================= Sequence %d of %d ================" \ % (sequenceNum, numSequences) # Send in the rest if doResets: for tp in tps.itervalues(): tp.reset() # ------------------------------------------------------------------- # Loop through the elements of this sequence for t,x in enumerate(testSequence): # Print verbose info about this element if VERBOSITY >= 2: print if VERBOSITY >= 3: print "------------------------------------------------------------" print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \ % (sequenceNum, numSequences, t, numTimeSteps) firstTP.printInput(x) print "input nzs:", x.nonzero() # Infer on this element for tp in tps.itervalues(): tp.infer(x) assertNoTPDiffs(tps) # Print out number of columns that weren't predicted if VERBOSITY >= 2: for (name,tp) in tps.iteritems(): stats = tp.getStats() print "# of unpredicted columns for %s TP: %d of %d" \ % (name, stats['curMissing'], x.sum()) # Debug print of internal state if VERBOSITY >= 3: for (name,tp) in tps.iteritems(): print "I/O states of %s TP:" % (name) print "-------------------------------------", tp.printStates(printPrevious = (VERBOSITY >= 5), printLearnState = False) print # Done with this sequence # Debug 
print of all stats of the TPs if VERBOSITY >= 4: print for (name,tp) in tps.iteritems(): print "Interim internal stats for %s TP:" % (name) print "---------------------------------" pprint.pprint(tp.getStats()) print if VERBOSITY >= 2: print "\n##############################################################" print "####################### Inference Done #######################" # Get the overall stats for each TP and return them tpStats = dict() for (name,tp) in tps.iteritems(): tpStats[name] = stats = tp.getStats() if VERBOSITY >= 2: print "Stats for %s TP over all sequences:" % (name) print " total missing:", stats['totalMissing'] print " total extra:", stats['totalExtra'] for (name,tp) in tps.iteritems(): if VERBOSITY >= 3: print "\nAll internal stats for %s TP:" % (name) print "-------------------------------------", pprint.pprint(tpStats[name]) print return tpStats def testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods): """ Build up a set of sequences, create the TP(s), train them, test them, and check that we got the expected number of missing predictions during inference. Parameters: ----------------------------------------------------------------------- baseParams: dict of all of the parameters for building sequences, creating the TPs, and training and testing them. This gets updated from 'mods' before we use it. expMissingMin: Minimum number of expected missing predictions during testing. expMissingMax: Maximum number of expected missing predictions during testing. mods: dict of modifications to make to the baseParams. 
""" # Update the base with the modifications params = dict(baseParams) params.update(mods) # -------------------------------------------------------------------- # Create the sequences func = params['seqFunction'] (numCols, trainingSequences) = func(**params) # -------------------------------------------------------------------- # Create the TPs if params['numCols'] is None: params['numCols'] = numCols tps = createTPs(**params) # -------------------------------------------------------------------- # Train and get test results tpStats = evalSequences(tps = tps, trainingSequences=trainingSequences, testSequences=None, **params) # ----------------------------------------------------------------------- # Make sure there are the expected number of missing predictions for (name, stats) in tpStats.iteritems(): print "Detected %d missing predictions overall during inference" \ % (stats['totalMissing']) if expMissingMin is not None and stats['totalMissing'] < expMissingMin: print "FAILURE: Expected at least %d total missing but got %d" \ % (expMissingMin, stats['totalMissing']) assert False if expMissingMax is not None and stats['totalMissing'] > expMissingMax: print "FAILURE: Expected at most %d total missing but got %d" \ % (expMissingMax, stats['totalMissing']) assert False return True class TPOverlappingSeqsTest(testcasebase.TestCaseBase): def testFastLearning(self): """ Test with fast learning, make sure PAM allows us to train with fewer repeats of the training data. 
""" numOnBitsPerPattern = 3 # ================================================================ # Base params baseParams = dict( # Sequence generation seqFunction = buildOverlappedSequences, numSequences = 2, seqLen = 10, sharedElements = [2,3], numOnBitsPerPattern = numOnBitsPerPattern, # TP construction includeCPP = INCLUDE_CPP_TP, numCols = None, # filled in based on generated sequences activationThreshold = numOnBitsPerPattern, minThreshold = numOnBitsPerPattern, newSynapseCount = numOnBitsPerPattern, initialPerm = 0.6, permanenceInc = 0.1, permanenceDec = 0.0, globalDecay = 0.0, pamLength = 0, # Training/testing nTrainRepetitions = 8, doResets = True, ) # ================================================================ # Run various configs # No PAM, with 3 repetitions, still missing predictions print "\nRunning without PAM, 3 repetitions of the training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=20, expMissingMax=None, pamLength=1, nTrainRepetitions=3)) # With PAM, with only 3 repetitions, 0 missing predictions print "\nRunning with PAM, 3 repetitions of the training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, pamLength=5, nTrainRepetitions=3)) def testSlowLearning(self): """ Test with slow learning, make sure PAM allows us to train with fewer repeats of the training data. 
""" numOnBitsPerPattern = 3 # ================================================================ # Base params baseParams = dict( # Sequence generation seqFunction = buildOverlappedSequences, numSequences = 2, seqLen = 10, sharedElements = [2,3], numOnBitsPerPattern = numOnBitsPerPattern, # TP construction includeCPP = INCLUDE_CPP_TP, numCols = None, # filled in based on generated sequences activationThreshold = numOnBitsPerPattern, minThreshold = numOnBitsPerPattern, newSynapseCount = numOnBitsPerPattern, initialPerm = 0.11, permanenceInc = 0.1, permanenceDec = 0.0, globalDecay = 0.0, pamLength = 0, # Training/testing nTrainRepetitions = 8, doResets = True, ) # ================================================================ # Run various configs # No PAM, requires 40 repetitions # No PAM, with 10 repetitions, still missing predictions print "\nRunning without PAM, 10 repetitions of the training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=10, expMissingMax=None, pamLength=1, nTrainRepetitions=10)) # With PAM, with only 10 repetitions, 0 missing predictions print "\nRunning with PAM, 10 repetitions of the training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, pamLength=6, nTrainRepetitions=10)) def testSlowLearningWithOverlap(self): """ Test with slow learning, some overlap in the patterns, and TP thresholds of 80% of newSynapseCount Make sure PAM allows us to train with fewer repeats of the training data. """ # Cannot use skipIf decorator because it reads SHORT before it is set. if SHORT: self.skipTest("Test skipped by default. 
Enable with --long.") numOnBitsPerPattern = 5 # ================================================================ # Base params baseParams = dict( # Sequence generation seqFunction = buildOverlappedSequences, numSequences = 2, seqLen = 10, sharedElements = [2,3], numOnBitsPerPattern = numOnBitsPerPattern, patternOverlap = 2, # TP construction includeCPP = INCLUDE_CPP_TP, numCols = None, # filled in based on generated sequences activationThreshold = int(0.8 * numOnBitsPerPattern), minThreshold = int(0.8 * numOnBitsPerPattern), newSynapseCount = numOnBitsPerPattern, initialPerm = 0.11, permanenceInc = 0.1, permanenceDec = 0.0, globalDecay = 0.0, pamLength = 0, # Training/testing nTrainRepetitions = 8, doResets = True, ) # ================================================================ # Run various configs # No PAM, with 10 repetitions, still missing predictions print "\nRunning without PAM, 10 repetitions of the training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=10, expMissingMax=None, pamLength=1, nTrainRepetitions=10)) # With PAM, with only 10 repetitions, 0 missing predictions print "\nRunning with PAM, 10 repetitions of the training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, pamLength=6, nTrainRepetitions=10)) def testForbesLikeData(self): """ Test with "Forbes-like" data. A bunch of sequences of lengths between 2 and 10 elements long. We will test with both fast and slow learning. Make sure PAM allows us to train with fewer repeats of the training data. """ # Cannot use skipIf decorator because it reads SHORT before it is set. if SHORT: self.skipTest("Test skipped by default. 
Enable with --long.") numOnBitsPerPattern = 3 # ================================================================ # Base params baseParams = dict( # Sequence generation seqFunction = buildSequencePool, numSequences = 20, seqLen = [3,10], numPatterns = 10, numOnBitsPerPattern = numOnBitsPerPattern, patternOverlap = 1, # TP construction includeCPP = INCLUDE_CPP_TP, numCols = None, # filled in based on generated sequences activationThreshold = int(0.8 * numOnBitsPerPattern), minThreshold = int(0.8 * numOnBitsPerPattern), newSynapseCount = numOnBitsPerPattern, initialPerm = 0.51, permanenceInc = 0.1, permanenceDec = 0.0, globalDecay = 0.0, pamLength = 0, checkSynapseConsistency = False, # Training/testing nTrainRepetitions = 8, doResets = True, ) # ================================================================ # Run various configs # Fast mode, no PAM # Fast mode, with PAM print "\nRunning without PAM, fast learning, 2 repetitions of the " \ "training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=50, expMissingMax=None, pamLength=1, nTrainRepetitions=2)) # Fast mode, with PAM print "\nRunning with PAM, fast learning, 2 repetitions of the " \ "training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, pamLength=5, nTrainRepetitions=2)) # Slow mode, no PAM print "\nRunning without PAM, slow learning, 8 repetitions of the " \ "training data..." self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=1, expMissingMax=None, initialPerm=0.31, pamLength=1, nTrainRepetitions=8)) # Fast mode, with PAM print "\nRunning with PAM, slow learning, 8 repetitions of the " \ "training data..." 
self.assertTrue(testConfig(baseParams=baseParams, expMissingMin=0, expMissingMax=0, initialPerm=0.31, pamLength=5, nTrainRepetitions=8)) if __name__=="__main__": # Process command line arguments parser = OptionParser() parser.add_option( "--verbosity", default=VERBOSITY, type="int", help="Verbosity level, either 0, 1, 2, or 3 [default: %default].") parser.add_option("--seed", default=SEED, type="int", help="Random seed to use [default: %default].") parser.add_option("--short", action="store_true", default=True, help="Run short version of the tests [default: %default].") parser.add_option("--long", action="store_true", default=False, help="Run long version of the tests [default: %default].") (options, args) = parser.parse_args() SEED = options.seed VERBOSITY = options.verbosity SHORT = not options.long # Seed the random number generators rgen = numpy.random.RandomState(SEED) random.seed(SEED) if not INCLUDE_CPP_TP: print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" print "!! WARNING: C++ TP testing is DISABLED until it can be updated." print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # Form the command line for the unit test framework. args = [sys.argv[0]] + args unittest.main(argv=args, verbosity=VERBOSITY)
gpl-3.0
lmregus/Portfolio
python/design_patterns/env/lib/python3.7/site-packages/sphinx/ext/apidoc.py
1
18689
""" sphinx.ext.apidoc ~~~~~~~~~~~~~~~~~ Parses a directory tree looking for Python modules and packages and creates ReST files appropriately to create code documentation with Sphinx. It also creates a modules index (named modules.<suffix>). This is derived from the "sphinx-autopackage" script, which is: Copyright 2008 Société des arts technologiques (SAT), https://sat.qc.ca/ :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import argparse import glob import locale import os import sys from fnmatch import fnmatch from os import path import sphinx.locale from sphinx import __display_version__, package_dir from sphinx.cmd.quickstart import EXTENSIONS from sphinx.locale import __ from sphinx.util import rst from sphinx.util.osutil import FileAvoidWrite, ensuredir if False: # For type annotation from typing import Any, List, Tuple # NOQA # automodule options if 'SPHINX_APIDOC_OPTIONS' in os.environ: OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',') else: OPTIONS = [ 'members', 'undoc-members', # 'inherited-members', # disabled because there's a bug in sphinx 'show-inheritance', ] INITPY = '__init__.py' PY_SUFFIXES = {'.py', '.pyx'} def makename(package, module): # type: (str, str) -> str """Join package and module with a dot.""" # Both package and module can be None/empty. if package: name = package if module: name += '.' 
+ module else: name = module return name def write_file(name, text, opts): # type: (str, str, Any) -> None """Write the output file for module/package <name>.""" fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix)) if opts.dryrun: print(__('Would create file %s.') % fname) return if not opts.force and path.isfile(fname): print(__('File %s already exists, skipping.') % fname) else: print(__('Creating file %s.') % fname) with FileAvoidWrite(fname) as f: f.write(text) def format_heading(level, text, escape=True): # type: (int, str, bool) -> str """Create a heading of <level> [1, 2 or 3 supported].""" if escape: text = rst.escape(text) underlining = ['=', '-', '~', ][level - 1] * len(text) return '%s\n%s\n\n' % (text, underlining) def format_directive(module, package=None): # type: (str, str) -> str """Create the automodule directive and add the options.""" directive = '.. automodule:: %s\n' % makename(package, module) for option in OPTIONS: directive += ' :%s:\n' % option return directive def create_module_file(package, module, opts): # type: (str, str, Any) -> None """Build the text of the file and write the file.""" if not opts.noheadings: text = format_heading(1, '%s module' % module) else: text = '' # text += format_heading(2, ':mod:`%s` Module' % module) text += format_directive(module, package) write_file(makename(package, module), text, opts) def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace, excludes=[]): # NOQA # type: (str, str, str, List[str], Any, List[str], bool, List[str]) -> None """Build the text of the file and write the file.""" text = format_heading(1, ('%s package' if not is_namespace else "%s namespace") % makename(master_package, subroot)) if opts.modulefirst and not is_namespace: text += format_directive(subroot, master_package) text += '\n' # build a list of directories that are szvpackages (contain an INITPY file) # and also checks the INITPY file is not empty, or there are other python # 
source files in that folder. # (depending on settings - but shall_skip() takes care of that) subs = [sub for sub in subs if not shall_skip(path.join(root, sub, INITPY), opts, excludes)] # if there are some package directories, add a TOC for theses subpackages if subs: text += format_heading(2, 'Subpackages') text += '.. toctree::\n\n' for sub in subs: text += ' %s.%s\n' % (makename(master_package, subroot), sub) text += '\n' submods = [path.splitext(sub)[0] for sub in py_files if not shall_skip(path.join(root, sub), opts, excludes) and sub != INITPY] if submods: text += format_heading(2, 'Submodules') if opts.separatemodules: text += '.. toctree::\n\n' for submod in submods: modfile = makename(master_package, makename(subroot, submod)) text += ' %s\n' % modfile # generate separate file for this module if not opts.noheadings: filetext = format_heading(1, '%s module' % modfile) else: filetext = '' filetext += format_directive(makename(subroot, submod), master_package) write_file(modfile, filetext, opts) else: for submod in submods: modfile = makename(master_package, makename(subroot, submod)) if not opts.noheadings: text += format_heading(2, '%s module' % modfile) text += format_directive(makename(subroot, submod), master_package) text += '\n' text += '\n' if not opts.modulefirst and not is_namespace: text += format_heading(2, 'Module contents') text += format_directive(subroot, master_package) write_file(makename(master_package, subroot), text, opts) def create_modules_toc_file(modules, opts, name='modules'): # type: (List[str], Any, str) -> None """Create the module's index.""" text = format_heading(1, '%s' % opts.header, escape=False) text += '.. 
toctree::\n' text += ' :maxdepth: %s\n\n' % opts.maxdepth modules.sort() prev_module = '' for module in modules: # look if the module is a subpackage and, if yes, ignore it if module.startswith(prev_module + '.'): continue prev_module = module text += ' %s\n' % module write_file(name, text, opts) def shall_skip(module, opts, excludes=[]): # type: (str, Any, List[str]) -> bool """Check if we want to skip this module.""" # skip if the file doesn't exist and not using implicit namespaces if not opts.implicit_namespaces and not path.exists(module): return True # Are we a package (here defined as __init__.py, not the folder in itself) if os.path.basename(module) == INITPY: # Yes, check if we have any non-excluded modules at all here all_skipped = True basemodule = path.dirname(module) for submodule in glob.glob(path.join(basemodule, '*.py')): if not is_excluded(path.join(basemodule, submodule), excludes): # There's a non-excluded module here, we won't skip all_skipped = False if all_skipped: return True # skip if it has a "private" name and this is selected filename = path.basename(module) if filename != '__init__.py' and filename.startswith('_') and \ not opts.includeprivate: return True return False def recurse_tree(rootpath, excludes, opts): # type: (str, List[str], Any) -> List[str] """ Look for every file in the directory tree and create the corresponding ReST files. 
""" followlinks = getattr(opts, 'followlinks', False) includeprivate = getattr(opts, 'includeprivate', False) implicit_namespaces = getattr(opts, 'implicit_namespaces', False) # check if the base directory is a package and get its name if INITPY in os.listdir(rootpath) or implicit_namespaces: root_package = rootpath.split(path.sep)[-1] else: # otherwise, the base is a directory with packages root_package = None toplevels = [] for root, subs, files in os.walk(rootpath, followlinks=followlinks): # document only Python module files (that aren't excluded) py_files = sorted(f for f in files if path.splitext(f)[1] in PY_SUFFIXES and not is_excluded(path.join(root, f), excludes)) is_pkg = INITPY in py_files is_namespace = INITPY not in py_files and implicit_namespaces if is_pkg: py_files.remove(INITPY) py_files.insert(0, INITPY) elif root != rootpath: # only accept non-package at toplevel unless using implicit namespaces if not implicit_namespaces: del subs[:] continue # remove hidden ('.') and private ('_') directories, as well as # excluded dirs if includeprivate: exclude_prefixes = ('.',) # type: Tuple[str, ...] 
else: exclude_prefixes = ('.', '_') subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and not is_excluded(path.join(root, sub), excludes)) if is_pkg or is_namespace: # we are in a package with something to document if subs or len(py_files) > 1 or not shall_skip(path.join(root, INITPY), opts): subpackage = root[len(rootpath):].lstrip(path.sep).\ replace(path.sep, '.') # if this is not a namespace or # a namespace and there is something there to document if not is_namespace or len(py_files) > 0: create_package_file(root, root_package, subpackage, py_files, opts, subs, is_namespace, excludes) toplevels.append(makename(root_package, subpackage)) else: # if we are at the root level, we don't require it to be a package assert root == rootpath and root_package is None for py_file in py_files: if not shall_skip(path.join(rootpath, py_file), opts): module = path.splitext(py_file)[0] create_module_file(root_package, module, opts) toplevels.append(module) return toplevels def is_excluded(root, excludes): # type: (str, List[str]) -> bool """Check if the directory is in the exclude list. Note: by having trailing slashes, we avoid common prefix issues, like e.g. an exclude "foo" also accidentally excluding "foobar". """ for exclude in excludes: if fnmatch(root, exclude): return True return False def get_parser(): # type: () -> argparse.ArgumentParser parser = argparse.ArgumentParser( usage='%(prog)s [OPTIONS] -o <OUTPUT_PATH> <MODULE_PATH> ' '[EXCLUDE_PATTERN, ...]', epilog=__('For more information, visit <http://sphinx-doc.org/>.'), description=__(""" Look recursively in <MODULE_PATH> for Python modules and packages and create one reST file with automodule directives per package in the <OUTPUT_PATH>. The <EXCLUDE_PATTERN>s can be file and/or directory patterns that will be excluded from generation. 
Note: By default this script will not overwrite already created files.""")) parser.add_argument('--version', action='version', dest='show_version', version='%%(prog)s %s' % __display_version__) parser.add_argument('module_path', help=__('path to module to document')) parser.add_argument('exclude_pattern', nargs='*', help=__('fnmatch-style file and/or directory patterns ' 'to exclude from generation')) parser.add_argument('-o', '--output-dir', action='store', dest='destdir', required=True, help=__('directory to place all output')) parser.add_argument('-d', '--maxdepth', action='store', dest='maxdepth', type=int, default=4, help=__('maximum depth of submodules to show in the TOC ' '(default: 4)')) parser.add_argument('-f', '--force', action='store_true', dest='force', help=__('overwrite existing files')) parser.add_argument('-l', '--follow-links', action='store_true', dest='followlinks', default=False, help=__('follow symbolic links. Powerful when combined ' 'with collective.recipe.omelette.')) parser.add_argument('-n', '--dry-run', action='store_true', dest='dryrun', help=__('run the script without creating files')) parser.add_argument('-e', '--separate', action='store_true', dest='separatemodules', help=__('put documentation for each module on its own page')) parser.add_argument('-P', '--private', action='store_true', dest='includeprivate', help=__('include "_private" modules')) parser.add_argument('--tocfile', action='store', dest='tocfile', default='modules', help=__("filename of table of contents (default: modules)")) parser.add_argument('-T', '--no-toc', action='store_false', dest='tocfile', help=__("don't create a table of contents file")) parser.add_argument('-E', '--no-headings', action='store_true', dest='noheadings', help=__("don't create headings for the module/package " "packages (e.g. 
when the docstrings already " "contain them)")) parser.add_argument('-M', '--module-first', action='store_true', dest='modulefirst', help=__('put module documentation before submodule ' 'documentation')) parser.add_argument('--implicit-namespaces', action='store_true', dest='implicit_namespaces', help=__('interpret module paths according to PEP-0420 ' 'implicit namespaces specification')) parser.add_argument('-s', '--suffix', action='store', dest='suffix', default='rst', help=__('file suffix (default: rst)')) parser.add_argument('-F', '--full', action='store_true', dest='full', help=__('generate a full project with sphinx-quickstart')) parser.add_argument('-a', '--append-syspath', action='store_true', dest='append_syspath', help=__('append module_path to sys.path, used when --full is given')) parser.add_argument('-H', '--doc-project', action='store', dest='header', help=__('project name (default: root module name)')) parser.add_argument('-A', '--doc-author', action='store', dest='author', help=__('project author(s), used when --full is given')) parser.add_argument('-V', '--doc-version', action='store', dest='version', help=__('project version, used when --full is given')) parser.add_argument('-R', '--doc-release', action='store', dest='release', help=__('project release, used when --full is given, ' 'defaults to --doc-version')) group = parser.add_argument_group(__('extension options')) group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions', action='append', help=__('enable arbitrary extensions')) for ext in EXTENSIONS: group.add_argument('--ext-%s' % ext, action='append_const', const='sphinx.ext.%s' % ext, dest='extensions', help=__('enable %s extension') % ext) return parser def main(argv=sys.argv[1:]): # type: (List[str]) -> int """Parse and check the command line arguments.""" sphinx.locale.setlocale(locale.LC_ALL, '') sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx') parser = get_parser() args = 
parser.parse_args(argv) rootpath = path.abspath(args.module_path) # normalize opts if args.header is None: args.header = rootpath.split(path.sep)[-1] if args.suffix.startswith('.'): args.suffix = args.suffix[1:] if not path.isdir(rootpath): print(__('%s is not a directory.') % rootpath, file=sys.stderr) sys.exit(1) if not args.dryrun: ensuredir(args.destdir) excludes = [path.abspath(exclude) for exclude in args.exclude_pattern] modules = recurse_tree(rootpath, excludes, args) if args.full: from sphinx.cmd import quickstart as qs modules.sort() prev_module = '' text = '' for module in modules: if module.startswith(prev_module + '.'): continue prev_module = module text += ' %s\n' % module d = { 'path': args.destdir, 'sep': False, 'dot': '_', 'project': args.header, 'author': args.author or 'Author', 'version': args.version or '', 'release': args.release or args.version or '', 'suffix': '.' + args.suffix, 'master': 'index', 'epub': True, 'extensions': ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo'], 'makefile': True, 'batchfile': True, 'make_mode': True, 'mastertocmaxdepth': args.maxdepth, 'mastertoctree': text, 'language': 'en', 'module_path': rootpath, 'append_syspath': args.append_syspath, } if args.extensions: d['extensions'].extend(args.extensions) for ext in d['extensions'][:]: if ',' in ext: d['extensions'].remove(ext) d['extensions'].extend(ext.split(',')) if not args.dryrun: qs.generate(d, silent=True, overwrite=args.force) elif args.tocfile: create_modules_toc_file(modules, args, args.tocfile) return 0 # So program can be started with "python -m sphinx.apidoc ..." if __name__ == "__main__": main()
mit
kamcpp/tensorflow
tensorflow/python/training/momentum_test.py
16
20682
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Momentum.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf class MomentumOptimizerTest(tf.test.TestCase): def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum): var = var + accum * lr * momentum accum = accum * momentum + g var = var - lr * accum var = var - accum * lr * momentum return var, accum def testBasic(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) self.assertFalse(slot0 in tf.trainable_variables()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) self.assertFalse(slot1 in 
tf.trainable_variables()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval()) self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType( np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) def testNesterovMomentum(self): for dtype in [tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) cost = 5 * var0 * var0 + 3 * var1 global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step') mom_op = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9, use_nesterov=True) opt_op = mom_op.minimize(cost, global_step, [var0, var1]) tf.initialize_all_variables().run() for t in range(1, 5): opt_op.run() var0_np, accum0_np = self._update_nesterov_momentum_numpy(var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np, accum1_np, 3, 2.0, 0.9) self.assertAllClose(var0_np, var0.eval()) self.assertAllClose(var1_np, var1.eval()) def testSparseNesterovMomentum(self): for dtype in [tf.float32, tf.float64]: with self.test_session(): var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) grads = [] for t in range(1, 5): grads.append(var0_np * 10) var0_np, accum0_np = self._update_nesterov_momentum_numpy(var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np, accum1_np, 3, 2.0, 0.9) var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], 
dtype=dtype.as_numpy_dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) loss = 5 * var0 * var0 + 3 * var1 mom_op = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9, use_nesterov=True) x_feed = tf.placeholder(dtype) y_feed = tf.IndexedSlices(x_feed,tf.constant([0, 1]),tf.constant([2])) grads_and_vars = [(y_feed, var0), (tf.constant([3.0,3.0],dtype=dtype), var1)] opt_update = mom_op.apply_gradients(grads_and_vars) tf.initialize_all_variables().run() for t in range(1, 5): opt_update.run(feed_dict = {x_feed:grads[t - 1]}) var0_np, accum0_np = self._update_nesterov_momentum_numpy(var0_np, accum0_np, var0_np * 10, 2.0, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np, accum1_np, 3, 2.0, 0.9) self.assertAllClose(var0_np, var0.eval()) self.assertAllClose(var1_np, var1.eval()) def testTensorLearningRateAndMomentum(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) mom_opt = tf.train.MomentumOptimizer( learning_rate=tf.constant(2.0), momentum=tf.constant(0.9)) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) self.assertFalse(slot0 in tf.trainable_variables()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) self.assertFalse(slot1 in tf.trainable_variables()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum 
accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval()) self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType( np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) def _dbParamsMom01(self): """Return dist-belief momentum values. Return values been generated from the dist-belief momentum unittest, running with a learning rate of 0.1 and a momentum of 0.1. These values record how a parameter vector of size 10, initialized with 0.0, gets updated with 10 consecutive momentum steps. It uses random gradients. Returns: db_grad: The gradients to apply db_out: The parameters after the momentum update. 
""" db_grad = [[]] * 10 db_out = [[]] * 10 # pylint: disable=line-too-long db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615] db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618] db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521] db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544] db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311] db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189] db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648] db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303] db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287] db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544] db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229] db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717] db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251] db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, 
-0.40706289, -0.39540997] db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922] db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418] db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227] db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781] db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711] db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295] # pylint: enable=line-too-long return db_grad, db_out def testLikeDistBeliefMom01(self): with self.test_session(): db_grad, db_out = self._dbParamsMom01() num_samples = len(db_grad) var0 = tf.Variable([0.0] * num_samples) grads0 = tf.constant([0.0] * num_samples) mom_opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.1) mom_update = mom_opt.apply_gradients(zip([grads0], [var0])) tf.initialize_all_variables().run() for i in xrange(num_samples): mom_update.run(feed_dict={grads0: db_grad[i]}) self.assertAllClose(np.array(db_out[i]), var0.eval()) def testSparse(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable(tf.zeros([4, 2], dtype=dtype)) var1 = tf.Variable(tf.constant(1.0, dtype, [4, 2])) grads0 = tf.IndexedSlices(tf.constant([[.1, .1]], dtype=dtype), tf.constant([1]), tf.constant([4, 2])) grads1 = tf.IndexedSlices(tf.constant([[.01, .01], [.01, .01]], dtype=dtype), tf.constant([2, 3]), tf.constant([4, 2])) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) 
tf.initialize_all_variables().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) # Fetch params to validate initial values self.assertAllClose([0, 0], var0.eval()[0]) self.assertAllClose([0, 0], var0.eval()[1]) self.assertAllClose([1, 1], var1.eval()[2]) # Step 1: the momentum accumulators are 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([0, 0]), slot0.eval()[0]) self.assertAllCloseAccordingToType( np.array([.1, .1]), slot0.eval()[1]) self.assertAllCloseAccordingToType( np.array([.01, .01]), slot1.eval()[2]) # Check that the parameters have been updated. self.assertAllCloseAccordingToType(np.array([0, 0]), var0.eval()[0]) self.assertAllCloseAccordingToType(np.array([- (0.1 * 2.0), - (0.1 * 2.0)]), var0.eval()[1]) self.assertAllCloseAccordingToType(np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]), var1.eval()[2]) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllClose(np.array([0, 0]), slot0.eval()[0]) self.assertAllCloseAccordingToType(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()[1]) self.assertAllCloseAccordingToType(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()[2]) # Check that the parameters have been updated. 
self.assertAllClose(np.array([0, 0]), var0.eval()[0]) self.assertAllCloseAccordingToType( np.array([- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()[1]) self.assertAllCloseAccordingToType( np.array([0.98 - ((0.9 * 0.01 + 0.01) * 2.0), 0.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()[2]) def testSharing(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.test_session(): var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9) mom_update1 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) mom_update2 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) tf.initialize_all_variables().run() self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update1.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval()) self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the second momentum accumulators contain the previous update. mom_update2.run() # Check that the momentum accumulators have been updated. 
self.assertAllCloseAccordingToType( np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType( np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]), var1.eval()) if __name__ == "__main__": tf.test.main()
apache-2.0
aisk/grumpy
third_party/stdlib/re.py
7
13461
# # Secret Labs' Regular Expression Engine # # re-compatible interface for the sre matching engine # # Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. # # This version of the SRE library can be redistributed under CNRI's # Python 1.6 license. For any other use, please contact Secret Labs # AB (info@pythonware.com). # # Portions of this engine have been developed in cooperation with # CNRI. Hewlett-Packard provided funding for 1.6 integration and # other compatibility work. # r"""Support for regular expressions (RE). This module provides regular expression matching operations similar to those found in Perl. It supports both 8-bit and Unicode strings; both the pattern and the strings being processed can contain null bytes and characters outside the US ASCII range. Regular expressions can contain both special and ordinary characters. Most ordinary characters, like "A", "a", or "0", are the simplest regular expressions; they simply match themselves. You can concatenate ordinary characters, so last matches the string 'last'. The special characters are: "." Matches any character except a newline. "^" Matches the start of the string. "$" Matches the end of the string or just before the newline at the end of the string. "*" Matches 0 or more (greedy) repetitions of the preceding RE. Greedy means that it will match as many repetitions as possible. "+" Matches 1 or more (greedy) repetitions of the preceding RE. "?" Matches 0 or 1 (greedy) of the preceding RE. *?,+?,?? Non-greedy versions of the previous three special characters. {m,n} Matches from m to n repetitions of the preceding RE. {m,n}? Non-greedy version of the above. "\\" Either escapes special characters or signals a special sequence. [] Indicates a set of characters. A "^" as the first character indicates a complementing set. "|" A|B, creates an RE that will match either A or B. (...) Matches the RE inside the parentheses. The contents can be retrieved or matched later in the string. 
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below). (?:...) Non-grouping version of regular parentheses. (?P<name>...) The substring matched by the group is accessible by name. (?P=name) Matches the text matched earlier by the group named name. (?#...) A comment; ignored. (?=...) Matches if ... matches next, but doesn't consume the string. (?!...) Matches if ... doesn't match next. (?<=...) Matches if preceded by ... (must be fixed length). (?<!...) Matches if not preceded by ... (must be fixed length). (?(id/name)yes|no) Matches yes pattern if the group with id/name matched, the (optional) no pattern otherwise. The special sequences consist of "\\" and a character from the list below. If the ordinary character is not on the list, then the resulting RE will match the second character. \number Matches the contents of the group of the same number. \A Matches only at the start of the string. \Z Matches only at the end of the string. \b Matches the empty string, but only at the start or end of a word. \B Matches the empty string, but not at the start or end of a word. \d Matches any decimal digit; equivalent to the set [0-9]. \D Matches any non-digit character; equivalent to the set [^0-9]. \s Matches any whitespace character; equivalent to [ \t\n\r\f\v]. \S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v]. \w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]. With LOCALE, it will match the set [0-9_] plus characters defined as letters for the current locale. \W Matches the complement of \w. \\ Matches a literal backslash. This module exports the following functions: match Match a regular expression pattern to the beginning of a string. search Search a string for the presence of a pattern. sub Substitute occurrences of a pattern found in a string. subn Same as sub, but also return the number of substitutions made. split Split a string by the occurrences of a pattern. findall Find all occurrences of a pattern in a string. 
finditer Return an iterator yielding a match object for each match. compile Compile a pattern into a RegexObject. purge Clear the regular expression cache. escape Backslash all non-alphanumerics in a string. Some of the functions in this module takes flags as optional parameters: I IGNORECASE Perform case-insensitive matching. L LOCALE Make \w, \W, \b, \B, dependent on the current locale. M MULTILINE "^" matches the beginning of lines (after a newline) as well as the string. "$" matches the end of lines (before a newline) as well as the end of the string. S DOTALL "." matches any character at all, including the newline. X VERBOSE Ignore whitespace and comments for nicer looking RE's. U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale. This module also defines an exception 'error'. """ import sre_compile import sre_parse # try: # import _locale # except ImportError: # _locale = None _locale = None BRANCH = "branch" SUBPATTERN = "subpattern" # public symbols __all__ = [ "match", "search", "sub", "subn", "split", "findall", "compile", "purge", "template", "escape", "I", "L", "M", "S", "X", "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE", "UNICODE", "error" ] __version__ = "2.2.1" # flags I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments # sre extensions (experimental, don't rely on these) T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation # sre exception error = sre_compile.error # -------------------------------------------------------------------- # public interface def match(pattern, string, 
flags=0): """Try to apply the pattern at the start of the string, returning a match object, or None if no match was found.""" return _compile(pattern, flags).match(string) def search(pattern, string, flags=0): """Scan through string looking for a match to the pattern, returning a match object, or None if no match was found.""" return _compile(pattern, flags).search(string) def sub(pattern, repl, string, count=0, flags=0): """Return the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in string by the replacement repl. repl can be either a string or a callable; if a string, backslash escapes in it are processed. If it is a callable, it's passed the match object and must return a replacement string to be used.""" return _compile(pattern, flags).sub(repl, string, count) def subn(pattern, repl, string, count=0, flags=0): """Return a 2-tuple containing (new_string, number). new_string is the string obtained by replacing the leftmost non-overlapping occurrences of the pattern in the source string by the replacement repl. number is the number of substitutions that were made. repl can be either a string or a callable; if a string, backslash escapes in it are processed. If it is a callable, it's passed the match object and must return a replacement string to be used.""" return _compile(pattern, flags).subn(repl, string, count) def split(pattern, string, maxsplit=0, flags=0): """Split the source string by the occurrences of the pattern, returning a list containing the resulting substrings.""" return _compile(pattern, flags).split(string, maxsplit) def findall(pattern, string, flags=0): """Return a list of all non-overlapping matches in the string. If one or more groups are present in the pattern, return a list of groups; this will be a list of tuples if the pattern has more than one group. 
Empty matches are included in the result.""" return _compile(pattern, flags).findall(string) # if sys.hexversion >= 0x02020000: # __all__.append("finditer") def finditer(pattern, string, flags=0): """Return an iterator over all non-overlapping matches in the string. For each match, the iterator returns a match object. Empty matches are included in the result.""" return _compile(pattern, flags).finditer(string) def compile(pattern, flags=0): "Compile a regular expression pattern, returning a pattern object." return _compile(pattern, flags) def purge(): "Clear the regular expression cache" _cache.clear() _cache_repl.clear() def template(pattern, flags=0): "Compile a template pattern, returning a pattern object" return _compile(pattern, flags|T) _alphanum = frozenset( "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") def escape(pattern): "Escape all non-alphanumeric characters in pattern." s = list(pattern) alphanum = _alphanum for i, c in enumerate(pattern): if c not in alphanum: if c == "\000": s[i] = "\\000" else: s[i] = "\\" + c return pattern[:0].join(s) # -------------------------------------------------------------------- # internals _cache = {} _cache_repl = {} _pattern_type = type(sre_compile.compile("", 0)) _MAXCACHE = 100 def _compile(*key): # internal: compile pattern pattern, flags = key bypass_cache = flags & DEBUG if not bypass_cache: cachekey = (type(key[0]),) + key # try: # p, loc = _cache[cachekey] # if loc is None or loc == _locale.setlocale(_locale.LC_CTYPE): # return p # except KeyError: # pass if isinstance(pattern, _pattern_type): if flags: raise ValueError('Cannot process flags argument with a compiled pattern') return pattern if not sre_compile.isstring(pattern): raise TypeError, "first argument must be string or compiled pattern" try: p = sre_compile.compile(pattern, flags) except error, v: raise error, v # invalid expression if not bypass_cache: if len(_cache) >= _MAXCACHE: _cache.clear() if p.flags & LOCALE: if not _locale: 
return p # loc = _locale.setlocale(_locale.LC_CTYPE) else: loc = None _cache[cachekey] = p, loc return p def _compile_repl(*key): # internal: compile replacement pattern p = _cache_repl.get(key) if p is not None: return p repl, pattern = key try: p = sre_parse.parse_template(repl, pattern) except error, v: raise error, v # invalid expression if len(_cache_repl) >= _MAXCACHE: _cache_repl.clear() _cache_repl[key] = p return p def _expand(pattern, match, template): # internal: match.expand implementation hook template = sre_parse.parse_template(template, pattern) return sre_parse.expand_template(template, match) def _subx(pattern, template): # internal: pattern.sub/subn implementation helper template = _compile_repl(template, pattern) if not template[0] and len(template[1]) == 1: # literal replacement return template[1][0] def filter(match, template=template): return sre_parse.expand_template(template, match) return filter # register myself for pickling import copy_reg def _pickle(p): return _compile, (p.pattern, p.flags) copy_reg.pickle(_pattern_type, _pickle, _compile) # -------------------------------------------------------------------- # experimental stuff (see python-dev discussions for details) class Scanner(object): def __init__(self, lexicon, flags=0): self.lexicon = lexicon # combine phrases into a compound pattern p = [] s = sre_parse.Pattern() s.flags = flags for phrase, action in lexicon: p.append(sre_parse.SubPattern(s, [ (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))), ])) s.groups = len(p)+1 p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) self.scanner = sre_compile.compile(p) def scan(self, string): result = [] append = result.append match = self.scanner.scanner(string).match i = 0 while 1: m = match() if not m: break j = m.end() if i == j: break action = self.lexicon[m.lastindex-1][1] if hasattr(action, '__call__'): self.match = m action = action(self, m.group()) if action is not None: append(action) i = j return result, string[i:]
apache-2.0
dyn888/youtube-dl
youtube_dl/extractor/expotv.py
35
2961
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, unified_strdate, ) class ExpoTVIE(InfoExtractor): _VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])' _TEST = { 'url': 'http://www.expotv.com/videos/reviews/1/24/LinneCardscom/17561', 'md5': '2985e6d7a392b2f7a05e0ca350fe41d0', 'info_dict': { 'id': '17561', 'ext': 'mp4', 'upload_date': '20060212', 'title': 'My Favorite Online Scrapbook Store', 'view_count': int, 'description': 'You\'ll find most everything you need at this virtual store front.', 'uploader': 'Anna T.', 'thumbnail': 're:^https?://.*\.jpg$', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) player_key = self._search_regex( r'<param name="playerKey" value="([^"]+)"', webpage, 'player key') config = self._download_json( 'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key), video_id, 'Downloading video configuration') formats = [] for fcfg in config['sources']: media_url = fcfg.get('file') if not media_url: continue if fcfg.get('type') == 'm3u8': formats.extend(self._extract_m3u8_formats( media_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')) else: formats.append({ 'url': media_url, 'height': int_or_none(fcfg.get('height')), 'format_id': fcfg.get('label'), 'ext': self._search_regex( r'filename=.*\.([a-z0-9_A-Z]+)&', media_url, 'file extension', default=None) or fcfg.get('type'), }) self._sort_formats(formats) title = self._og_search_title(webpage) description = self._og_search_description(webpage) thumbnail = config.get('image') view_count = int_or_none(self._search_regex( r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts')) uploader = self._search_regex( r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader', fatal=False) upload_date = unified_strdate(self._search_regex( r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 
'upload date', fatal=False)) return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'view_count': view_count, 'thumbnail': thumbnail, 'uploader': uploader, 'upload_date': upload_date, }
unlicense
Samuel789/MediPi
MedManagementWeb/env/lib/python3.5/site-packages/idna/uts46data.py
143
184931
# This file is automatically generated by tools/build-uts46data.py # vim: set fileencoding=utf-8 : """IDNA Mapping Table from UTS46.""" def _seg_0(): return [ (0x0, '3'), (0x1, '3'), (0x2, '3'), (0x3, '3'), (0x4, '3'), (0x5, '3'), (0x6, '3'), (0x7, '3'), (0x8, '3'), (0x9, '3'), (0xA, '3'), (0xB, '3'), (0xC, '3'), (0xD, '3'), (0xE, '3'), (0xF, '3'), (0x10, '3'), (0x11, '3'), (0x12, '3'), (0x13, '3'), (0x14, '3'), (0x15, '3'), (0x16, '3'), (0x17, '3'), (0x18, '3'), (0x19, '3'), (0x1A, '3'), (0x1B, '3'), (0x1C, '3'), (0x1D, '3'), (0x1E, '3'), (0x1F, '3'), (0x20, '3'), (0x21, '3'), (0x22, '3'), (0x23, '3'), (0x24, '3'), (0x25, '3'), (0x26, '3'), (0x27, '3'), (0x28, '3'), (0x29, '3'), (0x2A, '3'), (0x2B, '3'), (0x2C, '3'), (0x2D, 'V'), (0x2E, 'V'), (0x2F, '3'), (0x30, 'V'), (0x31, 'V'), (0x32, 'V'), (0x33, 'V'), (0x34, 'V'), (0x35, 'V'), (0x36, 'V'), (0x37, 'V'), (0x38, 'V'), (0x39, 'V'), (0x3A, '3'), (0x3B, '3'), (0x3C, '3'), (0x3D, '3'), (0x3E, '3'), (0x3F, '3'), (0x40, '3'), (0x41, 'M', u'a'), (0x42, 'M', u'b'), (0x43, 'M', u'c'), (0x44, 'M', u'd'), (0x45, 'M', u'e'), (0x46, 'M', u'f'), (0x47, 'M', u'g'), (0x48, 'M', u'h'), (0x49, 'M', u'i'), (0x4A, 'M', u'j'), (0x4B, 'M', u'k'), (0x4C, 'M', u'l'), (0x4D, 'M', u'm'), (0x4E, 'M', u'n'), (0x4F, 'M', u'o'), (0x50, 'M', u'p'), (0x51, 'M', u'q'), (0x52, 'M', u'r'), (0x53, 'M', u's'), (0x54, 'M', u't'), (0x55, 'M', u'u'), (0x56, 'M', u'v'), (0x57, 'M', u'w'), (0x58, 'M', u'x'), (0x59, 'M', u'y'), (0x5A, 'M', u'z'), (0x5B, '3'), (0x5C, '3'), (0x5D, '3'), (0x5E, '3'), (0x5F, '3'), (0x60, '3'), (0x61, 'V'), (0x62, 'V'), (0x63, 'V'), ] def _seg_1(): return [ (0x64, 'V'), (0x65, 'V'), (0x66, 'V'), (0x67, 'V'), (0x68, 'V'), (0x69, 'V'), (0x6A, 'V'), (0x6B, 'V'), (0x6C, 'V'), (0x6D, 'V'), (0x6E, 'V'), (0x6F, 'V'), (0x70, 'V'), (0x71, 'V'), (0x72, 'V'), (0x73, 'V'), (0x74, 'V'), (0x75, 'V'), (0x76, 'V'), (0x77, 'V'), (0x78, 'V'), (0x79, 'V'), (0x7A, 'V'), (0x7B, '3'), (0x7C, '3'), (0x7D, '3'), (0x7E, '3'), (0x7F, '3'), (0x80, 
'X'), (0x81, 'X'), (0x82, 'X'), (0x83, 'X'), (0x84, 'X'), (0x85, 'X'), (0x86, 'X'), (0x87, 'X'), (0x88, 'X'), (0x89, 'X'), (0x8A, 'X'), (0x8B, 'X'), (0x8C, 'X'), (0x8D, 'X'), (0x8E, 'X'), (0x8F, 'X'), (0x90, 'X'), (0x91, 'X'), (0x92, 'X'), (0x93, 'X'), (0x94, 'X'), (0x95, 'X'), (0x96, 'X'), (0x97, 'X'), (0x98, 'X'), (0x99, 'X'), (0x9A, 'X'), (0x9B, 'X'), (0x9C, 'X'), (0x9D, 'X'), (0x9E, 'X'), (0x9F, 'X'), (0xA0, '3', u' '), (0xA1, 'V'), (0xA2, 'V'), (0xA3, 'V'), (0xA4, 'V'), (0xA5, 'V'), (0xA6, 'V'), (0xA7, 'V'), (0xA8, '3', u' ̈'), (0xA9, 'V'), (0xAA, 'M', u'a'), (0xAB, 'V'), (0xAC, 'V'), (0xAD, 'I'), (0xAE, 'V'), (0xAF, '3', u' ̄'), (0xB0, 'V'), (0xB1, 'V'), (0xB2, 'M', u'2'), (0xB3, 'M', u'3'), (0xB4, '3', u' ́'), (0xB5, 'M', u'μ'), (0xB6, 'V'), (0xB7, 'V'), (0xB8, '3', u' ̧'), (0xB9, 'M', u'1'), (0xBA, 'M', u'o'), (0xBB, 'V'), (0xBC, 'M', u'1⁄4'), (0xBD, 'M', u'1⁄2'), (0xBE, 'M', u'3⁄4'), (0xBF, 'V'), (0xC0, 'M', u'à'), (0xC1, 'M', u'á'), (0xC2, 'M', u'â'), (0xC3, 'M', u'ã'), (0xC4, 'M', u'ä'), (0xC5, 'M', u'å'), (0xC6, 'M', u'æ'), (0xC7, 'M', u'ç'), ] def _seg_2(): return [ (0xC8, 'M', u'è'), (0xC9, 'M', u'é'), (0xCA, 'M', u'ê'), (0xCB, 'M', u'ë'), (0xCC, 'M', u'ì'), (0xCD, 'M', u'í'), (0xCE, 'M', u'î'), (0xCF, 'M', u'ï'), (0xD0, 'M', u'ð'), (0xD1, 'M', u'ñ'), (0xD2, 'M', u'ò'), (0xD3, 'M', u'ó'), (0xD4, 'M', u'ô'), (0xD5, 'M', u'õ'), (0xD6, 'M', u'ö'), (0xD7, 'V'), (0xD8, 'M', u'ø'), (0xD9, 'M', u'ù'), (0xDA, 'M', u'ú'), (0xDB, 'M', u'û'), (0xDC, 'M', u'ü'), (0xDD, 'M', u'ý'), (0xDE, 'M', u'þ'), (0xDF, 'D', u'ss'), (0xE0, 'V'), (0xE1, 'V'), (0xE2, 'V'), (0xE3, 'V'), (0xE4, 'V'), (0xE5, 'V'), (0xE6, 'V'), (0xE7, 'V'), (0xE8, 'V'), (0xE9, 'V'), (0xEA, 'V'), (0xEB, 'V'), (0xEC, 'V'), (0xED, 'V'), (0xEE, 'V'), (0xEF, 'V'), (0xF0, 'V'), (0xF1, 'V'), (0xF2, 'V'), (0xF3, 'V'), (0xF4, 'V'), (0xF5, 'V'), (0xF6, 'V'), (0xF7, 'V'), (0xF8, 'V'), (0xF9, 'V'), (0xFA, 'V'), (0xFB, 'V'), (0xFC, 'V'), (0xFD, 'V'), (0xFE, 'V'), (0xFF, 'V'), (0x100, 'M', u'ā'), (0x101, 'V'), 
(0x102, 'M', u'ă'), (0x103, 'V'), (0x104, 'M', u'ą'), (0x105, 'V'), (0x106, 'M', u'ć'), (0x107, 'V'), (0x108, 'M', u'ĉ'), (0x109, 'V'), (0x10A, 'M', u'ċ'), (0x10B, 'V'), (0x10C, 'M', u'č'), (0x10D, 'V'), (0x10E, 'M', u'ď'), (0x10F, 'V'), (0x110, 'M', u'đ'), (0x111, 'V'), (0x112, 'M', u'ē'), (0x113, 'V'), (0x114, 'M', u'ĕ'), (0x115, 'V'), (0x116, 'M', u'ė'), (0x117, 'V'), (0x118, 'M', u'ę'), (0x119, 'V'), (0x11A, 'M', u'ě'), (0x11B, 'V'), (0x11C, 'M', u'ĝ'), (0x11D, 'V'), (0x11E, 'M', u'ğ'), (0x11F, 'V'), (0x120, 'M', u'ġ'), (0x121, 'V'), (0x122, 'M', u'ģ'), (0x123, 'V'), (0x124, 'M', u'ĥ'), (0x125, 'V'), (0x126, 'M', u'ħ'), (0x127, 'V'), (0x128, 'M', u'ĩ'), (0x129, 'V'), (0x12A, 'M', u'ī'), (0x12B, 'V'), ] def _seg_3(): return [ (0x12C, 'M', u'ĭ'), (0x12D, 'V'), (0x12E, 'M', u'į'), (0x12F, 'V'), (0x130, 'M', u'i̇'), (0x131, 'V'), (0x132, 'M', u'ij'), (0x134, 'M', u'ĵ'), (0x135, 'V'), (0x136, 'M', u'ķ'), (0x137, 'V'), (0x139, 'M', u'ĺ'), (0x13A, 'V'), (0x13B, 'M', u'ļ'), (0x13C, 'V'), (0x13D, 'M', u'ľ'), (0x13E, 'V'), (0x13F, 'M', u'l·'), (0x141, 'M', u'ł'), (0x142, 'V'), (0x143, 'M', u'ń'), (0x144, 'V'), (0x145, 'M', u'ņ'), (0x146, 'V'), (0x147, 'M', u'ň'), (0x148, 'V'), (0x149, 'M', u'ʼn'), (0x14A, 'M', u'ŋ'), (0x14B, 'V'), (0x14C, 'M', u'ō'), (0x14D, 'V'), (0x14E, 'M', u'ŏ'), (0x14F, 'V'), (0x150, 'M', u'ő'), (0x151, 'V'), (0x152, 'M', u'œ'), (0x153, 'V'), (0x154, 'M', u'ŕ'), (0x155, 'V'), (0x156, 'M', u'ŗ'), (0x157, 'V'), (0x158, 'M', u'ř'), (0x159, 'V'), (0x15A, 'M', u'ś'), (0x15B, 'V'), (0x15C, 'M', u'ŝ'), (0x15D, 'V'), (0x15E, 'M', u'ş'), (0x15F, 'V'), (0x160, 'M', u'š'), (0x161, 'V'), (0x162, 'M', u'ţ'), (0x163, 'V'), (0x164, 'M', u'ť'), (0x165, 'V'), (0x166, 'M', u'ŧ'), (0x167, 'V'), (0x168, 'M', u'ũ'), (0x169, 'V'), (0x16A, 'M', u'ū'), (0x16B, 'V'), (0x16C, 'M', u'ŭ'), (0x16D, 'V'), (0x16E, 'M', u'ů'), (0x16F, 'V'), (0x170, 'M', u'ű'), (0x171, 'V'), (0x172, 'M', u'ų'), (0x173, 'V'), (0x174, 'M', u'ŵ'), (0x175, 'V'), (0x176, 'M', u'ŷ'), (0x177, 'V'), 
(0x178, 'M', u'ÿ'), (0x179, 'M', u'ź'), (0x17A, 'V'), (0x17B, 'M', u'ż'), (0x17C, 'V'), (0x17D, 'M', u'ž'), (0x17E, 'V'), (0x17F, 'M', u's'), (0x180, 'V'), (0x181, 'M', u'ɓ'), (0x182, 'M', u'ƃ'), (0x183, 'V'), (0x184, 'M', u'ƅ'), (0x185, 'V'), (0x186, 'M', u'ɔ'), (0x187, 'M', u'ƈ'), (0x188, 'V'), (0x189, 'M', u'ɖ'), (0x18A, 'M', u'ɗ'), (0x18B, 'M', u'ƌ'), (0x18C, 'V'), (0x18E, 'M', u'ǝ'), (0x18F, 'M', u'ə'), (0x190, 'M', u'ɛ'), (0x191, 'M', u'ƒ'), (0x192, 'V'), (0x193, 'M', u'ɠ'), ] def _seg_4(): return [ (0x194, 'M', u'ɣ'), (0x195, 'V'), (0x196, 'M', u'ɩ'), (0x197, 'M', u'ɨ'), (0x198, 'M', u'ƙ'), (0x199, 'V'), (0x19C, 'M', u'ɯ'), (0x19D, 'M', u'ɲ'), (0x19E, 'V'), (0x19F, 'M', u'ɵ'), (0x1A0, 'M', u'ơ'), (0x1A1, 'V'), (0x1A2, 'M', u'ƣ'), (0x1A3, 'V'), (0x1A4, 'M', u'ƥ'), (0x1A5, 'V'), (0x1A6, 'M', u'ʀ'), (0x1A7, 'M', u'ƨ'), (0x1A8, 'V'), (0x1A9, 'M', u'ʃ'), (0x1AA, 'V'), (0x1AC, 'M', u'ƭ'), (0x1AD, 'V'), (0x1AE, 'M', u'ʈ'), (0x1AF, 'M', u'ư'), (0x1B0, 'V'), (0x1B1, 'M', u'ʊ'), (0x1B2, 'M', u'ʋ'), (0x1B3, 'M', u'ƴ'), (0x1B4, 'V'), (0x1B5, 'M', u'ƶ'), (0x1B6, 'V'), (0x1B7, 'M', u'ʒ'), (0x1B8, 'M', u'ƹ'), (0x1B9, 'V'), (0x1BC, 'M', u'ƽ'), (0x1BD, 'V'), (0x1C4, 'M', u'dž'), (0x1C7, 'M', u'lj'), (0x1CA, 'M', u'nj'), (0x1CD, 'M', u'ǎ'), (0x1CE, 'V'), (0x1CF, 'M', u'ǐ'), (0x1D0, 'V'), (0x1D1, 'M', u'ǒ'), (0x1D2, 'V'), (0x1D3, 'M', u'ǔ'), (0x1D4, 'V'), (0x1D5, 'M', u'ǖ'), (0x1D6, 'V'), (0x1D7, 'M', u'ǘ'), (0x1D8, 'V'), (0x1D9, 'M', u'ǚ'), (0x1DA, 'V'), (0x1DB, 'M', u'ǜ'), (0x1DC, 'V'), (0x1DE, 'M', u'ǟ'), (0x1DF, 'V'), (0x1E0, 'M', u'ǡ'), (0x1E1, 'V'), (0x1E2, 'M', u'ǣ'), (0x1E3, 'V'), (0x1E4, 'M', u'ǥ'), (0x1E5, 'V'), (0x1E6, 'M', u'ǧ'), (0x1E7, 'V'), (0x1E8, 'M', u'ǩ'), (0x1E9, 'V'), (0x1EA, 'M', u'ǫ'), (0x1EB, 'V'), (0x1EC, 'M', u'ǭ'), (0x1ED, 'V'), (0x1EE, 'M', u'ǯ'), (0x1EF, 'V'), (0x1F1, 'M', u'dz'), (0x1F4, 'M', u'ǵ'), (0x1F5, 'V'), (0x1F6, 'M', u'ƕ'), (0x1F7, 'M', u'ƿ'), (0x1F8, 'M', u'ǹ'), (0x1F9, 'V'), (0x1FA, 'M', u'ǻ'), (0x1FB, 'V'), (0x1FC, 'M', u'ǽ'), (0x1FD, 
'V'), (0x1FE, 'M', u'ǿ'), (0x1FF, 'V'), (0x200, 'M', u'ȁ'), (0x201, 'V'), (0x202, 'M', u'ȃ'), (0x203, 'V'), (0x204, 'M', u'ȅ'), (0x205, 'V'), (0x206, 'M', u'ȇ'), (0x207, 'V'), (0x208, 'M', u'ȉ'), (0x209, 'V'), (0x20A, 'M', u'ȋ'), (0x20B, 'V'), (0x20C, 'M', u'ȍ'), ] def _seg_5(): return [ (0x20D, 'V'), (0x20E, 'M', u'ȏ'), (0x20F, 'V'), (0x210, 'M', u'ȑ'), (0x211, 'V'), (0x212, 'M', u'ȓ'), (0x213, 'V'), (0x214, 'M', u'ȕ'), (0x215, 'V'), (0x216, 'M', u'ȗ'), (0x217, 'V'), (0x218, 'M', u'ș'), (0x219, 'V'), (0x21A, 'M', u'ț'), (0x21B, 'V'), (0x21C, 'M', u'ȝ'), (0x21D, 'V'), (0x21E, 'M', u'ȟ'), (0x21F, 'V'), (0x220, 'M', u'ƞ'), (0x221, 'V'), (0x222, 'M', u'ȣ'), (0x223, 'V'), (0x224, 'M', u'ȥ'), (0x225, 'V'), (0x226, 'M', u'ȧ'), (0x227, 'V'), (0x228, 'M', u'ȩ'), (0x229, 'V'), (0x22A, 'M', u'ȫ'), (0x22B, 'V'), (0x22C, 'M', u'ȭ'), (0x22D, 'V'), (0x22E, 'M', u'ȯ'), (0x22F, 'V'), (0x230, 'M', u'ȱ'), (0x231, 'V'), (0x232, 'M', u'ȳ'), (0x233, 'V'), (0x23A, 'M', u'ⱥ'), (0x23B, 'M', u'ȼ'), (0x23C, 'V'), (0x23D, 'M', u'ƚ'), (0x23E, 'M', u'ⱦ'), (0x23F, 'V'), (0x241, 'M', u'ɂ'), (0x242, 'V'), (0x243, 'M', u'ƀ'), (0x244, 'M', u'ʉ'), (0x245, 'M', u'ʌ'), (0x246, 'M', u'ɇ'), (0x247, 'V'), (0x248, 'M', u'ɉ'), (0x249, 'V'), (0x24A, 'M', u'ɋ'), (0x24B, 'V'), (0x24C, 'M', u'ɍ'), (0x24D, 'V'), (0x24E, 'M', u'ɏ'), (0x24F, 'V'), (0x2B0, 'M', u'h'), (0x2B1, 'M', u'ɦ'), (0x2B2, 'M', u'j'), (0x2B3, 'M', u'r'), (0x2B4, 'M', u'ɹ'), (0x2B5, 'M', u'ɻ'), (0x2B6, 'M', u'ʁ'), (0x2B7, 'M', u'w'), (0x2B8, 'M', u'y'), (0x2B9, 'V'), (0x2D8, '3', u' ̆'), (0x2D9, '3', u' ̇'), (0x2DA, '3', u' ̊'), (0x2DB, '3', u' ̨'), (0x2DC, '3', u' ̃'), (0x2DD, '3', u' ̋'), (0x2DE, 'V'), (0x2E0, 'M', u'ɣ'), (0x2E1, 'M', u'l'), (0x2E2, 'M', u's'), (0x2E3, 'M', u'x'), (0x2E4, 'M', u'ʕ'), (0x2E5, 'V'), (0x340, 'M', u'̀'), (0x341, 'M', u'́'), (0x342, 'V'), (0x343, 'M', u'̓'), (0x344, 'M', u'̈́'), (0x345, 'M', u'ι'), (0x346, 'V'), (0x34F, 'I'), (0x350, 'V'), (0x370, 'M', u'ͱ'), (0x371, 'V'), (0x372, 'M', u'ͳ'), (0x373, 'V'), 
(0x374, 'M', u'ʹ'), (0x375, 'V'), (0x376, 'M', u'ͷ'), (0x377, 'V'), ] def _seg_6(): return [ (0x378, 'X'), (0x37A, '3', u' ι'), (0x37B, 'V'), (0x37E, '3', u';'), (0x37F, 'X'), (0x384, '3', u' ́'), (0x385, '3', u' ̈́'), (0x386, 'M', u'ά'), (0x387, 'M', u'·'), (0x388, 'M', u'έ'), (0x389, 'M', u'ή'), (0x38A, 'M', u'ί'), (0x38B, 'X'), (0x38C, 'M', u'ό'), (0x38D, 'X'), (0x38E, 'M', u'ύ'), (0x38F, 'M', u'ώ'), (0x390, 'V'), (0x391, 'M', u'α'), (0x392, 'M', u'β'), (0x393, 'M', u'γ'), (0x394, 'M', u'δ'), (0x395, 'M', u'ε'), (0x396, 'M', u'ζ'), (0x397, 'M', u'η'), (0x398, 'M', u'θ'), (0x399, 'M', u'ι'), (0x39A, 'M', u'κ'), (0x39B, 'M', u'λ'), (0x39C, 'M', u'μ'), (0x39D, 'M', u'ν'), (0x39E, 'M', u'ξ'), (0x39F, 'M', u'ο'), (0x3A0, 'M', u'π'), (0x3A1, 'M', u'ρ'), (0x3A2, 'X'), (0x3A3, 'M', u'σ'), (0x3A4, 'M', u'τ'), (0x3A5, 'M', u'υ'), (0x3A6, 'M', u'φ'), (0x3A7, 'M', u'χ'), (0x3A8, 'M', u'ψ'), (0x3A9, 'M', u'ω'), (0x3AA, 'M', u'ϊ'), (0x3AB, 'M', u'ϋ'), (0x3AC, 'V'), (0x3C2, 'D', u'σ'), (0x3C3, 'V'), (0x3CF, 'M', u'ϗ'), (0x3D0, 'M', u'β'), (0x3D1, 'M', u'θ'), (0x3D2, 'M', u'υ'), (0x3D3, 'M', u'ύ'), (0x3D4, 'M', u'ϋ'), (0x3D5, 'M', u'φ'), (0x3D6, 'M', u'π'), (0x3D7, 'V'), (0x3D8, 'M', u'ϙ'), (0x3D9, 'V'), (0x3DA, 'M', u'ϛ'), (0x3DB, 'V'), (0x3DC, 'M', u'ϝ'), (0x3DD, 'V'), (0x3DE, 'M', u'ϟ'), (0x3DF, 'V'), (0x3E0, 'M', u'ϡ'), (0x3E1, 'V'), (0x3E2, 'M', u'ϣ'), (0x3E3, 'V'), (0x3E4, 'M', u'ϥ'), (0x3E5, 'V'), (0x3E6, 'M', u'ϧ'), (0x3E7, 'V'), (0x3E8, 'M', u'ϩ'), (0x3E9, 'V'), (0x3EA, 'M', u'ϫ'), (0x3EB, 'V'), (0x3EC, 'M', u'ϭ'), (0x3ED, 'V'), (0x3EE, 'M', u'ϯ'), (0x3EF, 'V'), (0x3F0, 'M', u'κ'), (0x3F1, 'M', u'ρ'), (0x3F2, 'M', u'σ'), (0x3F3, 'V'), (0x3F4, 'M', u'θ'), (0x3F5, 'M', u'ε'), (0x3F6, 'V'), (0x3F7, 'M', u'ϸ'), (0x3F8, 'V'), (0x3F9, 'M', u'σ'), (0x3FA, 'M', u'ϻ'), (0x3FB, 'V'), (0x3FD, 'M', u'ͻ'), (0x3FE, 'M', u'ͼ'), (0x3FF, 'M', u'ͽ'), (0x400, 'M', u'ѐ'), (0x401, 'M', u'ё'), (0x402, 'M', u'ђ'), (0x403, 'M', u'ѓ'), ] def _seg_7(): return [ (0x404, 'M', u'є'), (0x405, 'M', 
u'ѕ'), (0x406, 'M', u'і'), (0x407, 'M', u'ї'), (0x408, 'M', u'ј'), (0x409, 'M', u'љ'), (0x40A, 'M', u'њ'), (0x40B, 'M', u'ћ'), (0x40C, 'M', u'ќ'), (0x40D, 'M', u'ѝ'), (0x40E, 'M', u'ў'), (0x40F, 'M', u'џ'), (0x410, 'M', u'а'), (0x411, 'M', u'б'), (0x412, 'M', u'в'), (0x413, 'M', u'г'), (0x414, 'M', u'д'), (0x415, 'M', u'е'), (0x416, 'M', u'ж'), (0x417, 'M', u'з'), (0x418, 'M', u'и'), (0x419, 'M', u'й'), (0x41A, 'M', u'к'), (0x41B, 'M', u'л'), (0x41C, 'M', u'м'), (0x41D, 'M', u'н'), (0x41E, 'M', u'о'), (0x41F, 'M', u'п'), (0x420, 'M', u'р'), (0x421, 'M', u'с'), (0x422, 'M', u'т'), (0x423, 'M', u'у'), (0x424, 'M', u'ф'), (0x425, 'M', u'х'), (0x426, 'M', u'ц'), (0x427, 'M', u'ч'), (0x428, 'M', u'ш'), (0x429, 'M', u'щ'), (0x42A, 'M', u'ъ'), (0x42B, 'M', u'ы'), (0x42C, 'M', u'ь'), (0x42D, 'M', u'э'), (0x42E, 'M', u'ю'), (0x42F, 'M', u'я'), (0x430, 'V'), (0x460, 'M', u'ѡ'), (0x461, 'V'), (0x462, 'M', u'ѣ'), (0x463, 'V'), (0x464, 'M', u'ѥ'), (0x465, 'V'), (0x466, 'M', u'ѧ'), (0x467, 'V'), (0x468, 'M', u'ѩ'), (0x469, 'V'), (0x46A, 'M', u'ѫ'), (0x46B, 'V'), (0x46C, 'M', u'ѭ'), (0x46D, 'V'), (0x46E, 'M', u'ѯ'), (0x46F, 'V'), (0x470, 'M', u'ѱ'), (0x471, 'V'), (0x472, 'M', u'ѳ'), (0x473, 'V'), (0x474, 'M', u'ѵ'), (0x475, 'V'), (0x476, 'M', u'ѷ'), (0x477, 'V'), (0x478, 'M', u'ѹ'), (0x479, 'V'), (0x47A, 'M', u'ѻ'), (0x47B, 'V'), (0x47C, 'M', u'ѽ'), (0x47D, 'V'), (0x47E, 'M', u'ѿ'), (0x47F, 'V'), (0x480, 'M', u'ҁ'), (0x481, 'V'), (0x48A, 'M', u'ҋ'), (0x48B, 'V'), (0x48C, 'M', u'ҍ'), (0x48D, 'V'), (0x48E, 'M', u'ҏ'), (0x48F, 'V'), (0x490, 'M', u'ґ'), (0x491, 'V'), (0x492, 'M', u'ғ'), (0x493, 'V'), (0x494, 'M', u'ҕ'), (0x495, 'V'), (0x496, 'M', u'җ'), (0x497, 'V'), (0x498, 'M', u'ҙ'), (0x499, 'V'), (0x49A, 'M', u'қ'), (0x49B, 'V'), (0x49C, 'M', u'ҝ'), (0x49D, 'V'), (0x49E, 'M', u'ҟ'), ] def _seg_8(): return [ (0x49F, 'V'), (0x4A0, 'M', u'ҡ'), (0x4A1, 'V'), (0x4A2, 'M', u'ң'), (0x4A3, 'V'), (0x4A4, 'M', u'ҥ'), (0x4A5, 'V'), (0x4A6, 'M', u'ҧ'), (0x4A7, 'V'), (0x4A8, 'M', u'ҩ'), 
(0x4A9, 'V'), (0x4AA, 'M', u'ҫ'), (0x4AB, 'V'), (0x4AC, 'M', u'ҭ'), (0x4AD, 'V'), (0x4AE, 'M', u'ү'), (0x4AF, 'V'), (0x4B0, 'M', u'ұ'), (0x4B1, 'V'), (0x4B2, 'M', u'ҳ'), (0x4B3, 'V'), (0x4B4, 'M', u'ҵ'), (0x4B5, 'V'), (0x4B6, 'M', u'ҷ'), (0x4B7, 'V'), (0x4B8, 'M', u'ҹ'), (0x4B9, 'V'), (0x4BA, 'M', u'һ'), (0x4BB, 'V'), (0x4BC, 'M', u'ҽ'), (0x4BD, 'V'), (0x4BE, 'M', u'ҿ'), (0x4BF, 'V'), (0x4C0, 'X'), (0x4C1, 'M', u'ӂ'), (0x4C2, 'V'), (0x4C3, 'M', u'ӄ'), (0x4C4, 'V'), (0x4C5, 'M', u'ӆ'), (0x4C6, 'V'), (0x4C7, 'M', u'ӈ'), (0x4C8, 'V'), (0x4C9, 'M', u'ӊ'), (0x4CA, 'V'), (0x4CB, 'M', u'ӌ'), (0x4CC, 'V'), (0x4CD, 'M', u'ӎ'), (0x4CE, 'V'), (0x4D0, 'M', u'ӑ'), (0x4D1, 'V'), (0x4D2, 'M', u'ӓ'), (0x4D3, 'V'), (0x4D4, 'M', u'ӕ'), (0x4D5, 'V'), (0x4D6, 'M', u'ӗ'), (0x4D7, 'V'), (0x4D8, 'M', u'ә'), (0x4D9, 'V'), (0x4DA, 'M', u'ӛ'), (0x4DB, 'V'), (0x4DC, 'M', u'ӝ'), (0x4DD, 'V'), (0x4DE, 'M', u'ӟ'), (0x4DF, 'V'), (0x4E0, 'M', u'ӡ'), (0x4E1, 'V'), (0x4E2, 'M', u'ӣ'), (0x4E3, 'V'), (0x4E4, 'M', u'ӥ'), (0x4E5, 'V'), (0x4E6, 'M', u'ӧ'), (0x4E7, 'V'), (0x4E8, 'M', u'ө'), (0x4E9, 'V'), (0x4EA, 'M', u'ӫ'), (0x4EB, 'V'), (0x4EC, 'M', u'ӭ'), (0x4ED, 'V'), (0x4EE, 'M', u'ӯ'), (0x4EF, 'V'), (0x4F0, 'M', u'ӱ'), (0x4F1, 'V'), (0x4F2, 'M', u'ӳ'), (0x4F3, 'V'), (0x4F4, 'M', u'ӵ'), (0x4F5, 'V'), (0x4F6, 'M', u'ӷ'), (0x4F7, 'V'), (0x4F8, 'M', u'ӹ'), (0x4F9, 'V'), (0x4FA, 'M', u'ӻ'), (0x4FB, 'V'), (0x4FC, 'M', u'ӽ'), (0x4FD, 'V'), (0x4FE, 'M', u'ӿ'), (0x4FF, 'V'), (0x500, 'M', u'ԁ'), (0x501, 'V'), (0x502, 'M', u'ԃ'), (0x503, 'V'), ] def _seg_9(): return [ (0x504, 'M', u'ԅ'), (0x505, 'V'), (0x506, 'M', u'ԇ'), (0x507, 'V'), (0x508, 'M', u'ԉ'), (0x509, 'V'), (0x50A, 'M', u'ԋ'), (0x50B, 'V'), (0x50C, 'M', u'ԍ'), (0x50D, 'V'), (0x50E, 'M', u'ԏ'), (0x50F, 'V'), (0x510, 'M', u'ԑ'), (0x511, 'V'), (0x512, 'M', u'ԓ'), (0x513, 'V'), (0x514, 'M', u'ԕ'), (0x515, 'V'), (0x516, 'M', u'ԗ'), (0x517, 'V'), (0x518, 'M', u'ԙ'), (0x519, 'V'), (0x51A, 'M', u'ԛ'), (0x51B, 'V'), (0x51C, 'M', u'ԝ'), (0x51D, 'V'), (0x51E, 
'M', u'ԟ'), (0x51F, 'V'), (0x520, 'M', u'ԡ'), (0x521, 'V'), (0x522, 'M', u'ԣ'), (0x523, 'V'), (0x524, 'M', u'ԥ'), (0x525, 'V'), (0x526, 'M', u'ԧ'), (0x527, 'V'), (0x528, 'X'), (0x531, 'M', u'ա'), (0x532, 'M', u'բ'), (0x533, 'M', u'գ'), (0x534, 'M', u'դ'), (0x535, 'M', u'ե'), (0x536, 'M', u'զ'), (0x537, 'M', u'է'), (0x538, 'M', u'ը'), (0x539, 'M', u'թ'), (0x53A, 'M', u'ժ'), (0x53B, 'M', u'ի'), (0x53C, 'M', u'լ'), (0x53D, 'M', u'խ'), (0x53E, 'M', u'ծ'), (0x53F, 'M', u'կ'), (0x540, 'M', u'հ'), (0x541, 'M', u'ձ'), (0x542, 'M', u'ղ'), (0x543, 'M', u'ճ'), (0x544, 'M', u'մ'), (0x545, 'M', u'յ'), (0x546, 'M', u'ն'), (0x547, 'M', u'շ'), (0x548, 'M', u'ո'), (0x549, 'M', u'չ'), (0x54A, 'M', u'պ'), (0x54B, 'M', u'ջ'), (0x54C, 'M', u'ռ'), (0x54D, 'M', u'ս'), (0x54E, 'M', u'վ'), (0x54F, 'M', u'տ'), (0x550, 'M', u'ր'), (0x551, 'M', u'ց'), (0x552, 'M', u'ւ'), (0x553, 'M', u'փ'), (0x554, 'M', u'ք'), (0x555, 'M', u'օ'), (0x556, 'M', u'ֆ'), (0x557, 'X'), (0x559, 'V'), (0x560, 'X'), (0x561, 'V'), (0x587, 'M', u'եւ'), (0x588, 'X'), (0x589, 'V'), (0x58B, 'X'), (0x58F, 'V'), (0x590, 'X'), (0x591, 'V'), (0x5C8, 'X'), (0x5D0, 'V'), (0x5EB, 'X'), (0x5F0, 'V'), (0x5F5, 'X'), (0x606, 'V'), (0x61C, 'X'), (0x61E, 'V'), (0x675, 'M', u'اٴ'), (0x676, 'M', u'وٴ'), (0x677, 'M', u'ۇٴ'), (0x678, 'M', u'يٴ'), (0x679, 'V'), (0x6DD, 'X'), ] def _seg_10(): return [ (0x6DE, 'V'), (0x70E, 'X'), (0x710, 'V'), (0x74B, 'X'), (0x74D, 'V'), (0x7B2, 'X'), (0x7C0, 'V'), (0x7FB, 'X'), (0x800, 'V'), (0x82E, 'X'), (0x830, 'V'), (0x83F, 'X'), (0x840, 'V'), (0x85C, 'X'), (0x85E, 'V'), (0x85F, 'X'), (0x8A0, 'V'), (0x8A1, 'X'), (0x8A2, 'V'), (0x8AD, 'X'), (0x8E4, 'V'), (0x8FF, 'X'), (0x900, 'V'), (0x958, 'M', u'क़'), (0x959, 'M', u'ख़'), (0x95A, 'M', u'ग़'), (0x95B, 'M', u'ज़'), (0x95C, 'M', u'ड़'), (0x95D, 'M', u'ढ़'), (0x95E, 'M', u'फ़'), (0x95F, 'M', u'य़'), (0x960, 'V'), (0x978, 'X'), (0x979, 'V'), (0x980, 'X'), (0x981, 'V'), (0x984, 'X'), (0x985, 'V'), (0x98D, 'X'), (0x98F, 'V'), (0x991, 'X'), (0x993, 'V'), (0x9A9, 
'X'), (0x9AA, 'V'), (0x9B1, 'X'), (0x9B2, 'V'), (0x9B3, 'X'), (0x9B6, 'V'), (0x9BA, 'X'), (0x9BC, 'V'), (0x9C5, 'X'), (0x9C7, 'V'), (0x9C9, 'X'), (0x9CB, 'V'), (0x9CF, 'X'), (0x9D7, 'V'), (0x9D8, 'X'), (0x9DC, 'M', u'ড়'), (0x9DD, 'M', u'ঢ়'), (0x9DE, 'X'), (0x9DF, 'M', u'য়'), (0x9E0, 'V'), (0x9E4, 'X'), (0x9E6, 'V'), (0x9FC, 'X'), (0xA01, 'V'), (0xA04, 'X'), (0xA05, 'V'), (0xA0B, 'X'), (0xA0F, 'V'), (0xA11, 'X'), (0xA13, 'V'), (0xA29, 'X'), (0xA2A, 'V'), (0xA31, 'X'), (0xA32, 'V'), (0xA33, 'M', u'ਲ਼'), (0xA34, 'X'), (0xA35, 'V'), (0xA36, 'M', u'ਸ਼'), (0xA37, 'X'), (0xA38, 'V'), (0xA3A, 'X'), (0xA3C, 'V'), (0xA3D, 'X'), (0xA3E, 'V'), (0xA43, 'X'), (0xA47, 'V'), (0xA49, 'X'), (0xA4B, 'V'), (0xA4E, 'X'), (0xA51, 'V'), (0xA52, 'X'), (0xA59, 'M', u'ਖ਼'), (0xA5A, 'M', u'ਗ਼'), (0xA5B, 'M', u'ਜ਼'), (0xA5C, 'V'), (0xA5D, 'X'), (0xA5E, 'M', u'ਫ਼'), (0xA5F, 'X'), ] def _seg_11(): return [ (0xA66, 'V'), (0xA76, 'X'), (0xA81, 'V'), (0xA84, 'X'), (0xA85, 'V'), (0xA8E, 'X'), (0xA8F, 'V'), (0xA92, 'X'), (0xA93, 'V'), (0xAA9, 'X'), (0xAAA, 'V'), (0xAB1, 'X'), (0xAB2, 'V'), (0xAB4, 'X'), (0xAB5, 'V'), (0xABA, 'X'), (0xABC, 'V'), (0xAC6, 'X'), (0xAC7, 'V'), (0xACA, 'X'), (0xACB, 'V'), (0xACE, 'X'), (0xAD0, 'V'), (0xAD1, 'X'), (0xAE0, 'V'), (0xAE4, 'X'), (0xAE6, 'V'), (0xAF2, 'X'), (0xB01, 'V'), (0xB04, 'X'), (0xB05, 'V'), (0xB0D, 'X'), (0xB0F, 'V'), (0xB11, 'X'), (0xB13, 'V'), (0xB29, 'X'), (0xB2A, 'V'), (0xB31, 'X'), (0xB32, 'V'), (0xB34, 'X'), (0xB35, 'V'), (0xB3A, 'X'), (0xB3C, 'V'), (0xB45, 'X'), (0xB47, 'V'), (0xB49, 'X'), (0xB4B, 'V'), (0xB4E, 'X'), (0xB56, 'V'), (0xB58, 'X'), (0xB5C, 'M', u'ଡ଼'), (0xB5D, 'M', u'ଢ଼'), (0xB5E, 'X'), (0xB5F, 'V'), (0xB64, 'X'), (0xB66, 'V'), (0xB78, 'X'), (0xB82, 'V'), (0xB84, 'X'), (0xB85, 'V'), (0xB8B, 'X'), (0xB8E, 'V'), (0xB91, 'X'), (0xB92, 'V'), (0xB96, 'X'), (0xB99, 'V'), (0xB9B, 'X'), (0xB9C, 'V'), (0xB9D, 'X'), (0xB9E, 'V'), (0xBA0, 'X'), (0xBA3, 'V'), (0xBA5, 'X'), (0xBA8, 'V'), (0xBAB, 'X'), (0xBAE, 'V'), (0xBBA, 'X'), (0xBBE, 'V'), 
(0xBC3, 'X'), (0xBC6, 'V'), (0xBC9, 'X'), (0xBCA, 'V'), (0xBCE, 'X'), (0xBD0, 'V'), (0xBD1, 'X'), (0xBD7, 'V'), (0xBD8, 'X'), (0xBE6, 'V'), (0xBFB, 'X'), (0xC01, 'V'), (0xC04, 'X'), (0xC05, 'V'), (0xC0D, 'X'), (0xC0E, 'V'), (0xC11, 'X'), (0xC12, 'V'), (0xC29, 'X'), (0xC2A, 'V'), (0xC34, 'X'), (0xC35, 'V'), ] def _seg_12(): return [ (0xC3A, 'X'), (0xC3D, 'V'), (0xC45, 'X'), (0xC46, 'V'), (0xC49, 'X'), (0xC4A, 'V'), (0xC4E, 'X'), (0xC55, 'V'), (0xC57, 'X'), (0xC58, 'V'), (0xC5A, 'X'), (0xC60, 'V'), (0xC64, 'X'), (0xC66, 'V'), (0xC70, 'X'), (0xC78, 'V'), (0xC80, 'X'), (0xC82, 'V'), (0xC84, 'X'), (0xC85, 'V'), (0xC8D, 'X'), (0xC8E, 'V'), (0xC91, 'X'), (0xC92, 'V'), (0xCA9, 'X'), (0xCAA, 'V'), (0xCB4, 'X'), (0xCB5, 'V'), (0xCBA, 'X'), (0xCBC, 'V'), (0xCC5, 'X'), (0xCC6, 'V'), (0xCC9, 'X'), (0xCCA, 'V'), (0xCCE, 'X'), (0xCD5, 'V'), (0xCD7, 'X'), (0xCDE, 'V'), (0xCDF, 'X'), (0xCE0, 'V'), (0xCE4, 'X'), (0xCE6, 'V'), (0xCF0, 'X'), (0xCF1, 'V'), (0xCF3, 'X'), (0xD02, 'V'), (0xD04, 'X'), (0xD05, 'V'), (0xD0D, 'X'), (0xD0E, 'V'), (0xD11, 'X'), (0xD12, 'V'), (0xD3B, 'X'), (0xD3D, 'V'), (0xD45, 'X'), (0xD46, 'V'), (0xD49, 'X'), (0xD4A, 'V'), (0xD4F, 'X'), (0xD57, 'V'), (0xD58, 'X'), (0xD60, 'V'), (0xD64, 'X'), (0xD66, 'V'), (0xD76, 'X'), (0xD79, 'V'), (0xD80, 'X'), (0xD82, 'V'), (0xD84, 'X'), (0xD85, 'V'), (0xD97, 'X'), (0xD9A, 'V'), (0xDB2, 'X'), (0xDB3, 'V'), (0xDBC, 'X'), (0xDBD, 'V'), (0xDBE, 'X'), (0xDC0, 'V'), (0xDC7, 'X'), (0xDCA, 'V'), (0xDCB, 'X'), (0xDCF, 'V'), (0xDD5, 'X'), (0xDD6, 'V'), (0xDD7, 'X'), (0xDD8, 'V'), (0xDE0, 'X'), (0xDF2, 'V'), (0xDF5, 'X'), (0xE01, 'V'), (0xE33, 'M', u'ํา'), (0xE34, 'V'), (0xE3B, 'X'), (0xE3F, 'V'), (0xE5C, 'X'), (0xE81, 'V'), (0xE83, 'X'), (0xE84, 'V'), (0xE85, 'X'), (0xE87, 'V'), ] def _seg_13(): return [ (0xE89, 'X'), (0xE8A, 'V'), (0xE8B, 'X'), (0xE8D, 'V'), (0xE8E, 'X'), (0xE94, 'V'), (0xE98, 'X'), (0xE99, 'V'), (0xEA0, 'X'), (0xEA1, 'V'), (0xEA4, 'X'), (0xEA5, 'V'), (0xEA6, 'X'), (0xEA7, 'V'), (0xEA8, 'X'), (0xEAA, 'V'), (0xEAC, 
'X'), (0xEAD, 'V'), (0xEB3, 'M', u'ໍາ'), (0xEB4, 'V'), (0xEBA, 'X'), (0xEBB, 'V'), (0xEBE, 'X'), (0xEC0, 'V'), (0xEC5, 'X'), (0xEC6, 'V'), (0xEC7, 'X'), (0xEC8, 'V'), (0xECE, 'X'), (0xED0, 'V'), (0xEDA, 'X'), (0xEDC, 'M', u'ຫນ'), (0xEDD, 'M', u'ຫມ'), (0xEDE, 'V'), (0xEE0, 'X'), (0xF00, 'V'), (0xF0C, 'M', u'་'), (0xF0D, 'V'), (0xF43, 'M', u'གྷ'), (0xF44, 'V'), (0xF48, 'X'), (0xF49, 'V'), (0xF4D, 'M', u'ཌྷ'), (0xF4E, 'V'), (0xF52, 'M', u'དྷ'), (0xF53, 'V'), (0xF57, 'M', u'བྷ'), (0xF58, 'V'), (0xF5C, 'M', u'ཛྷ'), (0xF5D, 'V'), (0xF69, 'M', u'ཀྵ'), (0xF6A, 'V'), (0xF6D, 'X'), (0xF71, 'V'), (0xF73, 'M', u'ཱི'), (0xF74, 'V'), (0xF75, 'M', u'ཱུ'), (0xF76, 'M', u'ྲྀ'), (0xF77, 'M', u'ྲཱྀ'), (0xF78, 'M', u'ླྀ'), (0xF79, 'M', u'ླཱྀ'), (0xF7A, 'V'), (0xF81, 'M', u'ཱྀ'), (0xF82, 'V'), (0xF93, 'M', u'ྒྷ'), (0xF94, 'V'), (0xF98, 'X'), (0xF99, 'V'), (0xF9D, 'M', u'ྜྷ'), (0xF9E, 'V'), (0xFA2, 'M', u'ྡྷ'), (0xFA3, 'V'), (0xFA7, 'M', u'ྦྷ'), (0xFA8, 'V'), (0xFAC, 'M', u'ྫྷ'), (0xFAD, 'V'), (0xFB9, 'M', u'ྐྵ'), (0xFBA, 'V'), (0xFBD, 'X'), (0xFBE, 'V'), (0xFCD, 'X'), (0xFCE, 'V'), (0xFDB, 'X'), (0x1000, 'V'), (0x10A0, 'X'), (0x10C7, 'M', u'ⴧ'), (0x10C8, 'X'), (0x10CD, 'M', u'ⴭ'), (0x10CE, 'X'), (0x10D0, 'V'), (0x10FC, 'M', u'ნ'), (0x10FD, 'V'), (0x115F, 'X'), (0x1161, 'V'), (0x1249, 'X'), (0x124A, 'V'), (0x124E, 'X'), (0x1250, 'V'), (0x1257, 'X'), (0x1258, 'V'), ] def _seg_14(): return [ (0x1259, 'X'), (0x125A, 'V'), (0x125E, 'X'), (0x1260, 'V'), (0x1289, 'X'), (0x128A, 'V'), (0x128E, 'X'), (0x1290, 'V'), (0x12B1, 'X'), (0x12B2, 'V'), (0x12B6, 'X'), (0x12B8, 'V'), (0x12BF, 'X'), (0x12C0, 'V'), (0x12C1, 'X'), (0x12C2, 'V'), (0x12C6, 'X'), (0x12C8, 'V'), (0x12D7, 'X'), (0x12D8, 'V'), (0x1311, 'X'), (0x1312, 'V'), (0x1316, 'X'), (0x1318, 'V'), (0x135B, 'X'), (0x135D, 'V'), (0x137D, 'X'), (0x1380, 'V'), (0x139A, 'X'), (0x13A0, 'V'), (0x13F5, 'X'), (0x1400, 'V'), (0x1680, 'X'), (0x1681, 'V'), (0x169D, 'X'), (0x16A0, 'V'), (0x16F1, 'X'), (0x1700, 'V'), (0x170D, 'X'), (0x170E, 'V'), (0x1715, 
'X'), (0x1720, 'V'), (0x1737, 'X'), (0x1740, 'V'), (0x1754, 'X'), (0x1760, 'V'), (0x176D, 'X'), (0x176E, 'V'), (0x1771, 'X'), (0x1772, 'V'), (0x1774, 'X'), (0x1780, 'V'), (0x17B4, 'X'), (0x17B6, 'V'), (0x17DE, 'X'), (0x17E0, 'V'), (0x17EA, 'X'), (0x17F0, 'V'), (0x17FA, 'X'), (0x1800, 'V'), (0x1806, 'X'), (0x1807, 'V'), (0x180B, 'I'), (0x180E, 'X'), (0x1810, 'V'), (0x181A, 'X'), (0x1820, 'V'), (0x1878, 'X'), (0x1880, 'V'), (0x18AB, 'X'), (0x18B0, 'V'), (0x18F6, 'X'), (0x1900, 'V'), (0x191D, 'X'), (0x1920, 'V'), (0x192C, 'X'), (0x1930, 'V'), (0x193C, 'X'), (0x1940, 'V'), (0x1941, 'X'), (0x1944, 'V'), (0x196E, 'X'), (0x1970, 'V'), (0x1975, 'X'), (0x1980, 'V'), (0x19AC, 'X'), (0x19B0, 'V'), (0x19CA, 'X'), (0x19D0, 'V'), (0x19DB, 'X'), (0x19DE, 'V'), (0x1A1C, 'X'), (0x1A1E, 'V'), (0x1A5F, 'X'), (0x1A60, 'V'), (0x1A7D, 'X'), (0x1A7F, 'V'), (0x1A8A, 'X'), (0x1A90, 'V'), (0x1A9A, 'X'), ] def _seg_15(): return [ (0x1AA0, 'V'), (0x1AAE, 'X'), (0x1B00, 'V'), (0x1B4C, 'X'), (0x1B50, 'V'), (0x1B7D, 'X'), (0x1B80, 'V'), (0x1BF4, 'X'), (0x1BFC, 'V'), (0x1C38, 'X'), (0x1C3B, 'V'), (0x1C4A, 'X'), (0x1C4D, 'V'), (0x1C80, 'X'), (0x1CC0, 'V'), (0x1CC8, 'X'), (0x1CD0, 'V'), (0x1CF7, 'X'), (0x1D00, 'V'), (0x1D2C, 'M', u'a'), (0x1D2D, 'M', u'æ'), (0x1D2E, 'M', u'b'), (0x1D2F, 'V'), (0x1D30, 'M', u'd'), (0x1D31, 'M', u'e'), (0x1D32, 'M', u'ǝ'), (0x1D33, 'M', u'g'), (0x1D34, 'M', u'h'), (0x1D35, 'M', u'i'), (0x1D36, 'M', u'j'), (0x1D37, 'M', u'k'), (0x1D38, 'M', u'l'), (0x1D39, 'M', u'm'), (0x1D3A, 'M', u'n'), (0x1D3B, 'V'), (0x1D3C, 'M', u'o'), (0x1D3D, 'M', u'ȣ'), (0x1D3E, 'M', u'p'), (0x1D3F, 'M', u'r'), (0x1D40, 'M', u't'), (0x1D41, 'M', u'u'), (0x1D42, 'M', u'w'), (0x1D43, 'M', u'a'), (0x1D44, 'M', u'ɐ'), (0x1D45, 'M', u'ɑ'), (0x1D46, 'M', u'ᴂ'), (0x1D47, 'M', u'b'), (0x1D48, 'M', u'd'), (0x1D49, 'M', u'e'), (0x1D4A, 'M', u'ə'), (0x1D4B, 'M', u'ɛ'), (0x1D4C, 'M', u'ɜ'), (0x1D4D, 'M', u'g'), (0x1D4E, 'V'), (0x1D4F, 'M', u'k'), (0x1D50, 'M', u'm'), (0x1D51, 'M', u'ŋ'), (0x1D52, 'M', 
u'o'), (0x1D53, 'M', u'ɔ'), (0x1D54, 'M', u'ᴖ'), (0x1D55, 'M', u'ᴗ'), (0x1D56, 'M', u'p'), (0x1D57, 'M', u't'), (0x1D58, 'M', u'u'), (0x1D59, 'M', u'ᴝ'), (0x1D5A, 'M', u'ɯ'), (0x1D5B, 'M', u'v'), (0x1D5C, 'M', u'ᴥ'), (0x1D5D, 'M', u'β'), (0x1D5E, 'M', u'γ'), (0x1D5F, 'M', u'δ'), (0x1D60, 'M', u'φ'), (0x1D61, 'M', u'χ'), (0x1D62, 'M', u'i'), (0x1D63, 'M', u'r'), (0x1D64, 'M', u'u'), (0x1D65, 'M', u'v'), (0x1D66, 'M', u'β'), (0x1D67, 'M', u'γ'), (0x1D68, 'M', u'ρ'), (0x1D69, 'M', u'φ'), (0x1D6A, 'M', u'χ'), (0x1D6B, 'V'), (0x1D78, 'M', u'н'), (0x1D79, 'V'), (0x1D9B, 'M', u'ɒ'), (0x1D9C, 'M', u'c'), (0x1D9D, 'M', u'ɕ'), (0x1D9E, 'M', u'ð'), (0x1D9F, 'M', u'ɜ'), (0x1DA0, 'M', u'f'), (0x1DA1, 'M', u'ɟ'), (0x1DA2, 'M', u'ɡ'), (0x1DA3, 'M', u'ɥ'), (0x1DA4, 'M', u'ɨ'), (0x1DA5, 'M', u'ɩ'), (0x1DA6, 'M', u'ɪ'), (0x1DA7, 'M', u'ᵻ'), (0x1DA8, 'M', u'ʝ'), (0x1DA9, 'M', u'ɭ'), ] def _seg_16(): return [ (0x1DAA, 'M', u'ᶅ'), (0x1DAB, 'M', u'ʟ'), (0x1DAC, 'M', u'ɱ'), (0x1DAD, 'M', u'ɰ'), (0x1DAE, 'M', u'ɲ'), (0x1DAF, 'M', u'ɳ'), (0x1DB0, 'M', u'ɴ'), (0x1DB1, 'M', u'ɵ'), (0x1DB2, 'M', u'ɸ'), (0x1DB3, 'M', u'ʂ'), (0x1DB4, 'M', u'ʃ'), (0x1DB5, 'M', u'ƫ'), (0x1DB6, 'M', u'ʉ'), (0x1DB7, 'M', u'ʊ'), (0x1DB8, 'M', u'ᴜ'), (0x1DB9, 'M', u'ʋ'), (0x1DBA, 'M', u'ʌ'), (0x1DBB, 'M', u'z'), (0x1DBC, 'M', u'ʐ'), (0x1DBD, 'M', u'ʑ'), (0x1DBE, 'M', u'ʒ'), (0x1DBF, 'M', u'θ'), (0x1DC0, 'V'), (0x1DE7, 'X'), (0x1DFC, 'V'), (0x1E00, 'M', u'ḁ'), (0x1E01, 'V'), (0x1E02, 'M', u'ḃ'), (0x1E03, 'V'), (0x1E04, 'M', u'ḅ'), (0x1E05, 'V'), (0x1E06, 'M', u'ḇ'), (0x1E07, 'V'), (0x1E08, 'M', u'ḉ'), (0x1E09, 'V'), (0x1E0A, 'M', u'ḋ'), (0x1E0B, 'V'), (0x1E0C, 'M', u'ḍ'), (0x1E0D, 'V'), (0x1E0E, 'M', u'ḏ'), (0x1E0F, 'V'), (0x1E10, 'M', u'ḑ'), (0x1E11, 'V'), (0x1E12, 'M', u'ḓ'), (0x1E13, 'V'), (0x1E14, 'M', u'ḕ'), (0x1E15, 'V'), (0x1E16, 'M', u'ḗ'), (0x1E17, 'V'), (0x1E18, 'M', u'ḙ'), (0x1E19, 'V'), (0x1E1A, 'M', u'ḛ'), (0x1E1B, 'V'), (0x1E1C, 'M', u'ḝ'), (0x1E1D, 'V'), (0x1E1E, 'M', u'ḟ'), (0x1E1F, 'V'), (0x1E20, 'M', 
u'ḡ'), (0x1E21, 'V'), (0x1E22, 'M', u'ḣ'), (0x1E23, 'V'), (0x1E24, 'M', u'ḥ'), (0x1E25, 'V'), (0x1E26, 'M', u'ḧ'), (0x1E27, 'V'), (0x1E28, 'M', u'ḩ'), (0x1E29, 'V'), (0x1E2A, 'M', u'ḫ'), (0x1E2B, 'V'), (0x1E2C, 'M', u'ḭ'), (0x1E2D, 'V'), (0x1E2E, 'M', u'ḯ'), (0x1E2F, 'V'), (0x1E30, 'M', u'ḱ'), (0x1E31, 'V'), (0x1E32, 'M', u'ḳ'), (0x1E33, 'V'), (0x1E34, 'M', u'ḵ'), (0x1E35, 'V'), (0x1E36, 'M', u'ḷ'), (0x1E37, 'V'), (0x1E38, 'M', u'ḹ'), (0x1E39, 'V'), (0x1E3A, 'M', u'ḻ'), (0x1E3B, 'V'), (0x1E3C, 'M', u'ḽ'), (0x1E3D, 'V'), (0x1E3E, 'M', u'ḿ'), (0x1E3F, 'V'), (0x1E40, 'M', u'ṁ'), (0x1E41, 'V'), (0x1E42, 'M', u'ṃ'), (0x1E43, 'V'), (0x1E44, 'M', u'ṅ'), (0x1E45, 'V'), (0x1E46, 'M', u'ṇ'), (0x1E47, 'V'), (0x1E48, 'M', u'ṉ'), (0x1E49, 'V'), (0x1E4A, 'M', u'ṋ'), ] def _seg_17(): return [ (0x1E4B, 'V'), (0x1E4C, 'M', u'ṍ'), (0x1E4D, 'V'), (0x1E4E, 'M', u'ṏ'), (0x1E4F, 'V'), (0x1E50, 'M', u'ṑ'), (0x1E51, 'V'), (0x1E52, 'M', u'ṓ'), (0x1E53, 'V'), (0x1E54, 'M', u'ṕ'), (0x1E55, 'V'), (0x1E56, 'M', u'ṗ'), (0x1E57, 'V'), (0x1E58, 'M', u'ṙ'), (0x1E59, 'V'), (0x1E5A, 'M', u'ṛ'), (0x1E5B, 'V'), (0x1E5C, 'M', u'ṝ'), (0x1E5D, 'V'), (0x1E5E, 'M', u'ṟ'), (0x1E5F, 'V'), (0x1E60, 'M', u'ṡ'), (0x1E61, 'V'), (0x1E62, 'M', u'ṣ'), (0x1E63, 'V'), (0x1E64, 'M', u'ṥ'), (0x1E65, 'V'), (0x1E66, 'M', u'ṧ'), (0x1E67, 'V'), (0x1E68, 'M', u'ṩ'), (0x1E69, 'V'), (0x1E6A, 'M', u'ṫ'), (0x1E6B, 'V'), (0x1E6C, 'M', u'ṭ'), (0x1E6D, 'V'), (0x1E6E, 'M', u'ṯ'), (0x1E6F, 'V'), (0x1E70, 'M', u'ṱ'), (0x1E71, 'V'), (0x1E72, 'M', u'ṳ'), (0x1E73, 'V'), (0x1E74, 'M', u'ṵ'), (0x1E75, 'V'), (0x1E76, 'M', u'ṷ'), (0x1E77, 'V'), (0x1E78, 'M', u'ṹ'), (0x1E79, 'V'), (0x1E7A, 'M', u'ṻ'), (0x1E7B, 'V'), (0x1E7C, 'M', u'ṽ'), (0x1E7D, 'V'), (0x1E7E, 'M', u'ṿ'), (0x1E7F, 'V'), (0x1E80, 'M', u'ẁ'), (0x1E81, 'V'), (0x1E82, 'M', u'ẃ'), (0x1E83, 'V'), (0x1E84, 'M', u'ẅ'), (0x1E85, 'V'), (0x1E86, 'M', u'ẇ'), (0x1E87, 'V'), (0x1E88, 'M', u'ẉ'), (0x1E89, 'V'), (0x1E8A, 'M', u'ẋ'), (0x1E8B, 'V'), (0x1E8C, 'M', u'ẍ'), (0x1E8D, 'V'), 
(0x1E8E, 'M', u'ẏ'), (0x1E8F, 'V'), (0x1E90, 'M', u'ẑ'), (0x1E91, 'V'), (0x1E92, 'M', u'ẓ'), (0x1E93, 'V'), (0x1E94, 'M', u'ẕ'), (0x1E95, 'V'), (0x1E9A, 'M', u'aʾ'), (0x1E9B, 'M', u'ṡ'), (0x1E9C, 'V'), (0x1E9E, 'M', u'ss'), (0x1E9F, 'V'), (0x1EA0, 'M', u'ạ'), (0x1EA1, 'V'), (0x1EA2, 'M', u'ả'), (0x1EA3, 'V'), (0x1EA4, 'M', u'ấ'), (0x1EA5, 'V'), (0x1EA6, 'M', u'ầ'), (0x1EA7, 'V'), (0x1EA8, 'M', u'ẩ'), (0x1EA9, 'V'), (0x1EAA, 'M', u'ẫ'), (0x1EAB, 'V'), (0x1EAC, 'M', u'ậ'), (0x1EAD, 'V'), (0x1EAE, 'M', u'ắ'), (0x1EAF, 'V'), (0x1EB0, 'M', u'ằ'), (0x1EB1, 'V'), (0x1EB2, 'M', u'ẳ'), (0x1EB3, 'V'), ] def _seg_18(): return [ (0x1EB4, 'M', u'ẵ'), (0x1EB5, 'V'), (0x1EB6, 'M', u'ặ'), (0x1EB7, 'V'), (0x1EB8, 'M', u'ẹ'), (0x1EB9, 'V'), (0x1EBA, 'M', u'ẻ'), (0x1EBB, 'V'), (0x1EBC, 'M', u'ẽ'), (0x1EBD, 'V'), (0x1EBE, 'M', u'ế'), (0x1EBF, 'V'), (0x1EC0, 'M', u'ề'), (0x1EC1, 'V'), (0x1EC2, 'M', u'ể'), (0x1EC3, 'V'), (0x1EC4, 'M', u'ễ'), (0x1EC5, 'V'), (0x1EC6, 'M', u'ệ'), (0x1EC7, 'V'), (0x1EC8, 'M', u'ỉ'), (0x1EC9, 'V'), (0x1ECA, 'M', u'ị'), (0x1ECB, 'V'), (0x1ECC, 'M', u'ọ'), (0x1ECD, 'V'), (0x1ECE, 'M', u'ỏ'), (0x1ECF, 'V'), (0x1ED0, 'M', u'ố'), (0x1ED1, 'V'), (0x1ED2, 'M', u'ồ'), (0x1ED3, 'V'), (0x1ED4, 'M', u'ổ'), (0x1ED5, 'V'), (0x1ED6, 'M', u'ỗ'), (0x1ED7, 'V'), (0x1ED8, 'M', u'ộ'), (0x1ED9, 'V'), (0x1EDA, 'M', u'ớ'), (0x1EDB, 'V'), (0x1EDC, 'M', u'ờ'), (0x1EDD, 'V'), (0x1EDE, 'M', u'ở'), (0x1EDF, 'V'), (0x1EE0, 'M', u'ỡ'), (0x1EE1, 'V'), (0x1EE2, 'M', u'ợ'), (0x1EE3, 'V'), (0x1EE4, 'M', u'ụ'), (0x1EE5, 'V'), (0x1EE6, 'M', u'ủ'), (0x1EE7, 'V'), (0x1EE8, 'M', u'ứ'), (0x1EE9, 'V'), (0x1EEA, 'M', u'ừ'), (0x1EEB, 'V'), (0x1EEC, 'M', u'ử'), (0x1EED, 'V'), (0x1EEE, 'M', u'ữ'), (0x1EEF, 'V'), (0x1EF0, 'M', u'ự'), (0x1EF1, 'V'), (0x1EF2, 'M', u'ỳ'), (0x1EF3, 'V'), (0x1EF4, 'M', u'ỵ'), (0x1EF5, 'V'), (0x1EF6, 'M', u'ỷ'), (0x1EF7, 'V'), (0x1EF8, 'M', u'ỹ'), (0x1EF9, 'V'), (0x1EFA, 'M', u'ỻ'), (0x1EFB, 'V'), (0x1EFC, 'M', u'ỽ'), (0x1EFD, 'V'), (0x1EFE, 'M', u'ỿ'), (0x1EFF, 'V'), 
(0x1F08, 'M', u'ἀ'), (0x1F09, 'M', u'ἁ'), (0x1F0A, 'M', u'ἂ'), (0x1F0B, 'M', u'ἃ'), (0x1F0C, 'M', u'ἄ'), (0x1F0D, 'M', u'ἅ'), (0x1F0E, 'M', u'ἆ'), (0x1F0F, 'M', u'ἇ'), (0x1F10, 'V'), (0x1F16, 'X'), (0x1F18, 'M', u'ἐ'), (0x1F19, 'M', u'ἑ'), (0x1F1A, 'M', u'ἒ'), (0x1F1B, 'M', u'ἓ'), (0x1F1C, 'M', u'ἔ'), (0x1F1D, 'M', u'ἕ'), (0x1F1E, 'X'), (0x1F20, 'V'), (0x1F28, 'M', u'ἠ'), (0x1F29, 'M', u'ἡ'), (0x1F2A, 'M', u'ἢ'), (0x1F2B, 'M', u'ἣ'), (0x1F2C, 'M', u'ἤ'), (0x1F2D, 'M', u'ἥ'), ] def _seg_19(): return [ (0x1F2E, 'M', u'ἦ'), (0x1F2F, 'M', u'ἧ'), (0x1F30, 'V'), (0x1F38, 'M', u'ἰ'), (0x1F39, 'M', u'ἱ'), (0x1F3A, 'M', u'ἲ'), (0x1F3B, 'M', u'ἳ'), (0x1F3C, 'M', u'ἴ'), (0x1F3D, 'M', u'ἵ'), (0x1F3E, 'M', u'ἶ'), (0x1F3F, 'M', u'ἷ'), (0x1F40, 'V'), (0x1F46, 'X'), (0x1F48, 'M', u'ὀ'), (0x1F49, 'M', u'ὁ'), (0x1F4A, 'M', u'ὂ'), (0x1F4B, 'M', u'ὃ'), (0x1F4C, 'M', u'ὄ'), (0x1F4D, 'M', u'ὅ'), (0x1F4E, 'X'), (0x1F50, 'V'), (0x1F58, 'X'), (0x1F59, 'M', u'ὑ'), (0x1F5A, 'X'), (0x1F5B, 'M', u'ὓ'), (0x1F5C, 'X'), (0x1F5D, 'M', u'ὕ'), (0x1F5E, 'X'), (0x1F5F, 'M', u'ὗ'), (0x1F60, 'V'), (0x1F68, 'M', u'ὠ'), (0x1F69, 'M', u'ὡ'), (0x1F6A, 'M', u'ὢ'), (0x1F6B, 'M', u'ὣ'), (0x1F6C, 'M', u'ὤ'), (0x1F6D, 'M', u'ὥ'), (0x1F6E, 'M', u'ὦ'), (0x1F6F, 'M', u'ὧ'), (0x1F70, 'V'), (0x1F71, 'M', u'ά'), (0x1F72, 'V'), (0x1F73, 'M', u'έ'), (0x1F74, 'V'), (0x1F75, 'M', u'ή'), (0x1F76, 'V'), (0x1F77, 'M', u'ί'), (0x1F78, 'V'), (0x1F79, 'M', u'ό'), (0x1F7A, 'V'), (0x1F7B, 'M', u'ύ'), (0x1F7C, 'V'), (0x1F7D, 'M', u'ώ'), (0x1F7E, 'X'), (0x1F80, 'M', u'ἀι'), (0x1F81, 'M', u'ἁι'), (0x1F82, 'M', u'ἂι'), (0x1F83, 'M', u'ἃι'), (0x1F84, 'M', u'ἄι'), (0x1F85, 'M', u'ἅι'), (0x1F86, 'M', u'ἆι'), (0x1F87, 'M', u'ἇι'), (0x1F88, 'M', u'ἀι'), (0x1F89, 'M', u'ἁι'), (0x1F8A, 'M', u'ἂι'), (0x1F8B, 'M', u'ἃι'), (0x1F8C, 'M', u'ἄι'), (0x1F8D, 'M', u'ἅι'), (0x1F8E, 'M', u'ἆι'), (0x1F8F, 'M', u'ἇι'), (0x1F90, 'M', u'ἠι'), (0x1F91, 'M', u'ἡι'), (0x1F92, 'M', u'ἢι'), (0x1F93, 'M', u'ἣι'), (0x1F94, 'M', u'ἤι'), (0x1F95, 'M', u'ἥι'), 
(0x1F96, 'M', u'ἦι'), (0x1F97, 'M', u'ἧι'), (0x1F98, 'M', u'ἠι'), (0x1F99, 'M', u'ἡι'), (0x1F9A, 'M', u'ἢι'), (0x1F9B, 'M', u'ἣι'), (0x1F9C, 'M', u'ἤι'), (0x1F9D, 'M', u'ἥι'), (0x1F9E, 'M', u'ἦι'), (0x1F9F, 'M', u'ἧι'), (0x1FA0, 'M', u'ὠι'), (0x1FA1, 'M', u'ὡι'), (0x1FA2, 'M', u'ὢι'), (0x1FA3, 'M', u'ὣι'), (0x1FA4, 'M', u'ὤι'), (0x1FA5, 'M', u'ὥι'), (0x1FA6, 'M', u'ὦι'), (0x1FA7, 'M', u'ὧι'), (0x1FA8, 'M', u'ὠι'), (0x1FA9, 'M', u'ὡι'), (0x1FAA, 'M', u'ὢι'), (0x1FAB, 'M', u'ὣι'), (0x1FAC, 'M', u'ὤι'), (0x1FAD, 'M', u'ὥι'), (0x1FAE, 'M', u'ὦι'), ] def _seg_20(): return [ (0x1FAF, 'M', u'ὧι'), (0x1FB0, 'V'), (0x1FB2, 'M', u'ὰι'), (0x1FB3, 'M', u'αι'), (0x1FB4, 'M', u'άι'), (0x1FB5, 'X'), (0x1FB6, 'V'), (0x1FB7, 'M', u'ᾶι'), (0x1FB8, 'M', u'ᾰ'), (0x1FB9, 'M', u'ᾱ'), (0x1FBA, 'M', u'ὰ'), (0x1FBB, 'M', u'ά'), (0x1FBC, 'M', u'αι'), (0x1FBD, '3', u' ̓'), (0x1FBE, 'M', u'ι'), (0x1FBF, '3', u' ̓'), (0x1FC0, '3', u' ͂'), (0x1FC1, '3', u' ̈͂'), (0x1FC2, 'M', u'ὴι'), (0x1FC3, 'M', u'ηι'), (0x1FC4, 'M', u'ήι'), (0x1FC5, 'X'), (0x1FC6, 'V'), (0x1FC7, 'M', u'ῆι'), (0x1FC8, 'M', u'ὲ'), (0x1FC9, 'M', u'έ'), (0x1FCA, 'M', u'ὴ'), (0x1FCB, 'M', u'ή'), (0x1FCC, 'M', u'ηι'), (0x1FCD, '3', u' ̓̀'), (0x1FCE, '3', u' ̓́'), (0x1FCF, '3', u' ̓͂'), (0x1FD0, 'V'), (0x1FD3, 'M', u'ΐ'), (0x1FD4, 'X'), (0x1FD6, 'V'), (0x1FD8, 'M', u'ῐ'), (0x1FD9, 'M', u'ῑ'), (0x1FDA, 'M', u'ὶ'), (0x1FDB, 'M', u'ί'), (0x1FDC, 'X'), (0x1FDD, '3', u' ̔̀'), (0x1FDE, '3', u' ̔́'), (0x1FDF, '3', u' ̔͂'), (0x1FE0, 'V'), (0x1FE3, 'M', u'ΰ'), (0x1FE4, 'V'), (0x1FE8, 'M', u'ῠ'), (0x1FE9, 'M', u'ῡ'), (0x1FEA, 'M', u'ὺ'), (0x1FEB, 'M', u'ύ'), (0x1FEC, 'M', u'ῥ'), (0x1FED, '3', u' ̈̀'), (0x1FEE, '3', u' ̈́'), (0x1FEF, '3', u'`'), (0x1FF0, 'X'), (0x1FF2, 'M', u'ὼι'), (0x1FF3, 'M', u'ωι'), (0x1FF4, 'M', u'ώι'), (0x1FF5, 'X'), (0x1FF6, 'V'), (0x1FF7, 'M', u'ῶι'), (0x1FF8, 'M', u'ὸ'), (0x1FF9, 'M', u'ό'), (0x1FFA, 'M', u'ὼ'), (0x1FFB, 'M', u'ώ'), (0x1FFC, 'M', u'ωι'), (0x1FFD, '3', u' ́'), (0x1FFE, '3', u' ̔'), (0x1FFF, 'X'), 
(0x2000, '3', u' '), (0x200B, 'I'), (0x200C, 'D', u''), (0x200E, 'X'), (0x2010, 'V'), (0x2011, 'M', u'‐'), (0x2012, 'V'), (0x2017, '3', u' ̳'), (0x2018, 'V'), (0x2024, 'X'), (0x2027, 'V'), (0x2028, 'X'), (0x202F, '3', u' '), (0x2030, 'V'), (0x2033, 'M', u'′′'), (0x2034, 'M', u'′′′'), (0x2035, 'V'), (0x2036, 'M', u'‵‵'), (0x2037, 'M', u'‵‵‵'), (0x2038, 'V'), (0x203C, '3', u'!!'), (0x203D, 'V'), (0x203E, '3', u' ̅'), (0x203F, 'V'), (0x2047, '3', u'??'), (0x2048, '3', u'?!'), (0x2049, '3', u'!?'), (0x204A, 'V'), (0x2057, 'M', u'′′′′'), (0x2058, 'V'), ] def _seg_21(): return [ (0x205F, '3', u' '), (0x2060, 'I'), (0x2061, 'X'), (0x2064, 'I'), (0x2065, 'X'), (0x2070, 'M', u'0'), (0x2071, 'M', u'i'), (0x2072, 'X'), (0x2074, 'M', u'4'), (0x2075, 'M', u'5'), (0x2076, 'M', u'6'), (0x2077, 'M', u'7'), (0x2078, 'M', u'8'), (0x2079, 'M', u'9'), (0x207A, '3', u'+'), (0x207B, 'M', u'−'), (0x207C, '3', u'='), (0x207D, '3', u'('), (0x207E, '3', u')'), (0x207F, 'M', u'n'), (0x2080, 'M', u'0'), (0x2081, 'M', u'1'), (0x2082, 'M', u'2'), (0x2083, 'M', u'3'), (0x2084, 'M', u'4'), (0x2085, 'M', u'5'), (0x2086, 'M', u'6'), (0x2087, 'M', u'7'), (0x2088, 'M', u'8'), (0x2089, 'M', u'9'), (0x208A, '3', u'+'), (0x208B, 'M', u'−'), (0x208C, '3', u'='), (0x208D, '3', u'('), (0x208E, '3', u')'), (0x208F, 'X'), (0x2090, 'M', u'a'), (0x2091, 'M', u'e'), (0x2092, 'M', u'o'), (0x2093, 'M', u'x'), (0x2094, 'M', u'ə'), (0x2095, 'M', u'h'), (0x2096, 'M', u'k'), (0x2097, 'M', u'l'), (0x2098, 'M', u'm'), (0x2099, 'M', u'n'), (0x209A, 'M', u'p'), (0x209B, 'M', u's'), (0x209C, 'M', u't'), (0x209D, 'X'), (0x20A0, 'V'), (0x20A8, 'M', u'rs'), (0x20A9, 'V'), (0x20BB, 'X'), (0x20D0, 'V'), (0x20F1, 'X'), (0x2100, '3', u'a/c'), (0x2101, '3', u'a/s'), (0x2102, 'M', u'c'), (0x2103, 'M', u'°c'), (0x2104, 'V'), (0x2105, '3', u'c/o'), (0x2106, '3', u'c/u'), (0x2107, 'M', u'ɛ'), (0x2108, 'V'), (0x2109, 'M', u'°f'), (0x210A, 'M', u'g'), (0x210B, 'M', u'h'), (0x210F, 'M', u'ħ'), (0x2110, 'M', u'i'), (0x2112, 'M', u'l'), 
(0x2114, 'V'), (0x2115, 'M', u'n'), (0x2116, 'M', u'no'), (0x2117, 'V'), (0x2119, 'M', u'p'), (0x211A, 'M', u'q'), (0x211B, 'M', u'r'), (0x211E, 'V'), (0x2120, 'M', u'sm'), (0x2121, 'M', u'tel'), (0x2122, 'M', u'tm'), (0x2123, 'V'), (0x2124, 'M', u'z'), (0x2125, 'V'), (0x2126, 'M', u'ω'), (0x2127, 'V'), (0x2128, 'M', u'z'), (0x2129, 'V'), (0x212A, 'M', u'k'), (0x212B, 'M', u'å'), (0x212C, 'M', u'b'), (0x212D, 'M', u'c'), (0x212E, 'V'), (0x212F, 'M', u'e'), (0x2131, 'M', u'f'), (0x2132, 'X'), (0x2133, 'M', u'm'), (0x2134, 'M', u'o'), (0x2135, 'M', u'א'), ] def _seg_22(): return [ (0x2136, 'M', u'ב'), (0x2137, 'M', u'ג'), (0x2138, 'M', u'ד'), (0x2139, 'M', u'i'), (0x213A, 'V'), (0x213B, 'M', u'fax'), (0x213C, 'M', u'π'), (0x213D, 'M', u'γ'), (0x213F, 'M', u'π'), (0x2140, 'M', u'∑'), (0x2141, 'V'), (0x2145, 'M', u'd'), (0x2147, 'M', u'e'), (0x2148, 'M', u'i'), (0x2149, 'M', u'j'), (0x214A, 'V'), (0x2150, 'M', u'1⁄7'), (0x2151, 'M', u'1⁄9'), (0x2152, 'M', u'1⁄10'), (0x2153, 'M', u'1⁄3'), (0x2154, 'M', u'2⁄3'), (0x2155, 'M', u'1⁄5'), (0x2156, 'M', u'2⁄5'), (0x2157, 'M', u'3⁄5'), (0x2158, 'M', u'4⁄5'), (0x2159, 'M', u'1⁄6'), (0x215A, 'M', u'5⁄6'), (0x215B, 'M', u'1⁄8'), (0x215C, 'M', u'3⁄8'), (0x215D, 'M', u'5⁄8'), (0x215E, 'M', u'7⁄8'), (0x215F, 'M', u'1⁄'), (0x2160, 'M', u'i'), (0x2161, 'M', u'ii'), (0x2162, 'M', u'iii'), (0x2163, 'M', u'iv'), (0x2164, 'M', u'v'), (0x2165, 'M', u'vi'), (0x2166, 'M', u'vii'), (0x2167, 'M', u'viii'), (0x2168, 'M', u'ix'), (0x2169, 'M', u'x'), (0x216A, 'M', u'xi'), (0x216B, 'M', u'xii'), (0x216C, 'M', u'l'), (0x216D, 'M', u'c'), (0x216E, 'M', u'd'), (0x216F, 'M', u'm'), (0x2170, 'M', u'i'), (0x2171, 'M', u'ii'), (0x2172, 'M', u'iii'), (0x2173, 'M', u'iv'), (0x2174, 'M', u'v'), (0x2175, 'M', u'vi'), (0x2176, 'M', u'vii'), (0x2177, 'M', u'viii'), (0x2178, 'M', u'ix'), (0x2179, 'M', u'x'), (0x217A, 'M', u'xi'), (0x217B, 'M', u'xii'), (0x217C, 'M', u'l'), (0x217D, 'M', u'c'), (0x217E, 'M', u'd'), (0x217F, 'M', u'm'), (0x2180, 'V'), (0x2183, 
'X'), (0x2184, 'V'), (0x2189, 'M', u'0⁄3'), (0x218A, 'X'), (0x2190, 'V'), (0x222C, 'M', u'∫∫'), (0x222D, 'M', u'∫∫∫'), (0x222E, 'V'), (0x222F, 'M', u'∮∮'), (0x2230, 'M', u'∮∮∮'), (0x2231, 'V'), (0x2260, '3'), (0x2261, 'V'), (0x226E, '3'), (0x2270, 'V'), (0x2329, 'M', u'〈'), (0x232A, 'M', u'〉'), (0x232B, 'V'), (0x23F4, 'X'), (0x2400, 'V'), (0x2427, 'X'), (0x2440, 'V'), (0x244B, 'X'), (0x2460, 'M', u'1'), (0x2461, 'M', u'2'), (0x2462, 'M', u'3'), (0x2463, 'M', u'4'), (0x2464, 'M', u'5'), (0x2465, 'M', u'6'), (0x2466, 'M', u'7'), (0x2467, 'M', u'8'), (0x2468, 'M', u'9'), (0x2469, 'M', u'10'), (0x246A, 'M', u'11'), (0x246B, 'M', u'12'), ] def _seg_23(): return [ (0x246C, 'M', u'13'), (0x246D, 'M', u'14'), (0x246E, 'M', u'15'), (0x246F, 'M', u'16'), (0x2470, 'M', u'17'), (0x2471, 'M', u'18'), (0x2472, 'M', u'19'), (0x2473, 'M', u'20'), (0x2474, '3', u'(1)'), (0x2475, '3', u'(2)'), (0x2476, '3', u'(3)'), (0x2477, '3', u'(4)'), (0x2478, '3', u'(5)'), (0x2479, '3', u'(6)'), (0x247A, '3', u'(7)'), (0x247B, '3', u'(8)'), (0x247C, '3', u'(9)'), (0x247D, '3', u'(10)'), (0x247E, '3', u'(11)'), (0x247F, '3', u'(12)'), (0x2480, '3', u'(13)'), (0x2481, '3', u'(14)'), (0x2482, '3', u'(15)'), (0x2483, '3', u'(16)'), (0x2484, '3', u'(17)'), (0x2485, '3', u'(18)'), (0x2486, '3', u'(19)'), (0x2487, '3', u'(20)'), (0x2488, 'X'), (0x249C, '3', u'(a)'), (0x249D, '3', u'(b)'), (0x249E, '3', u'(c)'), (0x249F, '3', u'(d)'), (0x24A0, '3', u'(e)'), (0x24A1, '3', u'(f)'), (0x24A2, '3', u'(g)'), (0x24A3, '3', u'(h)'), (0x24A4, '3', u'(i)'), (0x24A5, '3', u'(j)'), (0x24A6, '3', u'(k)'), (0x24A7, '3', u'(l)'), (0x24A8, '3', u'(m)'), (0x24A9, '3', u'(n)'), (0x24AA, '3', u'(o)'), (0x24AB, '3', u'(p)'), (0x24AC, '3', u'(q)'), (0x24AD, '3', u'(r)'), (0x24AE, '3', u'(s)'), (0x24AF, '3', u'(t)'), (0x24B0, '3', u'(u)'), (0x24B1, '3', u'(v)'), (0x24B2, '3', u'(w)'), (0x24B3, '3', u'(x)'), (0x24B4, '3', u'(y)'), (0x24B5, '3', u'(z)'), (0x24B6, 'M', u'a'), (0x24B7, 'M', u'b'), (0x24B8, 'M', u'c'), (0x24B9, 
'M', u'd'), (0x24BA, 'M', u'e'), (0x24BB, 'M', u'f'), (0x24BC, 'M', u'g'), (0x24BD, 'M', u'h'), (0x24BE, 'M', u'i'), (0x24BF, 'M', u'j'), (0x24C0, 'M', u'k'), (0x24C1, 'M', u'l'), (0x24C2, 'M', u'm'), (0x24C3, 'M', u'n'), (0x24C4, 'M', u'o'), (0x24C5, 'M', u'p'), (0x24C6, 'M', u'q'), (0x24C7, 'M', u'r'), (0x24C8, 'M', u's'), (0x24C9, 'M', u't'), (0x24CA, 'M', u'u'), (0x24CB, 'M', u'v'), (0x24CC, 'M', u'w'), (0x24CD, 'M', u'x'), (0x24CE, 'M', u'y'), (0x24CF, 'M', u'z'), (0x24D0, 'M', u'a'), (0x24D1, 'M', u'b'), (0x24D2, 'M', u'c'), (0x24D3, 'M', u'd'), (0x24D4, 'M', u'e'), (0x24D5, 'M', u'f'), (0x24D6, 'M', u'g'), (0x24D7, 'M', u'h'), (0x24D8, 'M', u'i'), (0x24D9, 'M', u'j'), (0x24DA, 'M', u'k'), (0x24DB, 'M', u'l'), (0x24DC, 'M', u'm'), (0x24DD, 'M', u'n'), (0x24DE, 'M', u'o'), (0x24DF, 'M', u'p'), (0x24E0, 'M', u'q'), (0x24E1, 'M', u'r'), (0x24E2, 'M', u's'), ] def _seg_24(): return [ (0x24E3, 'M', u't'), (0x24E4, 'M', u'u'), (0x24E5, 'M', u'v'), (0x24E6, 'M', u'w'), (0x24E7, 'M', u'x'), (0x24E8, 'M', u'y'), (0x24E9, 'M', u'z'), (0x24EA, 'M', u'0'), (0x24EB, 'V'), (0x2700, 'X'), (0x2701, 'V'), (0x2A0C, 'M', u'∫∫∫∫'), (0x2A0D, 'V'), (0x2A74, '3', u'::='), (0x2A75, '3', u'=='), (0x2A76, '3', u'==='), (0x2A77, 'V'), (0x2ADC, 'M', u'⫝̸'), (0x2ADD, 'V'), (0x2B4D, 'X'), (0x2B50, 'V'), (0x2B5A, 'X'), (0x2C00, 'M', u'ⰰ'), (0x2C01, 'M', u'ⰱ'), (0x2C02, 'M', u'ⰲ'), (0x2C03, 'M', u'ⰳ'), (0x2C04, 'M', u'ⰴ'), (0x2C05, 'M', u'ⰵ'), (0x2C06, 'M', u'ⰶ'), (0x2C07, 'M', u'ⰷ'), (0x2C08, 'M', u'ⰸ'), (0x2C09, 'M', u'ⰹ'), (0x2C0A, 'M', u'ⰺ'), (0x2C0B, 'M', u'ⰻ'), (0x2C0C, 'M', u'ⰼ'), (0x2C0D, 'M', u'ⰽ'), (0x2C0E, 'M', u'ⰾ'), (0x2C0F, 'M', u'ⰿ'), (0x2C10, 'M', u'ⱀ'), (0x2C11, 'M', u'ⱁ'), (0x2C12, 'M', u'ⱂ'), (0x2C13, 'M', u'ⱃ'), (0x2C14, 'M', u'ⱄ'), (0x2C15, 'M', u'ⱅ'), (0x2C16, 'M', u'ⱆ'), (0x2C17, 'M', u'ⱇ'), (0x2C18, 'M', u'ⱈ'), (0x2C19, 'M', u'ⱉ'), (0x2C1A, 'M', u'ⱊ'), (0x2C1B, 'M', u'ⱋ'), (0x2C1C, 'M', u'ⱌ'), (0x2C1D, 'M', u'ⱍ'), (0x2C1E, 'M', u'ⱎ'), (0x2C1F, 'M', u'ⱏ'), (0x2C20, 
'M', u'ⱐ'), (0x2C21, 'M', u'ⱑ'), (0x2C22, 'M', u'ⱒ'), (0x2C23, 'M', u'ⱓ'), (0x2C24, 'M', u'ⱔ'), (0x2C25, 'M', u'ⱕ'), (0x2C26, 'M', u'ⱖ'), (0x2C27, 'M', u'ⱗ'), (0x2C28, 'M', u'ⱘ'), (0x2C29, 'M', u'ⱙ'), (0x2C2A, 'M', u'ⱚ'), (0x2C2B, 'M', u'ⱛ'), (0x2C2C, 'M', u'ⱜ'), (0x2C2D, 'M', u'ⱝ'), (0x2C2E, 'M', u'ⱞ'), (0x2C2F, 'X'), (0x2C30, 'V'), (0x2C5F, 'X'), (0x2C60, 'M', u'ⱡ'), (0x2C61, 'V'), (0x2C62, 'M', u'ɫ'), (0x2C63, 'M', u'ᵽ'), (0x2C64, 'M', u'ɽ'), (0x2C65, 'V'), (0x2C67, 'M', u'ⱨ'), (0x2C68, 'V'), (0x2C69, 'M', u'ⱪ'), (0x2C6A, 'V'), (0x2C6B, 'M', u'ⱬ'), (0x2C6C, 'V'), (0x2C6D, 'M', u'ɑ'), (0x2C6E, 'M', u'ɱ'), (0x2C6F, 'M', u'ɐ'), (0x2C70, 'M', u'ɒ'), (0x2C71, 'V'), (0x2C72, 'M', u'ⱳ'), (0x2C73, 'V'), (0x2C75, 'M', u'ⱶ'), (0x2C76, 'V'), (0x2C7C, 'M', u'j'), (0x2C7D, 'M', u'v'), (0x2C7E, 'M', u'ȿ'), (0x2C7F, 'M', u'ɀ'), (0x2C80, 'M', u'ⲁ'), (0x2C81, 'V'), (0x2C82, 'M', u'ⲃ'), ] def _seg_25(): return [ (0x2C83, 'V'), (0x2C84, 'M', u'ⲅ'), (0x2C85, 'V'), (0x2C86, 'M', u'ⲇ'), (0x2C87, 'V'), (0x2C88, 'M', u'ⲉ'), (0x2C89, 'V'), (0x2C8A, 'M', u'ⲋ'), (0x2C8B, 'V'), (0x2C8C, 'M', u'ⲍ'), (0x2C8D, 'V'), (0x2C8E, 'M', u'ⲏ'), (0x2C8F, 'V'), (0x2C90, 'M', u'ⲑ'), (0x2C91, 'V'), (0x2C92, 'M', u'ⲓ'), (0x2C93, 'V'), (0x2C94, 'M', u'ⲕ'), (0x2C95, 'V'), (0x2C96, 'M', u'ⲗ'), (0x2C97, 'V'), (0x2C98, 'M', u'ⲙ'), (0x2C99, 'V'), (0x2C9A, 'M', u'ⲛ'), (0x2C9B, 'V'), (0x2C9C, 'M', u'ⲝ'), (0x2C9D, 'V'), (0x2C9E, 'M', u'ⲟ'), (0x2C9F, 'V'), (0x2CA0, 'M', u'ⲡ'), (0x2CA1, 'V'), (0x2CA2, 'M', u'ⲣ'), (0x2CA3, 'V'), (0x2CA4, 'M', u'ⲥ'), (0x2CA5, 'V'), (0x2CA6, 'M', u'ⲧ'), (0x2CA7, 'V'), (0x2CA8, 'M', u'ⲩ'), (0x2CA9, 'V'), (0x2CAA, 'M', u'ⲫ'), (0x2CAB, 'V'), (0x2CAC, 'M', u'ⲭ'), (0x2CAD, 'V'), (0x2CAE, 'M', u'ⲯ'), (0x2CAF, 'V'), (0x2CB0, 'M', u'ⲱ'), (0x2CB1, 'V'), (0x2CB2, 'M', u'ⲳ'), (0x2CB3, 'V'), (0x2CB4, 'M', u'ⲵ'), (0x2CB5, 'V'), (0x2CB6, 'M', u'ⲷ'), (0x2CB7, 'V'), (0x2CB8, 'M', u'ⲹ'), (0x2CB9, 'V'), (0x2CBA, 'M', u'ⲻ'), (0x2CBB, 'V'), (0x2CBC, 'M', u'ⲽ'), (0x2CBD, 'V'), (0x2CBE, 'M', u'ⲿ'), (0x2CBF, 
'V'), (0x2CC0, 'M', u'ⳁ'), (0x2CC1, 'V'), (0x2CC2, 'M', u'ⳃ'), (0x2CC3, 'V'), (0x2CC4, 'M', u'ⳅ'), (0x2CC5, 'V'), (0x2CC6, 'M', u'ⳇ'), (0x2CC7, 'V'), (0x2CC8, 'M', u'ⳉ'), (0x2CC9, 'V'), (0x2CCA, 'M', u'ⳋ'), (0x2CCB, 'V'), (0x2CCC, 'M', u'ⳍ'), (0x2CCD, 'V'), (0x2CCE, 'M', u'ⳏ'), (0x2CCF, 'V'), (0x2CD0, 'M', u'ⳑ'), (0x2CD1, 'V'), (0x2CD2, 'M', u'ⳓ'), (0x2CD3, 'V'), (0x2CD4, 'M', u'ⳕ'), (0x2CD5, 'V'), (0x2CD6, 'M', u'ⳗ'), (0x2CD7, 'V'), (0x2CD8, 'M', u'ⳙ'), (0x2CD9, 'V'), (0x2CDA, 'M', u'ⳛ'), (0x2CDB, 'V'), (0x2CDC, 'M', u'ⳝ'), (0x2CDD, 'V'), (0x2CDE, 'M', u'ⳟ'), (0x2CDF, 'V'), (0x2CE0, 'M', u'ⳡ'), (0x2CE1, 'V'), (0x2CE2, 'M', u'ⳣ'), (0x2CE3, 'V'), (0x2CEB, 'M', u'ⳬ'), (0x2CEC, 'V'), (0x2CED, 'M', u'ⳮ'), ] def _seg_26(): return [ (0x2CEE, 'V'), (0x2CF2, 'M', u'ⳳ'), (0x2CF3, 'V'), (0x2CF4, 'X'), (0x2CF9, 'V'), (0x2D26, 'X'), (0x2D27, 'V'), (0x2D28, 'X'), (0x2D2D, 'V'), (0x2D2E, 'X'), (0x2D30, 'V'), (0x2D68, 'X'), (0x2D6F, 'M', u'ⵡ'), (0x2D70, 'V'), (0x2D71, 'X'), (0x2D7F, 'V'), (0x2D97, 'X'), (0x2DA0, 'V'), (0x2DA7, 'X'), (0x2DA8, 'V'), (0x2DAF, 'X'), (0x2DB0, 'V'), (0x2DB7, 'X'), (0x2DB8, 'V'), (0x2DBF, 'X'), (0x2DC0, 'V'), (0x2DC7, 'X'), (0x2DC8, 'V'), (0x2DCF, 'X'), (0x2DD0, 'V'), (0x2DD7, 'X'), (0x2DD8, 'V'), (0x2DDF, 'X'), (0x2DE0, 'V'), (0x2E3C, 'X'), (0x2E80, 'V'), (0x2E9A, 'X'), (0x2E9B, 'V'), (0x2E9F, 'M', u'母'), (0x2EA0, 'V'), (0x2EF3, 'M', u'龟'), (0x2EF4, 'X'), (0x2F00, 'M', u'一'), (0x2F01, 'M', u'丨'), (0x2F02, 'M', u'丶'), (0x2F03, 'M', u'丿'), (0x2F04, 'M', u'乙'), (0x2F05, 'M', u'亅'), (0x2F06, 'M', u'二'), (0x2F07, 'M', u'亠'), (0x2F08, 'M', u'人'), (0x2F09, 'M', u'儿'), (0x2F0A, 'M', u'入'), (0x2F0B, 'M', u'八'), (0x2F0C, 'M', u'冂'), (0x2F0D, 'M', u'冖'), (0x2F0E, 'M', u'冫'), (0x2F0F, 'M', u'几'), (0x2F10, 'M', u'凵'), (0x2F11, 'M', u'刀'), (0x2F12, 'M', u'力'), (0x2F13, 'M', u'勹'), (0x2F14, 'M', u'匕'), (0x2F15, 'M', u'匚'), (0x2F16, 'M', u'匸'), (0x2F17, 'M', u'十'), (0x2F18, 'M', u'卜'), (0x2F19, 'M', u'卩'), (0x2F1A, 'M', u'厂'), (0x2F1B, 'M', u'厶'), (0x2F1C, 'M', u'又'), 
(0x2F1D, 'M', u'口'), (0x2F1E, 'M', u'囗'), (0x2F1F, 'M', u'土'), (0x2F20, 'M', u'士'), (0x2F21, 'M', u'夂'), (0x2F22, 'M', u'夊'), (0x2F23, 'M', u'夕'), (0x2F24, 'M', u'大'), (0x2F25, 'M', u'女'), (0x2F26, 'M', u'子'), (0x2F27, 'M', u'宀'), (0x2F28, 'M', u'寸'), (0x2F29, 'M', u'小'), (0x2F2A, 'M', u'尢'), (0x2F2B, 'M', u'尸'), (0x2F2C, 'M', u'屮'), (0x2F2D, 'M', u'山'), (0x2F2E, 'M', u'巛'), (0x2F2F, 'M', u'工'), (0x2F30, 'M', u'己'), (0x2F31, 'M', u'巾'), (0x2F32, 'M', u'干'), (0x2F33, 'M', u'幺'), (0x2F34, 'M', u'广'), (0x2F35, 'M', u'廴'), (0x2F36, 'M', u'廾'), (0x2F37, 'M', u'弋'), (0x2F38, 'M', u'弓'), (0x2F39, 'M', u'彐'), ] def _seg_27(): return [ (0x2F3A, 'M', u'彡'), (0x2F3B, 'M', u'彳'), (0x2F3C, 'M', u'心'), (0x2F3D, 'M', u'戈'), (0x2F3E, 'M', u'戶'), (0x2F3F, 'M', u'手'), (0x2F40, 'M', u'支'), (0x2F41, 'M', u'攴'), (0x2F42, 'M', u'文'), (0x2F43, 'M', u'斗'), (0x2F44, 'M', u'斤'), (0x2F45, 'M', u'方'), (0x2F46, 'M', u'无'), (0x2F47, 'M', u'日'), (0x2F48, 'M', u'曰'), (0x2F49, 'M', u'月'), (0x2F4A, 'M', u'木'), (0x2F4B, 'M', u'欠'), (0x2F4C, 'M', u'止'), (0x2F4D, 'M', u'歹'), (0x2F4E, 'M', u'殳'), (0x2F4F, 'M', u'毋'), (0x2F50, 'M', u'比'), (0x2F51, 'M', u'毛'), (0x2F52, 'M', u'氏'), (0x2F53, 'M', u'气'), (0x2F54, 'M', u'水'), (0x2F55, 'M', u'火'), (0x2F56, 'M', u'爪'), (0x2F57, 'M', u'父'), (0x2F58, 'M', u'爻'), (0x2F59, 'M', u'爿'), (0x2F5A, 'M', u'片'), (0x2F5B, 'M', u'牙'), (0x2F5C, 'M', u'牛'), (0x2F5D, 'M', u'犬'), (0x2F5E, 'M', u'玄'), (0x2F5F, 'M', u'玉'), (0x2F60, 'M', u'瓜'), (0x2F61, 'M', u'瓦'), (0x2F62, 'M', u'甘'), (0x2F63, 'M', u'生'), (0x2F64, 'M', u'用'), (0x2F65, 'M', u'田'), (0x2F66, 'M', u'疋'), (0x2F67, 'M', u'疒'), (0x2F68, 'M', u'癶'), (0x2F69, 'M', u'白'), (0x2F6A, 'M', u'皮'), (0x2F6B, 'M', u'皿'), (0x2F6C, 'M', u'目'), (0x2F6D, 'M', u'矛'), (0x2F6E, 'M', u'矢'), (0x2F6F, 'M', u'石'), (0x2F70, 'M', u'示'), (0x2F71, 'M', u'禸'), (0x2F72, 'M', u'禾'), (0x2F73, 'M', u'穴'), (0x2F74, 'M', u'立'), (0x2F75, 'M', u'竹'), (0x2F76, 'M', u'米'), (0x2F77, 'M', u'糸'), (0x2F78, 'M', u'缶'), (0x2F79, 'M', u'网'), (0x2F7A, 'M', u'羊'), 
(0x2F7B, 'M', u'羽'), (0x2F7C, 'M', u'老'), (0x2F7D, 'M', u'而'), (0x2F7E, 'M', u'耒'), (0x2F7F, 'M', u'耳'), (0x2F80, 'M', u'聿'), (0x2F81, 'M', u'肉'), (0x2F82, 'M', u'臣'), (0x2F83, 'M', u'自'), (0x2F84, 'M', u'至'), (0x2F85, 'M', u'臼'), (0x2F86, 'M', u'舌'), (0x2F87, 'M', u'舛'), (0x2F88, 'M', u'舟'), (0x2F89, 'M', u'艮'), (0x2F8A, 'M', u'色'), (0x2F8B, 'M', u'艸'), (0x2F8C, 'M', u'虍'), (0x2F8D, 'M', u'虫'), (0x2F8E, 'M', u'血'), (0x2F8F, 'M', u'行'), (0x2F90, 'M', u'衣'), (0x2F91, 'M', u'襾'), (0x2F92, 'M', u'見'), (0x2F93, 'M', u'角'), (0x2F94, 'M', u'言'), (0x2F95, 'M', u'谷'), (0x2F96, 'M', u'豆'), (0x2F97, 'M', u'豕'), (0x2F98, 'M', u'豸'), (0x2F99, 'M', u'貝'), (0x2F9A, 'M', u'赤'), (0x2F9B, 'M', u'走'), (0x2F9C, 'M', u'足'), (0x2F9D, 'M', u'身'), ] def _seg_28(): return [ (0x2F9E, 'M', u'車'), (0x2F9F, 'M', u'辛'), (0x2FA0, 'M', u'辰'), (0x2FA1, 'M', u'辵'), (0x2FA2, 'M', u'邑'), (0x2FA3, 'M', u'酉'), (0x2FA4, 'M', u'釆'), (0x2FA5, 'M', u'里'), (0x2FA6, 'M', u'金'), (0x2FA7, 'M', u'長'), (0x2FA8, 'M', u'門'), (0x2FA9, 'M', u'阜'), (0x2FAA, 'M', u'隶'), (0x2FAB, 'M', u'隹'), (0x2FAC, 'M', u'雨'), (0x2FAD, 'M', u'靑'), (0x2FAE, 'M', u'非'), (0x2FAF, 'M', u'面'), (0x2FB0, 'M', u'革'), (0x2FB1, 'M', u'韋'), (0x2FB2, 'M', u'韭'), (0x2FB3, 'M', u'音'), (0x2FB4, 'M', u'頁'), (0x2FB5, 'M', u'風'), (0x2FB6, 'M', u'飛'), (0x2FB7, 'M', u'食'), (0x2FB8, 'M', u'首'), (0x2FB9, 'M', u'香'), (0x2FBA, 'M', u'馬'), (0x2FBB, 'M', u'骨'), (0x2FBC, 'M', u'高'), (0x2FBD, 'M', u'髟'), (0x2FBE, 'M', u'鬥'), (0x2FBF, 'M', u'鬯'), (0x2FC0, 'M', u'鬲'), (0x2FC1, 'M', u'鬼'), (0x2FC2, 'M', u'魚'), (0x2FC3, 'M', u'鳥'), (0x2FC4, 'M', u'鹵'), (0x2FC5, 'M', u'鹿'), (0x2FC6, 'M', u'麥'), (0x2FC7, 'M', u'麻'), (0x2FC8, 'M', u'黃'), (0x2FC9, 'M', u'黍'), (0x2FCA, 'M', u'黑'), (0x2FCB, 'M', u'黹'), (0x2FCC, 'M', u'黽'), (0x2FCD, 'M', u'鼎'), (0x2FCE, 'M', u'鼓'), (0x2FCF, 'M', u'鼠'), (0x2FD0, 'M', u'鼻'), (0x2FD1, 'M', u'齊'), (0x2FD2, 'M', u'齒'), (0x2FD3, 'M', u'龍'), (0x2FD4, 'M', u'龜'), (0x2FD5, 'M', u'龠'), (0x2FD6, 'X'), (0x3000, '3', u' '), (0x3001, 'V'), (0x3002, 
'M', u'.'), (0x3003, 'V'), (0x3036, 'M', u'〒'), (0x3037, 'V'), (0x3038, 'M', u'十'), (0x3039, 'M', u'卄'), (0x303A, 'M', u'卅'), (0x303B, 'V'), (0x3040, 'X'), (0x3041, 'V'), (0x3097, 'X'), (0x3099, 'V'), (0x309B, '3', u' ゙'), (0x309C, '3', u' ゚'), (0x309D, 'V'), (0x309F, 'M', u'より'), (0x30A0, 'V'), (0x30FF, 'M', u'コト'), (0x3100, 'X'), (0x3105, 'V'), (0x312E, 'X'), (0x3131, 'M', u'ᄀ'), (0x3132, 'M', u'ᄁ'), (0x3133, 'M', u'ᆪ'), (0x3134, 'M', u'ᄂ'), (0x3135, 'M', u'ᆬ'), (0x3136, 'M', u'ᆭ'), (0x3137, 'M', u'ᄃ'), (0x3138, 'M', u'ᄄ'), (0x3139, 'M', u'ᄅ'), (0x313A, 'M', u'ᆰ'), (0x313B, 'M', u'ᆱ'), (0x313C, 'M', u'ᆲ'), (0x313D, 'M', u'ᆳ'), (0x313E, 'M', u'ᆴ'), (0x313F, 'M', u'ᆵ'), (0x3140, 'M', u'ᄚ'), (0x3141, 'M', u'ᄆ'), (0x3142, 'M', u'ᄇ'), (0x3143, 'M', u'ᄈ'), (0x3144, 'M', u'ᄡ'), ] def _seg_29(): return [ (0x3145, 'M', u'ᄉ'), (0x3146, 'M', u'ᄊ'), (0x3147, 'M', u'ᄋ'), (0x3148, 'M', u'ᄌ'), (0x3149, 'M', u'ᄍ'), (0x314A, 'M', u'ᄎ'), (0x314B, 'M', u'ᄏ'), (0x314C, 'M', u'ᄐ'), (0x314D, 'M', u'ᄑ'), (0x314E, 'M', u'ᄒ'), (0x314F, 'M', u'ᅡ'), (0x3150, 'M', u'ᅢ'), (0x3151, 'M', u'ᅣ'), (0x3152, 'M', u'ᅤ'), (0x3153, 'M', u'ᅥ'), (0x3154, 'M', u'ᅦ'), (0x3155, 'M', u'ᅧ'), (0x3156, 'M', u'ᅨ'), (0x3157, 'M', u'ᅩ'), (0x3158, 'M', u'ᅪ'), (0x3159, 'M', u'ᅫ'), (0x315A, 'M', u'ᅬ'), (0x315B, 'M', u'ᅭ'), (0x315C, 'M', u'ᅮ'), (0x315D, 'M', u'ᅯ'), (0x315E, 'M', u'ᅰ'), (0x315F, 'M', u'ᅱ'), (0x3160, 'M', u'ᅲ'), (0x3161, 'M', u'ᅳ'), (0x3162, 'M', u'ᅴ'), (0x3163, 'M', u'ᅵ'), (0x3164, 'X'), (0x3165, 'M', u'ᄔ'), (0x3166, 'M', u'ᄕ'), (0x3167, 'M', u'ᇇ'), (0x3168, 'M', u'ᇈ'), (0x3169, 'M', u'ᇌ'), (0x316A, 'M', u'ᇎ'), (0x316B, 'M', u'ᇓ'), (0x316C, 'M', u'ᇗ'), (0x316D, 'M', u'ᇙ'), (0x316E, 'M', u'ᄜ'), (0x316F, 'M', u'ᇝ'), (0x3170, 'M', u'ᇟ'), (0x3171, 'M', u'ᄝ'), (0x3172, 'M', u'ᄞ'), (0x3173, 'M', u'ᄠ'), (0x3174, 'M', u'ᄢ'), (0x3175, 'M', u'ᄣ'), (0x3176, 'M', u'ᄧ'), (0x3177, 'M', u'ᄩ'), (0x3178, 'M', u'ᄫ'), (0x3179, 'M', u'ᄬ'), (0x317A, 'M', u'ᄭ'), (0x317B, 'M', u'ᄮ'), (0x317C, 'M', u'ᄯ'), (0x317D, 'M', 
u'ᄲ'), (0x317E, 'M', u'ᄶ'), (0x317F, 'M', u'ᅀ'), (0x3180, 'M', u'ᅇ'), (0x3181, 'M', u'ᅌ'), (0x3182, 'M', u'ᇱ'), (0x3183, 'M', u'ᇲ'), (0x3184, 'M', u'ᅗ'), (0x3185, 'M', u'ᅘ'), (0x3186, 'M', u'ᅙ'), (0x3187, 'M', u'ᆄ'), (0x3188, 'M', u'ᆅ'), (0x3189, 'M', u'ᆈ'), (0x318A, 'M', u'ᆑ'), (0x318B, 'M', u'ᆒ'), (0x318C, 'M', u'ᆔ'), (0x318D, 'M', u'ᆞ'), (0x318E, 'M', u'ᆡ'), (0x318F, 'X'), (0x3190, 'V'), (0x3192, 'M', u'一'), (0x3193, 'M', u'二'), (0x3194, 'M', u'三'), (0x3195, 'M', u'四'), (0x3196, 'M', u'上'), (0x3197, 'M', u'中'), (0x3198, 'M', u'下'), (0x3199, 'M', u'甲'), (0x319A, 'M', u'乙'), (0x319B, 'M', u'丙'), (0x319C, 'M', u'丁'), (0x319D, 'M', u'天'), (0x319E, 'M', u'地'), (0x319F, 'M', u'人'), (0x31A0, 'V'), (0x31BB, 'X'), (0x31C0, 'V'), (0x31E4, 'X'), (0x31F0, 'V'), (0x3200, '3', u'(ᄀ)'), (0x3201, '3', u'(ᄂ)'), (0x3202, '3', u'(ᄃ)'), (0x3203, '3', u'(ᄅ)'), (0x3204, '3', u'(ᄆ)'), ] def _seg_30(): return [ (0x3205, '3', u'(ᄇ)'), (0x3206, '3', u'(ᄉ)'), (0x3207, '3', u'(ᄋ)'), (0x3208, '3', u'(ᄌ)'), (0x3209, '3', u'(ᄎ)'), (0x320A, '3', u'(ᄏ)'), (0x320B, '3', u'(ᄐ)'), (0x320C, '3', u'(ᄑ)'), (0x320D, '3', u'(ᄒ)'), (0x320E, '3', u'(가)'), (0x320F, '3', u'(나)'), (0x3210, '3', u'(다)'), (0x3211, '3', u'(라)'), (0x3212, '3', u'(마)'), (0x3213, '3', u'(바)'), (0x3214, '3', u'(사)'), (0x3215, '3', u'(아)'), (0x3216, '3', u'(자)'), (0x3217, '3', u'(차)'), (0x3218, '3', u'(카)'), (0x3219, '3', u'(타)'), (0x321A, '3', u'(파)'), (0x321B, '3', u'(하)'), (0x321C, '3', u'(주)'), (0x321D, '3', u'(오전)'), (0x321E, '3', u'(오후)'), (0x321F, 'X'), (0x3220, '3', u'(一)'), (0x3221, '3', u'(二)'), (0x3222, '3', u'(三)'), (0x3223, '3', u'(四)'), (0x3224, '3', u'(五)'), (0x3225, '3', u'(六)'), (0x3226, '3', u'(七)'), (0x3227, '3', u'(八)'), (0x3228, '3', u'(九)'), (0x3229, '3', u'(十)'), (0x322A, '3', u'(月)'), (0x322B, '3', u'(火)'), (0x322C, '3', u'(水)'), (0x322D, '3', u'(木)'), (0x322E, '3', u'(金)'), (0x322F, '3', u'(土)'), (0x3230, '3', u'(日)'), (0x3231, '3', u'(株)'), (0x3232, '3', u'(有)'), (0x3233, '3', u'(社)'), (0x3234, '3', 
u'(名)'), (0x3235, '3', u'(特)'), (0x3236, '3', u'(財)'), (0x3237, '3', u'(祝)'), (0x3238, '3', u'(労)'), (0x3239, '3', u'(代)'), (0x323A, '3', u'(呼)'), (0x323B, '3', u'(学)'), (0x323C, '3', u'(監)'), (0x323D, '3', u'(企)'), (0x323E, '3', u'(資)'), (0x323F, '3', u'(協)'), (0x3240, '3', u'(祭)'), (0x3241, '3', u'(休)'), (0x3242, '3', u'(自)'), (0x3243, '3', u'(至)'), (0x3244, 'M', u'問'), (0x3245, 'M', u'幼'), (0x3246, 'M', u'文'), (0x3247, 'M', u'箏'), (0x3248, 'V'), (0x3250, 'M', u'pte'), (0x3251, 'M', u'21'), (0x3252, 'M', u'22'), (0x3253, 'M', u'23'), (0x3254, 'M', u'24'), (0x3255, 'M', u'25'), (0x3256, 'M', u'26'), (0x3257, 'M', u'27'), (0x3258, 'M', u'28'), (0x3259, 'M', u'29'), (0x325A, 'M', u'30'), (0x325B, 'M', u'31'), (0x325C, 'M', u'32'), (0x325D, 'M', u'33'), (0x325E, 'M', u'34'), (0x325F, 'M', u'35'), (0x3260, 'M', u'ᄀ'), (0x3261, 'M', u'ᄂ'), (0x3262, 'M', u'ᄃ'), (0x3263, 'M', u'ᄅ'), (0x3264, 'M', u'ᄆ'), (0x3265, 'M', u'ᄇ'), (0x3266, 'M', u'ᄉ'), (0x3267, 'M', u'ᄋ'), (0x3268, 'M', u'ᄌ'), (0x3269, 'M', u'ᄎ'), (0x326A, 'M', u'ᄏ'), (0x326B, 'M', u'ᄐ'), (0x326C, 'M', u'ᄑ'), (0x326D, 'M', u'ᄒ'), (0x326E, 'M', u'가'), (0x326F, 'M', u'나'), ] def _seg_31(): return [ (0x3270, 'M', u'다'), (0x3271, 'M', u'라'), (0x3272, 'M', u'마'), (0x3273, 'M', u'바'), (0x3274, 'M', u'사'), (0x3275, 'M', u'아'), (0x3276, 'M', u'자'), (0x3277, 'M', u'차'), (0x3278, 'M', u'카'), (0x3279, 'M', u'타'), (0x327A, 'M', u'파'), (0x327B, 'M', u'하'), (0x327C, 'M', u'참고'), (0x327D, 'M', u'주의'), (0x327E, 'M', u'우'), (0x327F, 'V'), (0x3280, 'M', u'一'), (0x3281, 'M', u'二'), (0x3282, 'M', u'三'), (0x3283, 'M', u'四'), (0x3284, 'M', u'五'), (0x3285, 'M', u'六'), (0x3286, 'M', u'七'), (0x3287, 'M', u'八'), (0x3288, 'M', u'九'), (0x3289, 'M', u'十'), (0x328A, 'M', u'月'), (0x328B, 'M', u'火'), (0x328C, 'M', u'水'), (0x328D, 'M', u'木'), (0x328E, 'M', u'金'), (0x328F, 'M', u'土'), (0x3290, 'M', u'日'), (0x3291, 'M', u'株'), (0x3292, 'M', u'有'), (0x3293, 'M', u'社'), (0x3294, 'M', u'名'), (0x3295, 'M', u'特'), (0x3296, 'M', u'財'), (0x3297, 'M', 
u'祝'), (0x3298, 'M', u'労'), (0x3299, 'M', u'秘'), (0x329A, 'M', u'男'), (0x329B, 'M', u'女'), (0x329C, 'M', u'適'), (0x329D, 'M', u'優'), (0x329E, 'M', u'印'), (0x329F, 'M', u'注'), (0x32A0, 'M', u'項'), (0x32A1, 'M', u'休'), (0x32A2, 'M', u'写'), (0x32A3, 'M', u'正'), (0x32A4, 'M', u'上'), (0x32A5, 'M', u'中'), (0x32A6, 'M', u'下'), (0x32A7, 'M', u'左'), (0x32A8, 'M', u'右'), (0x32A9, 'M', u'医'), (0x32AA, 'M', u'宗'), (0x32AB, 'M', u'学'), (0x32AC, 'M', u'監'), (0x32AD, 'M', u'企'), (0x32AE, 'M', u'資'), (0x32AF, 'M', u'協'), (0x32B0, 'M', u'夜'), (0x32B1, 'M', u'36'), (0x32B2, 'M', u'37'), (0x32B3, 'M', u'38'), (0x32B4, 'M', u'39'), (0x32B5, 'M', u'40'), (0x32B6, 'M', u'41'), (0x32B7, 'M', u'42'), (0x32B8, 'M', u'43'), (0x32B9, 'M', u'44'), (0x32BA, 'M', u'45'), (0x32BB, 'M', u'46'), (0x32BC, 'M', u'47'), (0x32BD, 'M', u'48'), (0x32BE, 'M', u'49'), (0x32BF, 'M', u'50'), (0x32C0, 'M', u'1月'), (0x32C1, 'M', u'2月'), (0x32C2, 'M', u'3月'), (0x32C3, 'M', u'4月'), (0x32C4, 'M', u'5月'), (0x32C5, 'M', u'6月'), (0x32C6, 'M', u'7月'), (0x32C7, 'M', u'8月'), (0x32C8, 'M', u'9月'), (0x32C9, 'M', u'10月'), (0x32CA, 'M', u'11月'), (0x32CB, 'M', u'12月'), (0x32CC, 'M', u'hg'), (0x32CD, 'M', u'erg'), (0x32CE, 'M', u'ev'), (0x32CF, 'M', u'ltd'), (0x32D0, 'M', u'ア'), (0x32D1, 'M', u'イ'), (0x32D2, 'M', u'ウ'), (0x32D3, 'M', u'エ'), ] def _seg_32(): return [ (0x32D4, 'M', u'オ'), (0x32D5, 'M', u'カ'), (0x32D6, 'M', u'キ'), (0x32D7, 'M', u'ク'), (0x32D8, 'M', u'ケ'), (0x32D9, 'M', u'コ'), (0x32DA, 'M', u'サ'), (0x32DB, 'M', u'シ'), (0x32DC, 'M', u'ス'), (0x32DD, 'M', u'セ'), (0x32DE, 'M', u'ソ'), (0x32DF, 'M', u'タ'), (0x32E0, 'M', u'チ'), (0x32E1, 'M', u'ツ'), (0x32E2, 'M', u'テ'), (0x32E3, 'M', u'ト'), (0x32E4, 'M', u'ナ'), (0x32E5, 'M', u'ニ'), (0x32E6, 'M', u'ヌ'), (0x32E7, 'M', u'ネ'), (0x32E8, 'M', u'ノ'), (0x32E9, 'M', u'ハ'), (0x32EA, 'M', u'ヒ'), (0x32EB, 'M', u'フ'), (0x32EC, 'M', u'ヘ'), (0x32ED, 'M', u'ホ'), (0x32EE, 'M', u'マ'), (0x32EF, 'M', u'ミ'), (0x32F0, 'M', u'ム'), (0x32F1, 'M', u'メ'), (0x32F2, 'M', u'モ'), (0x32F3, 'M', 
u'ヤ'), (0x32F4, 'M', u'ユ'), (0x32F5, 'M', u'ヨ'), (0x32F6, 'M', u'ラ'), (0x32F7, 'M', u'リ'), (0x32F8, 'M', u'ル'), (0x32F9, 'M', u'レ'), (0x32FA, 'M', u'ロ'), (0x32FB, 'M', u'ワ'), (0x32FC, 'M', u'ヰ'), (0x32FD, 'M', u'ヱ'), (0x32FE, 'M', u'ヲ'), (0x32FF, 'X'), (0x3300, 'M', u'アパート'), (0x3301, 'M', u'アルファ'), (0x3302, 'M', u'アンペア'), (0x3303, 'M', u'アール'), (0x3304, 'M', u'イニング'), (0x3305, 'M', u'インチ'), (0x3306, 'M', u'ウォン'), (0x3307, 'M', u'エスクード'), (0x3308, 'M', u'エーカー'), (0x3309, 'M', u'オンス'), (0x330A, 'M', u'オーム'), (0x330B, 'M', u'カイリ'), (0x330C, 'M', u'カラット'), (0x330D, 'M', u'カロリー'), (0x330E, 'M', u'ガロン'), (0x330F, 'M', u'ガンマ'), (0x3310, 'M', u'ギガ'), (0x3311, 'M', u'ギニー'), (0x3312, 'M', u'キュリー'), (0x3313, 'M', u'ギルダー'), (0x3314, 'M', u'キロ'), (0x3315, 'M', u'キログラム'), (0x3316, 'M', u'キロメートル'), (0x3317, 'M', u'キロワット'), (0x3318, 'M', u'グラム'), (0x3319, 'M', u'グラムトン'), (0x331A, 'M', u'クルゼイロ'), (0x331B, 'M', u'クローネ'), (0x331C, 'M', u'ケース'), (0x331D, 'M', u'コルナ'), (0x331E, 'M', u'コーポ'), (0x331F, 'M', u'サイクル'), (0x3320, 'M', u'サンチーム'), (0x3321, 'M', u'シリング'), (0x3322, 'M', u'センチ'), (0x3323, 'M', u'セント'), (0x3324, 'M', u'ダース'), (0x3325, 'M', u'デシ'), (0x3326, 'M', u'ドル'), (0x3327, 'M', u'トン'), (0x3328, 'M', u'ナノ'), (0x3329, 'M', u'ノット'), (0x332A, 'M', u'ハイツ'), (0x332B, 'M', u'パーセント'), (0x332C, 'M', u'パーツ'), (0x332D, 'M', u'バーレル'), (0x332E, 'M', u'ピアストル'), (0x332F, 'M', u'ピクル'), (0x3330, 'M', u'ピコ'), (0x3331, 'M', u'ビル'), (0x3332, 'M', u'ファラッド'), (0x3333, 'M', u'フィート'), (0x3334, 'M', u'ブッシェル'), (0x3335, 'M', u'フラン'), (0x3336, 'M', u'ヘクタール'), (0x3337, 'M', u'ペソ'), ] def _seg_33(): return [ (0x3338, 'M', u'ペニヒ'), (0x3339, 'M', u'ヘルツ'), (0x333A, 'M', u'ペンス'), (0x333B, 'M', u'ページ'), (0x333C, 'M', u'ベータ'), (0x333D, 'M', u'ポイント'), (0x333E, 'M', u'ボルト'), (0x333F, 'M', u'ホン'), (0x3340, 'M', u'ポンド'), (0x3341, 'M', u'ホール'), (0x3342, 'M', u'ホーン'), (0x3343, 'M', u'マイクロ'), (0x3344, 'M', u'マイル'), (0x3345, 'M', u'マッハ'), (0x3346, 'M', u'マルク'), (0x3347, 'M', u'マンション'), (0x3348, 'M', u'ミクロン'), 
(0x3349, 'M', u'ミリ'), (0x334A, 'M', u'ミリバール'), (0x334B, 'M', u'メガ'), (0x334C, 'M', u'メガトン'), (0x334D, 'M', u'メートル'), (0x334E, 'M', u'ヤード'), (0x334F, 'M', u'ヤール'), (0x3350, 'M', u'ユアン'), (0x3351, 'M', u'リットル'), (0x3352, 'M', u'リラ'), (0x3353, 'M', u'ルピー'), (0x3354, 'M', u'ルーブル'), (0x3355, 'M', u'レム'), (0x3356, 'M', u'レントゲン'), (0x3357, 'M', u'ワット'), (0x3358, 'M', u'0点'), (0x3359, 'M', u'1点'), (0x335A, 'M', u'2点'), (0x335B, 'M', u'3点'), (0x335C, 'M', u'4点'), (0x335D, 'M', u'5点'), (0x335E, 'M', u'6点'), (0x335F, 'M', u'7点'), (0x3360, 'M', u'8点'), (0x3361, 'M', u'9点'), (0x3362, 'M', u'10点'), (0x3363, 'M', u'11点'), (0x3364, 'M', u'12点'), (0x3365, 'M', u'13点'), (0x3366, 'M', u'14点'), (0x3367, 'M', u'15点'), (0x3368, 'M', u'16点'), (0x3369, 'M', u'17点'), (0x336A, 'M', u'18点'), (0x336B, 'M', u'19点'), (0x336C, 'M', u'20点'), (0x336D, 'M', u'21点'), (0x336E, 'M', u'22点'), (0x336F, 'M', u'23点'), (0x3370, 'M', u'24点'), (0x3371, 'M', u'hpa'), (0x3372, 'M', u'da'), (0x3373, 'M', u'au'), (0x3374, 'M', u'bar'), (0x3375, 'M', u'ov'), (0x3376, 'M', u'pc'), (0x3377, 'M', u'dm'), (0x3378, 'M', u'dm2'), (0x3379, 'M', u'dm3'), (0x337A, 'M', u'iu'), (0x337B, 'M', u'平成'), (0x337C, 'M', u'昭和'), (0x337D, 'M', u'大正'), (0x337E, 'M', u'明治'), (0x337F, 'M', u'株式会社'), (0x3380, 'M', u'pa'), (0x3381, 'M', u'na'), (0x3382, 'M', u'μa'), (0x3383, 'M', u'ma'), (0x3384, 'M', u'ka'), (0x3385, 'M', u'kb'), (0x3386, 'M', u'mb'), (0x3387, 'M', u'gb'), (0x3388, 'M', u'cal'), (0x3389, 'M', u'kcal'), (0x338A, 'M', u'pf'), (0x338B, 'M', u'nf'), (0x338C, 'M', u'μf'), (0x338D, 'M', u'μg'), (0x338E, 'M', u'mg'), (0x338F, 'M', u'kg'), (0x3390, 'M', u'hz'), (0x3391, 'M', u'khz'), (0x3392, 'M', u'mhz'), (0x3393, 'M', u'ghz'), (0x3394, 'M', u'thz'), (0x3395, 'M', u'μl'), (0x3396, 'M', u'ml'), (0x3397, 'M', u'dl'), (0x3398, 'M', u'kl'), (0x3399, 'M', u'fm'), (0x339A, 'M', u'nm'), (0x339B, 'M', u'μm'), ] def _seg_34(): return [ (0x339C, 'M', u'mm'), (0x339D, 'M', u'cm'), (0x339E, 'M', u'km'), (0x339F, 'M', u'mm2'), (0x33A0, 
'M', u'cm2'), (0x33A1, 'M', u'm2'), (0x33A2, 'M', u'km2'), (0x33A3, 'M', u'mm3'), (0x33A4, 'M', u'cm3'), (0x33A5, 'M', u'm3'), (0x33A6, 'M', u'km3'), (0x33A7, 'M', u'm∕s'), (0x33A8, 'M', u'm∕s2'), (0x33A9, 'M', u'pa'), (0x33AA, 'M', u'kpa'), (0x33AB, 'M', u'mpa'), (0x33AC, 'M', u'gpa'), (0x33AD, 'M', u'rad'), (0x33AE, 'M', u'rad∕s'), (0x33AF, 'M', u'rad∕s2'), (0x33B0, 'M', u'ps'), (0x33B1, 'M', u'ns'), (0x33B2, 'M', u'μs'), (0x33B3, 'M', u'ms'), (0x33B4, 'M', u'pv'), (0x33B5, 'M', u'nv'), (0x33B6, 'M', u'μv'), (0x33B7, 'M', u'mv'), (0x33B8, 'M', u'kv'), (0x33B9, 'M', u'mv'), (0x33BA, 'M', u'pw'), (0x33BB, 'M', u'nw'), (0x33BC, 'M', u'μw'), (0x33BD, 'M', u'mw'), (0x33BE, 'M', u'kw'), (0x33BF, 'M', u'mw'), (0x33C0, 'M', u'kω'), (0x33C1, 'M', u'mω'), (0x33C2, 'X'), (0x33C3, 'M', u'bq'), (0x33C4, 'M', u'cc'), (0x33C5, 'M', u'cd'), (0x33C6, 'M', u'c∕kg'), (0x33C7, 'X'), (0x33C8, 'M', u'db'), (0x33C9, 'M', u'gy'), (0x33CA, 'M', u'ha'), (0x33CB, 'M', u'hp'), (0x33CC, 'M', u'in'), (0x33CD, 'M', u'kk'), (0x33CE, 'M', u'km'), (0x33CF, 'M', u'kt'), (0x33D0, 'M', u'lm'), (0x33D1, 'M', u'ln'), (0x33D2, 'M', u'log'), (0x33D3, 'M', u'lx'), (0x33D4, 'M', u'mb'), (0x33D5, 'M', u'mil'), (0x33D6, 'M', u'mol'), (0x33D7, 'M', u'ph'), (0x33D8, 'X'), (0x33D9, 'M', u'ppm'), (0x33DA, 'M', u'pr'), (0x33DB, 'M', u'sr'), (0x33DC, 'M', u'sv'), (0x33DD, 'M', u'wb'), (0x33DE, 'M', u'v∕m'), (0x33DF, 'M', u'a∕m'), (0x33E0, 'M', u'1日'), (0x33E1, 'M', u'2日'), (0x33E2, 'M', u'3日'), (0x33E3, 'M', u'4日'), (0x33E4, 'M', u'5日'), (0x33E5, 'M', u'6日'), (0x33E6, 'M', u'7日'), (0x33E7, 'M', u'8日'), (0x33E8, 'M', u'9日'), (0x33E9, 'M', u'10日'), (0x33EA, 'M', u'11日'), (0x33EB, 'M', u'12日'), (0x33EC, 'M', u'13日'), (0x33ED, 'M', u'14日'), (0x33EE, 'M', u'15日'), (0x33EF, 'M', u'16日'), (0x33F0, 'M', u'17日'), (0x33F1, 'M', u'18日'), (0x33F2, 'M', u'19日'), (0x33F3, 'M', u'20日'), (0x33F4, 'M', u'21日'), (0x33F5, 'M', u'22日'), (0x33F6, 'M', u'23日'), (0x33F7, 'M', u'24日'), (0x33F8, 'M', u'25日'), (0x33F9, 'M', u'26日'), 
(0x33FA, 'M', u'27日'), (0x33FB, 'M', u'28日'), (0x33FC, 'M', u'29日'), (0x33FD, 'M', u'30日'), (0x33FE, 'M', u'31日'), (0x33FF, 'M', u'gal'), ] def _seg_35(): return [ (0x3400, 'V'), (0x4DB6, 'X'), (0x4DC0, 'V'), (0x9FCD, 'X'), (0xA000, 'V'), (0xA48D, 'X'), (0xA490, 'V'), (0xA4C7, 'X'), (0xA4D0, 'V'), (0xA62C, 'X'), (0xA640, 'M', u'ꙁ'), (0xA641, 'V'), (0xA642, 'M', u'ꙃ'), (0xA643, 'V'), (0xA644, 'M', u'ꙅ'), (0xA645, 'V'), (0xA646, 'M', u'ꙇ'), (0xA647, 'V'), (0xA648, 'M', u'ꙉ'), (0xA649, 'V'), (0xA64A, 'M', u'ꙋ'), (0xA64B, 'V'), (0xA64C, 'M', u'ꙍ'), (0xA64D, 'V'), (0xA64E, 'M', u'ꙏ'), (0xA64F, 'V'), (0xA650, 'M', u'ꙑ'), (0xA651, 'V'), (0xA652, 'M', u'ꙓ'), (0xA653, 'V'), (0xA654, 'M', u'ꙕ'), (0xA655, 'V'), (0xA656, 'M', u'ꙗ'), (0xA657, 'V'), (0xA658, 'M', u'ꙙ'), (0xA659, 'V'), (0xA65A, 'M', u'ꙛ'), (0xA65B, 'V'), (0xA65C, 'M', u'ꙝ'), (0xA65D, 'V'), (0xA65E, 'M', u'ꙟ'), (0xA65F, 'V'), (0xA660, 'M', u'ꙡ'), (0xA661, 'V'), (0xA662, 'M', u'ꙣ'), (0xA663, 'V'), (0xA664, 'M', u'ꙥ'), (0xA665, 'V'), (0xA666, 'M', u'ꙧ'), (0xA667, 'V'), (0xA668, 'M', u'ꙩ'), (0xA669, 'V'), (0xA66A, 'M', u'ꙫ'), (0xA66B, 'V'), (0xA66C, 'M', u'ꙭ'), (0xA66D, 'V'), (0xA680, 'M', u'ꚁ'), (0xA681, 'V'), (0xA682, 'M', u'ꚃ'), (0xA683, 'V'), (0xA684, 'M', u'ꚅ'), (0xA685, 'V'), (0xA686, 'M', u'ꚇ'), (0xA687, 'V'), (0xA688, 'M', u'ꚉ'), (0xA689, 'V'), (0xA68A, 'M', u'ꚋ'), (0xA68B, 'V'), (0xA68C, 'M', u'ꚍ'), (0xA68D, 'V'), (0xA68E, 'M', u'ꚏ'), (0xA68F, 'V'), (0xA690, 'M', u'ꚑ'), (0xA691, 'V'), (0xA692, 'M', u'ꚓ'), (0xA693, 'V'), (0xA694, 'M', u'ꚕ'), (0xA695, 'V'), (0xA696, 'M', u'ꚗ'), (0xA697, 'V'), (0xA698, 'X'), (0xA69F, 'V'), (0xA6F8, 'X'), (0xA700, 'V'), (0xA722, 'M', u'ꜣ'), (0xA723, 'V'), (0xA724, 'M', u'ꜥ'), (0xA725, 'V'), (0xA726, 'M', u'ꜧ'), (0xA727, 'V'), (0xA728, 'M', u'ꜩ'), (0xA729, 'V'), (0xA72A, 'M', u'ꜫ'), (0xA72B, 'V'), (0xA72C, 'M', u'ꜭ'), (0xA72D, 'V'), (0xA72E, 'M', u'ꜯ'), (0xA72F, 'V'), (0xA732, 'M', u'ꜳ'), (0xA733, 'V'), ] def _seg_36(): return [ (0xA734, 'M', u'ꜵ'), (0xA735, 'V'), (0xA736, 'M', 
u'ꜷ'), (0xA737, 'V'), (0xA738, 'M', u'ꜹ'), (0xA739, 'V'), (0xA73A, 'M', u'ꜻ'), (0xA73B, 'V'), (0xA73C, 'M', u'ꜽ'), (0xA73D, 'V'), (0xA73E, 'M', u'ꜿ'), (0xA73F, 'V'), (0xA740, 'M', u'ꝁ'), (0xA741, 'V'), (0xA742, 'M', u'ꝃ'), (0xA743, 'V'), (0xA744, 'M', u'ꝅ'), (0xA745, 'V'), (0xA746, 'M', u'ꝇ'), (0xA747, 'V'), (0xA748, 'M', u'ꝉ'), (0xA749, 'V'), (0xA74A, 'M', u'ꝋ'), (0xA74B, 'V'), (0xA74C, 'M', u'ꝍ'), (0xA74D, 'V'), (0xA74E, 'M', u'ꝏ'), (0xA74F, 'V'), (0xA750, 'M', u'ꝑ'), (0xA751, 'V'), (0xA752, 'M', u'ꝓ'), (0xA753, 'V'), (0xA754, 'M', u'ꝕ'), (0xA755, 'V'), (0xA756, 'M', u'ꝗ'), (0xA757, 'V'), (0xA758, 'M', u'ꝙ'), (0xA759, 'V'), (0xA75A, 'M', u'ꝛ'), (0xA75B, 'V'), (0xA75C, 'M', u'ꝝ'), (0xA75D, 'V'), (0xA75E, 'M', u'ꝟ'), (0xA75F, 'V'), (0xA760, 'M', u'ꝡ'), (0xA761, 'V'), (0xA762, 'M', u'ꝣ'), (0xA763, 'V'), (0xA764, 'M', u'ꝥ'), (0xA765, 'V'), (0xA766, 'M', u'ꝧ'), (0xA767, 'V'), (0xA768, 'M', u'ꝩ'), (0xA769, 'V'), (0xA76A, 'M', u'ꝫ'), (0xA76B, 'V'), (0xA76C, 'M', u'ꝭ'), (0xA76D, 'V'), (0xA76E, 'M', u'ꝯ'), (0xA76F, 'V'), (0xA770, 'M', u'ꝯ'), (0xA771, 'V'), (0xA779, 'M', u'ꝺ'), (0xA77A, 'V'), (0xA77B, 'M', u'ꝼ'), (0xA77C, 'V'), (0xA77D, 'M', u'ᵹ'), (0xA77E, 'M', u'ꝿ'), (0xA77F, 'V'), (0xA780, 'M', u'ꞁ'), (0xA781, 'V'), (0xA782, 'M', u'ꞃ'), (0xA783, 'V'), (0xA784, 'M', u'ꞅ'), (0xA785, 'V'), (0xA786, 'M', u'ꞇ'), (0xA787, 'V'), (0xA78B, 'M', u'ꞌ'), (0xA78C, 'V'), (0xA78D, 'M', u'ɥ'), (0xA78E, 'V'), (0xA78F, 'X'), (0xA790, 'M', u'ꞑ'), (0xA791, 'V'), (0xA792, 'M', u'ꞓ'), (0xA793, 'V'), (0xA794, 'X'), (0xA7A0, 'M', u'ꞡ'), (0xA7A1, 'V'), (0xA7A2, 'M', u'ꞣ'), (0xA7A3, 'V'), (0xA7A4, 'M', u'ꞥ'), (0xA7A5, 'V'), (0xA7A6, 'M', u'ꞧ'), (0xA7A7, 'V'), (0xA7A8, 'M', u'ꞩ'), (0xA7A9, 'V'), (0xA7AA, 'M', u'ɦ'), (0xA7AB, 'X'), (0xA7F8, 'M', u'ħ'), ] def _seg_37(): return [ (0xA7F9, 'M', u'œ'), (0xA7FA, 'V'), (0xA82C, 'X'), (0xA830, 'V'), (0xA83A, 'X'), (0xA840, 'V'), (0xA878, 'X'), (0xA880, 'V'), (0xA8C5, 'X'), (0xA8CE, 'V'), (0xA8DA, 'X'), (0xA8E0, 'V'), (0xA8FC, 'X'), (0xA900, 'V'), 
(0xA954, 'X'), (0xA95F, 'V'), (0xA97D, 'X'), (0xA980, 'V'), (0xA9CE, 'X'), (0xA9CF, 'V'), (0xA9DA, 'X'), (0xA9DE, 'V'), (0xA9E0, 'X'), (0xAA00, 'V'), (0xAA37, 'X'), (0xAA40, 'V'), (0xAA4E, 'X'), (0xAA50, 'V'), (0xAA5A, 'X'), (0xAA5C, 'V'), (0xAA7C, 'X'), (0xAA80, 'V'), (0xAAC3, 'X'), (0xAADB, 'V'), (0xAAF7, 'X'), (0xAB01, 'V'), (0xAB07, 'X'), (0xAB09, 'V'), (0xAB0F, 'X'), (0xAB11, 'V'), (0xAB17, 'X'), (0xAB20, 'V'), (0xAB27, 'X'), (0xAB28, 'V'), (0xAB2F, 'X'), (0xABC0, 'V'), (0xABEE, 'X'), (0xABF0, 'V'), (0xABFA, 'X'), (0xAC00, 'V'), (0xD7A4, 'X'), (0xD7B0, 'V'), (0xD7C7, 'X'), (0xD7CB, 'V'), (0xD7FC, 'X'), (0xF900, 'M', u'豈'), (0xF901, 'M', u'更'), (0xF902, 'M', u'車'), (0xF903, 'M', u'賈'), (0xF904, 'M', u'滑'), (0xF905, 'M', u'串'), (0xF906, 'M', u'句'), (0xF907, 'M', u'龜'), (0xF909, 'M', u'契'), (0xF90A, 'M', u'金'), (0xF90B, 'M', u'喇'), (0xF90C, 'M', u'奈'), (0xF90D, 'M', u'懶'), (0xF90E, 'M', u'癩'), (0xF90F, 'M', u'羅'), (0xF910, 'M', u'蘿'), (0xF911, 'M', u'螺'), (0xF912, 'M', u'裸'), (0xF913, 'M', u'邏'), (0xF914, 'M', u'樂'), (0xF915, 'M', u'洛'), (0xF916, 'M', u'烙'), (0xF917, 'M', u'珞'), (0xF918, 'M', u'落'), (0xF919, 'M', u'酪'), (0xF91A, 'M', u'駱'), (0xF91B, 'M', u'亂'), (0xF91C, 'M', u'卵'), (0xF91D, 'M', u'欄'), (0xF91E, 'M', u'爛'), (0xF91F, 'M', u'蘭'), (0xF920, 'M', u'鸞'), (0xF921, 'M', u'嵐'), (0xF922, 'M', u'濫'), (0xF923, 'M', u'藍'), (0xF924, 'M', u'襤'), (0xF925, 'M', u'拉'), (0xF926, 'M', u'臘'), (0xF927, 'M', u'蠟'), (0xF928, 'M', u'廊'), (0xF929, 'M', u'朗'), (0xF92A, 'M', u'浪'), (0xF92B, 'M', u'狼'), (0xF92C, 'M', u'郎'), (0xF92D, 'M', u'來'), ] def _seg_38(): return [ (0xF92E, 'M', u'冷'), (0xF92F, 'M', u'勞'), (0xF930, 'M', u'擄'), (0xF931, 'M', u'櫓'), (0xF932, 'M', u'爐'), (0xF933, 'M', u'盧'), (0xF934, 'M', u'老'), (0xF935, 'M', u'蘆'), (0xF936, 'M', u'虜'), (0xF937, 'M', u'路'), (0xF938, 'M', u'露'), (0xF939, 'M', u'魯'), (0xF93A, 'M', u'鷺'), (0xF93B, 'M', u'碌'), (0xF93C, 'M', u'祿'), (0xF93D, 'M', u'綠'), (0xF93E, 'M', u'菉'), (0xF93F, 'M', u'錄'), (0xF940, 'M', u'鹿'), (0xF941, 'M', 
u'論'), (0xF942, 'M', u'壟'), (0xF943, 'M', u'弄'), (0xF944, 'M', u'籠'), (0xF945, 'M', u'聾'), (0xF946, 'M', u'牢'), (0xF947, 'M', u'磊'), (0xF948, 'M', u'賂'), (0xF949, 'M', u'雷'), (0xF94A, 'M', u'壘'), (0xF94B, 'M', u'屢'), (0xF94C, 'M', u'樓'), (0xF94D, 'M', u'淚'), (0xF94E, 'M', u'漏'), (0xF94F, 'M', u'累'), (0xF950, 'M', u'縷'), (0xF951, 'M', u'陋'), (0xF952, 'M', u'勒'), (0xF953, 'M', u'肋'), (0xF954, 'M', u'凜'), (0xF955, 'M', u'凌'), (0xF956, 'M', u'稜'), (0xF957, 'M', u'綾'), (0xF958, 'M', u'菱'), (0xF959, 'M', u'陵'), (0xF95A, 'M', u'讀'), (0xF95B, 'M', u'拏'), (0xF95C, 'M', u'樂'), (0xF95D, 'M', u'諾'), (0xF95E, 'M', u'丹'), (0xF95F, 'M', u'寧'), (0xF960, 'M', u'怒'), (0xF961, 'M', u'率'), (0xF962, 'M', u'異'), (0xF963, 'M', u'北'), (0xF964, 'M', u'磻'), (0xF965, 'M', u'便'), (0xF966, 'M', u'復'), (0xF967, 'M', u'不'), (0xF968, 'M', u'泌'), (0xF969, 'M', u'數'), (0xF96A, 'M', u'索'), (0xF96B, 'M', u'參'), (0xF96C, 'M', u'塞'), (0xF96D, 'M', u'省'), (0xF96E, 'M', u'葉'), (0xF96F, 'M', u'說'), (0xF970, 'M', u'殺'), (0xF971, 'M', u'辰'), (0xF972, 'M', u'沈'), (0xF973, 'M', u'拾'), (0xF974, 'M', u'若'), (0xF975, 'M', u'掠'), (0xF976, 'M', u'略'), (0xF977, 'M', u'亮'), (0xF978, 'M', u'兩'), (0xF979, 'M', u'凉'), (0xF97A, 'M', u'梁'), (0xF97B, 'M', u'糧'), (0xF97C, 'M', u'良'), (0xF97D, 'M', u'諒'), (0xF97E, 'M', u'量'), (0xF97F, 'M', u'勵'), (0xF980, 'M', u'呂'), (0xF981, 'M', u'女'), (0xF982, 'M', u'廬'), (0xF983, 'M', u'旅'), (0xF984, 'M', u'濾'), (0xF985, 'M', u'礪'), (0xF986, 'M', u'閭'), (0xF987, 'M', u'驪'), (0xF988, 'M', u'麗'), (0xF989, 'M', u'黎'), (0xF98A, 'M', u'力'), (0xF98B, 'M', u'曆'), (0xF98C, 'M', u'歷'), (0xF98D, 'M', u'轢'), (0xF98E, 'M', u'年'), (0xF98F, 'M', u'憐'), (0xF990, 'M', u'戀'), (0xF991, 'M', u'撚'), ] def _seg_39(): return [ (0xF992, 'M', u'漣'), (0xF993, 'M', u'煉'), (0xF994, 'M', u'璉'), (0xF995, 'M', u'秊'), (0xF996, 'M', u'練'), (0xF997, 'M', u'聯'), (0xF998, 'M', u'輦'), (0xF999, 'M', u'蓮'), (0xF99A, 'M', u'連'), (0xF99B, 'M', u'鍊'), (0xF99C, 'M', u'列'), (0xF99D, 'M', u'劣'), (0xF99E, 'M', u'咽'), (0xF99F, 'M', 
u'烈'), (0xF9A0, 'M', u'裂'), (0xF9A1, 'M', u'說'), (0xF9A2, 'M', u'廉'), (0xF9A3, 'M', u'念'), (0xF9A4, 'M', u'捻'), (0xF9A5, 'M', u'殮'), (0xF9A6, 'M', u'簾'), (0xF9A7, 'M', u'獵'), (0xF9A8, 'M', u'令'), (0xF9A9, 'M', u'囹'), (0xF9AA, 'M', u'寧'), (0xF9AB, 'M', u'嶺'), (0xF9AC, 'M', u'怜'), (0xF9AD, 'M', u'玲'), (0xF9AE, 'M', u'瑩'), (0xF9AF, 'M', u'羚'), (0xF9B0, 'M', u'聆'), (0xF9B1, 'M', u'鈴'), (0xF9B2, 'M', u'零'), (0xF9B3, 'M', u'靈'), (0xF9B4, 'M', u'領'), (0xF9B5, 'M', u'例'), (0xF9B6, 'M', u'禮'), (0xF9B7, 'M', u'醴'), (0xF9B8, 'M', u'隸'), (0xF9B9, 'M', u'惡'), (0xF9BA, 'M', u'了'), (0xF9BB, 'M', u'僚'), (0xF9BC, 'M', u'寮'), (0xF9BD, 'M', u'尿'), (0xF9BE, 'M', u'料'), (0xF9BF, 'M', u'樂'), (0xF9C0, 'M', u'燎'), (0xF9C1, 'M', u'療'), (0xF9C2, 'M', u'蓼'), (0xF9C3, 'M', u'遼'), (0xF9C4, 'M', u'龍'), (0xF9C5, 'M', u'暈'), (0xF9C6, 'M', u'阮'), (0xF9C7, 'M', u'劉'), (0xF9C8, 'M', u'杻'), (0xF9C9, 'M', u'柳'), (0xF9CA, 'M', u'流'), (0xF9CB, 'M', u'溜'), (0xF9CC, 'M', u'琉'), (0xF9CD, 'M', u'留'), (0xF9CE, 'M', u'硫'), (0xF9CF, 'M', u'紐'), (0xF9D0, 'M', u'類'), (0xF9D1, 'M', u'六'), (0xF9D2, 'M', u'戮'), (0xF9D3, 'M', u'陸'), (0xF9D4, 'M', u'倫'), (0xF9D5, 'M', u'崙'), (0xF9D6, 'M', u'淪'), (0xF9D7, 'M', u'輪'), (0xF9D8, 'M', u'律'), (0xF9D9, 'M', u'慄'), (0xF9DA, 'M', u'栗'), (0xF9DB, 'M', u'率'), (0xF9DC, 'M', u'隆'), (0xF9DD, 'M', u'利'), (0xF9DE, 'M', u'吏'), (0xF9DF, 'M', u'履'), (0xF9E0, 'M', u'易'), (0xF9E1, 'M', u'李'), (0xF9E2, 'M', u'梨'), (0xF9E3, 'M', u'泥'), (0xF9E4, 'M', u'理'), (0xF9E5, 'M', u'痢'), (0xF9E6, 'M', u'罹'), (0xF9E7, 'M', u'裏'), (0xF9E8, 'M', u'裡'), (0xF9E9, 'M', u'里'), (0xF9EA, 'M', u'離'), (0xF9EB, 'M', u'匿'), (0xF9EC, 'M', u'溺'), (0xF9ED, 'M', u'吝'), (0xF9EE, 'M', u'燐'), (0xF9EF, 'M', u'璘'), (0xF9F0, 'M', u'藺'), (0xF9F1, 'M', u'隣'), (0xF9F2, 'M', u'鱗'), (0xF9F3, 'M', u'麟'), (0xF9F4, 'M', u'林'), (0xF9F5, 'M', u'淋'), ] def _seg_40(): return [ (0xF9F6, 'M', u'臨'), (0xF9F7, 'M', u'立'), (0xF9F8, 'M', u'笠'), (0xF9F9, 'M', u'粒'), (0xF9FA, 'M', u'狀'), (0xF9FB, 'M', u'炙'), (0xF9FC, 'M', u'識'), (0xF9FD, 'M', 
u'什'), (0xF9FE, 'M', u'茶'), (0xF9FF, 'M', u'刺'), (0xFA00, 'M', u'切'), (0xFA01, 'M', u'度'), (0xFA02, 'M', u'拓'), (0xFA03, 'M', u'糖'), (0xFA04, 'M', u'宅'), (0xFA05, 'M', u'洞'), (0xFA06, 'M', u'暴'), (0xFA07, 'M', u'輻'), (0xFA08, 'M', u'行'), (0xFA09, 'M', u'降'), (0xFA0A, 'M', u'見'), (0xFA0B, 'M', u'廓'), (0xFA0C, 'M', u'兀'), (0xFA0D, 'M', u'嗀'), (0xFA0E, 'V'), (0xFA10, 'M', u'塚'), (0xFA11, 'V'), (0xFA12, 'M', u'晴'), (0xFA13, 'V'), (0xFA15, 'M', u'凞'), (0xFA16, 'M', u'猪'), (0xFA17, 'M', u'益'), (0xFA18, 'M', u'礼'), (0xFA19, 'M', u'神'), (0xFA1A, 'M', u'祥'), (0xFA1B, 'M', u'福'), (0xFA1C, 'M', u'靖'), (0xFA1D, 'M', u'精'), (0xFA1E, 'M', u'羽'), (0xFA1F, 'V'), (0xFA20, 'M', u'蘒'), (0xFA21, 'V'), (0xFA22, 'M', u'諸'), (0xFA23, 'V'), (0xFA25, 'M', u'逸'), (0xFA26, 'M', u'都'), (0xFA27, 'V'), (0xFA2A, 'M', u'飯'), (0xFA2B, 'M', u'飼'), (0xFA2C, 'M', u'館'), (0xFA2D, 'M', u'鶴'), (0xFA2E, 'M', u'郞'), (0xFA2F, 'M', u'隷'), (0xFA30, 'M', u'侮'), (0xFA31, 'M', u'僧'), (0xFA32, 'M', u'免'), (0xFA33, 'M', u'勉'), (0xFA34, 'M', u'勤'), (0xFA35, 'M', u'卑'), (0xFA36, 'M', u'喝'), (0xFA37, 'M', u'嘆'), (0xFA38, 'M', u'器'), (0xFA39, 'M', u'塀'), (0xFA3A, 'M', u'墨'), (0xFA3B, 'M', u'層'), (0xFA3C, 'M', u'屮'), (0xFA3D, 'M', u'悔'), (0xFA3E, 'M', u'慨'), (0xFA3F, 'M', u'憎'), (0xFA40, 'M', u'懲'), (0xFA41, 'M', u'敏'), (0xFA42, 'M', u'既'), (0xFA43, 'M', u'暑'), (0xFA44, 'M', u'梅'), (0xFA45, 'M', u'海'), (0xFA46, 'M', u'渚'), (0xFA47, 'M', u'漢'), (0xFA48, 'M', u'煮'), (0xFA49, 'M', u'爫'), (0xFA4A, 'M', u'琢'), (0xFA4B, 'M', u'碑'), (0xFA4C, 'M', u'社'), (0xFA4D, 'M', u'祉'), (0xFA4E, 'M', u'祈'), (0xFA4F, 'M', u'祐'), (0xFA50, 'M', u'祖'), (0xFA51, 'M', u'祝'), (0xFA52, 'M', u'禍'), (0xFA53, 'M', u'禎'), (0xFA54, 'M', u'穀'), (0xFA55, 'M', u'突'), (0xFA56, 'M', u'節'), (0xFA57, 'M', u'練'), (0xFA58, 'M', u'縉'), (0xFA59, 'M', u'繁'), (0xFA5A, 'M', u'署'), (0xFA5B, 'M', u'者'), (0xFA5C, 'M', u'臭'), (0xFA5D, 'M', u'艹'), (0xFA5F, 'M', u'著'), ] def _seg_41(): return [ (0xFA60, 'M', u'褐'), (0xFA61, 'M', u'視'), (0xFA62, 'M', u'謁'), (0xFA63, 'M', 
u'謹'), (0xFA64, 'M', u'賓'), (0xFA65, 'M', u'贈'), (0xFA66, 'M', u'辶'), (0xFA67, 'M', u'逸'), (0xFA68, 'M', u'難'), (0xFA69, 'M', u'響'), (0xFA6A, 'M', u'頻'), (0xFA6B, 'M', u'恵'), (0xFA6C, 'M', u'𤋮'), (0xFA6D, 'M', u'舘'), (0xFA6E, 'X'), (0xFA70, 'M', u'並'), (0xFA71, 'M', u'况'), (0xFA72, 'M', u'全'), (0xFA73, 'M', u'侀'), (0xFA74, 'M', u'充'), (0xFA75, 'M', u'冀'), (0xFA76, 'M', u'勇'), (0xFA77, 'M', u'勺'), (0xFA78, 'M', u'喝'), (0xFA79, 'M', u'啕'), (0xFA7A, 'M', u'喙'), (0xFA7B, 'M', u'嗢'), (0xFA7C, 'M', u'塚'), (0xFA7D, 'M', u'墳'), (0xFA7E, 'M', u'奄'), (0xFA7F, 'M', u'奔'), (0xFA80, 'M', u'婢'), (0xFA81, 'M', u'嬨'), (0xFA82, 'M', u'廒'), (0xFA83, 'M', u'廙'), (0xFA84, 'M', u'彩'), (0xFA85, 'M', u'徭'), (0xFA86, 'M', u'惘'), (0xFA87, 'M', u'慎'), (0xFA88, 'M', u'愈'), (0xFA89, 'M', u'憎'), (0xFA8A, 'M', u'慠'), (0xFA8B, 'M', u'懲'), (0xFA8C, 'M', u'戴'), (0xFA8D, 'M', u'揄'), (0xFA8E, 'M', u'搜'), (0xFA8F, 'M', u'摒'), (0xFA90, 'M', u'敖'), (0xFA91, 'M', u'晴'), (0xFA92, 'M', u'朗'), (0xFA93, 'M', u'望'), (0xFA94, 'M', u'杖'), (0xFA95, 'M', u'歹'), (0xFA96, 'M', u'殺'), (0xFA97, 'M', u'流'), (0xFA98, 'M', u'滛'), (0xFA99, 'M', u'滋'), (0xFA9A, 'M', u'漢'), (0xFA9B, 'M', u'瀞'), (0xFA9C, 'M', u'煮'), (0xFA9D, 'M', u'瞧'), (0xFA9E, 'M', u'爵'), (0xFA9F, 'M', u'犯'), (0xFAA0, 'M', u'猪'), (0xFAA1, 'M', u'瑱'), (0xFAA2, 'M', u'甆'), (0xFAA3, 'M', u'画'), (0xFAA4, 'M', u'瘝'), (0xFAA5, 'M', u'瘟'), (0xFAA6, 'M', u'益'), (0xFAA7, 'M', u'盛'), (0xFAA8, 'M', u'直'), (0xFAA9, 'M', u'睊'), (0xFAAA, 'M', u'着'), (0xFAAB, 'M', u'磌'), (0xFAAC, 'M', u'窱'), (0xFAAD, 'M', u'節'), (0xFAAE, 'M', u'类'), (0xFAAF, 'M', u'絛'), (0xFAB0, 'M', u'練'), (0xFAB1, 'M', u'缾'), (0xFAB2, 'M', u'者'), (0xFAB3, 'M', u'荒'), (0xFAB4, 'M', u'華'), (0xFAB5, 'M', u'蝹'), (0xFAB6, 'M', u'襁'), (0xFAB7, 'M', u'覆'), (0xFAB8, 'M', u'視'), (0xFAB9, 'M', u'調'), (0xFABA, 'M', u'諸'), (0xFABB, 'M', u'請'), (0xFABC, 'M', u'謁'), (0xFABD, 'M', u'諾'), (0xFABE, 'M', u'諭'), (0xFABF, 'M', u'謹'), (0xFAC0, 'M', u'變'), (0xFAC1, 'M', u'贈'), (0xFAC2, 'M', u'輸'), (0xFAC3, 'M', u'遲'), 
(0xFAC4, 'M', u'醙'), ] def _seg_42(): return [ (0xFAC5, 'M', u'鉶'), (0xFAC6, 'M', u'陼'), (0xFAC7, 'M', u'難'), (0xFAC8, 'M', u'靖'), (0xFAC9, 'M', u'韛'), (0xFACA, 'M', u'響'), (0xFACB, 'M', u'頋'), (0xFACC, 'M', u'頻'), (0xFACD, 'M', u'鬒'), (0xFACE, 'M', u'龜'), (0xFACF, 'M', u'𢡊'), (0xFAD0, 'M', u'𢡄'), (0xFAD1, 'M', u'𣏕'), (0xFAD2, 'M', u'㮝'), (0xFAD3, 'M', u'䀘'), (0xFAD4, 'M', u'䀹'), (0xFAD5, 'M', u'𥉉'), (0xFAD6, 'M', u'𥳐'), (0xFAD7, 'M', u'𧻓'), (0xFAD8, 'M', u'齃'), (0xFAD9, 'M', u'龎'), (0xFADA, 'X'), (0xFB00, 'M', u'ff'), (0xFB01, 'M', u'fi'), (0xFB02, 'M', u'fl'), (0xFB03, 'M', u'ffi'), (0xFB04, 'M', u'ffl'), (0xFB05, 'M', u'st'), (0xFB07, 'X'), (0xFB13, 'M', u'մն'), (0xFB14, 'M', u'մե'), (0xFB15, 'M', u'մի'), (0xFB16, 'M', u'վն'), (0xFB17, 'M', u'մխ'), (0xFB18, 'X'), (0xFB1D, 'M', u'יִ'), (0xFB1E, 'V'), (0xFB1F, 'M', u'ײַ'), (0xFB20, 'M', u'ע'), (0xFB21, 'M', u'א'), (0xFB22, 'M', u'ד'), (0xFB23, 'M', u'ה'), (0xFB24, 'M', u'כ'), (0xFB25, 'M', u'ל'), (0xFB26, 'M', u'ם'), (0xFB27, 'M', u'ר'), (0xFB28, 'M', u'ת'), (0xFB29, '3', u'+'), (0xFB2A, 'M', u'שׁ'), (0xFB2B, 'M', u'שׂ'), (0xFB2C, 'M', u'שּׁ'), (0xFB2D, 'M', u'שּׂ'), (0xFB2E, 'M', u'אַ'), (0xFB2F, 'M', u'אָ'), (0xFB30, 'M', u'אּ'), (0xFB31, 'M', u'בּ'), (0xFB32, 'M', u'גּ'), (0xFB33, 'M', u'דּ'), (0xFB34, 'M', u'הּ'), (0xFB35, 'M', u'וּ'), (0xFB36, 'M', u'זּ'), (0xFB37, 'X'), (0xFB38, 'M', u'טּ'), (0xFB39, 'M', u'יּ'), (0xFB3A, 'M', u'ךּ'), (0xFB3B, 'M', u'כּ'), (0xFB3C, 'M', u'לּ'), (0xFB3D, 'X'), (0xFB3E, 'M', u'מּ'), (0xFB3F, 'X'), (0xFB40, 'M', u'נּ'), (0xFB41, 'M', u'סּ'), (0xFB42, 'X'), (0xFB43, 'M', u'ףּ'), (0xFB44, 'M', u'פּ'), (0xFB45, 'X'), (0xFB46, 'M', u'צּ'), (0xFB47, 'M', u'קּ'), (0xFB48, 'M', u'רּ'), (0xFB49, 'M', u'שּ'), (0xFB4A, 'M', u'תּ'), (0xFB4B, 'M', u'וֹ'), (0xFB4C, 'M', u'בֿ'), (0xFB4D, 'M', u'כֿ'), (0xFB4E, 'M', u'פֿ'), (0xFB4F, 'M', u'אל'), (0xFB50, 'M', u'ٱ'), (0xFB52, 'M', u'ٻ'), (0xFB56, 'M', u'پ'), (0xFB5A, 'M', u'ڀ'), (0xFB5E, 'M', u'ٺ'), (0xFB62, 'M', u'ٿ'), (0xFB66, 'M', u'ٹ'), 
(0xFB6A, 'M', u'ڤ'), (0xFB6E, 'M', u'ڦ'), (0xFB72, 'M', u'ڄ'), (0xFB76, 'M', u'ڃ'), (0xFB7A, 'M', u'چ'), (0xFB7E, 'M', u'ڇ'), (0xFB82, 'M', u'ڍ'), ] def _seg_43(): return [ (0xFB84, 'M', u'ڌ'), (0xFB86, 'M', u'ڎ'), (0xFB88, 'M', u'ڈ'), (0xFB8A, 'M', u'ژ'), (0xFB8C, 'M', u'ڑ'), (0xFB8E, 'M', u'ک'), (0xFB92, 'M', u'گ'), (0xFB96, 'M', u'ڳ'), (0xFB9A, 'M', u'ڱ'), (0xFB9E, 'M', u'ں'), (0xFBA0, 'M', u'ڻ'), (0xFBA4, 'M', u'ۀ'), (0xFBA6, 'M', u'ہ'), (0xFBAA, 'M', u'ھ'), (0xFBAE, 'M', u'ے'), (0xFBB0, 'M', u'ۓ'), (0xFBB2, 'V'), (0xFBC2, 'X'), (0xFBD3, 'M', u'ڭ'), (0xFBD7, 'M', u'ۇ'), (0xFBD9, 'M', u'ۆ'), (0xFBDB, 'M', u'ۈ'), (0xFBDD, 'M', u'ۇٴ'), (0xFBDE, 'M', u'ۋ'), (0xFBE0, 'M', u'ۅ'), (0xFBE2, 'M', u'ۉ'), (0xFBE4, 'M', u'ې'), (0xFBE8, 'M', u'ى'), (0xFBEA, 'M', u'ئا'), (0xFBEC, 'M', u'ئە'), (0xFBEE, 'M', u'ئو'), (0xFBF0, 'M', u'ئۇ'), (0xFBF2, 'M', u'ئۆ'), (0xFBF4, 'M', u'ئۈ'), (0xFBF6, 'M', u'ئې'), (0xFBF9, 'M', u'ئى'), (0xFBFC, 'M', u'ی'), (0xFC00, 'M', u'ئج'), (0xFC01, 'M', u'ئح'), (0xFC02, 'M', u'ئم'), (0xFC03, 'M', u'ئى'), (0xFC04, 'M', u'ئي'), (0xFC05, 'M', u'بج'), (0xFC06, 'M', u'بح'), (0xFC07, 'M', u'بخ'), (0xFC08, 'M', u'بم'), (0xFC09, 'M', u'بى'), (0xFC0A, 'M', u'بي'), (0xFC0B, 'M', u'تج'), (0xFC0C, 'M', u'تح'), (0xFC0D, 'M', u'تخ'), (0xFC0E, 'M', u'تم'), (0xFC0F, 'M', u'تى'), (0xFC10, 'M', u'تي'), (0xFC11, 'M', u'ثج'), (0xFC12, 'M', u'ثم'), (0xFC13, 'M', u'ثى'), (0xFC14, 'M', u'ثي'), (0xFC15, 'M', u'جح'), (0xFC16, 'M', u'جم'), (0xFC17, 'M', u'حج'), (0xFC18, 'M', u'حم'), (0xFC19, 'M', u'خج'), (0xFC1A, 'M', u'خح'), (0xFC1B, 'M', u'خم'), (0xFC1C, 'M', u'سج'), (0xFC1D, 'M', u'سح'), (0xFC1E, 'M', u'سخ'), (0xFC1F, 'M', u'سم'), (0xFC20, 'M', u'صح'), (0xFC21, 'M', u'صم'), (0xFC22, 'M', u'ضج'), (0xFC23, 'M', u'ضح'), (0xFC24, 'M', u'ضخ'), (0xFC25, 'M', u'ضم'), (0xFC26, 'M', u'طح'), (0xFC27, 'M', u'طم'), (0xFC28, 'M', u'ظم'), (0xFC29, 'M', u'عج'), (0xFC2A, 'M', u'عم'), (0xFC2B, 'M', u'غج'), (0xFC2C, 'M', u'غم'), (0xFC2D, 'M', u'فج'), (0xFC2E, 'M', u'فح'), (0xFC2F, 'M', 
u'فخ'), (0xFC30, 'M', u'فم'), (0xFC31, 'M', u'فى'), (0xFC32, 'M', u'في'), (0xFC33, 'M', u'قح'), (0xFC34, 'M', u'قم'), (0xFC35, 'M', u'قى'), (0xFC36, 'M', u'قي'), (0xFC37, 'M', u'كا'), (0xFC38, 'M', u'كج'), (0xFC39, 'M', u'كح'), (0xFC3A, 'M', u'كخ'), (0xFC3B, 'M', u'كل'), (0xFC3C, 'M', u'كم'), (0xFC3D, 'M', u'كى'), (0xFC3E, 'M', u'كي'), ] def _seg_44(): return [ (0xFC3F, 'M', u'لج'), (0xFC40, 'M', u'لح'), (0xFC41, 'M', u'لخ'), (0xFC42, 'M', u'لم'), (0xFC43, 'M', u'لى'), (0xFC44, 'M', u'لي'), (0xFC45, 'M', u'مج'), (0xFC46, 'M', u'مح'), (0xFC47, 'M', u'مخ'), (0xFC48, 'M', u'مم'), (0xFC49, 'M', u'مى'), (0xFC4A, 'M', u'مي'), (0xFC4B, 'M', u'نج'), (0xFC4C, 'M', u'نح'), (0xFC4D, 'M', u'نخ'), (0xFC4E, 'M', u'نم'), (0xFC4F, 'M', u'نى'), (0xFC50, 'M', u'ني'), (0xFC51, 'M', u'هج'), (0xFC52, 'M', u'هم'), (0xFC53, 'M', u'هى'), (0xFC54, 'M', u'هي'), (0xFC55, 'M', u'يج'), (0xFC56, 'M', u'يح'), (0xFC57, 'M', u'يخ'), (0xFC58, 'M', u'يم'), (0xFC59, 'M', u'يى'), (0xFC5A, 'M', u'يي'), (0xFC5B, 'M', u'ذٰ'), (0xFC5C, 'M', u'رٰ'), (0xFC5D, 'M', u'ىٰ'), (0xFC5E, '3', u' ٌّ'), (0xFC5F, '3', u' ٍّ'), (0xFC60, '3', u' َّ'), (0xFC61, '3', u' ُّ'), (0xFC62, '3', u' ِّ'), (0xFC63, '3', u' ّٰ'), (0xFC64, 'M', u'ئر'), (0xFC65, 'M', u'ئز'), (0xFC66, 'M', u'ئم'), (0xFC67, 'M', u'ئن'), (0xFC68, 'M', u'ئى'), (0xFC69, 'M', u'ئي'), (0xFC6A, 'M', u'بر'), (0xFC6B, 'M', u'بز'), (0xFC6C, 'M', u'بم'), (0xFC6D, 'M', u'بن'), (0xFC6E, 'M', u'بى'), (0xFC6F, 'M', u'بي'), (0xFC70, 'M', u'تر'), (0xFC71, 'M', u'تز'), (0xFC72, 'M', u'تم'), (0xFC73, 'M', u'تن'), (0xFC74, 'M', u'تى'), (0xFC75, 'M', u'تي'), (0xFC76, 'M', u'ثر'), (0xFC77, 'M', u'ثز'), (0xFC78, 'M', u'ثم'), (0xFC79, 'M', u'ثن'), (0xFC7A, 'M', u'ثى'), (0xFC7B, 'M', u'ثي'), (0xFC7C, 'M', u'فى'), (0xFC7D, 'M', u'في'), (0xFC7E, 'M', u'قى'), (0xFC7F, 'M', u'قي'), (0xFC80, 'M', u'كا'), (0xFC81, 'M', u'كل'), (0xFC82, 'M', u'كم'), (0xFC83, 'M', u'كى'), (0xFC84, 'M', u'كي'), (0xFC85, 'M', u'لم'), (0xFC86, 'M', u'لى'), (0xFC87, 'M', u'لي'), (0xFC88, 'M', u'ما'), 
(0xFC89, 'M', u'مم'), (0xFC8A, 'M', u'نر'), (0xFC8B, 'M', u'نز'), (0xFC8C, 'M', u'نم'), (0xFC8D, 'M', u'نن'), (0xFC8E, 'M', u'نى'), (0xFC8F, 'M', u'ني'), (0xFC90, 'M', u'ىٰ'), (0xFC91, 'M', u'ير'), (0xFC92, 'M', u'يز'), (0xFC93, 'M', u'يم'), (0xFC94, 'M', u'ين'), (0xFC95, 'M', u'يى'), (0xFC96, 'M', u'يي'), (0xFC97, 'M', u'ئج'), (0xFC98, 'M', u'ئح'), (0xFC99, 'M', u'ئخ'), (0xFC9A, 'M', u'ئم'), (0xFC9B, 'M', u'ئه'), (0xFC9C, 'M', u'بج'), (0xFC9D, 'M', u'بح'), (0xFC9E, 'M', u'بخ'), (0xFC9F, 'M', u'بم'), (0xFCA0, 'M', u'به'), (0xFCA1, 'M', u'تج'), (0xFCA2, 'M', u'تح'), ] def _seg_45(): return [ (0xFCA3, 'M', u'تخ'), (0xFCA4, 'M', u'تم'), (0xFCA5, 'M', u'ته'), (0xFCA6, 'M', u'ثم'), (0xFCA7, 'M', u'جح'), (0xFCA8, 'M', u'جم'), (0xFCA9, 'M', u'حج'), (0xFCAA, 'M', u'حم'), (0xFCAB, 'M', u'خج'), (0xFCAC, 'M', u'خم'), (0xFCAD, 'M', u'سج'), (0xFCAE, 'M', u'سح'), (0xFCAF, 'M', u'سخ'), (0xFCB0, 'M', u'سم'), (0xFCB1, 'M', u'صح'), (0xFCB2, 'M', u'صخ'), (0xFCB3, 'M', u'صم'), (0xFCB4, 'M', u'ضج'), (0xFCB5, 'M', u'ضح'), (0xFCB6, 'M', u'ضخ'), (0xFCB7, 'M', u'ضم'), (0xFCB8, 'M', u'طح'), (0xFCB9, 'M', u'ظم'), (0xFCBA, 'M', u'عج'), (0xFCBB, 'M', u'عم'), (0xFCBC, 'M', u'غج'), (0xFCBD, 'M', u'غم'), (0xFCBE, 'M', u'فج'), (0xFCBF, 'M', u'فح'), (0xFCC0, 'M', u'فخ'), (0xFCC1, 'M', u'فم'), (0xFCC2, 'M', u'قح'), (0xFCC3, 'M', u'قم'), (0xFCC4, 'M', u'كج'), (0xFCC5, 'M', u'كح'), (0xFCC6, 'M', u'كخ'), (0xFCC7, 'M', u'كل'), (0xFCC8, 'M', u'كم'), (0xFCC9, 'M', u'لج'), (0xFCCA, 'M', u'لح'), (0xFCCB, 'M', u'لخ'), (0xFCCC, 'M', u'لم'), (0xFCCD, 'M', u'له'), (0xFCCE, 'M', u'مج'), (0xFCCF, 'M', u'مح'), (0xFCD0, 'M', u'مخ'), (0xFCD1, 'M', u'مم'), (0xFCD2, 'M', u'نج'), (0xFCD3, 'M', u'نح'), (0xFCD4, 'M', u'نخ'), (0xFCD5, 'M', u'نم'), (0xFCD6, 'M', u'نه'), (0xFCD7, 'M', u'هج'), (0xFCD8, 'M', u'هم'), (0xFCD9, 'M', u'هٰ'), (0xFCDA, 'M', u'يج'), (0xFCDB, 'M', u'يح'), (0xFCDC, 'M', u'يخ'), (0xFCDD, 'M', u'يم'), (0xFCDE, 'M', u'يه'), (0xFCDF, 'M', u'ئم'), (0xFCE0, 'M', u'ئه'), (0xFCE1, 'M', u'بم'), (0xFCE2, 'M', 
u'به'), (0xFCE3, 'M', u'تم'), (0xFCE4, 'M', u'ته'), (0xFCE5, 'M', u'ثم'), (0xFCE6, 'M', u'ثه'), (0xFCE7, 'M', u'سم'), (0xFCE8, 'M', u'سه'), (0xFCE9, 'M', u'شم'), (0xFCEA, 'M', u'شه'), (0xFCEB, 'M', u'كل'), (0xFCEC, 'M', u'كم'), (0xFCED, 'M', u'لم'), (0xFCEE, 'M', u'نم'), (0xFCEF, 'M', u'نه'), (0xFCF0, 'M', u'يم'), (0xFCF1, 'M', u'يه'), (0xFCF2, 'M', u'ـَّ'), (0xFCF3, 'M', u'ـُّ'), (0xFCF4, 'M', u'ـِّ'), (0xFCF5, 'M', u'طى'), (0xFCF6, 'M', u'طي'), (0xFCF7, 'M', u'عى'), (0xFCF8, 'M', u'عي'), (0xFCF9, 'M', u'غى'), (0xFCFA, 'M', u'غي'), (0xFCFB, 'M', u'سى'), (0xFCFC, 'M', u'سي'), (0xFCFD, 'M', u'شى'), (0xFCFE, 'M', u'شي'), (0xFCFF, 'M', u'حى'), (0xFD00, 'M', u'حي'), (0xFD01, 'M', u'جى'), (0xFD02, 'M', u'جي'), (0xFD03, 'M', u'خى'), (0xFD04, 'M', u'خي'), (0xFD05, 'M', u'صى'), (0xFD06, 'M', u'صي'), ] def _seg_46(): return [ (0xFD07, 'M', u'ضى'), (0xFD08, 'M', u'ضي'), (0xFD09, 'M', u'شج'), (0xFD0A, 'M', u'شح'), (0xFD0B, 'M', u'شخ'), (0xFD0C, 'M', u'شم'), (0xFD0D, 'M', u'شر'), (0xFD0E, 'M', u'سر'), (0xFD0F, 'M', u'صر'), (0xFD10, 'M', u'ضر'), (0xFD11, 'M', u'طى'), (0xFD12, 'M', u'طي'), (0xFD13, 'M', u'عى'), (0xFD14, 'M', u'عي'), (0xFD15, 'M', u'غى'), (0xFD16, 'M', u'غي'), (0xFD17, 'M', u'سى'), (0xFD18, 'M', u'سي'), (0xFD19, 'M', u'شى'), (0xFD1A, 'M', u'شي'), (0xFD1B, 'M', u'حى'), (0xFD1C, 'M', u'حي'), (0xFD1D, 'M', u'جى'), (0xFD1E, 'M', u'جي'), (0xFD1F, 'M', u'خى'), (0xFD20, 'M', u'خي'), (0xFD21, 'M', u'صى'), (0xFD22, 'M', u'صي'), (0xFD23, 'M', u'ضى'), (0xFD24, 'M', u'ضي'), (0xFD25, 'M', u'شج'), (0xFD26, 'M', u'شح'), (0xFD27, 'M', u'شخ'), (0xFD28, 'M', u'شم'), (0xFD29, 'M', u'شر'), (0xFD2A, 'M', u'سر'), (0xFD2B, 'M', u'صر'), (0xFD2C, 'M', u'ضر'), (0xFD2D, 'M', u'شج'), (0xFD2E, 'M', u'شح'), (0xFD2F, 'M', u'شخ'), (0xFD30, 'M', u'شم'), (0xFD31, 'M', u'سه'), (0xFD32, 'M', u'شه'), (0xFD33, 'M', u'طم'), (0xFD34, 'M', u'سج'), (0xFD35, 'M', u'سح'), (0xFD36, 'M', u'سخ'), (0xFD37, 'M', u'شج'), (0xFD38, 'M', u'شح'), (0xFD39, 'M', u'شخ'), (0xFD3A, 'M', u'طم'), (0xFD3B, 'M', u'ظم'), 
(0xFD3C, 'M', u'اً'), (0xFD3E, 'V'), (0xFD40, 'X'), (0xFD50, 'M', u'تجم'), (0xFD51, 'M', u'تحج'), (0xFD53, 'M', u'تحم'), (0xFD54, 'M', u'تخم'), (0xFD55, 'M', u'تمج'), (0xFD56, 'M', u'تمح'), (0xFD57, 'M', u'تمخ'), (0xFD58, 'M', u'جمح'), (0xFD5A, 'M', u'حمي'), (0xFD5B, 'M', u'حمى'), (0xFD5C, 'M', u'سحج'), (0xFD5D, 'M', u'سجح'), (0xFD5E, 'M', u'سجى'), (0xFD5F, 'M', u'سمح'), (0xFD61, 'M', u'سمج'), (0xFD62, 'M', u'سمم'), (0xFD64, 'M', u'صحح'), (0xFD66, 'M', u'صمم'), (0xFD67, 'M', u'شحم'), (0xFD69, 'M', u'شجي'), (0xFD6A, 'M', u'شمخ'), (0xFD6C, 'M', u'شمم'), (0xFD6E, 'M', u'ضحى'), (0xFD6F, 'M', u'ضخم'), (0xFD71, 'M', u'طمح'), (0xFD73, 'M', u'طمم'), (0xFD74, 'M', u'طمي'), (0xFD75, 'M', u'عجم'), (0xFD76, 'M', u'عمم'), (0xFD78, 'M', u'عمى'), (0xFD79, 'M', u'غمم'), (0xFD7A, 'M', u'غمي'), (0xFD7B, 'M', u'غمى'), (0xFD7C, 'M', u'فخم'), (0xFD7E, 'M', u'قمح'), (0xFD7F, 'M', u'قمم'), (0xFD80, 'M', u'لحم'), (0xFD81, 'M', u'لحي'), (0xFD82, 'M', u'لحى'), (0xFD83, 'M', u'لجج'), (0xFD85, 'M', u'لخم'), (0xFD87, 'M', u'لمح'), (0xFD89, 'M', u'محج'), (0xFD8A, 'M', u'محم'), ] def _seg_47(): return [ (0xFD8B, 'M', u'محي'), (0xFD8C, 'M', u'مجح'), (0xFD8D, 'M', u'مجم'), (0xFD8E, 'M', u'مخج'), (0xFD8F, 'M', u'مخم'), (0xFD90, 'X'), (0xFD92, 'M', u'مجخ'), (0xFD93, 'M', u'همج'), (0xFD94, 'M', u'همم'), (0xFD95, 'M', u'نحم'), (0xFD96, 'M', u'نحى'), (0xFD97, 'M', u'نجم'), (0xFD99, 'M', u'نجى'), (0xFD9A, 'M', u'نمي'), (0xFD9B, 'M', u'نمى'), (0xFD9C, 'M', u'يمم'), (0xFD9E, 'M', u'بخي'), (0xFD9F, 'M', u'تجي'), (0xFDA0, 'M', u'تجى'), (0xFDA1, 'M', u'تخي'), (0xFDA2, 'M', u'تخى'), (0xFDA3, 'M', u'تمي'), (0xFDA4, 'M', u'تمى'), (0xFDA5, 'M', u'جمي'), (0xFDA6, 'M', u'جحى'), (0xFDA7, 'M', u'جمى'), (0xFDA8, 'M', u'سخى'), (0xFDA9, 'M', u'صحي'), (0xFDAA, 'M', u'شحي'), (0xFDAB, 'M', u'ضحي'), (0xFDAC, 'M', u'لجي'), (0xFDAD, 'M', u'لمي'), (0xFDAE, 'M', u'يحي'), (0xFDAF, 'M', u'يجي'), (0xFDB0, 'M', u'يمي'), (0xFDB1, 'M', u'ممي'), (0xFDB2, 'M', u'قمي'), (0xFDB3, 'M', u'نحي'), (0xFDB4, 'M', u'قمح'), (0xFDB5, 'M', 
u'لحم'), (0xFDB6, 'M', u'عمي'), (0xFDB7, 'M', u'كمي'), (0xFDB8, 'M', u'نجح'), (0xFDB9, 'M', u'مخي'), (0xFDBA, 'M', u'لجم'), (0xFDBB, 'M', u'كمم'), (0xFDBC, 'M', u'لجم'), (0xFDBD, 'M', u'نجح'), (0xFDBE, 'M', u'جحي'), (0xFDBF, 'M', u'حجي'), (0xFDC0, 'M', u'مجي'), (0xFDC1, 'M', u'فمي'), (0xFDC2, 'M', u'بحي'), (0xFDC3, 'M', u'كمم'), (0xFDC4, 'M', u'عجم'), (0xFDC5, 'M', u'صمم'), (0xFDC6, 'M', u'سخي'), (0xFDC7, 'M', u'نجي'), (0xFDC8, 'X'), (0xFDF0, 'M', u'صلے'), (0xFDF1, 'M', u'قلے'), (0xFDF2, 'M', u'الله'), (0xFDF3, 'M', u'اكبر'), (0xFDF4, 'M', u'محمد'), (0xFDF5, 'M', u'صلعم'), (0xFDF6, 'M', u'رسول'), (0xFDF7, 'M', u'عليه'), (0xFDF8, 'M', u'وسلم'), (0xFDF9, 'M', u'صلى'), (0xFDFA, '3', u'صلى الله عليه وسلم'), (0xFDFB, '3', u'جل جلاله'), (0xFDFC, 'M', u'ریال'), (0xFDFD, 'V'), (0xFDFE, 'X'), (0xFE00, 'I'), (0xFE10, '3', u','), (0xFE11, 'M', u'、'), (0xFE12, 'X'), (0xFE13, '3', u':'), (0xFE14, '3', u';'), (0xFE15, '3', u'!'), (0xFE16, '3', u'?'), (0xFE17, 'M', u'〖'), (0xFE18, 'M', u'〗'), (0xFE19, 'X'), (0xFE20, 'V'), (0xFE27, 'X'), (0xFE31, 'M', u'—'), (0xFE32, 'M', u'–'), (0xFE33, '3', u'_'), (0xFE35, '3', u'('), (0xFE36, '3', u')'), (0xFE37, '3', u'{'), (0xFE38, '3', u'}'), (0xFE39, 'M', u'〔'), (0xFE3A, 'M', u'〕'), (0xFE3B, 'M', u'【'), (0xFE3C, 'M', u'】'), (0xFE3D, 'M', u'《'), (0xFE3E, 'M', u'》'), ] def _seg_48(): return [ (0xFE3F, 'M', u'〈'), (0xFE40, 'M', u'〉'), (0xFE41, 'M', u'「'), (0xFE42, 'M', u'」'), (0xFE43, 'M', u'『'), (0xFE44, 'M', u'』'), (0xFE45, 'V'), (0xFE47, '3', u'['), (0xFE48, '3', u']'), (0xFE49, '3', u' ̅'), (0xFE4D, '3', u'_'), (0xFE50, '3', u','), (0xFE51, 'M', u'、'), (0xFE52, 'X'), (0xFE54, '3', u';'), (0xFE55, '3', u':'), (0xFE56, '3', u'?'), (0xFE57, '3', u'!'), (0xFE58, 'M', u'—'), (0xFE59, '3', u'('), (0xFE5A, '3', u')'), (0xFE5B, '3', u'{'), (0xFE5C, '3', u'}'), (0xFE5D, 'M', u'〔'), (0xFE5E, 'M', u'〕'), (0xFE5F, '3', u'#'), (0xFE60, '3', u'&'), (0xFE61, '3', u'*'), (0xFE62, '3', u'+'), (0xFE63, 'M', u'-'), (0xFE64, '3', u'<'), (0xFE65, '3', u'>'), 
(0xFE66, '3', u'='), (0xFE67, 'X'), (0xFE68, '3', u'\\'), (0xFE69, '3', u'$'), (0xFE6A, '3', u'%'), (0xFE6B, '3', u'@'), (0xFE6C, 'X'), (0xFE70, '3', u' ً'), (0xFE71, 'M', u'ـً'), (0xFE72, '3', u' ٌ'), (0xFE73, 'V'), (0xFE74, '3', u' ٍ'), (0xFE75, 'X'), (0xFE76, '3', u' َ'), (0xFE77, 'M', u'ـَ'), (0xFE78, '3', u' ُ'), (0xFE79, 'M', u'ـُ'), (0xFE7A, '3', u' ِ'), (0xFE7B, 'M', u'ـِ'), (0xFE7C, '3', u' ّ'), (0xFE7D, 'M', u'ـّ'), (0xFE7E, '3', u' ْ'), (0xFE7F, 'M', u'ـْ'), (0xFE80, 'M', u'ء'), (0xFE81, 'M', u'آ'), (0xFE83, 'M', u'أ'), (0xFE85, 'M', u'ؤ'), (0xFE87, 'M', u'إ'), (0xFE89, 'M', u'ئ'), (0xFE8D, 'M', u'ا'), (0xFE8F, 'M', u'ب'), (0xFE93, 'M', u'ة'), (0xFE95, 'M', u'ت'), (0xFE99, 'M', u'ث'), (0xFE9D, 'M', u'ج'), (0xFEA1, 'M', u'ح'), (0xFEA5, 'M', u'خ'), (0xFEA9, 'M', u'د'), (0xFEAB, 'M', u'ذ'), (0xFEAD, 'M', u'ر'), (0xFEAF, 'M', u'ز'), (0xFEB1, 'M', u'س'), (0xFEB5, 'M', u'ش'), (0xFEB9, 'M', u'ص'), (0xFEBD, 'M', u'ض'), (0xFEC1, 'M', u'ط'), (0xFEC5, 'M', u'ظ'), (0xFEC9, 'M', u'ع'), (0xFECD, 'M', u'غ'), (0xFED1, 'M', u'ف'), (0xFED5, 'M', u'ق'), (0xFED9, 'M', u'ك'), (0xFEDD, 'M', u'ل'), (0xFEE1, 'M', u'م'), (0xFEE5, 'M', u'ن'), (0xFEE9, 'M', u'ه'), (0xFEED, 'M', u'و'), (0xFEEF, 'M', u'ى'), (0xFEF1, 'M', u'ي'), (0xFEF5, 'M', u'لآ'), (0xFEF7, 'M', u'لأ'), (0xFEF9, 'M', u'لإ'), (0xFEFB, 'M', u'لا'), (0xFEFD, 'X'), (0xFEFF, 'I'), (0xFF00, 'X'), (0xFF01, '3', u'!'), (0xFF02, '3', u'"'), ] def _seg_49(): return [ (0xFF03, '3', u'#'), (0xFF04, '3', u'$'), (0xFF05, '3', u'%'), (0xFF06, '3', u'&'), (0xFF07, '3', u'\''), (0xFF08, '3', u'('), (0xFF09, '3', u')'), (0xFF0A, '3', u'*'), (0xFF0B, '3', u'+'), (0xFF0C, '3', u','), (0xFF0D, 'M', u'-'), (0xFF0E, 'M', u'.'), (0xFF0F, '3', u'/'), (0xFF10, 'M', u'0'), (0xFF11, 'M', u'1'), (0xFF12, 'M', u'2'), (0xFF13, 'M', u'3'), (0xFF14, 'M', u'4'), (0xFF15, 'M', u'5'), (0xFF16, 'M', u'6'), (0xFF17, 'M', u'7'), (0xFF18, 'M', u'8'), (0xFF19, 'M', u'9'), (0xFF1A, '3', u':'), (0xFF1B, '3', u';'), (0xFF1C, '3', u'<'), (0xFF1D, '3', u'='), 
(0xFF1E, '3', u'>'), (0xFF1F, '3', u'?'), (0xFF20, '3', u'@'), (0xFF21, 'M', u'a'), (0xFF22, 'M', u'b'), (0xFF23, 'M', u'c'), (0xFF24, 'M', u'd'), (0xFF25, 'M', u'e'), (0xFF26, 'M', u'f'), (0xFF27, 'M', u'g'), (0xFF28, 'M', u'h'), (0xFF29, 'M', u'i'), (0xFF2A, 'M', u'j'), (0xFF2B, 'M', u'k'), (0xFF2C, 'M', u'l'), (0xFF2D, 'M', u'm'), (0xFF2E, 'M', u'n'), (0xFF2F, 'M', u'o'), (0xFF30, 'M', u'p'), (0xFF31, 'M', u'q'), (0xFF32, 'M', u'r'), (0xFF33, 'M', u's'), (0xFF34, 'M', u't'), (0xFF35, 'M', u'u'), (0xFF36, 'M', u'v'), (0xFF37, 'M', u'w'), (0xFF38, 'M', u'x'), (0xFF39, 'M', u'y'), (0xFF3A, 'M', u'z'), (0xFF3B, '3', u'['), (0xFF3C, '3', u'\\'), (0xFF3D, '3', u']'), (0xFF3E, '3', u'^'), (0xFF3F, '3', u'_'), (0xFF40, '3', u'`'), (0xFF41, 'M', u'a'), (0xFF42, 'M', u'b'), (0xFF43, 'M', u'c'), (0xFF44, 'M', u'd'), (0xFF45, 'M', u'e'), (0xFF46, 'M', u'f'), (0xFF47, 'M', u'g'), (0xFF48, 'M', u'h'), (0xFF49, 'M', u'i'), (0xFF4A, 'M', u'j'), (0xFF4B, 'M', u'k'), (0xFF4C, 'M', u'l'), (0xFF4D, 'M', u'm'), (0xFF4E, 'M', u'n'), (0xFF4F, 'M', u'o'), (0xFF50, 'M', u'p'), (0xFF51, 'M', u'q'), (0xFF52, 'M', u'r'), (0xFF53, 'M', u's'), (0xFF54, 'M', u't'), (0xFF55, 'M', u'u'), (0xFF56, 'M', u'v'), (0xFF57, 'M', u'w'), (0xFF58, 'M', u'x'), (0xFF59, 'M', u'y'), (0xFF5A, 'M', u'z'), (0xFF5B, '3', u'{'), (0xFF5C, '3', u'|'), (0xFF5D, '3', u'}'), (0xFF5E, '3', u'~'), (0xFF5F, 'M', u'⦅'), (0xFF60, 'M', u'⦆'), (0xFF61, 'M', u'.'), (0xFF62, 'M', u'「'), (0xFF63, 'M', u'」'), (0xFF64, 'M', u'、'), (0xFF65, 'M', u'・'), (0xFF66, 'M', u'ヲ'), ] def _seg_50(): return [ (0xFF67, 'M', u'ァ'), (0xFF68, 'M', u'ィ'), (0xFF69, 'M', u'ゥ'), (0xFF6A, 'M', u'ェ'), (0xFF6B, 'M', u'ォ'), (0xFF6C, 'M', u'ャ'), (0xFF6D, 'M', u'ュ'), (0xFF6E, 'M', u'ョ'), (0xFF6F, 'M', u'ッ'), (0xFF70, 'M', u'ー'), (0xFF71, 'M', u'ア'), (0xFF72, 'M', u'イ'), (0xFF73, 'M', u'ウ'), (0xFF74, 'M', u'エ'), (0xFF75, 'M', u'オ'), (0xFF76, 'M', u'カ'), (0xFF77, 'M', u'キ'), (0xFF78, 'M', u'ク'), (0xFF79, 'M', u'ケ'), (0xFF7A, 'M', u'コ'), (0xFF7B, 'M', 
u'サ'), (0xFF7C, 'M', u'シ'), (0xFF7D, 'M', u'ス'), (0xFF7E, 'M', u'セ'), (0xFF7F, 'M', u'ソ'), (0xFF80, 'M', u'タ'), (0xFF81, 'M', u'チ'), (0xFF82, 'M', u'ツ'), (0xFF83, 'M', u'テ'), (0xFF84, 'M', u'ト'), (0xFF85, 'M', u'ナ'), (0xFF86, 'M', u'ニ'), (0xFF87, 'M', u'ヌ'), (0xFF88, 'M', u'ネ'), (0xFF89, 'M', u'ノ'), (0xFF8A, 'M', u'ハ'), (0xFF8B, 'M', u'ヒ'), (0xFF8C, 'M', u'フ'), (0xFF8D, 'M', u'ヘ'), (0xFF8E, 'M', u'ホ'), (0xFF8F, 'M', u'マ'), (0xFF90, 'M', u'ミ'), (0xFF91, 'M', u'ム'), (0xFF92, 'M', u'メ'), (0xFF93, 'M', u'モ'), (0xFF94, 'M', u'ヤ'), (0xFF95, 'M', u'ユ'), (0xFF96, 'M', u'ヨ'), (0xFF97, 'M', u'ラ'), (0xFF98, 'M', u'リ'), (0xFF99, 'M', u'ル'), (0xFF9A, 'M', u'レ'), (0xFF9B, 'M', u'ロ'), (0xFF9C, 'M', u'ワ'), (0xFF9D, 'M', u'ン'), (0xFF9E, 'M', u'゙'), (0xFF9F, 'M', u'゚'), (0xFFA0, 'X'), (0xFFA1, 'M', u'ᄀ'), (0xFFA2, 'M', u'ᄁ'), (0xFFA3, 'M', u'ᆪ'), (0xFFA4, 'M', u'ᄂ'), (0xFFA5, 'M', u'ᆬ'), (0xFFA6, 'M', u'ᆭ'), (0xFFA7, 'M', u'ᄃ'), (0xFFA8, 'M', u'ᄄ'), (0xFFA9, 'M', u'ᄅ'), (0xFFAA, 'M', u'ᆰ'), (0xFFAB, 'M', u'ᆱ'), (0xFFAC, 'M', u'ᆲ'), (0xFFAD, 'M', u'ᆳ'), (0xFFAE, 'M', u'ᆴ'), (0xFFAF, 'M', u'ᆵ'), (0xFFB0, 'M', u'ᄚ'), (0xFFB1, 'M', u'ᄆ'), (0xFFB2, 'M', u'ᄇ'), (0xFFB3, 'M', u'ᄈ'), (0xFFB4, 'M', u'ᄡ'), (0xFFB5, 'M', u'ᄉ'), (0xFFB6, 'M', u'ᄊ'), (0xFFB7, 'M', u'ᄋ'), (0xFFB8, 'M', u'ᄌ'), (0xFFB9, 'M', u'ᄍ'), (0xFFBA, 'M', u'ᄎ'), (0xFFBB, 'M', u'ᄏ'), (0xFFBC, 'M', u'ᄐ'), (0xFFBD, 'M', u'ᄑ'), (0xFFBE, 'M', u'ᄒ'), (0xFFBF, 'X'), (0xFFC2, 'M', u'ᅡ'), (0xFFC3, 'M', u'ᅢ'), (0xFFC4, 'M', u'ᅣ'), (0xFFC5, 'M', u'ᅤ'), (0xFFC6, 'M', u'ᅥ'), (0xFFC7, 'M', u'ᅦ'), (0xFFC8, 'X'), (0xFFCA, 'M', u'ᅧ'), (0xFFCB, 'M', u'ᅨ'), (0xFFCC, 'M', u'ᅩ'), (0xFFCD, 'M', u'ᅪ'), ] def _seg_51(): return [ (0xFFCE, 'M', u'ᅫ'), (0xFFCF, 'M', u'ᅬ'), (0xFFD0, 'X'), (0xFFD2, 'M', u'ᅭ'), (0xFFD3, 'M', u'ᅮ'), (0xFFD4, 'M', u'ᅯ'), (0xFFD5, 'M', u'ᅰ'), (0xFFD6, 'M', u'ᅱ'), (0xFFD7, 'M', u'ᅲ'), (0xFFD8, 'X'), (0xFFDA, 'M', u'ᅳ'), (0xFFDB, 'M', u'ᅴ'), (0xFFDC, 'M', u'ᅵ'), (0xFFDD, 'X'), (0xFFE0, 'M', u'¢'), (0xFFE1, 'M', u'£'), 
(0xFFE2, 'M', u'¬'), (0xFFE3, '3', u' ̄'), (0xFFE4, 'M', u'¦'), (0xFFE5, 'M', u'¥'), (0xFFE6, 'M', u'₩'), (0xFFE7, 'X'), (0xFFE8, 'M', u'│'), (0xFFE9, 'M', u'←'), (0xFFEA, 'M', u'↑'), (0xFFEB, 'M', u'→'), (0xFFEC, 'M', u'↓'), (0xFFED, 'M', u'■'), (0xFFEE, 'M', u'○'), (0xFFEF, 'X'), (0x10000, 'V'), (0x1000C, 'X'), (0x1000D, 'V'), (0x10027, 'X'), (0x10028, 'V'), (0x1003B, 'X'), (0x1003C, 'V'), (0x1003E, 'X'), (0x1003F, 'V'), (0x1004E, 'X'), (0x10050, 'V'), (0x1005E, 'X'), (0x10080, 'V'), (0x100FB, 'X'), (0x10100, 'V'), (0x10103, 'X'), (0x10107, 'V'), (0x10134, 'X'), (0x10137, 'V'), (0x1018B, 'X'), (0x10190, 'V'), (0x1019C, 'X'), (0x101D0, 'V'), (0x101FE, 'X'), (0x10280, 'V'), (0x1029D, 'X'), (0x102A0, 'V'), (0x102D1, 'X'), (0x10300, 'V'), (0x1031F, 'X'), (0x10320, 'V'), (0x10324, 'X'), (0x10330, 'V'), (0x1034B, 'X'), (0x10380, 'V'), (0x1039E, 'X'), (0x1039F, 'V'), (0x103C4, 'X'), (0x103C8, 'V'), (0x103D6, 'X'), (0x10400, 'M', u'𐐨'), (0x10401, 'M', u'𐐩'), (0x10402, 'M', u'𐐪'), (0x10403, 'M', u'𐐫'), (0x10404, 'M', u'𐐬'), (0x10405, 'M', u'𐐭'), (0x10406, 'M', u'𐐮'), (0x10407, 'M', u'𐐯'), (0x10408, 'M', u'𐐰'), (0x10409, 'M', u'𐐱'), (0x1040A, 'M', u'𐐲'), (0x1040B, 'M', u'𐐳'), (0x1040C, 'M', u'𐐴'), (0x1040D, 'M', u'𐐵'), (0x1040E, 'M', u'𐐶'), (0x1040F, 'M', u'𐐷'), (0x10410, 'M', u'𐐸'), (0x10411, 'M', u'𐐹'), (0x10412, 'M', u'𐐺'), (0x10413, 'M', u'𐐻'), (0x10414, 'M', u'𐐼'), (0x10415, 'M', u'𐐽'), (0x10416, 'M', u'𐐾'), (0x10417, 'M', u'𐐿'), (0x10418, 'M', u'𐑀'), (0x10419, 'M', u'𐑁'), (0x1041A, 'M', u'𐑂'), (0x1041B, 'M', u'𐑃'), (0x1041C, 'M', u'𐑄'), (0x1041D, 'M', u'𐑅'), ] def _seg_52(): return [ (0x1041E, 'M', u'𐑆'), (0x1041F, 'M', u'𐑇'), (0x10420, 'M', u'𐑈'), (0x10421, 'M', u'𐑉'), (0x10422, 'M', u'𐑊'), (0x10423, 'M', u'𐑋'), (0x10424, 'M', u'𐑌'), (0x10425, 'M', u'𐑍'), (0x10426, 'M', u'𐑎'), (0x10427, 'M', u'𐑏'), (0x10428, 'V'), (0x1049E, 'X'), (0x104A0, 'V'), (0x104AA, 'X'), (0x10800, 'V'), (0x10806, 'X'), (0x10808, 'V'), (0x10809, 'X'), (0x1080A, 'V'), (0x10836, 'X'), (0x10837, 
'V'), (0x10839, 'X'), (0x1083C, 'V'), (0x1083D, 'X'), (0x1083F, 'V'), (0x10856, 'X'), (0x10857, 'V'), (0x10860, 'X'), (0x10900, 'V'), (0x1091C, 'X'), (0x1091F, 'V'), (0x1093A, 'X'), (0x1093F, 'V'), (0x10940, 'X'), (0x10980, 'V'), (0x109B8, 'X'), (0x109BE, 'V'), (0x109C0, 'X'), (0x10A00, 'V'), (0x10A04, 'X'), (0x10A05, 'V'), (0x10A07, 'X'), (0x10A0C, 'V'), (0x10A14, 'X'), (0x10A15, 'V'), (0x10A18, 'X'), (0x10A19, 'V'), (0x10A34, 'X'), (0x10A38, 'V'), (0x10A3B, 'X'), (0x10A3F, 'V'), (0x10A48, 'X'), (0x10A50, 'V'), (0x10A59, 'X'), (0x10A60, 'V'), (0x10A80, 'X'), (0x10B00, 'V'), (0x10B36, 'X'), (0x10B39, 'V'), (0x10B56, 'X'), (0x10B58, 'V'), (0x10B73, 'X'), (0x10B78, 'V'), (0x10B80, 'X'), (0x10C00, 'V'), (0x10C49, 'X'), (0x10E60, 'V'), (0x10E7F, 'X'), (0x11000, 'V'), (0x1104E, 'X'), (0x11052, 'V'), (0x11070, 'X'), (0x11080, 'V'), (0x110BD, 'X'), (0x110BE, 'V'), (0x110C2, 'X'), (0x110D0, 'V'), (0x110E9, 'X'), (0x110F0, 'V'), (0x110FA, 'X'), (0x11100, 'V'), (0x11135, 'X'), (0x11136, 'V'), (0x11144, 'X'), (0x11180, 'V'), (0x111C9, 'X'), (0x111D0, 'V'), (0x111DA, 'X'), (0x11680, 'V'), (0x116B8, 'X'), (0x116C0, 'V'), (0x116CA, 'X'), (0x12000, 'V'), (0x1236F, 'X'), (0x12400, 'V'), (0x12463, 'X'), (0x12470, 'V'), (0x12474, 'X'), (0x13000, 'V'), (0x1342F, 'X'), ] def _seg_53(): return [ (0x16800, 'V'), (0x16A39, 'X'), (0x16F00, 'V'), (0x16F45, 'X'), (0x16F50, 'V'), (0x16F7F, 'X'), (0x16F8F, 'V'), (0x16FA0, 'X'), (0x1B000, 'V'), (0x1B002, 'X'), (0x1D000, 'V'), (0x1D0F6, 'X'), (0x1D100, 'V'), (0x1D127, 'X'), (0x1D129, 'V'), (0x1D15E, 'M', u'𝅗𝅥'), (0x1D15F, 'M', u'𝅘𝅥'), (0x1D160, 'M', u'𝅘𝅥𝅮'), (0x1D161, 'M', u'𝅘𝅥𝅯'), (0x1D162, 'M', u'𝅘𝅥𝅰'), (0x1D163, 'M', u'𝅘𝅥𝅱'), (0x1D164, 'M', u'𝅘𝅥𝅲'), (0x1D165, 'V'), (0x1D173, 'X'), (0x1D17B, 'V'), (0x1D1BB, 'M', u'𝆹𝅥'), (0x1D1BC, 'M', u'𝆺𝅥'), (0x1D1BD, 'M', u'𝆹𝅥𝅮'), (0x1D1BE, 'M', u'𝆺𝅥𝅮'), (0x1D1BF, 'M', u'𝆹𝅥𝅯'), (0x1D1C0, 'M', u'𝆺𝅥𝅯'), (0x1D1C1, 'V'), (0x1D1DE, 'X'), (0x1D200, 'V'), (0x1D246, 'X'), (0x1D300, 'V'), (0x1D357, 'X'), (0x1D360, 
'V'), (0x1D372, 'X'), (0x1D400, 'M', u'a'), (0x1D401, 'M', u'b'), (0x1D402, 'M', u'c'), (0x1D403, 'M', u'd'), (0x1D404, 'M', u'e'), (0x1D405, 'M', u'f'), (0x1D406, 'M', u'g'), (0x1D407, 'M', u'h'), (0x1D408, 'M', u'i'), (0x1D409, 'M', u'j'), (0x1D40A, 'M', u'k'), (0x1D40B, 'M', u'l'), (0x1D40C, 'M', u'm'), (0x1D40D, 'M', u'n'), (0x1D40E, 'M', u'o'), (0x1D40F, 'M', u'p'), (0x1D410, 'M', u'q'), (0x1D411, 'M', u'r'), (0x1D412, 'M', u's'), (0x1D413, 'M', u't'), (0x1D414, 'M', u'u'), (0x1D415, 'M', u'v'), (0x1D416, 'M', u'w'), (0x1D417, 'M', u'x'), (0x1D418, 'M', u'y'), (0x1D419, 'M', u'z'), (0x1D41A, 'M', u'a'), (0x1D41B, 'M', u'b'), (0x1D41C, 'M', u'c'), (0x1D41D, 'M', u'd'), (0x1D41E, 'M', u'e'), (0x1D41F, 'M', u'f'), (0x1D420, 'M', u'g'), (0x1D421, 'M', u'h'), (0x1D422, 'M', u'i'), (0x1D423, 'M', u'j'), (0x1D424, 'M', u'k'), (0x1D425, 'M', u'l'), (0x1D426, 'M', u'm'), (0x1D427, 'M', u'n'), (0x1D428, 'M', u'o'), (0x1D429, 'M', u'p'), (0x1D42A, 'M', u'q'), (0x1D42B, 'M', u'r'), (0x1D42C, 'M', u's'), (0x1D42D, 'M', u't'), (0x1D42E, 'M', u'u'), (0x1D42F, 'M', u'v'), (0x1D430, 'M', u'w'), (0x1D431, 'M', u'x'), (0x1D432, 'M', u'y'), (0x1D433, 'M', u'z'), (0x1D434, 'M', u'a'), (0x1D435, 'M', u'b'), (0x1D436, 'M', u'c'), (0x1D437, 'M', u'd'), (0x1D438, 'M', u'e'), (0x1D439, 'M', u'f'), (0x1D43A, 'M', u'g'), (0x1D43B, 'M', u'h'), (0x1D43C, 'M', u'i'), ] def _seg_54(): return [ (0x1D43D, 'M', u'j'), (0x1D43E, 'M', u'k'), (0x1D43F, 'M', u'l'), (0x1D440, 'M', u'm'), (0x1D441, 'M', u'n'), (0x1D442, 'M', u'o'), (0x1D443, 'M', u'p'), (0x1D444, 'M', u'q'), (0x1D445, 'M', u'r'), (0x1D446, 'M', u's'), (0x1D447, 'M', u't'), (0x1D448, 'M', u'u'), (0x1D449, 'M', u'v'), (0x1D44A, 'M', u'w'), (0x1D44B, 'M', u'x'), (0x1D44C, 'M', u'y'), (0x1D44D, 'M', u'z'), (0x1D44E, 'M', u'a'), (0x1D44F, 'M', u'b'), (0x1D450, 'M', u'c'), (0x1D451, 'M', u'd'), (0x1D452, 'M', u'e'), (0x1D453, 'M', u'f'), (0x1D454, 'M', u'g'), (0x1D455, 'X'), (0x1D456, 'M', u'i'), (0x1D457, 'M', u'j'), (0x1D458, 'M', u'k'), 
(0x1D459, 'M', u'l'), (0x1D45A, 'M', u'm'), (0x1D45B, 'M', u'n'), (0x1D45C, 'M', u'o'), (0x1D45D, 'M', u'p'), (0x1D45E, 'M', u'q'), (0x1D45F, 'M', u'r'), (0x1D460, 'M', u's'), (0x1D461, 'M', u't'), (0x1D462, 'M', u'u'), (0x1D463, 'M', u'v'), (0x1D464, 'M', u'w'), (0x1D465, 'M', u'x'), (0x1D466, 'M', u'y'), (0x1D467, 'M', u'z'), (0x1D468, 'M', u'a'), (0x1D469, 'M', u'b'), (0x1D46A, 'M', u'c'), (0x1D46B, 'M', u'd'), (0x1D46C, 'M', u'e'), (0x1D46D, 'M', u'f'), (0x1D46E, 'M', u'g'), (0x1D46F, 'M', u'h'), (0x1D470, 'M', u'i'), (0x1D471, 'M', u'j'), (0x1D472, 'M', u'k'), (0x1D473, 'M', u'l'), (0x1D474, 'M', u'm'), (0x1D475, 'M', u'n'), (0x1D476, 'M', u'o'), (0x1D477, 'M', u'p'), (0x1D478, 'M', u'q'), (0x1D479, 'M', u'r'), (0x1D47A, 'M', u's'), (0x1D47B, 'M', u't'), (0x1D47C, 'M', u'u'), (0x1D47D, 'M', u'v'), (0x1D47E, 'M', u'w'), (0x1D47F, 'M', u'x'), (0x1D480, 'M', u'y'), (0x1D481, 'M', u'z'), (0x1D482, 'M', u'a'), (0x1D483, 'M', u'b'), (0x1D484, 'M', u'c'), (0x1D485, 'M', u'd'), (0x1D486, 'M', u'e'), (0x1D487, 'M', u'f'), (0x1D488, 'M', u'g'), (0x1D489, 'M', u'h'), (0x1D48A, 'M', u'i'), (0x1D48B, 'M', u'j'), (0x1D48C, 'M', u'k'), (0x1D48D, 'M', u'l'), (0x1D48E, 'M', u'm'), (0x1D48F, 'M', u'n'), (0x1D490, 'M', u'o'), (0x1D491, 'M', u'p'), (0x1D492, 'M', u'q'), (0x1D493, 'M', u'r'), (0x1D494, 'M', u's'), (0x1D495, 'M', u't'), (0x1D496, 'M', u'u'), (0x1D497, 'M', u'v'), (0x1D498, 'M', u'w'), (0x1D499, 'M', u'x'), (0x1D49A, 'M', u'y'), (0x1D49B, 'M', u'z'), (0x1D49C, 'M', u'a'), (0x1D49D, 'X'), (0x1D49E, 'M', u'c'), (0x1D49F, 'M', u'd'), (0x1D4A0, 'X'), ] def _seg_55(): return [ (0x1D4A2, 'M', u'g'), (0x1D4A3, 'X'), (0x1D4A5, 'M', u'j'), (0x1D4A6, 'M', u'k'), (0x1D4A7, 'X'), (0x1D4A9, 'M', u'n'), (0x1D4AA, 'M', u'o'), (0x1D4AB, 'M', u'p'), (0x1D4AC, 'M', u'q'), (0x1D4AD, 'X'), (0x1D4AE, 'M', u's'), (0x1D4AF, 'M', u't'), (0x1D4B0, 'M', u'u'), (0x1D4B1, 'M', u'v'), (0x1D4B2, 'M', u'w'), (0x1D4B3, 'M', u'x'), (0x1D4B4, 'M', u'y'), (0x1D4B5, 'M', u'z'), (0x1D4B6, 'M', u'a'), 
(0x1D4B7, 'M', u'b'), (0x1D4B8, 'M', u'c'), (0x1D4B9, 'M', u'd'), (0x1D4BA, 'X'), (0x1D4BB, 'M', u'f'), (0x1D4BC, 'X'), (0x1D4BD, 'M', u'h'), (0x1D4BE, 'M', u'i'), (0x1D4BF, 'M', u'j'), (0x1D4C0, 'M', u'k'), (0x1D4C1, 'M', u'l'), (0x1D4C2, 'M', u'm'), (0x1D4C3, 'M', u'n'), (0x1D4C4, 'X'), (0x1D4C5, 'M', u'p'), (0x1D4C6, 'M', u'q'), (0x1D4C7, 'M', u'r'), (0x1D4C8, 'M', u's'), (0x1D4C9, 'M', u't'), (0x1D4CA, 'M', u'u'), (0x1D4CB, 'M', u'v'), (0x1D4CC, 'M', u'w'), (0x1D4CD, 'M', u'x'), (0x1D4CE, 'M', u'y'), (0x1D4CF, 'M', u'z'), (0x1D4D0, 'M', u'a'), (0x1D4D1, 'M', u'b'), (0x1D4D2, 'M', u'c'), (0x1D4D3, 'M', u'd'), (0x1D4D4, 'M', u'e'), (0x1D4D5, 'M', u'f'), (0x1D4D6, 'M', u'g'), (0x1D4D7, 'M', u'h'), (0x1D4D8, 'M', u'i'), (0x1D4D9, 'M', u'j'), (0x1D4DA, 'M', u'k'), (0x1D4DB, 'M', u'l'), (0x1D4DC, 'M', u'm'), (0x1D4DD, 'M', u'n'), (0x1D4DE, 'M', u'o'), (0x1D4DF, 'M', u'p'), (0x1D4E0, 'M', u'q'), (0x1D4E1, 'M', u'r'), (0x1D4E2, 'M', u's'), (0x1D4E3, 'M', u't'), (0x1D4E4, 'M', u'u'), (0x1D4E5, 'M', u'v'), (0x1D4E6, 'M', u'w'), (0x1D4E7, 'M', u'x'), (0x1D4E8, 'M', u'y'), (0x1D4E9, 'M', u'z'), (0x1D4EA, 'M', u'a'), (0x1D4EB, 'M', u'b'), (0x1D4EC, 'M', u'c'), (0x1D4ED, 'M', u'd'), (0x1D4EE, 'M', u'e'), (0x1D4EF, 'M', u'f'), (0x1D4F0, 'M', u'g'), (0x1D4F1, 'M', u'h'), (0x1D4F2, 'M', u'i'), (0x1D4F3, 'M', u'j'), (0x1D4F4, 'M', u'k'), (0x1D4F5, 'M', u'l'), (0x1D4F6, 'M', u'm'), (0x1D4F7, 'M', u'n'), (0x1D4F8, 'M', u'o'), (0x1D4F9, 'M', u'p'), (0x1D4FA, 'M', u'q'), (0x1D4FB, 'M', u'r'), (0x1D4FC, 'M', u's'), (0x1D4FD, 'M', u't'), (0x1D4FE, 'M', u'u'), (0x1D4FF, 'M', u'v'), (0x1D500, 'M', u'w'), (0x1D501, 'M', u'x'), (0x1D502, 'M', u'y'), (0x1D503, 'M', u'z'), (0x1D504, 'M', u'a'), (0x1D505, 'M', u'b'), (0x1D506, 'X'), (0x1D507, 'M', u'd'), ] def _seg_56(): return [ (0x1D508, 'M', u'e'), (0x1D509, 'M', u'f'), (0x1D50A, 'M', u'g'), (0x1D50B, 'X'), (0x1D50D, 'M', u'j'), (0x1D50E, 'M', u'k'), (0x1D50F, 'M', u'l'), (0x1D510, 'M', u'm'), (0x1D511, 'M', u'n'), (0x1D512, 'M', u'o'), 
(0x1D513, 'M', u'p'), (0x1D514, 'M', u'q'), (0x1D515, 'X'), (0x1D516, 'M', u's'), (0x1D517, 'M', u't'), (0x1D518, 'M', u'u'), (0x1D519, 'M', u'v'), (0x1D51A, 'M', u'w'), (0x1D51B, 'M', u'x'), (0x1D51C, 'M', u'y'), (0x1D51D, 'X'), (0x1D51E, 'M', u'a'), (0x1D51F, 'M', u'b'), (0x1D520, 'M', u'c'), (0x1D521, 'M', u'd'), (0x1D522, 'M', u'e'), (0x1D523, 'M', u'f'), (0x1D524, 'M', u'g'), (0x1D525, 'M', u'h'), (0x1D526, 'M', u'i'), (0x1D527, 'M', u'j'), (0x1D528, 'M', u'k'), (0x1D529, 'M', u'l'), (0x1D52A, 'M', u'm'), (0x1D52B, 'M', u'n'), (0x1D52C, 'M', u'o'), (0x1D52D, 'M', u'p'), (0x1D52E, 'M', u'q'), (0x1D52F, 'M', u'r'), (0x1D530, 'M', u's'), (0x1D531, 'M', u't'), (0x1D532, 'M', u'u'), (0x1D533, 'M', u'v'), (0x1D534, 'M', u'w'), (0x1D535, 'M', u'x'), (0x1D536, 'M', u'y'), (0x1D537, 'M', u'z'), (0x1D538, 'M', u'a'), (0x1D539, 'M', u'b'), (0x1D53A, 'X'), (0x1D53B, 'M', u'd'), (0x1D53C, 'M', u'e'), (0x1D53D, 'M', u'f'), (0x1D53E, 'M', u'g'), (0x1D53F, 'X'), (0x1D540, 'M', u'i'), (0x1D541, 'M', u'j'), (0x1D542, 'M', u'k'), (0x1D543, 'M', u'l'), (0x1D544, 'M', u'm'), (0x1D545, 'X'), (0x1D546, 'M', u'o'), (0x1D547, 'X'), (0x1D54A, 'M', u's'), (0x1D54B, 'M', u't'), (0x1D54C, 'M', u'u'), (0x1D54D, 'M', u'v'), (0x1D54E, 'M', u'w'), (0x1D54F, 'M', u'x'), (0x1D550, 'M', u'y'), (0x1D551, 'X'), (0x1D552, 'M', u'a'), (0x1D553, 'M', u'b'), (0x1D554, 'M', u'c'), (0x1D555, 'M', u'd'), (0x1D556, 'M', u'e'), (0x1D557, 'M', u'f'), (0x1D558, 'M', u'g'), (0x1D559, 'M', u'h'), (0x1D55A, 'M', u'i'), (0x1D55B, 'M', u'j'), (0x1D55C, 'M', u'k'), (0x1D55D, 'M', u'l'), (0x1D55E, 'M', u'm'), (0x1D55F, 'M', u'n'), (0x1D560, 'M', u'o'), (0x1D561, 'M', u'p'), (0x1D562, 'M', u'q'), (0x1D563, 'M', u'r'), (0x1D564, 'M', u's'), (0x1D565, 'M', u't'), (0x1D566, 'M', u'u'), (0x1D567, 'M', u'v'), (0x1D568, 'M', u'w'), (0x1D569, 'M', u'x'), (0x1D56A, 'M', u'y'), (0x1D56B, 'M', u'z'), (0x1D56C, 'M', u'a'), (0x1D56D, 'M', u'b'), (0x1D56E, 'M', u'c'), ] def _seg_57(): return [ (0x1D56F, 'M', u'd'), (0x1D570, 
'M', u'e'), (0x1D571, 'M', u'f'), (0x1D572, 'M', u'g'), (0x1D573, 'M', u'h'), (0x1D574, 'M', u'i'), (0x1D575, 'M', u'j'), (0x1D576, 'M', u'k'), (0x1D577, 'M', u'l'), (0x1D578, 'M', u'm'), (0x1D579, 'M', u'n'), (0x1D57A, 'M', u'o'), (0x1D57B, 'M', u'p'), (0x1D57C, 'M', u'q'), (0x1D57D, 'M', u'r'), (0x1D57E, 'M', u's'), (0x1D57F, 'M', u't'), (0x1D580, 'M', u'u'), (0x1D581, 'M', u'v'), (0x1D582, 'M', u'w'), (0x1D583, 'M', u'x'), (0x1D584, 'M', u'y'), (0x1D585, 'M', u'z'), (0x1D586, 'M', u'a'), (0x1D587, 'M', u'b'), (0x1D588, 'M', u'c'), (0x1D589, 'M', u'd'), (0x1D58A, 'M', u'e'), (0x1D58B, 'M', u'f'), (0x1D58C, 'M', u'g'), (0x1D58D, 'M', u'h'), (0x1D58E, 'M', u'i'), (0x1D58F, 'M', u'j'), (0x1D590, 'M', u'k'), (0x1D591, 'M', u'l'), (0x1D592, 'M', u'm'), (0x1D593, 'M', u'n'), (0x1D594, 'M', u'o'), (0x1D595, 'M', u'p'), (0x1D596, 'M', u'q'), (0x1D597, 'M', u'r'), (0x1D598, 'M', u's'), (0x1D599, 'M', u't'), (0x1D59A, 'M', u'u'), (0x1D59B, 'M', u'v'), (0x1D59C, 'M', u'w'), (0x1D59D, 'M', u'x'), (0x1D59E, 'M', u'y'), (0x1D59F, 'M', u'z'), (0x1D5A0, 'M', u'a'), (0x1D5A1, 'M', u'b'), (0x1D5A2, 'M', u'c'), (0x1D5A3, 'M', u'd'), (0x1D5A4, 'M', u'e'), (0x1D5A5, 'M', u'f'), (0x1D5A6, 'M', u'g'), (0x1D5A7, 'M', u'h'), (0x1D5A8, 'M', u'i'), (0x1D5A9, 'M', u'j'), (0x1D5AA, 'M', u'k'), (0x1D5AB, 'M', u'l'), (0x1D5AC, 'M', u'm'), (0x1D5AD, 'M', u'n'), (0x1D5AE, 'M', u'o'), (0x1D5AF, 'M', u'p'), (0x1D5B0, 'M', u'q'), (0x1D5B1, 'M', u'r'), (0x1D5B2, 'M', u's'), (0x1D5B3, 'M', u't'), (0x1D5B4, 'M', u'u'), (0x1D5B5, 'M', u'v'), (0x1D5B6, 'M', u'w'), (0x1D5B7, 'M', u'x'), (0x1D5B8, 'M', u'y'), (0x1D5B9, 'M', u'z'), (0x1D5BA, 'M', u'a'), (0x1D5BB, 'M', u'b'), (0x1D5BC, 'M', u'c'), (0x1D5BD, 'M', u'd'), (0x1D5BE, 'M', u'e'), (0x1D5BF, 'M', u'f'), (0x1D5C0, 'M', u'g'), (0x1D5C1, 'M', u'h'), (0x1D5C2, 'M', u'i'), (0x1D5C3, 'M', u'j'), (0x1D5C4, 'M', u'k'), (0x1D5C5, 'M', u'l'), (0x1D5C6, 'M', u'm'), (0x1D5C7, 'M', u'n'), (0x1D5C8, 'M', u'o'), (0x1D5C9, 'M', u'p'), (0x1D5CA, 'M', u'q'), 
(0x1D5CB, 'M', u'r'), (0x1D5CC, 'M', u's'), (0x1D5CD, 'M', u't'), (0x1D5CE, 'M', u'u'), (0x1D5CF, 'M', u'v'), (0x1D5D0, 'M', u'w'), (0x1D5D1, 'M', u'x'), (0x1D5D2, 'M', u'y'), ] def _seg_58(): return [ (0x1D5D3, 'M', u'z'), (0x1D5D4, 'M', u'a'), (0x1D5D5, 'M', u'b'), (0x1D5D6, 'M', u'c'), (0x1D5D7, 'M', u'd'), (0x1D5D8, 'M', u'e'), (0x1D5D9, 'M', u'f'), (0x1D5DA, 'M', u'g'), (0x1D5DB, 'M', u'h'), (0x1D5DC, 'M', u'i'), (0x1D5DD, 'M', u'j'), (0x1D5DE, 'M', u'k'), (0x1D5DF, 'M', u'l'), (0x1D5E0, 'M', u'm'), (0x1D5E1, 'M', u'n'), (0x1D5E2, 'M', u'o'), (0x1D5E3, 'M', u'p'), (0x1D5E4, 'M', u'q'), (0x1D5E5, 'M', u'r'), (0x1D5E6, 'M', u's'), (0x1D5E7, 'M', u't'), (0x1D5E8, 'M', u'u'), (0x1D5E9, 'M', u'v'), (0x1D5EA, 'M', u'w'), (0x1D5EB, 'M', u'x'), (0x1D5EC, 'M', u'y'), (0x1D5ED, 'M', u'z'), (0x1D5EE, 'M', u'a'), (0x1D5EF, 'M', u'b'), (0x1D5F0, 'M', u'c'), (0x1D5F1, 'M', u'd'), (0x1D5F2, 'M', u'e'), (0x1D5F3, 'M', u'f'), (0x1D5F4, 'M', u'g'), (0x1D5F5, 'M', u'h'), (0x1D5F6, 'M', u'i'), (0x1D5F7, 'M', u'j'), (0x1D5F8, 'M', u'k'), (0x1D5F9, 'M', u'l'), (0x1D5FA, 'M', u'm'), (0x1D5FB, 'M', u'n'), (0x1D5FC, 'M', u'o'), (0x1D5FD, 'M', u'p'), (0x1D5FE, 'M', u'q'), (0x1D5FF, 'M', u'r'), (0x1D600, 'M', u's'), (0x1D601, 'M', u't'), (0x1D602, 'M', u'u'), (0x1D603, 'M', u'v'), (0x1D604, 'M', u'w'), (0x1D605, 'M', u'x'), (0x1D606, 'M', u'y'), (0x1D607, 'M', u'z'), (0x1D608, 'M', u'a'), (0x1D609, 'M', u'b'), (0x1D60A, 'M', u'c'), (0x1D60B, 'M', u'd'), (0x1D60C, 'M', u'e'), (0x1D60D, 'M', u'f'), (0x1D60E, 'M', u'g'), (0x1D60F, 'M', u'h'), (0x1D610, 'M', u'i'), (0x1D611, 'M', u'j'), (0x1D612, 'M', u'k'), (0x1D613, 'M', u'l'), (0x1D614, 'M', u'm'), (0x1D615, 'M', u'n'), (0x1D616, 'M', u'o'), (0x1D617, 'M', u'p'), (0x1D618, 'M', u'q'), (0x1D619, 'M', u'r'), (0x1D61A, 'M', u's'), (0x1D61B, 'M', u't'), (0x1D61C, 'M', u'u'), (0x1D61D, 'M', u'v'), (0x1D61E, 'M', u'w'), (0x1D61F, 'M', u'x'), (0x1D620, 'M', u'y'), (0x1D621, 'M', u'z'), (0x1D622, 'M', u'a'), (0x1D623, 'M', u'b'), (0x1D624, 'M', 
u'c'), (0x1D625, 'M', u'd'), (0x1D626, 'M', u'e'), (0x1D627, 'M', u'f'), (0x1D628, 'M', u'g'), (0x1D629, 'M', u'h'), (0x1D62A, 'M', u'i'), (0x1D62B, 'M', u'j'), (0x1D62C, 'M', u'k'), (0x1D62D, 'M', u'l'), (0x1D62E, 'M', u'm'), (0x1D62F, 'M', u'n'), (0x1D630, 'M', u'o'), (0x1D631, 'M', u'p'), (0x1D632, 'M', u'q'), (0x1D633, 'M', u'r'), (0x1D634, 'M', u's'), (0x1D635, 'M', u't'), (0x1D636, 'M', u'u'), ] def _seg_59(): return [ (0x1D637, 'M', u'v'), (0x1D638, 'M', u'w'), (0x1D639, 'M', u'x'), (0x1D63A, 'M', u'y'), (0x1D63B, 'M', u'z'), (0x1D63C, 'M', u'a'), (0x1D63D, 'M', u'b'), (0x1D63E, 'M', u'c'), (0x1D63F, 'M', u'd'), (0x1D640, 'M', u'e'), (0x1D641, 'M', u'f'), (0x1D642, 'M', u'g'), (0x1D643, 'M', u'h'), (0x1D644, 'M', u'i'), (0x1D645, 'M', u'j'), (0x1D646, 'M', u'k'), (0x1D647, 'M', u'l'), (0x1D648, 'M', u'm'), (0x1D649, 'M', u'n'), (0x1D64A, 'M', u'o'), (0x1D64B, 'M', u'p'), (0x1D64C, 'M', u'q'), (0x1D64D, 'M', u'r'), (0x1D64E, 'M', u's'), (0x1D64F, 'M', u't'), (0x1D650, 'M', u'u'), (0x1D651, 'M', u'v'), (0x1D652, 'M', u'w'), (0x1D653, 'M', u'x'), (0x1D654, 'M', u'y'), (0x1D655, 'M', u'z'), (0x1D656, 'M', u'a'), (0x1D657, 'M', u'b'), (0x1D658, 'M', u'c'), (0x1D659, 'M', u'd'), (0x1D65A, 'M', u'e'), (0x1D65B, 'M', u'f'), (0x1D65C, 'M', u'g'), (0x1D65D, 'M', u'h'), (0x1D65E, 'M', u'i'), (0x1D65F, 'M', u'j'), (0x1D660, 'M', u'k'), (0x1D661, 'M', u'l'), (0x1D662, 'M', u'm'), (0x1D663, 'M', u'n'), (0x1D664, 'M', u'o'), (0x1D665, 'M', u'p'), (0x1D666, 'M', u'q'), (0x1D667, 'M', u'r'), (0x1D668, 'M', u's'), (0x1D669, 'M', u't'), (0x1D66A, 'M', u'u'), (0x1D66B, 'M', u'v'), (0x1D66C, 'M', u'w'), (0x1D66D, 'M', u'x'), (0x1D66E, 'M', u'y'), (0x1D66F, 'M', u'z'), (0x1D670, 'M', u'a'), (0x1D671, 'M', u'b'), (0x1D672, 'M', u'c'), (0x1D673, 'M', u'd'), (0x1D674, 'M', u'e'), (0x1D675, 'M', u'f'), (0x1D676, 'M', u'g'), (0x1D677, 'M', u'h'), (0x1D678, 'M', u'i'), (0x1D679, 'M', u'j'), (0x1D67A, 'M', u'k'), (0x1D67B, 'M', u'l'), (0x1D67C, 'M', u'm'), (0x1D67D, 'M', u'n'), 
(0x1D67E, 'M', u'o'), (0x1D67F, 'M', u'p'), (0x1D680, 'M', u'q'), (0x1D681, 'M', u'r'), (0x1D682, 'M', u's'), (0x1D683, 'M', u't'), (0x1D684, 'M', u'u'), (0x1D685, 'M', u'v'), (0x1D686, 'M', u'w'), (0x1D687, 'M', u'x'), (0x1D688, 'M', u'y'), (0x1D689, 'M', u'z'), (0x1D68A, 'M', u'a'), (0x1D68B, 'M', u'b'), (0x1D68C, 'M', u'c'), (0x1D68D, 'M', u'd'), (0x1D68E, 'M', u'e'), (0x1D68F, 'M', u'f'), (0x1D690, 'M', u'g'), (0x1D691, 'M', u'h'), (0x1D692, 'M', u'i'), (0x1D693, 'M', u'j'), (0x1D694, 'M', u'k'), (0x1D695, 'M', u'l'), (0x1D696, 'M', u'm'), (0x1D697, 'M', u'n'), (0x1D698, 'M', u'o'), (0x1D699, 'M', u'p'), (0x1D69A, 'M', u'q'), ] def _seg_60(): return [ (0x1D69B, 'M', u'r'), (0x1D69C, 'M', u's'), (0x1D69D, 'M', u't'), (0x1D69E, 'M', u'u'), (0x1D69F, 'M', u'v'), (0x1D6A0, 'M', u'w'), (0x1D6A1, 'M', u'x'), (0x1D6A2, 'M', u'y'), (0x1D6A3, 'M', u'z'), (0x1D6A4, 'M', u'ı'), (0x1D6A5, 'M', u'ȷ'), (0x1D6A6, 'X'), (0x1D6A8, 'M', u'α'), (0x1D6A9, 'M', u'β'), (0x1D6AA, 'M', u'γ'), (0x1D6AB, 'M', u'δ'), (0x1D6AC, 'M', u'ε'), (0x1D6AD, 'M', u'ζ'), (0x1D6AE, 'M', u'η'), (0x1D6AF, 'M', u'θ'), (0x1D6B0, 'M', u'ι'), (0x1D6B1, 'M', u'κ'), (0x1D6B2, 'M', u'λ'), (0x1D6B3, 'M', u'μ'), (0x1D6B4, 'M', u'ν'), (0x1D6B5, 'M', u'ξ'), (0x1D6B6, 'M', u'ο'), (0x1D6B7, 'M', u'π'), (0x1D6B8, 'M', u'ρ'), (0x1D6B9, 'M', u'θ'), (0x1D6BA, 'M', u'σ'), (0x1D6BB, 'M', u'τ'), (0x1D6BC, 'M', u'υ'), (0x1D6BD, 'M', u'φ'), (0x1D6BE, 'M', u'χ'), (0x1D6BF, 'M', u'ψ'), (0x1D6C0, 'M', u'ω'), (0x1D6C1, 'M', u'∇'), (0x1D6C2, 'M', u'α'), (0x1D6C3, 'M', u'β'), (0x1D6C4, 'M', u'γ'), (0x1D6C5, 'M', u'δ'), (0x1D6C6, 'M', u'ε'), (0x1D6C7, 'M', u'ζ'), (0x1D6C8, 'M', u'η'), (0x1D6C9, 'M', u'θ'), (0x1D6CA, 'M', u'ι'), (0x1D6CB, 'M', u'κ'), (0x1D6CC, 'M', u'λ'), (0x1D6CD, 'M', u'μ'), (0x1D6CE, 'M', u'ν'), (0x1D6CF, 'M', u'ξ'), (0x1D6D0, 'M', u'ο'), (0x1D6D1, 'M', u'π'), (0x1D6D2, 'M', u'ρ'), (0x1D6D3, 'M', u'σ'), (0x1D6D5, 'M', u'τ'), (0x1D6D6, 'M', u'υ'), (0x1D6D7, 'M', u'φ'), (0x1D6D8, 'M', u'χ'), (0x1D6D9, 'M', u'ψ'), 
(0x1D6DA, 'M', u'ω'), (0x1D6DB, 'M', u'∂'), (0x1D6DC, 'M', u'ε'), (0x1D6DD, 'M', u'θ'), (0x1D6DE, 'M', u'κ'), (0x1D6DF, 'M', u'φ'), (0x1D6E0, 'M', u'ρ'), (0x1D6E1, 'M', u'π'), (0x1D6E2, 'M', u'α'), (0x1D6E3, 'M', u'β'), (0x1D6E4, 'M', u'γ'), (0x1D6E5, 'M', u'δ'), (0x1D6E6, 'M', u'ε'), (0x1D6E7, 'M', u'ζ'), (0x1D6E8, 'M', u'η'), (0x1D6E9, 'M', u'θ'), (0x1D6EA, 'M', u'ι'), (0x1D6EB, 'M', u'κ'), (0x1D6EC, 'M', u'λ'), (0x1D6ED, 'M', u'μ'), (0x1D6EE, 'M', u'ν'), (0x1D6EF, 'M', u'ξ'), (0x1D6F0, 'M', u'ο'), (0x1D6F1, 'M', u'π'), (0x1D6F2, 'M', u'ρ'), (0x1D6F3, 'M', u'θ'), (0x1D6F4, 'M', u'σ'), (0x1D6F5, 'M', u'τ'), (0x1D6F6, 'M', u'υ'), (0x1D6F7, 'M', u'φ'), (0x1D6F8, 'M', u'χ'), (0x1D6F9, 'M', u'ψ'), (0x1D6FA, 'M', u'ω'), (0x1D6FB, 'M', u'∇'), (0x1D6FC, 'M', u'α'), (0x1D6FD, 'M', u'β'), (0x1D6FE, 'M', u'γ'), (0x1D6FF, 'M', u'δ'), (0x1D700, 'M', u'ε'), ] def _seg_61(): return [ (0x1D701, 'M', u'ζ'), (0x1D702, 'M', u'η'), (0x1D703, 'M', u'θ'), (0x1D704, 'M', u'ι'), (0x1D705, 'M', u'κ'), (0x1D706, 'M', u'λ'), (0x1D707, 'M', u'μ'), (0x1D708, 'M', u'ν'), (0x1D709, 'M', u'ξ'), (0x1D70A, 'M', u'ο'), (0x1D70B, 'M', u'π'), (0x1D70C, 'M', u'ρ'), (0x1D70D, 'M', u'σ'), (0x1D70F, 'M', u'τ'), (0x1D710, 'M', u'υ'), (0x1D711, 'M', u'φ'), (0x1D712, 'M', u'χ'), (0x1D713, 'M', u'ψ'), (0x1D714, 'M', u'ω'), (0x1D715, 'M', u'∂'), (0x1D716, 'M', u'ε'), (0x1D717, 'M', u'θ'), (0x1D718, 'M', u'κ'), (0x1D719, 'M', u'φ'), (0x1D71A, 'M', u'ρ'), (0x1D71B, 'M', u'π'), (0x1D71C, 'M', u'α'), (0x1D71D, 'M', u'β'), (0x1D71E, 'M', u'γ'), (0x1D71F, 'M', u'δ'), (0x1D720, 'M', u'ε'), (0x1D721, 'M', u'ζ'), (0x1D722, 'M', u'η'), (0x1D723, 'M', u'θ'), (0x1D724, 'M', u'ι'), (0x1D725, 'M', u'κ'), (0x1D726, 'M', u'λ'), (0x1D727, 'M', u'μ'), (0x1D728, 'M', u'ν'), (0x1D729, 'M', u'ξ'), (0x1D72A, 'M', u'ο'), (0x1D72B, 'M', u'π'), (0x1D72C, 'M', u'ρ'), (0x1D72D, 'M', u'θ'), (0x1D72E, 'M', u'σ'), (0x1D72F, 'M', u'τ'), (0x1D730, 'M', u'υ'), (0x1D731, 'M', u'φ'), (0x1D732, 'M', u'χ'), (0x1D733, 'M', u'ψ'), (0x1D734, 'M', 
u'ω'), (0x1D735, 'M', u'∇'), (0x1D736, 'M', u'α'), (0x1D737, 'M', u'β'), (0x1D738, 'M', u'γ'), (0x1D739, 'M', u'δ'), (0x1D73A, 'M', u'ε'), (0x1D73B, 'M', u'ζ'), (0x1D73C, 'M', u'η'), (0x1D73D, 'M', u'θ'), (0x1D73E, 'M', u'ι'), (0x1D73F, 'M', u'κ'), (0x1D740, 'M', u'λ'), (0x1D741, 'M', u'μ'), (0x1D742, 'M', u'ν'), (0x1D743, 'M', u'ξ'), (0x1D744, 'M', u'ο'), (0x1D745, 'M', u'π'), (0x1D746, 'M', u'ρ'), (0x1D747, 'M', u'σ'), (0x1D749, 'M', u'τ'), (0x1D74A, 'M', u'υ'), (0x1D74B, 'M', u'φ'), (0x1D74C, 'M', u'χ'), (0x1D74D, 'M', u'ψ'), (0x1D74E, 'M', u'ω'), (0x1D74F, 'M', u'∂'), (0x1D750, 'M', u'ε'), (0x1D751, 'M', u'θ'), (0x1D752, 'M', u'κ'), (0x1D753, 'M', u'φ'), (0x1D754, 'M', u'ρ'), (0x1D755, 'M', u'π'), (0x1D756, 'M', u'α'), (0x1D757, 'M', u'β'), (0x1D758, 'M', u'γ'), (0x1D759, 'M', u'δ'), (0x1D75A, 'M', u'ε'), (0x1D75B, 'M', u'ζ'), (0x1D75C, 'M', u'η'), (0x1D75D, 'M', u'θ'), (0x1D75E, 'M', u'ι'), (0x1D75F, 'M', u'κ'), (0x1D760, 'M', u'λ'), (0x1D761, 'M', u'μ'), (0x1D762, 'M', u'ν'), (0x1D763, 'M', u'ξ'), (0x1D764, 'M', u'ο'), (0x1D765, 'M', u'π'), (0x1D766, 'M', u'ρ'), ] def _seg_62(): return [ (0x1D767, 'M', u'θ'), (0x1D768, 'M', u'σ'), (0x1D769, 'M', u'τ'), (0x1D76A, 'M', u'υ'), (0x1D76B, 'M', u'φ'), (0x1D76C, 'M', u'χ'), (0x1D76D, 'M', u'ψ'), (0x1D76E, 'M', u'ω'), (0x1D76F, 'M', u'∇'), (0x1D770, 'M', u'α'), (0x1D771, 'M', u'β'), (0x1D772, 'M', u'γ'), (0x1D773, 'M', u'δ'), (0x1D774, 'M', u'ε'), (0x1D775, 'M', u'ζ'), (0x1D776, 'M', u'η'), (0x1D777, 'M', u'θ'), (0x1D778, 'M', u'ι'), (0x1D779, 'M', u'κ'), (0x1D77A, 'M', u'λ'), (0x1D77B, 'M', u'μ'), (0x1D77C, 'M', u'ν'), (0x1D77D, 'M', u'ξ'), (0x1D77E, 'M', u'ο'), (0x1D77F, 'M', u'π'), (0x1D780, 'M', u'ρ'), (0x1D781, 'M', u'σ'), (0x1D783, 'M', u'τ'), (0x1D784, 'M', u'υ'), (0x1D785, 'M', u'φ'), (0x1D786, 'M', u'χ'), (0x1D787, 'M', u'ψ'), (0x1D788, 'M', u'ω'), (0x1D789, 'M', u'∂'), (0x1D78A, 'M', u'ε'), (0x1D78B, 'M', u'θ'), (0x1D78C, 'M', u'κ'), (0x1D78D, 'M', u'φ'), (0x1D78E, 'M', u'ρ'), (0x1D78F, 'M', u'π'), 
(0x1D790, 'M', u'α'), (0x1D791, 'M', u'β'), (0x1D792, 'M', u'γ'), (0x1D793, 'M', u'δ'), (0x1D794, 'M', u'ε'), (0x1D795, 'M', u'ζ'), (0x1D796, 'M', u'η'), (0x1D797, 'M', u'θ'), (0x1D798, 'M', u'ι'), (0x1D799, 'M', u'κ'), (0x1D79A, 'M', u'λ'), (0x1D79B, 'M', u'μ'), (0x1D79C, 'M', u'ν'), (0x1D79D, 'M', u'ξ'), (0x1D79E, 'M', u'ο'), (0x1D79F, 'M', u'π'), (0x1D7A0, 'M', u'ρ'), (0x1D7A1, 'M', u'θ'), (0x1D7A2, 'M', u'σ'), (0x1D7A3, 'M', u'τ'), (0x1D7A4, 'M', u'υ'), (0x1D7A5, 'M', u'φ'), (0x1D7A6, 'M', u'χ'), (0x1D7A7, 'M', u'ψ'), (0x1D7A8, 'M', u'ω'), (0x1D7A9, 'M', u'∇'), (0x1D7AA, 'M', u'α'), (0x1D7AB, 'M', u'β'), (0x1D7AC, 'M', u'γ'), (0x1D7AD, 'M', u'δ'), (0x1D7AE, 'M', u'ε'), (0x1D7AF, 'M', u'ζ'), (0x1D7B0, 'M', u'η'), (0x1D7B1, 'M', u'θ'), (0x1D7B2, 'M', u'ι'), (0x1D7B3, 'M', u'κ'), (0x1D7B4, 'M', u'λ'), (0x1D7B5, 'M', u'μ'), (0x1D7B6, 'M', u'ν'), (0x1D7B7, 'M', u'ξ'), (0x1D7B8, 'M', u'ο'), (0x1D7B9, 'M', u'π'), (0x1D7BA, 'M', u'ρ'), (0x1D7BB, 'M', u'σ'), (0x1D7BD, 'M', u'τ'), (0x1D7BE, 'M', u'υ'), (0x1D7BF, 'M', u'φ'), (0x1D7C0, 'M', u'χ'), (0x1D7C1, 'M', u'ψ'), (0x1D7C2, 'M', u'ω'), (0x1D7C3, 'M', u'∂'), (0x1D7C4, 'M', u'ε'), (0x1D7C5, 'M', u'θ'), (0x1D7C6, 'M', u'κ'), (0x1D7C7, 'M', u'φ'), (0x1D7C8, 'M', u'ρ'), (0x1D7C9, 'M', u'π'), (0x1D7CA, 'M', u'ϝ'), (0x1D7CC, 'X'), (0x1D7CE, 'M', u'0'), ] def _seg_63(): return [ (0x1D7CF, 'M', u'1'), (0x1D7D0, 'M', u'2'), (0x1D7D1, 'M', u'3'), (0x1D7D2, 'M', u'4'), (0x1D7D3, 'M', u'5'), (0x1D7D4, 'M', u'6'), (0x1D7D5, 'M', u'7'), (0x1D7D6, 'M', u'8'), (0x1D7D7, 'M', u'9'), (0x1D7D8, 'M', u'0'), (0x1D7D9, 'M', u'1'), (0x1D7DA, 'M', u'2'), (0x1D7DB, 'M', u'3'), (0x1D7DC, 'M', u'4'), (0x1D7DD, 'M', u'5'), (0x1D7DE, 'M', u'6'), (0x1D7DF, 'M', u'7'), (0x1D7E0, 'M', u'8'), (0x1D7E1, 'M', u'9'), (0x1D7E2, 'M', u'0'), (0x1D7E3, 'M', u'1'), (0x1D7E4, 'M', u'2'), (0x1D7E5, 'M', u'3'), (0x1D7E6, 'M', u'4'), (0x1D7E7, 'M', u'5'), (0x1D7E8, 'M', u'6'), (0x1D7E9, 'M', u'7'), (0x1D7EA, 'M', u'8'), (0x1D7EB, 'M', u'9'), (0x1D7EC, 'M', u'0'), 
(0x1D7ED, 'M', u'1'), (0x1D7EE, 'M', u'2'), (0x1D7EF, 'M', u'3'), (0x1D7F0, 'M', u'4'), (0x1D7F1, 'M', u'5'), (0x1D7F2, 'M', u'6'), (0x1D7F3, 'M', u'7'), (0x1D7F4, 'M', u'8'), (0x1D7F5, 'M', u'9'), (0x1D7F6, 'M', u'0'), (0x1D7F7, 'M', u'1'), (0x1D7F8, 'M', u'2'), (0x1D7F9, 'M', u'3'), (0x1D7FA, 'M', u'4'), (0x1D7FB, 'M', u'5'), (0x1D7FC, 'M', u'6'), (0x1D7FD, 'M', u'7'), (0x1D7FE, 'M', u'8'), (0x1D7FF, 'M', u'9'), (0x1D800, 'X'), (0x1EE00, 'M', u'ا'), (0x1EE01, 'M', u'ب'), (0x1EE02, 'M', u'ج'), (0x1EE03, 'M', u'د'), (0x1EE04, 'X'), (0x1EE05, 'M', u'و'), (0x1EE06, 'M', u'ز'), (0x1EE07, 'M', u'ح'), (0x1EE08, 'M', u'ط'), (0x1EE09, 'M', u'ي'), (0x1EE0A, 'M', u'ك'), (0x1EE0B, 'M', u'ل'), (0x1EE0C, 'M', u'م'), (0x1EE0D, 'M', u'ن'), (0x1EE0E, 'M', u'س'), (0x1EE0F, 'M', u'ع'), (0x1EE10, 'M', u'ف'), (0x1EE11, 'M', u'ص'), (0x1EE12, 'M', u'ق'), (0x1EE13, 'M', u'ر'), (0x1EE14, 'M', u'ش'), (0x1EE15, 'M', u'ت'), (0x1EE16, 'M', u'ث'), (0x1EE17, 'M', u'خ'), (0x1EE18, 'M', u'ذ'), (0x1EE19, 'M', u'ض'), (0x1EE1A, 'M', u'ظ'), (0x1EE1B, 'M', u'غ'), (0x1EE1C, 'M', u'ٮ'), (0x1EE1D, 'M', u'ں'), (0x1EE1E, 'M', u'ڡ'), (0x1EE1F, 'M', u'ٯ'), (0x1EE20, 'X'), (0x1EE21, 'M', u'ب'), (0x1EE22, 'M', u'ج'), (0x1EE23, 'X'), (0x1EE24, 'M', u'ه'), (0x1EE25, 'X'), (0x1EE27, 'M', u'ح'), (0x1EE28, 'X'), (0x1EE29, 'M', u'ي'), (0x1EE2A, 'M', u'ك'), (0x1EE2B, 'M', u'ل'), (0x1EE2C, 'M', u'م'), (0x1EE2D, 'M', u'ن'), (0x1EE2E, 'M', u'س'), (0x1EE2F, 'M', u'ع'), (0x1EE30, 'M', u'ف'), (0x1EE31, 'M', u'ص'), (0x1EE32, 'M', u'ق'), ] def _seg_64(): return [ (0x1EE33, 'X'), (0x1EE34, 'M', u'ش'), (0x1EE35, 'M', u'ت'), (0x1EE36, 'M', u'ث'), (0x1EE37, 'M', u'خ'), (0x1EE38, 'X'), (0x1EE39, 'M', u'ض'), (0x1EE3A, 'X'), (0x1EE3B, 'M', u'غ'), (0x1EE3C, 'X'), (0x1EE42, 'M', u'ج'), (0x1EE43, 'X'), (0x1EE47, 'M', u'ح'), (0x1EE48, 'X'), (0x1EE49, 'M', u'ي'), (0x1EE4A, 'X'), (0x1EE4B, 'M', u'ل'), (0x1EE4C, 'X'), (0x1EE4D, 'M', u'ن'), (0x1EE4E, 'M', u'س'), (0x1EE4F, 'M', u'ع'), (0x1EE50, 'X'), (0x1EE51, 'M', u'ص'), (0x1EE52, 'M', 
u'ق'), (0x1EE53, 'X'), (0x1EE54, 'M', u'ش'), (0x1EE55, 'X'), (0x1EE57, 'M', u'خ'), (0x1EE58, 'X'), (0x1EE59, 'M', u'ض'), (0x1EE5A, 'X'), (0x1EE5B, 'M', u'غ'), (0x1EE5C, 'X'), (0x1EE5D, 'M', u'ں'), (0x1EE5E, 'X'), (0x1EE5F, 'M', u'ٯ'), (0x1EE60, 'X'), (0x1EE61, 'M', u'ب'), (0x1EE62, 'M', u'ج'), (0x1EE63, 'X'), (0x1EE64, 'M', u'ه'), (0x1EE65, 'X'), (0x1EE67, 'M', u'ح'), (0x1EE68, 'M', u'ط'), (0x1EE69, 'M', u'ي'), (0x1EE6A, 'M', u'ك'), (0x1EE6B, 'X'), (0x1EE6C, 'M', u'م'), (0x1EE6D, 'M', u'ن'), (0x1EE6E, 'M', u'س'), (0x1EE6F, 'M', u'ع'), (0x1EE70, 'M', u'ف'), (0x1EE71, 'M', u'ص'), (0x1EE72, 'M', u'ق'), (0x1EE73, 'X'), (0x1EE74, 'M', u'ش'), (0x1EE75, 'M', u'ت'), (0x1EE76, 'M', u'ث'), (0x1EE77, 'M', u'خ'), (0x1EE78, 'X'), (0x1EE79, 'M', u'ض'), (0x1EE7A, 'M', u'ظ'), (0x1EE7B, 'M', u'غ'), (0x1EE7C, 'M', u'ٮ'), (0x1EE7D, 'X'), (0x1EE7E, 'M', u'ڡ'), (0x1EE7F, 'X'), (0x1EE80, 'M', u'ا'), (0x1EE81, 'M', u'ب'), (0x1EE82, 'M', u'ج'), (0x1EE83, 'M', u'د'), (0x1EE84, 'M', u'ه'), (0x1EE85, 'M', u'و'), (0x1EE86, 'M', u'ز'), (0x1EE87, 'M', u'ح'), (0x1EE88, 'M', u'ط'), (0x1EE89, 'M', u'ي'), (0x1EE8A, 'X'), (0x1EE8B, 'M', u'ل'), (0x1EE8C, 'M', u'م'), (0x1EE8D, 'M', u'ن'), (0x1EE8E, 'M', u'س'), (0x1EE8F, 'M', u'ع'), (0x1EE90, 'M', u'ف'), (0x1EE91, 'M', u'ص'), (0x1EE92, 'M', u'ق'), (0x1EE93, 'M', u'ر'), (0x1EE94, 'M', u'ش'), (0x1EE95, 'M', u'ت'), (0x1EE96, 'M', u'ث'), (0x1EE97, 'M', u'خ'), (0x1EE98, 'M', u'ذ'), (0x1EE99, 'M', u'ض'), (0x1EE9A, 'M', u'ظ'), (0x1EE9B, 'M', u'غ'), (0x1EE9C, 'X'), (0x1EEA1, 'M', u'ب'), (0x1EEA2, 'M', u'ج'), (0x1EEA3, 'M', u'د'), (0x1EEA4, 'X'), ] def _seg_65(): return [ (0x1EEA5, 'M', u'و'), (0x1EEA6, 'M', u'ز'), (0x1EEA7, 'M', u'ح'), (0x1EEA8, 'M', u'ط'), (0x1EEA9, 'M', u'ي'), (0x1EEAA, 'X'), (0x1EEAB, 'M', u'ل'), (0x1EEAC, 'M', u'م'), (0x1EEAD, 'M', u'ن'), (0x1EEAE, 'M', u'س'), (0x1EEAF, 'M', u'ع'), (0x1EEB0, 'M', u'ف'), (0x1EEB1, 'M', u'ص'), (0x1EEB2, 'M', u'ق'), (0x1EEB3, 'M', u'ر'), (0x1EEB4, 'M', u'ش'), (0x1EEB5, 'M', u'ت'), (0x1EEB6, 'M', u'ث'), 
(0x1EEB7, 'M', u'خ'), (0x1EEB8, 'M', u'ذ'), (0x1EEB9, 'M', u'ض'), (0x1EEBA, 'M', u'ظ'), (0x1EEBB, 'M', u'غ'), (0x1EEBC, 'X'), (0x1EEF0, 'V'), (0x1EEF2, 'X'), (0x1F000, 'V'), (0x1F02C, 'X'), (0x1F030, 'V'), (0x1F094, 'X'), (0x1F0A0, 'V'), (0x1F0AF, 'X'), (0x1F0B1, 'V'), (0x1F0BF, 'X'), (0x1F0C1, 'V'), (0x1F0D0, 'X'), (0x1F0D1, 'V'), (0x1F0E0, 'X'), (0x1F101, '3', u'0,'), (0x1F102, '3', u'1,'), (0x1F103, '3', u'2,'), (0x1F104, '3', u'3,'), (0x1F105, '3', u'4,'), (0x1F106, '3', u'5,'), (0x1F107, '3', u'6,'), (0x1F108, '3', u'7,'), (0x1F109, '3', u'8,'), (0x1F10A, '3', u'9,'), (0x1F10B, 'X'), (0x1F110, '3', u'(a)'), (0x1F111, '3', u'(b)'), (0x1F112, '3', u'(c)'), (0x1F113, '3', u'(d)'), (0x1F114, '3', u'(e)'), (0x1F115, '3', u'(f)'), (0x1F116, '3', u'(g)'), (0x1F117, '3', u'(h)'), (0x1F118, '3', u'(i)'), (0x1F119, '3', u'(j)'), (0x1F11A, '3', u'(k)'), (0x1F11B, '3', u'(l)'), (0x1F11C, '3', u'(m)'), (0x1F11D, '3', u'(n)'), (0x1F11E, '3', u'(o)'), (0x1F11F, '3', u'(p)'), (0x1F120, '3', u'(q)'), (0x1F121, '3', u'(r)'), (0x1F122, '3', u'(s)'), (0x1F123, '3', u'(t)'), (0x1F124, '3', u'(u)'), (0x1F125, '3', u'(v)'), (0x1F126, '3', u'(w)'), (0x1F127, '3', u'(x)'), (0x1F128, '3', u'(y)'), (0x1F129, '3', u'(z)'), (0x1F12A, 'M', u'〔s〕'), (0x1F12B, 'M', u'c'), (0x1F12C, 'M', u'r'), (0x1F12D, 'M', u'cd'), (0x1F12E, 'M', u'wz'), (0x1F12F, 'X'), (0x1F130, 'M', u'a'), (0x1F131, 'M', u'b'), (0x1F132, 'M', u'c'), (0x1F133, 'M', u'd'), (0x1F134, 'M', u'e'), (0x1F135, 'M', u'f'), (0x1F136, 'M', u'g'), (0x1F137, 'M', u'h'), (0x1F138, 'M', u'i'), (0x1F139, 'M', u'j'), (0x1F13A, 'M', u'k'), (0x1F13B, 'M', u'l'), (0x1F13C, 'M', u'm'), (0x1F13D, 'M', u'n'), (0x1F13E, 'M', u'o'), (0x1F13F, 'M', u'p'), (0x1F140, 'M', u'q'), (0x1F141, 'M', u'r'), (0x1F142, 'M', u's'), ] def _seg_66(): return [ (0x1F143, 'M', u't'), (0x1F144, 'M', u'u'), (0x1F145, 'M', u'v'), (0x1F146, 'M', u'w'), (0x1F147, 'M', u'x'), (0x1F148, 'M', u'y'), (0x1F149, 'M', u'z'), (0x1F14A, 'M', u'hv'), (0x1F14B, 'M', u'mv'), 
(0x1F14C, 'M', u'sd'), (0x1F14D, 'M', u'ss'), (0x1F14E, 'M', u'ppv'), (0x1F14F, 'M', u'wc'), (0x1F150, 'V'), (0x1F16A, 'M', u'mc'), (0x1F16B, 'M', u'md'), (0x1F16C, 'X'), (0x1F170, 'V'), (0x1F190, 'M', u'dj'), (0x1F191, 'V'), (0x1F19B, 'X'), (0x1F1E6, 'V'), (0x1F200, 'M', u'ほか'), (0x1F201, 'M', u'ココ'), (0x1F202, 'M', u'サ'), (0x1F203, 'X'), (0x1F210, 'M', u'手'), (0x1F211, 'M', u'字'), (0x1F212, 'M', u'双'), (0x1F213, 'M', u'デ'), (0x1F214, 'M', u'二'), (0x1F215, 'M', u'多'), (0x1F216, 'M', u'解'), (0x1F217, 'M', u'天'), (0x1F218, 'M', u'交'), (0x1F219, 'M', u'映'), (0x1F21A, 'M', u'無'), (0x1F21B, 'M', u'料'), (0x1F21C, 'M', u'前'), (0x1F21D, 'M', u'後'), (0x1F21E, 'M', u'再'), (0x1F21F, 'M', u'新'), (0x1F220, 'M', u'初'), (0x1F221, 'M', u'終'), (0x1F222, 'M', u'生'), (0x1F223, 'M', u'販'), (0x1F224, 'M', u'声'), (0x1F225, 'M', u'吹'), (0x1F226, 'M', u'演'), (0x1F227, 'M', u'投'), (0x1F228, 'M', u'捕'), (0x1F229, 'M', u'一'), (0x1F22A, 'M', u'三'), (0x1F22B, 'M', u'遊'), (0x1F22C, 'M', u'左'), (0x1F22D, 'M', u'中'), (0x1F22E, 'M', u'右'), (0x1F22F, 'M', u'指'), (0x1F230, 'M', u'走'), (0x1F231, 'M', u'打'), (0x1F232, 'M', u'禁'), (0x1F233, 'M', u'空'), (0x1F234, 'M', u'合'), (0x1F235, 'M', u'満'), (0x1F236, 'M', u'有'), (0x1F237, 'M', u'月'), (0x1F238, 'M', u'申'), (0x1F239, 'M', u'割'), (0x1F23A, 'M', u'営'), (0x1F23B, 'X'), (0x1F240, 'M', u'〔本〕'), (0x1F241, 'M', u'〔三〕'), (0x1F242, 'M', u'〔二〕'), (0x1F243, 'M', u'〔安〕'), (0x1F244, 'M', u'〔点〕'), (0x1F245, 'M', u'〔打〕'), (0x1F246, 'M', u'〔盗〕'), (0x1F247, 'M', u'〔勝〕'), (0x1F248, 'M', u'〔敗〕'), (0x1F249, 'X'), (0x1F250, 'M', u'得'), (0x1F251, 'M', u'可'), (0x1F252, 'X'), (0x1F300, 'V'), (0x1F321, 'X'), (0x1F330, 'V'), (0x1F336, 'X'), (0x1F337, 'V'), (0x1F37D, 'X'), (0x1F380, 'V'), (0x1F394, 'X'), (0x1F3A0, 'V'), (0x1F3C5, 'X'), (0x1F3C6, 'V'), (0x1F3CB, 'X'), (0x1F3E0, 'V'), (0x1F3F1, 'X'), (0x1F400, 'V'), (0x1F43F, 'X'), (0x1F440, 'V'), ] def _seg_67(): return [ (0x1F441, 'X'), (0x1F442, 'V'), (0x1F4F8, 'X'), (0x1F4F9, 'V'), (0x1F4FD, 'X'), (0x1F500, 'V'), (0x1F53E, 
'X'), (0x1F540, 'V'), (0x1F544, 'X'), (0x1F550, 'V'), (0x1F568, 'X'), (0x1F5FB, 'V'), (0x1F641, 'X'), (0x1F645, 'V'), (0x1F650, 'X'), (0x1F680, 'V'), (0x1F6C6, 'X'), (0x1F700, 'V'), (0x1F774, 'X'), (0x20000, 'V'), (0x2A6D7, 'X'), (0x2A700, 'V'), (0x2B735, 'X'), (0x2B740, 'V'), (0x2B81E, 'X'), (0x2F800, 'M', u'丽'), (0x2F801, 'M', u'丸'), (0x2F802, 'M', u'乁'), (0x2F803, 'M', u'𠄢'), (0x2F804, 'M', u'你'), (0x2F805, 'M', u'侮'), (0x2F806, 'M', u'侻'), (0x2F807, 'M', u'倂'), (0x2F808, 'M', u'偺'), (0x2F809, 'M', u'備'), (0x2F80A, 'M', u'僧'), (0x2F80B, 'M', u'像'), (0x2F80C, 'M', u'㒞'), (0x2F80D, 'M', u'𠘺'), (0x2F80E, 'M', u'免'), (0x2F80F, 'M', u'兔'), (0x2F810, 'M', u'兤'), (0x2F811, 'M', u'具'), (0x2F812, 'M', u'𠔜'), (0x2F813, 'M', u'㒹'), (0x2F814, 'M', u'內'), (0x2F815, 'M', u'再'), (0x2F816, 'M', u'𠕋'), (0x2F817, 'M', u'冗'), (0x2F818, 'M', u'冤'), (0x2F819, 'M', u'仌'), (0x2F81A, 'M', u'冬'), (0x2F81B, 'M', u'况'), (0x2F81C, 'M', u'𩇟'), (0x2F81D, 'M', u'凵'), (0x2F81E, 'M', u'刃'), (0x2F81F, 'M', u'㓟'), (0x2F820, 'M', u'刻'), (0x2F821, 'M', u'剆'), (0x2F822, 'M', u'割'), (0x2F823, 'M', u'剷'), (0x2F824, 'M', u'㔕'), (0x2F825, 'M', u'勇'), (0x2F826, 'M', u'勉'), (0x2F827, 'M', u'勤'), (0x2F828, 'M', u'勺'), (0x2F829, 'M', u'包'), (0x2F82A, 'M', u'匆'), (0x2F82B, 'M', u'北'), (0x2F82C, 'M', u'卉'), (0x2F82D, 'M', u'卑'), (0x2F82E, 'M', u'博'), (0x2F82F, 'M', u'即'), (0x2F830, 'M', u'卽'), (0x2F831, 'M', u'卿'), (0x2F834, 'M', u'𠨬'), (0x2F835, 'M', u'灰'), (0x2F836, 'M', u'及'), (0x2F837, 'M', u'叟'), (0x2F838, 'M', u'𠭣'), (0x2F839, 'M', u'叫'), (0x2F83A, 'M', u'叱'), (0x2F83B, 'M', u'吆'), (0x2F83C, 'M', u'咞'), (0x2F83D, 'M', u'吸'), (0x2F83E, 'M', u'呈'), (0x2F83F, 'M', u'周'), (0x2F840, 'M', u'咢'), (0x2F841, 'M', u'哶'), (0x2F842, 'M', u'唐'), (0x2F843, 'M', u'啓'), (0x2F844, 'M', u'啣'), (0x2F845, 'M', u'善'), (0x2F847, 'M', u'喙'), (0x2F848, 'M', u'喫'), (0x2F849, 'M', u'喳'), (0x2F84A, 'M', u'嗂'), (0x2F84B, 'M', u'圖'), (0x2F84C, 'M', u'嘆'), (0x2F84D, 'M', u'圗'), ] def _seg_68(): return [ (0x2F84E, 'M', u'噑'), 
(0x2F84F, 'M', u'噴'), (0x2F850, 'M', u'切'), (0x2F851, 'M', u'壮'), (0x2F852, 'M', u'城'), (0x2F853, 'M', u'埴'), (0x2F854, 'M', u'堍'), (0x2F855, 'M', u'型'), (0x2F856, 'M', u'堲'), (0x2F857, 'M', u'報'), (0x2F858, 'M', u'墬'), (0x2F859, 'M', u'𡓤'), (0x2F85A, 'M', u'売'), (0x2F85B, 'M', u'壷'), (0x2F85C, 'M', u'夆'), (0x2F85D, 'M', u'多'), (0x2F85E, 'M', u'夢'), (0x2F85F, 'M', u'奢'), (0x2F860, 'M', u'𡚨'), (0x2F861, 'M', u'𡛪'), (0x2F862, 'M', u'姬'), (0x2F863, 'M', u'娛'), (0x2F864, 'M', u'娧'), (0x2F865, 'M', u'姘'), (0x2F866, 'M', u'婦'), (0x2F867, 'M', u'㛮'), (0x2F868, 'X'), (0x2F869, 'M', u'嬈'), (0x2F86A, 'M', u'嬾'), (0x2F86C, 'M', u'𡧈'), (0x2F86D, 'M', u'寃'), (0x2F86E, 'M', u'寘'), (0x2F86F, 'M', u'寧'), (0x2F870, 'M', u'寳'), (0x2F871, 'M', u'𡬘'), (0x2F872, 'M', u'寿'), (0x2F873, 'M', u'将'), (0x2F874, 'X'), (0x2F875, 'M', u'尢'), (0x2F876, 'M', u'㞁'), (0x2F877, 'M', u'屠'), (0x2F878, 'M', u'屮'), (0x2F879, 'M', u'峀'), (0x2F87A, 'M', u'岍'), (0x2F87B, 'M', u'𡷤'), (0x2F87C, 'M', u'嵃'), (0x2F87D, 'M', u'𡷦'), (0x2F87E, 'M', u'嵮'), (0x2F87F, 'M', u'嵫'), (0x2F880, 'M', u'嵼'), (0x2F881, 'M', u'巡'), (0x2F882, 'M', u'巢'), (0x2F883, 'M', u'㠯'), (0x2F884, 'M', u'巽'), (0x2F885, 'M', u'帨'), (0x2F886, 'M', u'帽'), (0x2F887, 'M', u'幩'), (0x2F888, 'M', u'㡢'), (0x2F889, 'M', u'𢆃'), (0x2F88A, 'M', u'㡼'), (0x2F88B, 'M', u'庰'), (0x2F88C, 'M', u'庳'), (0x2F88D, 'M', u'庶'), (0x2F88E, 'M', u'廊'), (0x2F88F, 'M', u'𪎒'), (0x2F890, 'M', u'廾'), (0x2F891, 'M', u'𢌱'), (0x2F893, 'M', u'舁'), (0x2F894, 'M', u'弢'), (0x2F896, 'M', u'㣇'), (0x2F897, 'M', u'𣊸'), (0x2F898, 'M', u'𦇚'), (0x2F899, 'M', u'形'), (0x2F89A, 'M', u'彫'), (0x2F89B, 'M', u'㣣'), (0x2F89C, 'M', u'徚'), (0x2F89D, 'M', u'忍'), (0x2F89E, 'M', u'志'), (0x2F89F, 'M', u'忹'), (0x2F8A0, 'M', u'悁'), (0x2F8A1, 'M', u'㤺'), (0x2F8A2, 'M', u'㤜'), (0x2F8A3, 'M', u'悔'), (0x2F8A4, 'M', u'𢛔'), (0x2F8A5, 'M', u'惇'), (0x2F8A6, 'M', u'慈'), (0x2F8A7, 'M', u'慌'), (0x2F8A8, 'M', u'慎'), (0x2F8A9, 'M', u'慌'), (0x2F8AA, 'M', u'慺'), (0x2F8AB, 'M', u'憎'), (0x2F8AC, 'M', u'憲'), (0x2F8AD, 
'M', u'憤'), (0x2F8AE, 'M', u'憯'), (0x2F8AF, 'M', u'懞'), (0x2F8B0, 'M', u'懲'), (0x2F8B1, 'M', u'懶'), (0x2F8B2, 'M', u'成'), (0x2F8B3, 'M', u'戛'), (0x2F8B4, 'M', u'扝'), ] def _seg_69(): return [ (0x2F8B5, 'M', u'抱'), (0x2F8B6, 'M', u'拔'), (0x2F8B7, 'M', u'捐'), (0x2F8B8, 'M', u'𢬌'), (0x2F8B9, 'M', u'挽'), (0x2F8BA, 'M', u'拼'), (0x2F8BB, 'M', u'捨'), (0x2F8BC, 'M', u'掃'), (0x2F8BD, 'M', u'揤'), (0x2F8BE, 'M', u'𢯱'), (0x2F8BF, 'M', u'搢'), (0x2F8C0, 'M', u'揅'), (0x2F8C1, 'M', u'掩'), (0x2F8C2, 'M', u'㨮'), (0x2F8C3, 'M', u'摩'), (0x2F8C4, 'M', u'摾'), (0x2F8C5, 'M', u'撝'), (0x2F8C6, 'M', u'摷'), (0x2F8C7, 'M', u'㩬'), (0x2F8C8, 'M', u'敏'), (0x2F8C9, 'M', u'敬'), (0x2F8CA, 'M', u'𣀊'), (0x2F8CB, 'M', u'旣'), (0x2F8CC, 'M', u'書'), (0x2F8CD, 'M', u'晉'), (0x2F8CE, 'M', u'㬙'), (0x2F8CF, 'M', u'暑'), (0x2F8D0, 'M', u'㬈'), (0x2F8D1, 'M', u'㫤'), (0x2F8D2, 'M', u'冒'), (0x2F8D3, 'M', u'冕'), (0x2F8D4, 'M', u'最'), (0x2F8D5, 'M', u'暜'), (0x2F8D6, 'M', u'肭'), (0x2F8D7, 'M', u'䏙'), (0x2F8D8, 'M', u'朗'), (0x2F8D9, 'M', u'望'), (0x2F8DA, 'M', u'朡'), (0x2F8DB, 'M', u'杞'), (0x2F8DC, 'M', u'杓'), (0x2F8DD, 'M', u'𣏃'), (0x2F8DE, 'M', u'㭉'), (0x2F8DF, 'M', u'柺'), (0x2F8E0, 'M', u'枅'), (0x2F8E1, 'M', u'桒'), (0x2F8E2, 'M', u'梅'), (0x2F8E3, 'M', u'𣑭'), (0x2F8E4, 'M', u'梎'), (0x2F8E5, 'M', u'栟'), (0x2F8E6, 'M', u'椔'), (0x2F8E7, 'M', u'㮝'), (0x2F8E8, 'M', u'楂'), (0x2F8E9, 'M', u'榣'), (0x2F8EA, 'M', u'槪'), (0x2F8EB, 'M', u'檨'), (0x2F8EC, 'M', u'𣚣'), (0x2F8ED, 'M', u'櫛'), (0x2F8EE, 'M', u'㰘'), (0x2F8EF, 'M', u'次'), (0x2F8F0, 'M', u'𣢧'), (0x2F8F1, 'M', u'歔'), (0x2F8F2, 'M', u'㱎'), (0x2F8F3, 'M', u'歲'), (0x2F8F4, 'M', u'殟'), (0x2F8F5, 'M', u'殺'), (0x2F8F6, 'M', u'殻'), (0x2F8F7, 'M', u'𣪍'), (0x2F8F8, 'M', u'𡴋'), (0x2F8F9, 'M', u'𣫺'), (0x2F8FA, 'M', u'汎'), (0x2F8FB, 'M', u'𣲼'), (0x2F8FC, 'M', u'沿'), (0x2F8FD, 'M', u'泍'), (0x2F8FE, 'M', u'汧'), (0x2F8FF, 'M', u'洖'), (0x2F900, 'M', u'派'), (0x2F901, 'M', u'海'), (0x2F902, 'M', u'流'), (0x2F903, 'M', u'浩'), (0x2F904, 'M', u'浸'), (0x2F905, 'M', u'涅'), (0x2F906, 'M', u'𣴞'), 
(0x2F907, 'M', u'洴'), (0x2F908, 'M', u'港'), (0x2F909, 'M', u'湮'), (0x2F90A, 'M', u'㴳'), (0x2F90B, 'M', u'滋'), (0x2F90C, 'M', u'滇'), (0x2F90D, 'M', u'𣻑'), (0x2F90E, 'M', u'淹'), (0x2F90F, 'M', u'潮'), (0x2F910, 'M', u'𣽞'), (0x2F911, 'M', u'𣾎'), (0x2F912, 'M', u'濆'), (0x2F913, 'M', u'瀹'), (0x2F914, 'M', u'瀞'), (0x2F915, 'M', u'瀛'), (0x2F916, 'M', u'㶖'), (0x2F917, 'M', u'灊'), (0x2F918, 'M', u'災'), ] def _seg_70(): return [ (0x2F919, 'M', u'灷'), (0x2F91A, 'M', u'炭'), (0x2F91B, 'M', u'𠔥'), (0x2F91C, 'M', u'煅'), (0x2F91D, 'M', u'𤉣'), (0x2F91E, 'M', u'熜'), (0x2F91F, 'X'), (0x2F920, 'M', u'爨'), (0x2F921, 'M', u'爵'), (0x2F922, 'M', u'牐'), (0x2F923, 'M', u'𤘈'), (0x2F924, 'M', u'犀'), (0x2F925, 'M', u'犕'), (0x2F926, 'M', u'𤜵'), (0x2F927, 'M', u'𤠔'), (0x2F928, 'M', u'獺'), (0x2F929, 'M', u'王'), (0x2F92A, 'M', u'㺬'), (0x2F92B, 'M', u'玥'), (0x2F92C, 'M', u'㺸'), (0x2F92E, 'M', u'瑇'), (0x2F92F, 'M', u'瑜'), (0x2F930, 'M', u'瑱'), (0x2F931, 'M', u'璅'), (0x2F932, 'M', u'瓊'), (0x2F933, 'M', u'㼛'), (0x2F934, 'M', u'甤'), (0x2F935, 'M', u'𤰶'), (0x2F936, 'M', u'甾'), (0x2F937, 'M', u'𤲒'), (0x2F938, 'M', u'異'), (0x2F939, 'M', u'𢆟'), (0x2F93A, 'M', u'瘐'), (0x2F93B, 'M', u'𤾡'), (0x2F93C, 'M', u'𤾸'), (0x2F93D, 'M', u'𥁄'), (0x2F93E, 'M', u'㿼'), (0x2F93F, 'M', u'䀈'), (0x2F940, 'M', u'直'), (0x2F941, 'M', u'𥃳'), (0x2F942, 'M', u'𥃲'), (0x2F943, 'M', u'𥄙'), (0x2F944, 'M', u'𥄳'), (0x2F945, 'M', u'眞'), (0x2F946, 'M', u'真'), (0x2F948, 'M', u'睊'), (0x2F949, 'M', u'䀹'), (0x2F94A, 'M', u'瞋'), (0x2F94B, 'M', u'䁆'), (0x2F94C, 'M', u'䂖'), (0x2F94D, 'M', u'𥐝'), (0x2F94E, 'M', u'硎'), (0x2F94F, 'M', u'碌'), (0x2F950, 'M', u'磌'), (0x2F951, 'M', u'䃣'), (0x2F952, 'M', u'𥘦'), (0x2F953, 'M', u'祖'), (0x2F954, 'M', u'𥚚'), (0x2F955, 'M', u'𥛅'), (0x2F956, 'M', u'福'), (0x2F957, 'M', u'秫'), (0x2F958, 'M', u'䄯'), (0x2F959, 'M', u'穀'), (0x2F95A, 'M', u'穊'), (0x2F95B, 'M', u'穏'), (0x2F95C, 'M', u'𥥼'), (0x2F95D, 'M', u'𥪧'), (0x2F95F, 'X'), (0x2F960, 'M', u'䈂'), (0x2F961, 'M', u'𥮫'), (0x2F962, 'M', u'篆'), (0x2F963, 'M', u'築'), 
(0x2F964, 'M', u'䈧'), (0x2F965, 'M', u'𥲀'), (0x2F966, 'M', u'糒'), (0x2F967, 'M', u'䊠'), (0x2F968, 'M', u'糨'), (0x2F969, 'M', u'糣'), (0x2F96A, 'M', u'紀'), (0x2F96B, 'M', u'𥾆'), (0x2F96C, 'M', u'絣'), (0x2F96D, 'M', u'䌁'), (0x2F96E, 'M', u'緇'), (0x2F96F, 'M', u'縂'), (0x2F970, 'M', u'繅'), (0x2F971, 'M', u'䌴'), (0x2F972, 'M', u'𦈨'), (0x2F973, 'M', u'𦉇'), (0x2F974, 'M', u'䍙'), (0x2F975, 'M', u'𦋙'), (0x2F976, 'M', u'罺'), (0x2F977, 'M', u'𦌾'), (0x2F978, 'M', u'羕'), (0x2F979, 'M', u'翺'), (0x2F97A, 'M', u'者'), (0x2F97B, 'M', u'𦓚'), (0x2F97C, 'M', u'𦔣'), (0x2F97D, 'M', u'聠'), (0x2F97E, 'M', u'𦖨'), (0x2F97F, 'M', u'聰'), ] def _seg_71(): return [ (0x2F980, 'M', u'𣍟'), (0x2F981, 'M', u'䏕'), (0x2F982, 'M', u'育'), (0x2F983, 'M', u'脃'), (0x2F984, 'M', u'䐋'), (0x2F985, 'M', u'脾'), (0x2F986, 'M', u'媵'), (0x2F987, 'M', u'𦞧'), (0x2F988, 'M', u'𦞵'), (0x2F989, 'M', u'𣎓'), (0x2F98A, 'M', u'𣎜'), (0x2F98B, 'M', u'舁'), (0x2F98C, 'M', u'舄'), (0x2F98D, 'M', u'辞'), (0x2F98E, 'M', u'䑫'), (0x2F98F, 'M', u'芑'), (0x2F990, 'M', u'芋'), (0x2F991, 'M', u'芝'), (0x2F992, 'M', u'劳'), (0x2F993, 'M', u'花'), (0x2F994, 'M', u'芳'), (0x2F995, 'M', u'芽'), (0x2F996, 'M', u'苦'), (0x2F997, 'M', u'𦬼'), (0x2F998, 'M', u'若'), (0x2F999, 'M', u'茝'), (0x2F99A, 'M', u'荣'), (0x2F99B, 'M', u'莭'), (0x2F99C, 'M', u'茣'), (0x2F99D, 'M', u'莽'), (0x2F99E, 'M', u'菧'), (0x2F99F, 'M', u'著'), (0x2F9A0, 'M', u'荓'), (0x2F9A1, 'M', u'菊'), (0x2F9A2, 'M', u'菌'), (0x2F9A3, 'M', u'菜'), (0x2F9A4, 'M', u'𦰶'), (0x2F9A5, 'M', u'𦵫'), (0x2F9A6, 'M', u'𦳕'), (0x2F9A7, 'M', u'䔫'), (0x2F9A8, 'M', u'蓱'), (0x2F9A9, 'M', u'蓳'), (0x2F9AA, 'M', u'蔖'), (0x2F9AB, 'M', u'𧏊'), (0x2F9AC, 'M', u'蕤'), (0x2F9AD, 'M', u'𦼬'), (0x2F9AE, 'M', u'䕝'), (0x2F9AF, 'M', u'䕡'), (0x2F9B0, 'M', u'𦾱'), (0x2F9B1, 'M', u'𧃒'), (0x2F9B2, 'M', u'䕫'), (0x2F9B3, 'M', u'虐'), (0x2F9B4, 'M', u'虜'), (0x2F9B5, 'M', u'虧'), (0x2F9B6, 'M', u'虩'), (0x2F9B7, 'M', u'蚩'), (0x2F9B8, 'M', u'蚈'), (0x2F9B9, 'M', u'蜎'), (0x2F9BA, 'M', u'蛢'), (0x2F9BB, 'M', u'蝹'), (0x2F9BC, 'M', u'蜨'), (0x2F9BD, 'M', 
u'蝫'), (0x2F9BE, 'M', u'螆'), (0x2F9BF, 'X'), (0x2F9C0, 'M', u'蟡'), (0x2F9C1, 'M', u'蠁'), (0x2F9C2, 'M', u'䗹'), (0x2F9C3, 'M', u'衠'), (0x2F9C4, 'M', u'衣'), (0x2F9C5, 'M', u'𧙧'), (0x2F9C6, 'M', u'裗'), (0x2F9C7, 'M', u'裞'), (0x2F9C8, 'M', u'䘵'), (0x2F9C9, 'M', u'裺'), (0x2F9CA, 'M', u'㒻'), (0x2F9CB, 'M', u'𧢮'), (0x2F9CC, 'M', u'𧥦'), (0x2F9CD, 'M', u'䚾'), (0x2F9CE, 'M', u'䛇'), (0x2F9CF, 'M', u'誠'), (0x2F9D0, 'M', u'諭'), (0x2F9D1, 'M', u'變'), (0x2F9D2, 'M', u'豕'), (0x2F9D3, 'M', u'𧲨'), (0x2F9D4, 'M', u'貫'), (0x2F9D5, 'M', u'賁'), (0x2F9D6, 'M', u'贛'), (0x2F9D7, 'M', u'起'), (0x2F9D8, 'M', u'𧼯'), (0x2F9D9, 'M', u'𠠄'), (0x2F9DA, 'M', u'跋'), (0x2F9DB, 'M', u'趼'), (0x2F9DC, 'M', u'跰'), (0x2F9DD, 'M', u'𠣞'), (0x2F9DE, 'M', u'軔'), (0x2F9DF, 'M', u'輸'), (0x2F9E0, 'M', u'𨗒'), (0x2F9E1, 'M', u'𨗭'), (0x2F9E2, 'M', u'邔'), (0x2F9E3, 'M', u'郱'), ] def _seg_72(): return [ (0x2F9E4, 'M', u'鄑'), (0x2F9E5, 'M', u'𨜮'), (0x2F9E6, 'M', u'鄛'), (0x2F9E7, 'M', u'鈸'), (0x2F9E8, 'M', u'鋗'), (0x2F9E9, 'M', u'鋘'), (0x2F9EA, 'M', u'鉼'), (0x2F9EB, 'M', u'鏹'), (0x2F9EC, 'M', u'鐕'), (0x2F9ED, 'M', u'𨯺'), (0x2F9EE, 'M', u'開'), (0x2F9EF, 'M', u'䦕'), (0x2F9F0, 'M', u'閷'), (0x2F9F1, 'M', u'𨵷'), (0x2F9F2, 'M', u'䧦'), (0x2F9F3, 'M', u'雃'), (0x2F9F4, 'M', u'嶲'), (0x2F9F5, 'M', u'霣'), (0x2F9F6, 'M', u'𩅅'), (0x2F9F7, 'M', u'𩈚'), (0x2F9F8, 'M', u'䩮'), (0x2F9F9, 'M', u'䩶'), (0x2F9FA, 'M', u'韠'), (0x2F9FB, 'M', u'𩐊'), (0x2F9FC, 'M', u'䪲'), (0x2F9FD, 'M', u'𩒖'), (0x2F9FE, 'M', u'頋'), (0x2FA00, 'M', u'頩'), (0x2FA01, 'M', u'𩖶'), (0x2FA02, 'M', u'飢'), (0x2FA03, 'M', u'䬳'), (0x2FA04, 'M', u'餩'), (0x2FA05, 'M', u'馧'), (0x2FA06, 'M', u'駂'), (0x2FA07, 'M', u'駾'), (0x2FA08, 'M', u'䯎'), (0x2FA09, 'M', u'𩬰'), (0x2FA0A, 'M', u'鬒'), (0x2FA0B, 'M', u'鱀'), (0x2FA0C, 'M', u'鳽'), (0x2FA0D, 'M', u'䳎'), (0x2FA0E, 'M', u'䳭'), (0x2FA0F, 'M', u'鵧'), (0x2FA10, 'M', u'𪃎'), (0x2FA11, 'M', u'䳸'), (0x2FA12, 'M', u'𪄅'), (0x2FA13, 'M', u'𪈎'), (0x2FA14, 'M', u'𪊑'), (0x2FA15, 'M', u'麻'), (0x2FA16, 'M', u'䵖'), (0x2FA17, 'M', u'黹'), (0x2FA18, 'M', 
u'黾'), (0x2FA19, 'M', u'鼅'), (0x2FA1A, 'M', u'鼏'), (0x2FA1B, 'M', u'鼖'), (0x2FA1C, 'M', u'鼻'), (0x2FA1D, 'M', u'𪘀'), (0x2FA1E, 'X'), (0xE0100, 'I'), (0xE01F0, 'X'), ] uts46data = tuple( _seg_0() + _seg_1() + _seg_2() + _seg_3() + _seg_4() + _seg_5() + _seg_6() + _seg_7() + _seg_8() + _seg_9() + _seg_10() + _seg_11() + _seg_12() + _seg_13() + _seg_14() + _seg_15() + _seg_16() + _seg_17() + _seg_18() + _seg_19() + _seg_20() + _seg_21() + _seg_22() + _seg_23() + _seg_24() + _seg_25() + _seg_26() + _seg_27() + _seg_28() + _seg_29() + _seg_30() + _seg_31() + _seg_32() + _seg_33() + _seg_34() + _seg_35() + _seg_36() + _seg_37() + _seg_38() + _seg_39() + _seg_40() + _seg_41() + _seg_42() + _seg_43() + _seg_44() + _seg_45() + _seg_46() + _seg_47() + _seg_48() + _seg_49() + _seg_50() + _seg_51() + _seg_52() + _seg_53() + _seg_54() + _seg_55() + _seg_56() + _seg_57() + _seg_58() + _seg_59() + _seg_60() + _seg_61() + _seg_62() + _seg_63() + _seg_64() + _seg_65() + _seg_66() + _seg_67() + _seg_68() + _seg_69() + _seg_70() + _seg_71() + _seg_72() )
apache-2.0
a-sk/alot
alot/addressbook/abook.py
4
1088
# Copyright (C) 2011-2015 Patrick Totzke <patricktotzke@gmail.com> # This file is released under the GNU GPL, version 3 or a later revision. # For further details see the COPYING file import os from . import AddressBook from ..settings.utils import read_config class AbookAddressBook(AddressBook): """:class:`AddressBook` that parses abook's config/database files""" def __init__(self, path='~/.abook/addressbook', **kwargs): """ :param path: path to theme file :type path: str """ AddressBook.__init__(self, **kwargs) DEFAULTSPATH = os.path.join(os.path.dirname(__file__), 'defaults') self._spec = os.path.join(DEFAULTSPATH, 'abook_contacts.spec') path = os.path.expanduser(path) self._config = read_config(path, self._spec) del(self._config['format']) def get_contacts(self): c = self._config res = [] for id in c.sections: for email in c[id]['email']: if email: res.append((c[id]['name'], email)) return res
gpl-3.0
iModels/mbuild
mbuild/utils/conversion.py
1
6621
"""mBuild conversion utilities.""" import numpy as np def RB_to_OPLS(c0, c1, c2, c3, c4, c5): """Convert Ryckaert-Bellemans type dihedrals to OPLS type. Parameters ---------- c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol) Returns ------- opls_coeffs : np.array, shape=(4,) Array containing the OPLS dihedrals coeffs f1, f2, f3, and f4 (in kcal/mol) """ f1 = (-1.5 * c3) - (2 * c1) f2 = c0 + c1 + c3 f3 = -0.5 * c3 f4 = -0.25 * c4 return np.array([f1, f2, f3, f4]) def RB_to_CHARMM(c0, c1, c2, c3, c4, c5): r"""Convert Ryckaert-Bellemans (RB) type dihedrals to CHARMM type. .. math:: RB_torsions &= c0 + c1*cos(psi) + c2*cos(psi)^2 + c3*cos(psi)^3 + \\ &= c4*cos(psi)^4 + c5*cos(5*psi)^5 where :math:`psi = t - pi = t - 180 degrees` .. math:: CHARMM_torsions &= K0 * (1 + cos(n0*t - d0)) + \\ &= K1 * (1 + cos(n1*t - d1)) + \\ &= K2 * (1 + cos(n2*t - d2)) + \\ &= K3 * (1 + cos(n3*t - d3)) + \\ &= K4 * (1 + cos(n4*t - d4)) + \\ &= K5 * (1 + cos(n5*t - d5)) CHARMM_torsions &= K0 + &= K1 * (1 + cos(n1*t - d1)) + \\ &= K2 * (1 + cos(n2*t - d2)) + \\ &= K3 * (1 + cos(n3*t - d3)) + \\ &= K4 * (1 + cos(n4*t - d4)) + \\ &= K5 * (1 + cos(n5*t - d5)) Parameters ---------- c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol) n0 = 0 n1 = 1 n2 = 2 n3 = 3 n4 = 4 n5 = 5 d0 = 90 d1 = 180 d2 = 0 d3 = 180 d4 = 0 d5 = 180 Returns ------- CHARMM_dihedral coeffs : np.matrix, shape=(6,3) Array containing the CHARMM dihedral coeffs (in kcal/mol): [[K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, d3], [K4, n4, d4], [K5, n5, d5]] """ # see below or the long version is, # K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5 K0 = c0 - c1 - c3 - (c4 / 4) - c5 K1 = c1 + (3 / 4) * c3 + (5 / 8) * c5 K2 = (1 / 2) * c2 + (1 / 2) * c4 K3 = (1 / 4) * c3 + (5 / 16) * c5 K4 = (1 / 8) * c4 K5 = (1 / 16) * c5 n0 = 0 n1 = 1 n2 = 2 n3 = 3 n4 = 4 n5 = 5 d0 = 90 d1 = 180 d2 = 0 d3 = 180 d4 = 0 d5 = 180 return np.array( [ [K0, n0, d0], [K1, n1, d1], [K2, n2, d2], [K3, n3, 
d3], [K4, n4, d4], [K5, n5, d5], ] ) def base10_to_base62_alph_num(base10_no): """Convert base-10 integer to base-62 alphanumeric system. This function provides a utility to write pdb/psf files such that it can add may more than 9999 atoms and 999 residues. Parameters ---------- base10_no: int The integer to convert to base-62 alphanumeric system Returns ------- str The converted base-62 system string See Also -------- mbuild.conversion._to_base: Helper function to perform a base-n conversion """ return _to_base(base10_no, base=62) def base10_to_base52_alph(base10_no): """Convert base-10 integer to base-52 alphabetic system. This function provides a utility to write pdb/psf files such that it can add more atom types in the 3 or 4 character limited pdb and psf files Parameters ---------- base10_no: int The integer to convert to base-52 alphabetic system Returns ------- str The converted base-52 system string See Also -------- mbuild.conversion._to_base: Helper function to perform a base-n conversion """ return _to_base(number=base10_no, base=52) def base10_to_base26_alph(base10_no): """Convert base-10 integer to base-26 alphabetic system. This function provides a utility to write pdb/psf files such that it can add many more than 9999 atoms and 999 residues. Parameters ---------- base10_no: int The integer to convert to base-26 alphabetic system Returns ------- str The converted base-26 system string See Also -------- mbuild.conversion._to_base: Helper function to perform a base-n conversion """ return _to_base(base10_no, base=26) def base10_to_base16_alph_num(base10_no): """Convert base-10 integer to base-16 hexadecimal system. This function provides a utility to write pdb/psf files such that it can add many more than 9999 atoms and 999 residues. 
Parameters ---------- base10_no: int The integer to convert to base-16 hexadecimal system Returns ------- str The converted base-16 system string See Also -------- mbuild.conversion._to_base: Helper function to perform a base-n conversion """ return hex(int(base10_no))[2:] # Helpers to convert base def _to_base(number, base=62): """Convert a base-10 number into base-n alpha-num.""" start_values = {62: "0", 52: "A", 26: "A"} if base not in start_values: raise ValueError( f"Base-{base} system is not supported. Supported bases are: " f"{list(start_values.keys())}" ) num = 1 number = int(number) remainder = _digit_to_alpha_num((number % base), base) base_n_values = str(remainder) power = 1 while num != 0: num = int(number / base ** power) if num == base: base_n_values = start_values[base] + base_n_values elif num != 0 and num > base: base_n_values = ( str(_digit_to_alpha_num(int(num % base), base)) + base_n_values ) elif (num != 0) and (num < base): base_n_values = ( str(_digit_to_alpha_num(int(num), base)) + base_n_values ) power += 1 return base_n_values def _digit_to_alpha_num(digit, base=52): """Convert digit to base-n.""" base_values = { 26: {j: chr(j + 65) for j in range(0, 26)}, 52: {j: chr(j + 65) if j < 26 else chr(j + 71) for j in range(0, 52)}, 62: {j: chr(j + 55) if j < 36 else chr(j + 61) for j in range(10, 62)}, } if base not in base_values: raise ValueError( f"Base-{base} system is not supported. Supported bases are: " f"{list(base_values.keys())}" ) return base_values[base].get(digit, digit)
mit
scipy/SciPyCentral
scipy_central/submission/views/show.py
3
13053
# django imports from django.conf import settings from django.shortcuts import render_to_response, redirect, HttpResponse from django.template import RequestContext from django.template.defaultfilters import slugify from django.contrib.sites.models import Site from django.contrib.auth.decorators import login_required from django.core.exceptions import ObjectDoesNotExist # SciPy Central imports from scipy_central.utils import paginated_queryset, ensuredir from scipy_central.pages.views import page_404_error from scipy_central.pagehit.views import create_hit, get_pagehits from scipy_central.submission.templatetags.core_tags import top_authors from scipy_central.submission import models # Python imports import logging import re import os import datetime import zipfile try: from cStringIO import StringIO except ImportError: from StringIO import StringIO logger = logging.getLogger('scipycentral') logger.debug('Initializing submission::views.show.py') def get_items_or_404(view_function): """ Decorator for views that ensures the revision and submission requested actually exist. If not, throws a 404, else, it calls the view function with the required inputs. """ def decorator(request, item_id, rev_id=None, slug=None, filename=None): """Retrieves the ``Submission`` and ``Revision`` objects when given, at a minimum the submission's primary key (``item_id``). Since the submission can have more than 1 revision, we can get a specific revision, ``rev_id``, otherwise we will always get the latest revision. ``slug`` is ignored for now - just used to create good SEO URLs. 
""" try: # Use the Submissions manager's ``all()`` function the_submission = models.Submission.objects.all().filter(id=item_id) except ObjectDoesNotExist: return page_404_error(request, 'You request a non-existant item') if len(the_submission) == 0: return page_404_error(request, 'This item does not exist yet') the_submission = the_submission[0] the_revision = the_submission.last_revision if rev_id: # can be None or '': all_revisions = the_submission.revisions.all() rev_index = int(rev_id)-1 if rev_index < 0: rev_index = len(all_revisions)-1 try: the_revision = all_revisions[rev_index] except (ValueError, IndexError): return page_404_error(request, ('The requested revision is ' 'non-existant.')) # Don't return revisions that are not approved for display yet if not isinstance(the_revision, models.Revision) or\ not(the_revision.is_displayed): return page_404_error(request, "That revision isn't available yet.") # Is the URL of the form: "..../NN/MM/edit"; if so, then edit the item path_split = request.path.split('/') if len(path_split)>4 and path_split[4] in ['download', 'show']: if path_split[4] == 'show' and len(path_split)>=6: return view_function(request, the_submission, the_revision, filename=path_split[5:]) else: return view_function(request, the_submission, the_revision) # Is the URL not the canonical URL for the item? .... redirect the user else: if rev_id is None: rev_id_str = '0' do_redirect = True else: rev_id_str = str(the_revision.rev_id+1) do_redirect = False if slug is None or the_revision.slug != slug or do_redirect: return redirect('/'.join(['/item', item_id, rev_id_str, the_revision.slug]), permanent=True) return view_function(request, the_submission, the_revision) return decorator @login_required def validate_submission(request, code): """ Validate a submission (via an emailed link to the user). 
From BSD licensed code, James Bennett https://bitbucket.org/ubernostrum/django-registration/src/58eef8330b0f/registration/models.py """ SHA1_RE = re.compile('^[a-f0-9]{40}$') if SHA1_RE.search(code): try: rev = models.Revision.objects.get(validation_hash=code) except ObjectDoesNotExist: return page_404_error(request, ('That validation code was invalid' ' or used already.')) rev.is_displayed = True rev.validation_hash = None rev.save() return redirect(rev.get_absolute_url()) else: return page_404_error(request, ('That validation code is invalid.')) @get_items_or_404 def view_item(request, submission, revision): """ Shows a submitted item to web users. The ``slug`` is always ignored, but appears in the URLs for the sake of search engines. The revision, if specified >= 0 will show the particular revision of the item, rather than the latest revision (default). """ create_hit(request, submission) permalink = settings.SPC['short_URL_root'] + str(submission.id) + '/' + \ str(revision.rev_id+1) + '/' latest_link = settings.SPC['short_URL_root'] + str(submission.id) + '/' pageviews = get_pagehits('submission', start_date=datetime.datetime.now()\ -datetime.timedelta(days=settings.SPC['hit_horizon']), item_pk=submission.id) package_files = [] if submission.sub_type == 'package': # Update the repo to the version required submission.fileset.checkout_revision(revision.hash_id) package_files = submission.fileset.list_iterator() return render_to_response('submission/item.html', {}, context_instance=RequestContext(request, {'item': revision, 'tag_list': revision.tags.all(), 'permalink': permalink, 'latest_link': latest_link, 'pageviews': pageviews, 'pageview_days': settings.SPC['hit_horizon'], 'package_files': package_files, })) @get_items_or_404 def download_submission(request, submission, revision): create_hit(request, submission, extra_info="download") if submission.sub_type == 'snippet': response = HttpResponse(mimetype="application/x-python") fname = 
submission.slug.replace('-', '_') + '.py' response["Content-Disposition"] = "attachment; filename=%s" % fname source = Site.objects.get_current().domain + \ submission.get_absolute_url() response.write('# Source: ' + source + '\n\n' + revision.item_code) return response if submission.sub_type == 'package': # Checkout the repo to the revision version checkout = submission.fileset.checkout_revision(revision.hash_id) if not checkout: logger.error('Could not checkout revision %s for rev.id %d' % (revision.hash_id, revision.pk)) return page_404_error('Could not create ZIP file. This error has ' 'been reported') logger.info('Checkout revision %s for rev.id %d' % (revision.hash_id, revision.pk)) package_data = StringIO() zipf = zipfile.ZipFile(package_data, 'w', zipfile.ZIP_DEFLATED) full_repo_path = os.path.abspath(os.path.join(settings.SPC['storage_dir'], submission.fileset.repo_path)) for path, dirs, files in os.walk(full_repo_path): # ignore revision backend dir if os.path.split(path)[1] == '.' 
+ settings.SPC['revisioning_backend']: for entry in dirs[:]: dirs.remove(entry) continue for name in files: full_name = os.path.abspath(os.path.join(path, name)) zipf.write(full_name, full_name.partition(full_repo_path)[2]) for name in zipf.filelist: name.create_system = 0 zipf.close() package_data.seek(0) response = HttpResponse(mimetype='attachment; application/zip') response['Content-Disposition'] = 'filename=%s-%d-%d.zip' \ % (submission.slug, submission.pk, revision.rev_id_human) response.write(package_data.read()) package_data.close() # checkout the repo to latest revision last_revision = submission.last_revision checkout = submission.fileset.checkout_revision(last_revision.hash_id) if not checkout: logger.error('Could not checkout out revision %s for rev.id %d' % (last_revision.hash_id, last_revision.pk)) return response def sort_items_by_page_views(all_items, item_module_name): # TODO(KGD): Cache this reordering of ``items`` for a period of time today = datetime.datetime.now() start_date = today - datetime.timedelta(days=settings.SPC['hit_horizon']) page_order = get_pagehits(item_module_name, start_date=start_date, end_date=today) page_order.sort(reverse=True) #``page_order`` is a list of tuples; the 2nd entry in each tuple is the # primary key, that must exist in ``items_pk``. 
all_items = list(all_items) items_pk = [item.pk for item in all_items] entry_order = [] count_list = [] for count, pk in page_order: try: idx = items_pk.index(pk) except ValueError: pass else: items_pk[idx] = None entry_order.append(all_items[idx]) count_list.append(count) # Items that have never been viewed get added to the bottom: for idx, pk in enumerate(items_pk): if pk is not None: entry_order.append(all_items[idx]) count_list.append(0) return entry_order, count_list def show_items(request, what_view='', extra_info=''): """ Show different views onto all **revision** items (not submissions) """ what_view = what_view.lower() extra_info = extra_info.lower() entry_order = [] page_title = '' template_name = 'submission/show-entries.html' if what_view == 'tag': all_revs = models.Revision.objects.most_recent().\ filter(tags__slug=slugify(extra_info)) page_title = 'All entries tagged' entry_order = list(all_revs) elif what_view == 'show' and extra_info == 'all-tags': page_title = 'All tags' template_name = 'submission/show-tag-cloud.html' elif what_view == 'show' and extra_info == 'all-revisions': # Show all submissions in reverse time order all_revs = models.Revision.objects.all().order_by('-date_created') page_title = 'All revisions' extra_info = '' entry_order = list(all_revs) elif what_view == 'show' and extra_info == 'all-unique-revisions': all_subs = models.Submission.objects.all().order_by('-date_created') page_title = 'All submissions' extra_info = ' (only showing the latest revision)' entry_order = [sub.last_revision for sub in all_subs if sub.last_revision.is_displayed] elif what_view == 'sort' and extra_info == 'most-viewed': page_title = 'All submissions in order of most views' extra_info = '' all_subs = set() for rev in models.Revision.objects.all(): all_subs.add(rev.entry) entry_order, _ = sort_items_by_page_views(all_subs, 'submission') entry_order = [entry.last_revision for entry in entry_order] elif what_view == 'show' and extra_info == 
'top-contributors': page_title = 'Top contributors' extra_info = '' entry_order = top_authors('', 0) elif what_view == 'validate': return validate_submission(request, code=extra_info) entries = paginated_queryset(request, entry_order) return render_to_response(template_name, {}, context_instance=RequestContext(request, {'entries': entries, 'page_title': page_title, 'extra_info': extra_info, 'what_view' : what_view,}))
bsd-3-clause
Stavitsky/neutron
neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py
48
11910
# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from testtools import matchers from neutron.common import exceptions as exc import neutron.db.api as db from neutron.plugins.common import constants as p_const from neutron.plugins.common import utils as plugin_utils from neutron.plugins.ml2 import config from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_vlan from neutron.tests.unit import testlib_api PROVIDER_NET = 'phys_net1' TENANT_NET = 'phys_net2' VLAN_MIN = 200 VLAN_MAX = 209 NETWORK_VLAN_RANGES = [PROVIDER_NET, "%s:%s:%s" % (TENANT_NET, VLAN_MIN, VLAN_MAX)] UPDATED_VLAN_RANGES = { PROVIDER_NET: [], TENANT_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)], } class VlanTypeTest(testlib_api.SqlTestCase): def setUp(self): super(VlanTypeTest, self).setUp() config.cfg.CONF.set_override('network_vlan_ranges', NETWORK_VLAN_RANGES, group='ml2_type_vlan') self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( NETWORK_VLAN_RANGES) self.driver = type_vlan.VlanTypeDriver() self.driver._sync_vlan_allocations() self.session = db.get_session() self.driver.physnet_mtus = [] def test_parse_network_exception_handling(self): with mock.patch.object(plugin_utils, 'parse_network_vlan_ranges') as parse_ranges: parse_ranges.side_effect = Exception('any exception') self.assertRaises(SystemExit, self.driver._parse_network_vlan_ranges) def _get_allocation(self, session, segment): return 
session.query(type_vlan.VlanAllocation).filter_by( physical_network=segment[api.PHYSICAL_NETWORK], vlan_id=segment[api.SEGMENTATION_ID]).first() def test_partial_segment_is_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.assertTrue(self.driver.is_partial_segment(segment)) def test_specific_segment_is_not_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 1} self.assertFalse(self.driver.is_partial_segment(segment)) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 1} self.assertIsNone(self.driver.validate_provider_segment(segment)) def test_validate_provider_segment_without_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_without_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_with_missing_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.SEGMENTATION_ID: 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: 'other_phys_net', api.SEGMENTATION_ID: 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 5000} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_input(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: 
PROVIDER_NET, api.SEGMENTATION_ID: 1, 'invalid': 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_sync_vlan_allocations(self): def check_in_ranges(network_vlan_ranges): vlan_min, vlan_max = network_vlan_ranges[TENANT_NET][0] segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} segment[api.SEGMENTATION_ID] = vlan_min - 1 self.assertIsNone( self._get_allocation(self.session, segment)) segment[api.SEGMENTATION_ID] = vlan_max + 1 self.assertIsNone( self._get_allocation(self.session, segment)) segment[api.SEGMENTATION_ID] = vlan_min self.assertFalse( self._get_allocation(self.session, segment).allocated) segment[api.SEGMENTATION_ID] = vlan_max self.assertFalse( self._get_allocation(self.session, segment).allocated) check_in_ranges(self.network_vlan_ranges) self.driver.network_vlan_ranges = UPDATED_VLAN_RANGES self.driver._sync_vlan_allocations() check_in_ranges(UPDATED_VLAN_RANGES) def test_reserve_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} alloc = self._get_allocation(self.session, segment) self.assertIsNone(alloc) observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertTrue(alloc.allocated) def test_reserve_provider_segment_already_allocated(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} observed = self.driver.reserve_provider_segment(self.session, segment) self.assertRaises(exc.VlanIdInUse, self.driver.reserve_provider_segment, self.session, observed) def test_reserve_provider_segment_in_tenant_pools(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET, api.SEGMENTATION_ID: VLAN_MIN} alloc = self._get_allocation(self.session, segment) self.assertFalse(alloc.allocated) observed = 
self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertTrue(alloc.allocated) def test_reserve_provider_segment_without_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertTrue(alloc.allocated) vlan_id = observed[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) def test_reserve_provider_segment_without_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} observed = self.driver.reserve_provider_segment(self.session, segment) alloc = self._get_allocation(self.session, observed) self.assertTrue(alloc.allocated) vlan_id = observed[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) self.assertEqual(TENANT_NET, observed[api.PHYSICAL_NETWORK]) def test_reserve_provider_segment_all_allocateds(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): self.driver.allocate_tenant_segment(self.session) segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.assertRaises(exc.NoNetworkAvailable, self.driver.reserve_provider_segment, self.session, segment) def test_get_mtu(self): config.cfg.CONF.set_override('segment_mtu', 1475, group='ml2') config.cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1450, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('segment_mtu', 1375, group='ml2') config.cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1375, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('segment_mtu', 0, group='ml2') 
config.cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1450, self.driver.get_mtu('physnet1')) config.cfg.CONF.set_override('segment_mtu', 0, group='ml2') config.cfg.CONF.set_override('path_mtu', 0, group='ml2') self.driver.physnet_mtus = {} self.assertEqual(0, self.driver.get_mtu('physnet1')) def test_allocate_tenant_segment(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): segment = self.driver.allocate_tenant_segment(self.session) alloc = self._get_allocation(self.session, segment) self.assertTrue(alloc.allocated) vlan_id = segment[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) self.assertEqual(TENANT_NET, segment[api.PHYSICAL_NETWORK]) def test_allocate_tenant_segment_no_available(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): self.driver.allocate_tenant_segment(self.session) segment = self.driver.allocate_tenant_segment(self.session) self.assertIsNone(segment) def test_release_segment(self): segment = self.driver.allocate_tenant_segment(self.session) self.driver.release_segment(self.session, segment) alloc = self._get_allocation(self.session, segment) self.assertFalse(alloc.allocated) def test_release_segment_unallocated(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} with mock.patch.object(type_vlan.LOG, 'warning') as log_warn: self.driver.release_segment(self.session, segment) log_warn.assert_called_once_with( "No vlan_id %(vlan_id)s found on physical network " "%(physical_network)s", {'vlan_id': 101, 'physical_network': PROVIDER_NET})
apache-2.0
BurgyDev/mongoose
bindings/python/example.py
106
2433
# This is Python example on how to use Mongoose embeddable web server, # http://code.google.com/p/mongoose # # Before using the mongoose module, make sure that Mongoose shared library is # built and present in the current (or system library) directory import mongoose import sys # Handle /show and /form URIs. def EventHandler(event, conn): info = conn.info if event == mongoose.HTTP_ERROR: conn.printf('%s', 'HTTP/1.0 200 OK\r\n') conn.printf('%s', 'Content-Type: text/plain\r\n\r\n') conn.printf('HTTP error: %d\n', info.status_code) return True elif event == mongoose.NEW_REQUEST and info.uri == '/show': conn.printf('%s', 'HTTP/1.0 200 OK\r\n') conn.printf('%s', 'Content-Type: text/plain\r\n\r\n') conn.printf('%s %s\n', info.request_method, info.uri) if info.request_method == 'POST': content_len = conn.get_header('Content-Length') post_data = conn.read(int(content_len)) my_var = conn.get_var(post_data, 'my_var') else: my_var = conn.get_var(info.query_string, 'my_var') conn.printf('my_var: %s\n', my_var or '<not set>') conn.printf('HEADERS: \n') for header in info.http_headers[:info.num_headers]: conn.printf(' %s: %s\n', header.name, header.value) return True elif event == mongoose.NEW_REQUEST and info.uri == '/form': conn.write('HTTP/1.0 200 OK\r\n' 'Content-Type: text/html\r\n\r\n' 'Use GET: <a href="/show?my_var=hello">link</a>' '<form action="/show" method="POST">' 'Use POST: type text and submit: ' '<input type="text" name="my_var"/>' '<input type="submit"/>' '</form>') return True elif event == mongoose.NEW_REQUEST and info.uri == '/secret': conn.send_file('/etc/passwd') return True else: return False # Create mongoose object, and register '/foo' URI handler # List of options may be specified in the contructor server = mongoose.Mongoose(EventHandler, document_root='/tmp', listening_ports='8080') print ('Mongoose started on port %s, press enter to quit' % server.get_option('listening_ports')) sys.stdin.read(1) # Deleting server object stops all serving threads 
print 'Stopping server.' del server
mit
cython-testbed/pandas
pandas/tests/groupby/test_function.py
3
38905
import pytest import numpy as np import pandas as pd from pandas import (DataFrame, Index, compat, isna, Series, MultiIndex, Timestamp, date_range) from pandas.errors import UnsupportedFunctionCall from pandas.util import testing as tm import pandas.core.nanops as nanops from string import ascii_lowercase from pandas.compat import product as cart_product @pytest.mark.parametrize("agg_func", ['any', 'all']) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("vals", [ ['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''], [1, 2, 3], [1, 0, 0], [0, 0, 0], [1., 2., 3.], [1., 0., 0.], [0., 0., 0.], [True, True, True], [True, False, False], [False, False, False], [np.nan, np.nan, np.nan] ]) def test_groupby_bool_aggs(agg_func, skipna, vals): df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2}) # Figure out expectation using Python builtin exp = getattr(compat.builtins, agg_func)(vals) # edge case for missing data with skipna and 'any' if skipna and all(isna(vals)) and agg_func == 'any': exp = False exp_df = DataFrame([exp] * 2, columns=['val'], index=Index( ['a', 'b'], name='key')) result = getattr(df.groupby('key'), agg_func)(skipna=skipna) tm.assert_frame_equal(result, exp_df) def test_max_min_non_numeric(): # #2700 aa = DataFrame({'nn': [11, 11, 22, 22], 'ii': [1, 2, 3, 4], 'ss': 4 * ['mama']}) result = aa.groupby('nn').max() assert 'ss' in result result = aa.groupby('nn').max(numeric_only=False) assert 'ss' in result result = aa.groupby('nn').min() assert 'ss' in result result = aa.groupby('nn').min(numeric_only=False) assert 'ss' in result def test_intercept_builtin_sum(): s = Series([1., 2., np.nan, 3.]) grouped = s.groupby([0, 1, 2, 2]) result = grouped.agg(compat.builtins.sum) result2 = grouped.apply(compat.builtins.sum) expected = grouped.sum() tm.assert_series_equal(result, expected) tm.assert_series_equal(result2, expected) # @pytest.mark.parametrize("f", [max, min, sum]) # def test_builtins_apply(f): 
@pytest.mark.parametrize("f", [max, min, sum]) @pytest.mark.parametrize('keys', [ "jim", # Single key ["jim", "joe"] # Multi-key ]) def test_builtins_apply(keys, f): # see gh-8155 df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"]) df["jolie"] = np.random.randn(1000) fname = f.__name__ result = df.groupby(keys).apply(f) ngroups = len(df.drop_duplicates(subset=keys)) assert_msg = ("invalid frame shape: {} " "(expected ({}, 3))".format(result.shape, ngroups)) assert result.shape == (ngroups, 3), assert_msg tm.assert_frame_equal(result, # numpy's equivalent function df.groupby(keys).apply(getattr(np, fname))) if f != sum: expected = df.groupby(keys).agg(fname).reset_index() expected.set_index(keys, inplace=True, drop=False) tm.assert_frame_equal(result, expected, check_dtype=False) tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)()) def test_arg_passthru(): # make sure that we are passing thru kwargs # to our agg functions # GH3668 # GH5724 df = pd.DataFrame( {'group': [1, 1, 2], 'int': [1, 2, 3], 'float': [4., 5., 6.], 'string': list('abc'), 'category_string': pd.Series(list('abc')).astype('category'), 'category_int': [7, 8, 9], 'datetime': pd.date_range('20130101', periods=3), 'datetimetz': pd.date_range('20130101', periods=3, tz='US/Eastern'), 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')}, columns=['group', 'int', 'float', 'string', 'category_string', 'category_int', 'datetime', 'datetimetz', 'timedelta']) expected_columns_numeric = Index(['int', 'float', 'category_int']) # mean / median expected = pd.DataFrame( {'category_int': [7.5, 9], 'float': [4.5, 6.], 'timedelta': [pd.Timedelta('1.5s'), pd.Timedelta('3s')], 'int': [1.5, 3], 'datetime': [pd.Timestamp('2013-01-01 12:00:00'), pd.Timestamp('2013-01-03 00:00:00')], 'datetimetz': [ pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'), pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]}, index=Index([1, 2], name='group'), columns=['int', 'float', 
'category_int', 'datetime', 'datetimetz', 'timedelta']) for attr in ['mean', 'median']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns_numeric) result = f(numeric_only=False) tm.assert_frame_equal(result.reindex_like(expected), expected) # TODO: min, max *should* handle # categorical (ordered) dtype expected_columns = Index(['int', 'float', 'string', 'category_int', 'datetime', 'datetimetz', 'timedelta']) for attr in ['min', 'max']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) expected_columns = Index(['int', 'float', 'string', 'category_string', 'category_int', 'datetime', 'datetimetz', 'timedelta']) for attr in ['first', 'last']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) expected_columns = Index(['int', 'float', 'string', 'category_int', 'timedelta']) for attr in ['sum']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns_numeric) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) expected_columns = Index(['int', 'float', 'category_int']) for attr in ['prod', 'cumprod']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns_numeric) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) # like min, max, but don't include strings expected_columns = Index(['int', 'float', 'category_int', 'datetime', 'datetimetz', 'timedelta']) for attr in ['cummin', 'cummax']: f = getattr(df.groupby('group'), attr) result = f() # GH 15561: numeric_only=False set by default like min/max tm.assert_index_equal(result.columns, expected_columns) result = 
f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) expected_columns = Index(['int', 'float', 'category_int', 'timedelta']) for attr in ['cumsum']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns_numeric) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) def test_non_cython_api(): # GH5610 # non-cython calls should not include the grouper df = DataFrame( [[1, 2, 'foo'], [1, np.nan, 'bar'], [3, np.nan, 'baz']], columns=['A', 'B', 'C']) g = df.groupby('A') gni = df.groupby('A', as_index=False) # mad expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3]) expected.index.name = 'A' result = g.mad() tm.assert_frame_equal(result, expected) expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'], index=[0, 1]) result = gni.mad() tm.assert_frame_equal(result, expected) # describe expected_index = pd.Index([1, 3], name='A') expected_col = pd.MultiIndex(levels=[['B'], ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']], labels=[[0] * 8, list(range(8))]) expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0], [0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]], index=expected_index, columns=expected_col) result = g.describe() tm.assert_frame_equal(result, expected) expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T, df[df.A == 3].describe().unstack().to_frame().T]) expected.index = pd.Index([0, 1]) result = gni.describe() tm.assert_frame_equal(result, expected) # any expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'], index=[1, 3]) expected.index.name = 'A' result = g.any() tm.assert_frame_equal(result, expected) # idxmax expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3]) expected.index.name = 'A' result = g.idxmax() tm.assert_frame_equal(result, expected) def test_cython_api2(): # this takes the fast apply path # cumsum (GH5614) df = DataFrame( 
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9] ], columns=['A', 'B', 'C']) expected = DataFrame( [[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C']) result = df.groupby('A').cumsum() tm.assert_frame_equal(result, expected) # GH 5755 - cumsum is a transformer and should ignore as_index result = df.groupby('A', as_index=False).cumsum() tm.assert_frame_equal(result, expected) # GH 13994 result = df.groupby('A').cumsum(axis=1) expected = df.cumsum(axis=1) tm.assert_frame_equal(result, expected) result = df.groupby('A').cumprod(axis=1) expected = df.cumprod(axis=1) tm.assert_frame_equal(result, expected) def test_cython_median(): df = DataFrame(np.random.randn(1000)) df.values[::2] = np.nan labels = np.random.randint(0, 50, size=1000).astype(float) labels[::17] = np.nan result = df.groupby(labels).median() exp = df.groupby(labels).agg(nanops.nanmedian) tm.assert_frame_equal(result, exp) df = DataFrame(np.random.randn(1000, 5)) rs = df.groupby(labels).agg(np.median) xp = df.groupby(labels).median() tm.assert_frame_equal(rs, xp) def test_median_empty_bins(observed): df = pd.DataFrame(np.random.randint(0, 44, 500)) grps = range(0, 55, 5) bins = pd.cut(df[0], grps) result = df.groupby(bins, observed=observed).median() expected = df.groupby(bins, observed=observed).agg(lambda x: x.median()) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype", [ 'int8', 'int16', 'int32', 'int64', 'float32', 'float64']) @pytest.mark.parametrize("method,data", [ ('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}), ('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}), ('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}), ('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}), ('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}], 'args': [1]}), ('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}], 'out_type': 'int64'}) ]) def test_groupby_non_arithmetic_agg_types(dtype, method, data): # GH9311, GH6620 df = pd.DataFrame( [{'a': 1, 'b': 1}, {'a': 1, 'b': 2}, {'a': 2, 'b': 
3}, {'a': 2, 'b': 4}]) df['b'] = df.b.astype(dtype) if 'args' not in data: data['args'] = [] if 'out_type' in data: out_type = data['out_type'] else: out_type = dtype exp = data['df'] df_out = pd.DataFrame(exp) df_out['b'] = df_out.b.astype(out_type) df_out.set_index('a', inplace=True) grpd = df.groupby('a') t = getattr(grpd, method)(*data['args']) tm.assert_frame_equal(t, df_out) @pytest.mark.parametrize("i", [ (Timestamp("2011-01-15 12:50:28.502376"), Timestamp("2011-01-20 12:50:28.593448")), (24650000000000001, 24650000000000002) ]) def test_groupby_non_arithmetic_agg_int_like_precision(i): # see gh-6620, gh-9311 df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}]) grp_exp = {"first": {"expected": i[0]}, "last": {"expected": i[1]}, "min": {"expected": i[0]}, "max": {"expected": i[1]}, "nth": {"expected": i[1], "args": [1]}, "count": {"expected": 2}} for method, data in compat.iteritems(grp_exp): if "args" not in data: data["args"] = [] grouped = df.groupby("a") res = getattr(grouped, method)(*data["args"]) assert res.iloc[0].b == data["expected"] def test_fill_consistency(): # GH9221 # pass thru keyword arguments to the generated wrapper # are set if the passed kw is None (only) df = DataFrame(index=pd.MultiIndex.from_product( [['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]), columns=Index( ['1', '2'], name='id')) df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan, np.nan, 22, np.nan] df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan, np.nan, 44, np.nan] expected = df.groupby(level=0, axis=0).fillna(method='ffill') result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T tm.assert_frame_equal(result, expected) def test_groupby_cumprod(): # GH 4095 df = pd.DataFrame({'key': ['b'] * 10, 'value': 2}) actual = df.groupby('key')['value'].cumprod() expected = df.groupby('key')['value'].apply(lambda x: x.cumprod()) expected.name = 'value' tm.assert_series_equal(actual, expected) df = pd.DataFrame({'key': 
['b'] * 100, 'value': 2}) actual = df.groupby('key')['value'].cumprod() # if overflows, groupby product casts to float # while numpy passes back invalid values df['value'] = df['value'].astype(float) expected = df.groupby('key')['value'].apply(lambda x: x.cumprod()) expected.name = 'value' tm.assert_series_equal(actual, expected) def test_ops_general(): ops = [('mean', np.mean), ('median', np.median), ('std', np.std), ('var', np.var), ('sum', np.sum), ('prod', np.prod), ('min', np.min), ('max', np.max), ('first', lambda x: x.iloc[0]), ('last', lambda x: x.iloc[-1]), ('count', np.size), ] try: from scipy.stats import sem except ImportError: pass else: ops.append(('sem', sem)) df = DataFrame(np.random.randn(1000)) labels = np.random.randint(0, 50, size=1000).astype(float) for op, targop in ops: result = getattr(df.groupby(labels), op)().astype(float) expected = df.groupby(labels).agg(targop) try: tm.assert_frame_equal(result, expected) except BaseException as exc: exc.args += ('operation: %s' % op, ) raise def test_max_nan_bug(): raw = """,Date,app,File -04-23,2013-04-23 00:00:00,,log080001.log -05-06,2013-05-06 00:00:00,,log.log -05-07,2013-05-07 00:00:00,OE,xlsx""" df = pd.read_csv(compat.StringIO(raw), parse_dates=[0]) gb = df.groupby('Date') r = gb[['File']].max() e = gb['File'].max().to_frame() tm.assert_frame_equal(r, e) assert not r['File'].isna().any() def test_nlargest(): a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) b = Series(list('a' * 5 + 'b' * 5)) gb = a.groupby(b) r = gb.nlargest(3) e = Series([ 7, 5, 3, 10, 9, 6 ], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]])) tm.assert_series_equal(r, e) a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) gb = a.groupby(b) e = Series([ 3, 2, 1, 3, 3, 2 ], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]])) tm.assert_series_equal(gb.nlargest(3, keep='last'), e) def test_nsmallest(): a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) b = Series(list('a' * 5 + 'b' * 5)) gb = a.groupby(b) r = 
gb.nsmallest(3) e = Series([ 1, 2, 3, 0, 4, 6 ], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]])) tm.assert_series_equal(r, e) a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) gb = a.groupby(b) e = Series([ 0, 1, 1, 0, 1, 2 ], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]])) tm.assert_series_equal(gb.nsmallest(3, keep='last'), e) def test_numpy_compat(): # see gh-12811 df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]}) g = df.groupby('A') msg = "numpy operations are not valid with groupby" for func in ('mean', 'var', 'std', 'cumprod', 'cumsum'): tm.assert_raises_regex(UnsupportedFunctionCall, msg, getattr(g, func), 1, 2, 3) tm.assert_raises_regex(UnsupportedFunctionCall, msg, getattr(g, func), foo=1) def test_cummin_cummax(): # GH 15048 num_types = [np.int32, np.int64, np.float32, np.float64] num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min, np.finfo(np.float32).min, np.finfo(np.float64).min] num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max, np.finfo(np.float32).max, np.finfo(np.float64).max] base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2], 'B': [3, 4, 3, 2, 2, 3, 2, 1]}) expected_mins = [3, 3, 3, 2, 2, 2, 2, 1] expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3] for dtype, min_val, max_val in zip(num_types, num_mins, num_max): df = base_df.astype(dtype) # cummin expected = pd.DataFrame({'B': expected_mins}).astype(dtype) result = df.groupby('A').cummin() tm.assert_frame_equal(result, expected) result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame() tm.assert_frame_equal(result, expected) # Test cummin w/ min value for dtype df.loc[[2, 6], 'B'] = min_val expected.loc[[2, 3, 6, 7], 'B'] = min_val result = df.groupby('A').cummin() tm.assert_frame_equal(result, expected) expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame() tm.assert_frame_equal(result, expected) # cummax expected = pd.DataFrame({'B': expected_maxs}).astype(dtype) result = df.groupby('A').cummax() tm.assert_frame_equal(result, 
expected) result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame() tm.assert_frame_equal(result, expected) # Test cummax w/ max value for dtype df.loc[[2, 6], 'B'] = max_val expected.loc[[2, 3, 6, 7], 'B'] = max_val result = df.groupby('A').cummax() tm.assert_frame_equal(result, expected) expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame() tm.assert_frame_equal(result, expected) # Test nan in some values base_df.loc[[0, 2, 4, 6], 'B'] = np.nan expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]}) result = base_df.groupby('A').cummin() tm.assert_frame_equal(result, expected) expected = (base_df.groupby('A') .B .apply(lambda x: x.cummin()) .to_frame()) tm.assert_frame_equal(result, expected) expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]}) result = base_df.groupby('A').cummax() tm.assert_frame_equal(result, expected) expected = (base_df.groupby('A') .B .apply(lambda x: x.cummax()) .to_frame()) tm.assert_frame_equal(result, expected) # Test nan in entire column base_df['B'] = np.nan expected = pd.DataFrame({'B': [np.nan] * 8}) result = base_df.groupby('A').cummin() tm.assert_frame_equal(expected, result) result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame() tm.assert_frame_equal(expected, result) result = base_df.groupby('A').cummax() tm.assert_frame_equal(expected, result) result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame() tm.assert_frame_equal(expected, result) # GH 15561 df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001']))) expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b') for method in ['cummax', 'cummin']: result = getattr(df.groupby('a')['b'], method)() tm.assert_series_equal(expected, result) # GH 15635 df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1])) result = df.groupby('a').b.cummax() expected = pd.Series([2, 1, 2], name='b') tm.assert_series_equal(result, expected) df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2])) result = 
df.groupby('a').b.cummin() expected = pd.Series([1, 2, 1], name='b') tm.assert_series_equal(result, expected) @pytest.mark.parametrize('in_vals, out_vals', [ # Basics: strictly increasing (T), strictly decreasing (F), # abs val increasing (F), non-strictly increasing (T) ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]), # Test with inf vals ([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf], [True, False, True, False]), # Test with nan vals; should always be False ([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan], [False, False, False, False]), ]) def test_is_monotonic_increasing(in_vals, out_vals): # GH 17015 source_dict = { 'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'], 'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'], 'C': in_vals} df = pd.DataFrame(source_dict) result = df.groupby('B').C.is_monotonic_increasing index = Index(list('abcd'), name='B') expected = pd.Series(index=index, data=out_vals, name='C') tm.assert_series_equal(result, expected) # Also check result equal to manually taking x.is_monotonic_increasing. 
expected = ( df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing)) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('in_vals, out_vals', [ # Basics: strictly decreasing (T), strictly increasing (F), # abs val decreasing (F), non-strictly increasing (T) ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]), # Test with inf vals ([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf], [True, True, False, True]), # Test with nan vals; should always be False ([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan], [False, False, False, False]), ]) def test_is_monotonic_decreasing(in_vals, out_vals): # GH 17015 source_dict = { 'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'], 'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'], 'C': in_vals} df = pd.DataFrame(source_dict) result = df.groupby('B').C.is_monotonic_decreasing index = Index(list('abcd'), name='B') expected = pd.Series(index=index, data=out_vals, name='C') tm.assert_series_equal(result, expected) # describe # -------------------------------- def test_apply_describe_bug(mframe): grouped = mframe.groupby(level='first') grouped.describe() # it works! 
def test_series_describe_multikey(): ts = tm.makeTimeSeries() grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) result = grouped.describe() tm.assert_series_equal(result['mean'], grouped.mean(), check_names=False) tm.assert_series_equal(result['std'], grouped.std(), check_names=False) tm.assert_series_equal(result['min'], grouped.min(), check_names=False) def test_series_describe_single(): ts = tm.makeTimeSeries() grouped = ts.groupby(lambda x: x.month) result = grouped.apply(lambda x: x.describe()) expected = grouped.describe().stack() tm.assert_series_equal(result, expected) def test_series_index_name(df): grouped = df.loc[:, ['C']].groupby(df['A']) result = grouped.agg(lambda x: x.mean()) assert result.index.name == 'A' def test_frame_describe_multikey(tsframe): grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) result = grouped.describe() desc_groups = [] for col in tsframe: group = grouped[col].describe() # GH 17464 - Remove duplicate MultiIndex levels group_col = pd.MultiIndex( levels=[[col], group.columns], labels=[[0] * len(group.columns), range(len(group.columns))]) group = pd.DataFrame(group.values, columns=group_col, index=group.index) desc_groups.append(group) expected = pd.concat(desc_groups, axis=1) tm.assert_frame_equal(result, expected) groupedT = tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1}, axis=1) result = groupedT.describe() expected = tsframe.describe().T expected.index = pd.MultiIndex( levels=[[0, 1], expected.index], labels=[[0, 0, 1, 1], range(len(expected.index))]) tm.assert_frame_equal(result, expected) def test_frame_describe_tupleindex(): # GH 14848 - regression from 0.19.0 to 0.19.1 df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3, 'y': [10, 20, 30, 40, 50] * 3, 'z': [100, 200, 300, 400, 500] * 3}) df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5 df2 = df1.rename(columns={'k': 'key'}) pytest.raises(ValueError, lambda: df1.groupby('k').describe()) pytest.raises(ValueError, lambda: df2.groupby('key').describe()) def 
test_frame_describe_unstacked_format(): # GH 4792 prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990, pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499, pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499} volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000, pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000, pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000} df = pd.DataFrame({'PRICE': prices, 'VOLUME': volumes}) result = df.groupby('PRICE').VOLUME.describe() data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(), df[df.PRICE == 25499].VOLUME.describe().values.tolist()] expected = pd.DataFrame(data, index=pd.Index([24990, 25499], name='PRICE'), columns=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']) tm.assert_frame_equal(result, expected) # nunique # -------------------------------- @pytest.mark.parametrize('n', 10 ** np.arange(2, 6)) @pytest.mark.parametrize('m', [10, 100, 1000]) @pytest.mark.parametrize('sort', [False, True]) @pytest.mark.parametrize('dropna', [False, True]) def test_series_groupby_nunique(n, m, sort, dropna): def check_nunique(df, keys, as_index=True): gr = df.groupby(keys, as_index=as_index, sort=sort) left = gr['julie'].nunique(dropna=dropna) gr = df.groupby(keys, as_index=as_index, sort=sort) right = gr['julie'].apply(Series.nunique, dropna=dropna) if not as_index: right = right.reset_index(drop=True) tm.assert_series_equal(left, right, check_names=False) days = date_range('2015-08-23', periods=10) frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n), 'joe': np.random.choice(days, n), 'julie': np.random.randint(0, m, n)}) check_nunique(frame, ['jim']) check_nunique(frame, ['jim', 'joe']) frame.loc[1::17, 'jim'] = None frame.loc[3::37, 'joe'] = None frame.loc[7::19, 'julie'] = None frame.loc[8::19, 'julie'] = None frame.loc[9::19, 'julie'] = None check_nunique(frame, ['jim']) check_nunique(frame, ['jim', 'joe']) check_nunique(frame, ['jim'], as_index=False) 
check_nunique(frame, ['jim', 'joe'], as_index=False) def test_nunique(): df = DataFrame({ 'A': list('abbacc'), 'B': list('abxacc'), 'C': list('abbacx'), }) expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]}) result = df.groupby('A', as_index=False).nunique() tm.assert_frame_equal(result, expected) # as_index expected.index = list('abc') expected.index.name = 'A' result = df.groupby('A').nunique() tm.assert_frame_equal(result, expected) # with na result = df.replace({'x': None}).groupby('A').nunique(dropna=False) tm.assert_frame_equal(result, expected) # dropna expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3}, index=list('abc')) expected.index.name = 'A' result = df.replace({'x': None}).groupby('A').nunique() tm.assert_frame_equal(result, expected) def test_nunique_with_object(): # GH 11077 data = pd.DataFrame( [[100, 1, 'Alice'], [200, 2, 'Bob'], [300, 3, 'Charlie'], [-400, 4, 'Dan'], [500, 5, 'Edith']], columns=['amount', 'id', 'name'] ) result = data.groupby(['id', 'amount'])['name'].nunique() index = MultiIndex.from_arrays([data.id, data.amount]) expected = pd.Series([1] * 5, name='name', index=index) tm.assert_series_equal(result, expected) def test_nunique_with_empty_series(): # GH 12553 data = pd.Series(name='name') result = data.groupby(level=0).nunique() expected = pd.Series(name='name', dtype='int64') tm.assert_series_equal(result, expected) def test_nunique_with_timegrouper(): # GH 13453 test = pd.DataFrame({ 'time': [Timestamp('2016-06-28 09:35:35'), Timestamp('2016-06-28 16:09:30'), Timestamp('2016-06-28 16:46:28')], 'data': ['1', '2', '3']}).set_index('time') result = test.groupby(pd.Grouper(freq='h'))['data'].nunique() expected = test.groupby( pd.Grouper(freq='h') )['data'].apply(pd.Series.nunique) tm.assert_series_equal(result, expected) # count # -------------------------------- def test_groupby_timedelta_cython_count(): df = DataFrame({'g': list('ab' * 2), 'delt': np.arange(4).astype('timedelta64[ns]')}) expected = 
Series([ 2, 2 ], index=pd.Index(['a', 'b'], name='g'), name='delt') result = df.groupby('g').delt.count() tm.assert_series_equal(expected, result) def test_count(): n = 1 << 15 dr = date_range('2015-08-30', periods=n // 10, freq='T') df = DataFrame({ '1st': np.random.choice( list(ascii_lowercase), n), '2nd': np.random.randint(0, 5, n), '3rd': np.random.randn(n).round(3), '4th': np.random.randint(-10, 10, n), '5th': np.random.choice(dr, n), '6th': np.random.randn(n).round(3), '7th': np.random.randn(n).round(3), '8th': np.random.choice(dr, n) - np.random.choice(dr, 1), '9th': np.random.choice( list(ascii_lowercase), n) }) for col in df.columns.drop(['1st', '2nd', '4th']): df.loc[np.random.choice(n, n // 10), col] = np.nan df['9th'] = df['9th'].astype('category') for key in '1st', '2nd', ['1st', '2nd']: left = df.groupby(key).count() right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1) tm.assert_frame_equal(left, right) # GH5610 # count counts non-nulls df = pd.DataFrame([[1, 2, 'foo'], [1, np.nan, 'bar'], [3, np.nan, np.nan]], columns=['A', 'B', 'C']) count_as = df.groupby('A').count() count_not_as = df.groupby('A', as_index=False).count() expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'], index=[1, 3]) expected.index.name = 'A' tm.assert_frame_equal(count_not_as, expected.reset_index()) tm.assert_frame_equal(count_as, expected) count_B = df.groupby('A')['B'].count() tm.assert_series_equal(count_B, expected['B']) def test_count_object(): df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3}) result = df.groupby('c').a.count() expected = pd.Series([ 3, 3 ], index=pd.Index([2, 3], name='c'), name='a') tm.assert_series_equal(result, expected) df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3, 'c': [2] * 3 + [3] * 3}) result = df.groupby('c').a.count() expected = pd.Series([ 1, 3 ], index=pd.Index([2, 3], name='c'), name='a') tm.assert_series_equal(result, expected) def test_count_cross_type(): # GH8169 vals = 
np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint( 0, 2, (100, 2)))) df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd']) df[df == 2] = np.nan expected = df.groupby(['c', 'd']).count() for t in ['float32', 'object']: df['a'] = df['a'].astype(t) df['b'] = df['b'].astype(t) result = df.groupby(['c', 'd']).count() tm.assert_frame_equal(result, expected) def test_lower_int_prec_count(): df = DataFrame({'a': np.array( [0, 1, 2, 100], np.int8), 'b': np.array( [1, 2, 3, 6], np.uint32), 'c': np.array( [4, 5, 6, 8], np.int16), 'grp': list('ab' * 2)}) result = df.groupby('grp').count() expected = DataFrame({'a': [2, 2], 'b': [2, 2], 'c': [2, 2]}, index=pd.Index(list('ab'), name='grp')) tm.assert_frame_equal(result, expected) def test_count_uses_size_on_exception(): class RaisingObjectException(Exception): pass class RaisingObject(object): def __init__(self, msg='I will raise inside Cython'): super(RaisingObject, self).__init__() self.msg = msg def __eq__(self, other): # gets called in Cython to check that raising calls the method raise RaisingObjectException(self.msg) df = DataFrame({'a': [RaisingObject() for _ in range(4)], 'grp': list('ab' * 2)}) result = df.groupby('grp').count() expected = DataFrame({'a': [2, 2]}, index=pd.Index( list('ab'), name='grp')) tm.assert_frame_equal(result, expected) # size # -------------------------------- def test_size(df): grouped = df.groupby(['A', 'B']) result = grouped.size() for key, group in grouped: assert result[key] == len(group) grouped = df.groupby('A') result = grouped.size() for key, group in grouped: assert result[key] == len(group) grouped = df.groupby('B') result = grouped.size() for key, group in grouped: assert result[key] == len(group) df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc')) for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])): left = df.groupby(key, sort=sort).size() right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0]) tm.assert_series_equal(left, 
right, check_names=False) # GH11699 df = DataFrame([], columns=['A', 'B']) out = Series([], dtype='int64', index=Index([], name='A')) tm.assert_series_equal(df.groupby('A').size(), out) # pipe # -------------------------------- def test_pipe(): # Test the pipe method of DataFrameGroupBy. # Issue #17871 random_state = np.random.RandomState(1234567890) df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': random_state.randn(8), 'C': random_state.randn(8)}) def f(dfgb): return dfgb.B.max() - dfgb.C.min().min() def square(srs): return srs ** 2 # Note that the transformations are # GroupBy -> Series # Series -> Series # This then chains the GroupBy.pipe and the # NDFrame.pipe methods result = df.groupby('A').pipe(f).pipe(square) index = Index([u'bar', u'foo'], dtype='object', name=u'A') expected = pd.Series([8.99110003361, 8.17516964785], name='B', index=index) tm.assert_series_equal(expected, result) def test_pipe_args(): # Test passing args to the pipe method of DataFrameGroupBy. 
# Issue #17871 df = pd.DataFrame({'group': ['A', 'A', 'B', 'B', 'C'], 'x': [1.0, 2.0, 3.0, 2.0, 5.0], 'y': [10.0, 100.0, 1000.0, -100.0, -1000.0]}) def f(dfgb, arg1): return (dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False) .groupby(dfgb.grouper)) def g(dfgb, arg2): return dfgb.sum() / dfgb.sum().sum() + arg2 def h(df, arg3): return df.x + df.y - arg3 result = (df .groupby('group') .pipe(f, 0) .pipe(g, 10) .pipe(h, 100)) # Assert the results here index = pd.Index(['A', 'B', 'C'], name='group') expected = pd.Series([-79.5160891089, -78.4839108911, -80], index=index) tm.assert_series_equal(expected, result) # test SeriesGroupby.pipe ser = pd.Series([1, 1, 2, 2, 3, 3]) result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count()) expected = pd.Series([4, 8, 12], index=pd.Int64Index([1, 2, 3])) tm.assert_series_equal(result, expected) def test_groupby_mean_no_overflow(): # Regression test for (#22487) df = pd.DataFrame({ "user": ["A", "A", "A", "A", "A"], "connections": [4970, 4749, 4719, 4704, 18446744073699999744] }) assert df.groupby('user')['connections'].mean()['A'] == 3689348814740003840
bsd-3-clause
libscie/liberator
liberator/lib/python3.6/site-packages/django/apps/config.py
51
8187
import os from importlib import import_module from django.core.exceptions import ImproperlyConfigured from django.utils._os import upath from django.utils.module_loading import module_has_submodule MODELS_MODULE_NAME = 'models' class AppConfig(object): """ Class representing a Django application and its configuration. """ def __init__(self, app_name, app_module): # Full Python path to the application eg. 'django.contrib.admin'. self.name = app_name # Root module for the application eg. <module 'django.contrib.admin' # from 'django/contrib/admin/__init__.pyc'>. self.module = app_module # Reference to the Apps registry that holds this AppConfig. Set by the # registry when it registers the AppConfig instance. self.apps = None # The following attributes could be defined at the class level in a # subclass, hence the test-and-set pattern. # Last component of the Python path to the application eg. 'admin'. # This value must be unique across a Django project. if not hasattr(self, 'label'): self.label = app_name.rpartition(".")[2] # Human-readable name for the application eg. "Admin". if not hasattr(self, 'verbose_name'): self.verbose_name = self.label.title() # Filesystem path to the application directory eg. # u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on # Python 2 and a str on Python 3. if not hasattr(self, 'path'): self.path = self._path_from_module(app_module) # Module containing models eg. <module 'django.contrib.admin.models' # from 'django/contrib/admin/models.pyc'>. Set by import_models(). # None if the application doesn't have a models module. self.models_module = None # Mapping of lower case model names to model classes. Initially set to # None to prevent accidental access before import_models() runs. 
self.models = None def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.label) def _path_from_module(self, module): """Attempt to determine app's filesystem path from its module.""" # See #21874 for extended discussion of the behavior of this method in # various cases. # Convert paths to list because Python 3's _NamespacePath does not # support indexing. paths = list(getattr(module, '__path__', [])) if len(paths) != 1: filename = getattr(module, '__file__', None) if filename is not None: paths = [os.path.dirname(filename)] else: # For unknown reasons, sometimes the list returned by __path__ # contains duplicates that must be removed (#25246). paths = list(set(paths)) if len(paths) > 1: raise ImproperlyConfigured( "The app module %r has multiple filesystem locations (%r); " "you must configure this app with an AppConfig subclass " "with a 'path' class attribute." % (module, paths)) elif not paths: raise ImproperlyConfigured( "The app module %r has no filesystem location, " "you must configure this app with an AppConfig subclass " "with a 'path' class attribute." % (module,)) return upath(paths[0]) @classmethod def create(cls, entry): """ Factory that creates an app config from an entry in INSTALLED_APPS. """ try: # If import_module succeeds, entry is a path to an app module, # which may specify an app config class with default_app_config. # Otherwise, entry is a path to an app config class or an error. module = import_module(entry) except ImportError: # Track that importing as an app module failed. If importing as an # app config class fails too, we'll trigger the ImportError again. module = None mod_path, _, cls_name = entry.rpartition('.') # Raise the original exception when entry cannot be a path to an # app config class. if not mod_path: raise else: try: # If this works, the app module specifies an app config class. entry = module.default_app_config except AttributeError: # Otherwise, it simply uses the default app config class. 
return cls(entry, module) else: mod_path, _, cls_name = entry.rpartition('.') # If we're reaching this point, we must attempt to load the app config # class located at <mod_path>.<cls_name> mod = import_module(mod_path) try: cls = getattr(mod, cls_name) except AttributeError: if module is None: # If importing as an app module failed, that error probably # contains the most informative traceback. Trigger it again. import_module(entry) else: raise # Check for obvious errors. (This check prevents duck typing, but # it could be removed if it became a problem in practice.) if not issubclass(cls, AppConfig): raise ImproperlyConfigured( "'%s' isn't a subclass of AppConfig." % entry) # Obtain app name here rather than in AppClass.__init__ to keep # all error checking for entries in INSTALLED_APPS in one place. try: app_name = cls.name except AttributeError: raise ImproperlyConfigured( "'%s' must supply a name attribute." % entry) # Ensure app_name points to a valid module. try: app_module = import_module(app_name) except ImportError: raise ImproperlyConfigured( "Cannot import '%s'. Check that '%s.%s.name' is correct." % ( app_name, mod_path, cls_name, ) ) # Entry is a path to an app config class. return cls(app_name, app_module) def get_model(self, model_name, require_ready=True): """ Returns the model with the given case-insensitive model_name. Raises LookupError if no model exists with this name. """ if require_ready: self.apps.check_models_ready() else: self.apps.check_apps_ready() try: return self.models[model_name.lower()] except KeyError: raise LookupError( "App '%s' doesn't have a '%s' model." % (self.label, model_name)) def get_models(self, include_auto_created=False, include_swapped=False): """ Returns an iterable of models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models that have been swapped out. 
Set the corresponding keyword argument to True to include such models. Keyword arguments aren't documented; they're a private API. """ self.apps.check_models_ready() for model in self.models.values(): if model._meta.auto_created and not include_auto_created: continue if model._meta.swapped and not include_swapped: continue yield model def import_models(self): # Dictionary of models for this app, primarily maintained in the # 'all_models' attribute of the Apps this AppConfig is attached to. self.models = self.apps.all_models[self.label] if module_has_submodule(self.module, MODELS_MODULE_NAME): models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME) self.models_module = import_module(models_module_name) def ready(self): """ Override this method in subclasses to run code when Django starts. """
cc0-1.0
piohhmy/euler
p010.py
1
1626
import math from timeit import Timer def solve_p10_sieve(): return sum(sieve_primes_up_to(2000000)) def solve_p10_sieve_w_generator(): return sum(prime_sieve_generator(2000000)) def sieve_primes_up_to(num): num_list = list(range(num)) num_list[1] = 0 # cross out 1, special case i = 2 while num_list[i] <= int(math.sqrt(num))+1: cross_out_composite_multiples(num_list[i], num_list) i = next_non_crossed_out_index(i, num_list) return [x for x in num_list if x != 0] def cross_out_composite_multiples(multiple, num_list): for composite in range(multiple*2, len(num_list), multiple): num_list[composite] = 0 def prime_sieve_generator(limit): num_list = [True] * limit num_list[0] = num_list[1] = False square_root_limit = int(math.sqrt(limit))+1 for i, isprime in enumerate(num_list): if isprime: yield i if i > square_root_limit: continue #optimization, composites already marked for composite in range(i*2, len(num_list), i): num_list[composite] = False def next_non_crossed_out_index(i, num_list): increment_index = True while increment_index: i += 1 increment_index = num_list[i] == 0 return i if __name__ == '__main__': sieve_timer = Timer(solve_p10_sieve) sieve_generator_timer = Timer(solve_p10_sieve_w_generator) print("Filter approach: {0}".format(sieve_timer.timeit(1))) print("Generator approach: {0}".format(sieve_generator_timer.timeit(1))) print(solve_p10_sieve()) print(solve_p10_sieve_w_generator())
mit
xujun10110/golismero
thirdparty_libs/django/views/decorators/debug.py
48
2337
import functools def sensitive_variables(*variables): """ Indicates which variables used in the decorated function are sensitive, so that those variables can later be treated in a special way, for example by hiding them when logging unhandled exceptions. Two forms are accepted: * with specified variable names: @sensitive_variables('user', 'password', 'credit_card') def my_function(user): password = user.pass_word credit_card = user.credit_card_number ... * without any specified variable names, in which case it is assumed that all variables are considered sensitive: @sensitive_variables() def my_function() ... """ def decorator(func): @functools.wraps(func) def sensitive_variables_wrapper(*func_args, **func_kwargs): if variables: sensitive_variables_wrapper.sensitive_variables = variables else: sensitive_variables_wrapper.sensitive_variables = '__ALL__' return func(*func_args, **func_kwargs) return sensitive_variables_wrapper return decorator def sensitive_post_parameters(*parameters): """ Indicates which POST parameters used in the decorated view are sensitive, so that those parameters can later be treated in a special way, for example by hiding them when logging unhandled exceptions. Two forms are accepted: * with specified parameters: @sensitive_post_parameters('password', 'credit_card') def my_view(request): pw = request.POST['password'] cc = request.POST['credit_card'] ... * without any specified parameters, in which case it is assumed that all parameters are considered sensitive: @sensitive_post_parameters() def my_view(request) ... """ def decorator(view): @functools.wraps(view) def sensitive_post_parameters_wrapper(request, *args, **kwargs): if parameters: request.sensitive_post_parameters = parameters else: request.sensitive_post_parameters = '__ALL__' return view(request, *args, **kwargs) return sensitive_post_parameters_wrapper return decorator
gpl-2.0
garbled1/ansible
lib/ansible/module_utils/exoscale.py
39
6661
# -*- coding: utf-8 -*- # # (c) 2016, René Moser <mail@renemoser.net> # # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os from ansible.module_utils.six.moves import configparser from ansible.module_utils.six import integer_types, string_types from ansible.module_utils._text import to_native, to_text from ansible.module_utils.urls import fetch_url EXO_DNS_BASEURL = "https://api.exoscale.ch/dns/v1" def exo_dns_argument_spec(): return dict( api_key=dict(default=os.environ.get('CLOUDSTACK_KEY'), no_log=True), api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True), api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT') or 10), api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'), validate_certs=dict(default='yes', type='bool'), ) def exo_dns_required_together(): return [['api_key', 'api_secret']] class ExoDns(object): def __init__(self, module): self.module = module self.api_key = self.module.params.get('api_key') self.api_secret = self.module.params.get('api_secret') if not (self.api_key and self.api_secret): try: region = self.module.params.get('api_region') config = self.read_config(ini_group=region) self.api_key = config['key'] self.api_secret = config['secret'] except Exception as e: self.module.fail_json(msg="Error while processing config: %s" % to_native(e)) self.headers = { 'X-DNS-Token': "%s:%s" % (self.api_key, self.api_secret), 'Content-Type': 'application/json', 'Accept': 'application/json', } self.result = { 'changed': False, 'diff': { 'before': {}, 'after': {}, } } def read_config(self, ini_group=None): if not ini_group: ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack') keys = ['key', 'secret'] env_conf = {} for key in keys: if 'CLOUDSTACK_%s' % key.upper() not in os.environ: break else: env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()] else: return env_conf # Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini # Last read wins in configparser paths = ( os.path.join(os.path.expanduser('~'), '.cloudstack.ini'), os.path.join(os.getcwd(), 'cloudstack.ini'), ) # Look at 
CLOUDSTACK_CONFIG first if present if 'CLOUDSTACK_CONFIG' in os.environ: paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),) if not any([os.path.exists(c) for c in paths]): self.module.fail_json(msg="Config file not found. Tried : %s" % ", ".join(paths)) conf = configparser.ConfigParser() conf.read(paths) return dict(conf.items(ini_group)) def api_query(self, resource="/domains", method="GET", data=None): url = EXO_DNS_BASEURL + resource if data: data = self.module.jsonify(data) response, info = fetch_url( module=self.module, url=url, data=data, method=method, headers=self.headers, timeout=self.module.params.get('api_timeout'), ) if info['status'] not in (200, 201, 204): self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) try: return self.module.from_json(to_text(response.read())) except Exception as e: self.module.fail_json(msg="Could not process response into json: %s" % to_native(e)) def has_changed(self, want_dict, current_dict, only_keys=None): changed = False for key, value in want_dict.items(): # Optionally limit by a list of keys if only_keys and key not in only_keys: continue # Skip None values if value is None: continue if key in current_dict: if isinstance(current_dict[key], integer_types): if value != current_dict[key]: self.result['diff']['before'][key] = current_dict[key] self.result['diff']['after'][key] = value changed = True elif isinstance(current_dict[key], string_types): if value.lower() != current_dict[key].lower(): self.result['diff']['before'][key] = current_dict[key] self.result['diff']['after'][key] = value changed = True else: self.module.fail_json(msg="Unable to determine comparison for key %s" % key) else: self.result['diff']['after'][key] = value changed = True return changed
gpl-3.0
ChanderG/scikit-learn
sklearn/neighbors/tests/test_kde.py
208
5556
import numpy as np from sklearn.utils.testing import (assert_allclose, assert_raises, assert_equal) from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors from sklearn.neighbors.ball_tree import kernel_norm from sklearn.pipeline import make_pipeline from sklearn.datasets import make_blobs from sklearn.grid_search import GridSearchCV from sklearn.preprocessing import StandardScaler def compute_kernel_slow(Y, X, kernel, h): d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0] if kernel == 'gaussian': return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1) elif kernel == 'tophat': return norm * (d < h).sum(-1) elif kernel == 'epanechnikov': return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1) elif kernel == 'exponential': return norm * (np.exp(-d / h)).sum(-1) elif kernel == 'linear': return norm * ((1 - d / h) * (d < h)).sum(-1) elif kernel == 'cosine': return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1) else: raise ValueError('kernel not recognized') def test_kernel_density(n_samples=100, n_features=3): rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) Y = rng.randn(n_samples, n_features) for kernel in ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']: for bandwidth in [0.01, 0.1, 1]: dens_true = compute_kernel_slow(Y, X, kernel, bandwidth) def check_results(kernel, bandwidth, atol, rtol): kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, atol=atol, rtol=rtol) log_dens = kde.fit(X).score_samples(Y) assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=max(1E-7, rtol)) assert_allclose(np.exp(kde.score(Y)), np.prod(dens_true), atol=atol, rtol=max(1E-7, rtol)) for rtol in [0, 1E-5]: for atol in [1E-6, 1E-2]: for breadth_first in (True, False): yield (check_results, kernel, bandwidth, atol, rtol) def test_kernel_density_sampling(n_samples=100, n_features=3): rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) bandwidth = 
0.2 for kernel in ['gaussian', 'tophat']: # draw a tophat sample kde = KernelDensity(bandwidth, kernel=kernel).fit(X) samp = kde.sample(100) assert_equal(X.shape, samp.shape) # check that samples are in the right range nbrs = NearestNeighbors(n_neighbors=1).fit(X) dist, ind = nbrs.kneighbors(X, return_distance=True) if kernel == 'tophat': assert np.all(dist < bandwidth) elif kernel == 'gaussian': # 5 standard deviations is safe for 100 samples, but there's a # very small chance this test could fail. assert np.all(dist < 5 * bandwidth) # check unsupported kernels for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']: kde = KernelDensity(bandwidth, kernel=kernel).fit(X) assert_raises(NotImplementedError, kde.sample, 100) # non-regression test: used to return a scalar X = rng.randn(4, 1) kde = KernelDensity(kernel="gaussian").fit(X) assert_equal(kde.sample().shape, (1, 1)) def test_kde_algorithm_metric_choice(): # Smoke test for various metrics and algorithms rng = np.random.RandomState(0) X = rng.randn(10, 2) # 2 features required for haversine dist. 
Y = rng.randn(10, 2) for algorithm in ['auto', 'ball_tree', 'kd_tree']: for metric in ['euclidean', 'minkowski', 'manhattan', 'chebyshev', 'haversine']: if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics: assert_raises(ValueError, KernelDensity, algorithm=algorithm, metric=metric) else: kde = KernelDensity(algorithm=algorithm, metric=metric) kde.fit(X) y_dens = kde.score_samples(Y) assert_equal(y_dens.shape, Y.shape[:1]) def test_kde_score(n_samples=100, n_features=3): pass #FIXME #np.random.seed(0) #X = np.random.random((n_samples, n_features)) #Y = np.random.random((n_samples, n_features)) def test_kde_badargs(): assert_raises(ValueError, KernelDensity, algorithm='blah') assert_raises(ValueError, KernelDensity, bandwidth=0) assert_raises(ValueError, KernelDensity, kernel='blah') assert_raises(ValueError, KernelDensity, metric='blah') assert_raises(ValueError, KernelDensity, algorithm='kd_tree', metric='blah') def test_kde_pipeline_gridsearch(): # test that kde plays nice in pipelines and grid-searches X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False), KernelDensity(kernel="gaussian")) params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10]) search = GridSearchCV(pipe1, param_grid=params, cv=5) search.fit(X) assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
bsd-3-clause
xiejianying/pjsip
tests/automated/configure.py
93
11490
#!/usr/bin/python import optparse import os import platform import socket import subprocess import sys PROG = "r" + "$Rev: 17 $".strip("$ ").replace("Rev: ", "") PYTHON = os.path.basename(sys.executable) build_type = "" vs_target = "" s60_target = "" no_test = False no_pjsua_test = False # # Get gcc version # def gcc_version(gcc): proc = subprocess.Popen(gcc + " -v", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) ver = "" while True: s = proc.stdout.readline() if not s: break if s.find("gcc version") >= 0: ver = s.split(None, 3)[2] break proc.wait() return "gcc-" + ver # # Get Visual Studio info # class VSVersion: def __init__(self): self.version = "8" self.release = "2005" proc = subprocess.Popen("cl", stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: s = proc.stdout.readline() if s=="": break pos = s.find("Version") if pos > 0: proc.wait() s = s[pos+8:] ver = s.split(None, 1)[0] major = ver[0:2] if major=="12": self.version = "6" self.release = "98" break elif major=="13": self.version = "7" self.release = "2003" break elif major=="14": self.version = "8" self.release = "2005" break elif major=="15": self.version = "9" self.release = "2008" break elif major=="16": self.version = "10" self.release = "2010" break else: self.version = "11" self.release = "2012" break proc.wait() self.vs_version = "vs" + self.version self.vs_release = "vs" + self.release # # Get S60 SDK info # class S60SDK: def __init__(self): self.epocroot = "" self.sdk = "" self.device = "" # Check that EPOCROOT is set if not "EPOCROOT" in os.environ: sys.stderr.write("Error: EPOCROOT environment variable is not set\n") sys.exit(1) epocroot = os.environ["EPOCROOT"] # EPOCROOT must have trailing backslash if epocroot[-1] != "\\": epocroot = epocroot + "\\" os.environ["EPOCROOT"] = epocroot self.epocroot = epocroot self.sdk = sdk1 = epocroot.split("\\")[-2] self.device = "@" + self.sdk + ":com.nokia.s60" # Check that correct device is set proc = subprocess.Popen("devices", 
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) sdk2 = "" while True: line = proc.stdout.readline() if line.find("- default") > 0: sdk2 = line.split(":",1)[0] break proc.wait() if sdk1 != sdk2: sys.stderr.write("Error: default SDK in device doesn't match EPOCROOT\n") sys.stderr.write("Default device SDK = '" + sdk2 + "'\n") sys.stderr.write("EPOCROOT SDK = '" + sdk1 + "'\n") sys.exit(1) self.name = sdk2.replace("_", "-") def replace_vars(text): global vs_target, s60_target, build_type, no_test, no_pjsua_test suffix = "" os_info = platform.system() + platform.release() + "-" + platform.machine() # osinfo s60sdk_var = None if build_type == "s60": s60sdk_var = S60SDK() os_info = s60sdk_var.name elif platform.system().lower() == "windows" or platform.system().lower() == "microsoft": if platform.system().lower() == "microsoft": os_info = platform.release() + "-" + platform.version() + "-" + platform.win32_ver()[2] elif platform.system().lower() == "linux": os_info = "-" + "-".join(platform.linux_distribution()[0:2]) # vs_target if not vs_target and text.find("$(VSTARGET)") >= 0: if build_type != "vs": sys.stderr.write("Warning: $(VSTARGET) only valid for Visual Studio\n") print "Enter Visual Studio vs_target name (e.g. Release, Debug) [Release]: ", vs_target = sys.stdin.readline().replace("\n", "").replace("\r", "") if not vs_target: vs_target = "Release" # s60_target if not s60_target and text.find("$(S60TARGET)") >= 0: if build_type != "s60": sys.stderr.write("Warning: $(S60TARGET) only valid for S60\n") print "Enter S60 target name (e.g. 
\"gcce urel\") [gcce urel]: ", s60_target = sys.stdin.readline().replace("\n", "").replace("\r", "") if not s60_target: s60_target = "gcce urel" # Suffix if build_type == "vs": suffix = "i386-Win32-vc8-" + vs_target elif build_type == "s60": suffix = s60sdk_var.name + "-" + s60_target.replace(" ", "-") elif build_type == "gnu": proc = subprocess.Popen("sh config.guess", cwd="../..", shell=True, stdout=subprocess.PIPE) suffix = proc.stdout.readline().rstrip(" \r\n") else: sys.stderr.write("Error: unsupported build type '" + build_type + "'\n") sys.exit(1) while True: if text.find("$(PJSUA-TESTS)") >= 0: if no_test==False and no_pjsua_test==False: # Determine pjsua exe to use exe = "../../pjsip-apps/bin/pjsua-" + suffix proc = subprocess.Popen(PYTHON + " runall.py --list-xml -e " + exe, cwd="../pjsua", shell=True, stdout=subprocess.PIPE) content = proc.stdout.read() else: content = "" text = text.replace("$(PJSUA-TESTS)", content) elif text.find("$(GCC)") >= 0: text = text.replace("$(GCC)", gcc_version("gcc")) elif text.find("$(VS)") >= 0: vsver = VSVersion() text = text.replace("$(VS)", VSVersion().vs_release) elif text.find("$(VSTARGET)") >= 0: text = text.replace("$(VSTARGET)", vs_target) elif text.find("$(S60TARGET)") >= 0: text = text.replace("$(S60TARGET)", s60_target) elif text.find("$(S60TARGETNAME)") >= 0: text = text.replace("$(S60TARGETNAME)", s60_target.replace(" ", "-")) elif text.find("$(S60DEVICE)") >= 0: text = text.replace("$(S60DEVICE)", s60sdk_var.device) elif text.find("$(EPOCROOT)") >= 0: text = text.replace("$(EPOCROOT)", s60sdk_var.epocroot) elif text.find("$(DISABLED)") >= 0: text = text.replace("$(DISABLED)", "0") elif text.find("$(IPPROOT)") >= 0: if not os.environ.has_key("IPPROOT"): sys.stderr.write("Error: environment variable IPPROOT is needed but not set\n") sys.exit(1) text = text.replace("$(IPPROOT)", os.environ["IPPROOT"]) elif text.find("$(IPPSAMPLES)") >= 0: if not os.environ.has_key("IPPSAMPLES"): sys.stderr.write("Error: 
environment variable IPPSAMPLES is needed but not set\n") sys.exit(1) text = text.replace("$(IPPSAMPLES)", os.environ["IPPSAMPLES"]) elif text.find("$(IPPARCH)") >= 0: if not os.environ.has_key("IPPARCH"): text = text.replace("$(IPPARCH)", "") else: text = text.replace("$(IPPARCH)", os.environ["IPPARCH"]) elif text.find("$(OS)") >= 0: text = text.replace("$(OS)", os_info) elif text.find("$(SUFFIX)") >= 0: text = text.replace("$(SUFFIX)", suffix) elif text.find("$(HOSTNAME)") >= 0: text = text.replace("$(HOSTNAME)", socket.gethostname()) elif text.find("$(PJDIR)") >= 0: wdir = os.path.join(os.getcwd(), "../..") wdir = os.path.normpath(wdir) text = text.replace("$(PJDIR)", wdir) elif text.find("$(NOP)") >= 0: if platform.system().lower() == "windows" or platform.system().lower() == "microsoft": cmd = "CMD /C echo Success" else: cmd = "echo Success" text = text.replace("$(NOP)", cmd) elif text.find("$(NOTEST)") >= 0: if no_test: str = '"1"' else: str = '"0"' text = text.replace("$(NOTEST)", str) else: break return text def main(args): global vs_target, s60_target, build_type, no_test, no_pjsua_test output = sys.stdout usage = """Usage: configure.py [OPTIONS] scenario_template_file Where OPTIONS: -o FILE Output to file, otherwise to stdout. -t TYPE Specify build type. If not specified, it will be asked if necessary. Values are: vs: Visual Studio gnu: Makefile based s60: Symbian S60 -vstarget TARGETNAME Specify Visual Studio target name if build type is set to vs. If not specified then it will be asked. Sample target names: - Debug - Release - or any other target in the project file -s60target TARGETNAME Specify S60 target name if build type is set to s60. If not specified then it will be asked. Sample target names: - "gcce udeb" - "gcce urel" -notest Disable all tests in the scenario. -nopjsuatest Disable pjsua tests in the scenario. 
""" args.pop(0) while len(args): if args[0]=='-o': args.pop(0) if len(args): output = open(args[0], "wt") args.pop(0) else: sys.stderr.write("Error: needs value for -o\n") sys.exit(1) elif args[0]=='-vstarget': args.pop(0) if len(args): vs_target = args[0] args.pop(0) else: sys.stderr.write("Error: needs value for -vstarget\n") sys.exit(1) elif args[0]=='-s60target': args.pop(0) if len(args): s60_target = args[0] args.pop(0) else: sys.stderr.write("Error: needs value for -s60target\n") sys.exit(1) elif args[0]=='-t': args.pop(0) if len(args): build_type = args[0].lower() args.pop(0) else: sys.stderr.write("Error: needs value for -t\n") sys.exit(1) if not ["vs", "gnu", "s60"].count(build_type): sys.stderr.write("Error: invalid -t argument value\n") sys.exit(1) elif args[0]=='-notest' or args[0]=='-notests': args.pop(0) no_test = True elif args[0]=='-nopjsuatest' or args[0]=='-nopjsuatests': args.pop(0) no_pjsua_test = True else: break if len(args) != 1: sys.stderr.write(usage + "\n") return 1 if not build_type: defval = "vs" if "SHELL" in os.environ: shell = os.environ["SHELL"] if shell.find("sh") > -1: defval = "gnu" print "Enter the build type (values: vs, gnu, s60) [%s]: " % (defval), build_type = sys.stdin.readline().replace("\n", "").replace("\r", "") if not build_type: build_type = defval tpl_file = args[len(args)-1] if not os.path.isfile(tpl_file): print "Error: unable to find template file '%s'" % (tpl_file) return 1 f = open(tpl_file, "r") tpl = f.read() f.close() tpl = replace_vars(tpl) output.write(tpl) if output != sys.stdout: output.close() return 0 if __name__ == "__main__": rc = main(sys.argv) sys.exit(rc)
gpl-2.0
dnozay/lettuce
tests/integration/lib/Django-1.3/django/contrib/gis/gdal/__init__.py
397
2173
""" This module houses ctypes interfaces for GDAL objects. The following GDAL objects are supported: CoordTransform: Used for coordinate transformations from one spatial reference system to another. Driver: Wraps an OGR data source driver. DataSource: Wrapper for the OGR data source object, supports OGR-supported data sources. Envelope: A ctypes structure for bounding boxes (GDAL library not required). OGRGeometry: Object for accessing OGR Geometry functionality. OGRGeomType: A class for representing the different OGR Geometry types (GDAL library not required). SpatialReference: Represents OSR Spatial Reference objects. The GDAL library will be imported from the system path using the default library name for the current OS. The default library path may be overridden by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C library on your system. GDAL links to a large number of external libraries that consume RAM when loaded. Thus, it may desirable to disable GDAL on systems with limited RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH` to a non-existant file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`; setting to None/False/'' will not work as a string must be given). """ # Attempting to import objects that depend on the GDAL library. The # HAS_GDAL flag will be set to True if the library is present on # the system. try: from django.contrib.gis.gdal.driver import Driver from django.contrib.gis.gdal.datasource import DataSource from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, gdal_release_date, GEOJSON, GDAL_VERSION from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform from django.contrib.gis.gdal.geometries import OGRGeometry HAS_GDAL = True except: HAS_GDAL, GEOJSON = False, False try: from django.contrib.gis.gdal.envelope import Envelope except ImportError: # No ctypes, but don't raise an exception. 
pass from django.contrib.gis.gdal.error import check_err, OGRException, OGRIndexError, SRSException from django.contrib.gis.gdal.geomtype import OGRGeomType
gpl-3.0
runarberg/servo
tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/test_whitespace_filter.py
453
5642
from __future__ import absolute_import, division, unicode_literals import unittest from html5lib.filters.whitespace import Filter from html5lib.constants import spaceCharacters spaceCharacters = "".join(spaceCharacters) try: unittest.TestCase.assertEqual except AttributeError: unittest.TestCase.assertEqual = unittest.TestCase.assertEquals class TestCase(unittest.TestCase): def runTest(self, input, expected): output = list(Filter(input)) errorMsg = "\n".join(["\n\nInput:", str(input), "\nExpected:", str(expected), "\nReceived:", str(output)]) self.assertEqual(output, expected, errorMsg) def runTestUnmodifiedOutput(self, input): self.runTest(input, input) def testPhrasingElements(self): self.runTestUnmodifiedOutput( [{"type": "Characters", "data": "This is a "}, {"type": "StartTag", "name": "span", "data": []}, {"type": "Characters", "data": "phrase"}, {"type": "EndTag", "name": "span", "data": []}, {"type": "SpaceCharacters", "data": " "}, {"type": "Characters", "data": "with"}, {"type": "SpaceCharacters", "data": " "}, {"type": "StartTag", "name": "em", "data": []}, {"type": "Characters", "data": "emphasised text"}, {"type": "EndTag", "name": "em", "data": []}, {"type": "Characters", "data": " and an "}, {"type": "StartTag", "name": "img", "data": [["alt", "image"]]}, {"type": "Characters", "data": "."}]) def testLeadingWhitespace(self): self.runTest( [{"type": "StartTag", "name": "p", "data": []}, {"type": "SpaceCharacters", "data": spaceCharacters}, {"type": "Characters", "data": "foo"}, {"type": "EndTag", "name": "p", "data": []}], [{"type": "StartTag", "name": "p", "data": []}, {"type": "SpaceCharacters", "data": " "}, {"type": "Characters", "data": "foo"}, {"type": "EndTag", "name": "p", "data": []}]) def testLeadingWhitespaceAsCharacters(self): self.runTest( [{"type": "StartTag", "name": "p", "data": []}, {"type": "Characters", "data": spaceCharacters + "foo"}, {"type": "EndTag", "name": "p", "data": []}], [{"type": "StartTag", "name": "p", "data": []}, 
{"type": "Characters", "data": " foo"}, {"type": "EndTag", "name": "p", "data": []}]) def testTrailingWhitespace(self): self.runTest( [{"type": "StartTag", "name": "p", "data": []}, {"type": "Characters", "data": "foo"}, {"type": "SpaceCharacters", "data": spaceCharacters}, {"type": "EndTag", "name": "p", "data": []}], [{"type": "StartTag", "name": "p", "data": []}, {"type": "Characters", "data": "foo"}, {"type": "SpaceCharacters", "data": " "}, {"type": "EndTag", "name": "p", "data": []}]) def testTrailingWhitespaceAsCharacters(self): self.runTest( [{"type": "StartTag", "name": "p", "data": []}, {"type": "Characters", "data": "foo" + spaceCharacters}, {"type": "EndTag", "name": "p", "data": []}], [{"type": "StartTag", "name": "p", "data": []}, {"type": "Characters", "data": "foo "}, {"type": "EndTag", "name": "p", "data": []}]) def testWhitespace(self): self.runTest( [{"type": "StartTag", "name": "p", "data": []}, {"type": "Characters", "data": "foo" + spaceCharacters + "bar"}, {"type": "EndTag", "name": "p", "data": []}], [{"type": "StartTag", "name": "p", "data": []}, {"type": "Characters", "data": "foo bar"}, {"type": "EndTag", "name": "p", "data": []}]) def testLeadingWhitespaceInPre(self): self.runTestUnmodifiedOutput( [{"type": "StartTag", "name": "pre", "data": []}, {"type": "SpaceCharacters", "data": spaceCharacters}, {"type": "Characters", "data": "foo"}, {"type": "EndTag", "name": "pre", "data": []}]) def testLeadingWhitespaceAsCharactersInPre(self): self.runTestUnmodifiedOutput( [{"type": "StartTag", "name": "pre", "data": []}, {"type": "Characters", "data": spaceCharacters + "foo"}, {"type": "EndTag", "name": "pre", "data": []}]) def testTrailingWhitespaceInPre(self): self.runTestUnmodifiedOutput( [{"type": "StartTag", "name": "pre", "data": []}, {"type": "Characters", "data": "foo"}, {"type": "SpaceCharacters", "data": spaceCharacters}, {"type": "EndTag", "name": "pre", "data": []}]) def testTrailingWhitespaceAsCharactersInPre(self): 
self.runTestUnmodifiedOutput( [{"type": "StartTag", "name": "pre", "data": []}, {"type": "Characters", "data": "foo" + spaceCharacters}, {"type": "EndTag", "name": "pre", "data": []}]) def testWhitespaceInPre(self): self.runTestUnmodifiedOutput( [{"type": "StartTag", "name": "pre", "data": []}, {"type": "Characters", "data": "foo" + spaceCharacters + "bar"}, {"type": "EndTag", "name": "pre", "data": []}]) def buildTestSuite(): return unittest.defaultTestLoader.loadTestsFromName(__name__) def main(): buildTestSuite() unittest.main() if __name__ == "__main__": main()
mpl-2.0
calaldees/KaraKara
website/tests/conftest.py
1
3304
import pytest import logging log = logging.getLogger(__name__) from .data.queue import queue from .data.tracks import tags, attachments, tracks, tracks_volume, track_unicode_special from .data.tracks_random import random_tracks from .data.auth import users # Markers ---------------------------------------------------------------------- def pytest_addoption(parser): parser.addoption("--runslow", action="store_true", help="run slow tests") parser.addoption("--ini_file", action="store", default="test.ini", help="pyramid ini file profile") def pytest_runtest_setup(item): if 'unimplemented' in item.keywords: #and not item.config.getoption("--runslow"): pytest.skip('unimplemented functionality') if 'unfinished' in item.keywords: pytest.skip('unfinished functionality') try: runslow = item.config.getoption("--runslow") except ValueError: runslow = False if 'slow' in item.keywords and not runslow: pytest.skip("need --runslow option to run") logging.basicConfig(level=logging.DEBUG) unimplemented = pytest.mark.unimplemented # Server dose not support the functionlity this test is asserting yet unfinished = pytest.mark.unfinished # The test is unfinished and currently is know to fail xfail = pytest.mark.xfail slow = pytest.mark.slow # Fixtures --------------------------------------------------------------------- @pytest.fixture(scope="session") def ini_file(request): return request.config.getoption("--ini_file") @pytest.fixture(scope="session") def app_ini(request, ini_file): """ Settings object derived from .ini file required to start Pyramid app """ from pyramid.paster import get_appsettings return get_appsettings(ini_file) @pytest.fixture(scope="session") def app(request, app_ini, DBSession): """ Start KaraKara application """ from webtest import TestApp from karakara import main as karakara_main app = TestApp(karakara_main({}, **app_ini)) def finalizer(): #print('tearDown WebApp') pass request.addfinalizer(finalizer) return app @pytest.fixture(scope="session") def 
DBSession(request, app_ini): """ Init/Clear database """ from karakara.model import DBSession, init_DBSession, init_DBSession_tables, clear_DBSession_tables init_DBSession(app_ini) clear_DBSession_tables() from karakara.model.init_data import init_initial_tags # TODO: this janky import before init is also present in __init__:main - can we remove the duplication? init_DBSession_tables() return DBSession @pytest.fixture(scope="session") def commit(request, DBSession): """ Save data attached to DBSession """ from karakara.model import commit return commit @pytest.fixture(scope="session") def cache_store(request, app): return app.app.registry.settings['cache.store'] @pytest.fixture() def registry_settings(request, app): """ registry.settings dictionary used in the running KaraKara app can be used with with patch.dict(settings, {'key': 'value'}): """ return app.app.registry.settings @pytest.fixture(autouse=True) def mock_send_websocket_message(mocker): return mocker.patch('karakara.websocket._send_websocket_message')
gpl-3.0
PokemonGoF/PokemonGo-Bot-Desktop
build/pywin/Lib/curses/textpad.py
212
7338
"""Simple textbox editing widget with Emacs-like keybindings.""" import curses import curses.ascii def rectangle(win, uly, ulx, lry, lrx): """Draw a rectangle with corners at the provided upper-left and lower-right coordinates. """ win.vline(uly+1, ulx, curses.ACS_VLINE, lry - uly - 1) win.hline(uly, ulx+1, curses.ACS_HLINE, lrx - ulx - 1) win.hline(lry, ulx+1, curses.ACS_HLINE, lrx - ulx - 1) win.vline(uly+1, lrx, curses.ACS_VLINE, lry - uly - 1) win.addch(uly, ulx, curses.ACS_ULCORNER) win.addch(uly, lrx, curses.ACS_URCORNER) win.addch(lry, lrx, curses.ACS_LRCORNER) win.addch(lry, ulx, curses.ACS_LLCORNER) class Textbox: """Editing widget using the interior of a window object. Supports the following Emacs-like key bindings: Ctrl-A Go to left edge of window. Ctrl-B Cursor left, wrapping to previous line if appropriate. Ctrl-D Delete character under cursor. Ctrl-E Go to right edge (stripspaces off) or end of line (stripspaces on). Ctrl-F Cursor right, wrapping to next line when appropriate. Ctrl-G Terminate, returning the window contents. Ctrl-H Delete character backward. Ctrl-J Terminate if the window is 1 line, otherwise insert newline. Ctrl-K If line is blank, delete it, otherwise clear to end of line. Ctrl-L Refresh screen. Ctrl-N Cursor down; move down one line. Ctrl-O Insert a blank line at cursor location. Ctrl-P Cursor up; move up one line. Move operations do nothing if the cursor is at an edge where the movement is not possible. 
The following synonyms are supported where possible: KEY_LEFT = Ctrl-B, KEY_RIGHT = Ctrl-F, KEY_UP = Ctrl-P, KEY_DOWN = Ctrl-N KEY_BACKSPACE = Ctrl-h """ def __init__(self, win, insert_mode=False): self.win = win self.insert_mode = insert_mode (self.maxy, self.maxx) = win.getmaxyx() self.maxy = self.maxy - 1 self.maxx = self.maxx - 1 self.stripspaces = 1 self.lastcmd = None win.keypad(1) def _end_of_line(self, y): """Go to the location of the first blank on the given line, returning the index of the last non-blank character.""" last = self.maxx while True: if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP: last = min(self.maxx, last+1) break elif last == 0: break last = last - 1 return last def _insert_printable_char(self, ch): (y, x) = self.win.getyx() if y < self.maxy or x < self.maxx: if self.insert_mode: oldch = self.win.inch() # The try-catch ignores the error we trigger from some curses # versions by trying to write into the lowest-rightmost spot # in the window. try: self.win.addch(ch) except curses.error: pass if self.insert_mode: (backy, backx) = self.win.getyx() if curses.ascii.isprint(oldch): self._insert_printable_char(oldch) self.win.move(backy, backx) def do_command(self, ch): "Process a single editing command." 
(y, x) = self.win.getyx() self.lastcmd = ch if curses.ascii.isprint(ch): if y < self.maxy or x < self.maxx: self._insert_printable_char(ch) elif ch == curses.ascii.SOH: # ^a self.win.move(y, 0) elif ch in (curses.ascii.STX,curses.KEY_LEFT, curses.ascii.BS,curses.KEY_BACKSPACE): if x > 0: self.win.move(y, x-1) elif y == 0: pass elif self.stripspaces: self.win.move(y-1, self._end_of_line(y-1)) else: self.win.move(y-1, self.maxx) if ch in (curses.ascii.BS, curses.KEY_BACKSPACE): self.win.delch() elif ch == curses.ascii.EOT: # ^d self.win.delch() elif ch == curses.ascii.ENQ: # ^e if self.stripspaces: self.win.move(y, self._end_of_line(y)) else: self.win.move(y, self.maxx) elif ch in (curses.ascii.ACK, curses.KEY_RIGHT): # ^f if x < self.maxx: self.win.move(y, x+1) elif y == self.maxy: pass else: self.win.move(y+1, 0) elif ch == curses.ascii.BEL: # ^g return 0 elif ch == curses.ascii.NL: # ^j if self.maxy == 0: return 0 elif y < self.maxy: self.win.move(y+1, 0) elif ch == curses.ascii.VT: # ^k if x == 0 and self._end_of_line(y) == 0: self.win.deleteln() else: # first undo the effect of self._end_of_line self.win.move(y, x) self.win.clrtoeol() elif ch == curses.ascii.FF: # ^l self.win.refresh() elif ch in (curses.ascii.SO, curses.KEY_DOWN): # ^n if y < self.maxy: self.win.move(y+1, x) if x > self._end_of_line(y+1): self.win.move(y+1, self._end_of_line(y+1)) elif ch == curses.ascii.SI: # ^o self.win.insertln() elif ch in (curses.ascii.DLE, curses.KEY_UP): # ^p if y > 0: self.win.move(y-1, x) if x > self._end_of_line(y-1): self.win.move(y-1, self._end_of_line(y-1)) return 1 def gather(self): "Collect and return the contents of the window." 
result = "" for y in range(self.maxy+1): self.win.move(y, 0) stop = self._end_of_line(y) if stop == 0 and self.stripspaces: continue for x in range(self.maxx+1): if self.stripspaces and x > stop: break result = result + chr(curses.ascii.ascii(self.win.inch(y, x))) if self.maxy > 0: result = result + "\n" return result def edit(self, validate=None): "Edit in the widget window and collect the results." while 1: ch = self.win.getch() if validate: ch = validate(ch) if not ch: continue if not self.do_command(ch): break self.win.refresh() return self.gather() if __name__ == '__main__': def test_editbox(stdscr): ncols, nlines = 9, 4 uly, ulx = 15, 20 stdscr.addstr(uly-2, ulx, "Use Ctrl-G to end editing.") win = curses.newwin(nlines, ncols, uly, ulx) rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols) stdscr.refresh() return Textbox(win).edit() str = curses.wrapper(test_editbox) print 'Contents of text box:', repr(str)
mit
jendap/tensorflow
tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py
25
59018
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the GTFlow prediction Ops. The tests cover tree traversal and additive models for single and multi class problems. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.boosted_trees.proto import learner_pb2 from tensorflow.contrib.boosted_trees.proto import tree_config_pb2 from tensorflow.contrib.boosted_trees.python.ops import model_ops from tensorflow.contrib.boosted_trees.python.ops import prediction_ops from tensorflow.python.framework import test_util from tensorflow.python.ops import resources from tensorflow.python.platform import googletest def _append_to_leaf(leaf, c_id, w): """Helper method for building tree leaves. Appends weight contributions for the given class index to a leaf node. Args: leaf: leaf node to append to. c_id: class Id for the weight update. w: weight contribution value. """ leaf.sparse_vector.index.append(c_id) leaf.sparse_vector.value.append(w) def _append_multi_values_to_leaf(leaf, c_ids, w): """Helper method for building tree leaves with sparse vector of values. Appends weight contributions for the given class index to a leaf node. Args: leaf: leaf node to append to. 
c_ids: list of class ids w: corresponding weight contributions for the classes in c_ids """ for i in range(len(c_ids)): leaf.sparse_vector.index.append(c_ids[i]) leaf.sparse_vector.value.append(w[i]) def _append_multi_values_to_dense_leaf(leaf, w): """Helper method for building tree leaves with dense vector of values. Appends weight contributions to a leaf. w is assumed to be for all classes. Args: leaf: leaf node to append to. w: corresponding weight contributions for all classes. """ for x in w: leaf.vector.value.append(x) def _set_float_split(split, feat_col, thresh, l_id, r_id, feature_dim_id=None): """Helper method for building tree float splits. Sets split feature column, threshold and children. Args: split: split node to update. feat_col: feature column for the split. thresh: threshold to split on forming rule x <= thresh. l_id: left child Id. r_id: right child Id. feature_dim_id: dimension of the feature column to be used in the split. """ split.feature_column = feat_col split.threshold = thresh split.left_id = l_id split.right_id = r_id if feature_dim_id is not None: split.dimension_id = feature_dim_id def _set_float_oblivious_split(split, feat_col, thresh): """Helper method for building tree float splits. Sets split feature column and threshold. Args: split: split node to update. feat_col: feature column for the split. thresh: threshold to split on forming rule x <= thresh. """ split.feature_column = feat_col split.threshold = thresh def _set_categorical_id_split(split, feat_col, feat_id, l_id, r_id): """Helper method for building tree categorical id splits. Sets split feature column, feature id and children. Args: split: categorical id split node. feat_col: feature column for the split. feat_id: feature id forming rule x == id. l_id: left child Id. r_id: right child Id. 
""" split.feature_column = feat_col split.feature_id = feat_id split.left_id = l_id split.right_id = r_id class PredictionOpsTest(test_util.TensorFlowTestCase): def setUp(self): """Sets up the prediction tests. Creates, a batch of two examples having three dense float, two sparse float single valued, one sparse float multidimensional and one sparse int features. The data looks like the following: |Instance |Dense0 |Dense1 |Dense2 |SparseF0 |SparseF1 |SparseI0 |SparseM | 0 | 7 | 1 | 2 | -3 | | 9,1 | __, 5.0 | 1 | -2 | 2 | 0.5 | | 4 | | 3, ___ """ super(PredictionOpsTest, self).setUp() self._dense_float_tensor1 = np.array([[7.0], [-2.0]]) self._dense_float_tensor2 = np.array([[1.0], [2.0]]) self._dense_float_tensor3 = np.array([[2.0], [0.5]]) self._sparse_float_indices1 = np.array([[0, 0]]) self._sparse_float_values1 = np.array([-3.0]) self._sparse_float_shape1 = np.array([2, 1]) self._sparse_float_indices2 = np.array([[1, 0]]) self._sparse_float_values2 = np.array([4.0]) self._sparse_float_shape2 = np.array([2, 1]) # Multi dimensional sparse float self._sparse_float_indices_m = np.array([[0, 1], [1, 0]]) self._sparse_float_values_m = np.array([5.0, 3.0]) self._sparse_float_shape_m = np.array([2, 2]) self._sparse_int_indices1 = np.array([[0, 0], [0, 1]]) self._sparse_int_values1 = np.array([9, 1]) self._sparse_int_shape1 = np.array([2, 2]) self._seed = 123 def _get_predictions(self, tree_ensemble_handle, learner_config, apply_dropout=False, apply_averaging=False, center_bias=False, reduce_dim=False): return prediction_ops.gradient_trees_prediction( tree_ensemble_handle, self._seed, [self._dense_float_tensor1], [self._sparse_float_indices1, self._sparse_float_indices2], [self._sparse_float_values1, self._sparse_float_values2], [self._sparse_float_shape1, self._sparse_float_shape2], [self._sparse_int_indices1], [self._sparse_int_values1], [self._sparse_int_shape1], learner_config=learner_config, apply_dropout=apply_dropout, apply_averaging=apply_averaging, 
center_bias=center_bias, reduce_dim=reduce_dim) def _get_predictions_oblivious_case(self, tree_ensemble_handle, learner_config, apply_dropout=False, apply_averaging=False, center_bias=False, reduce_dim=False): return prediction_ops.gradient_trees_prediction( tree_ensemble_handle, self._seed, [ self._dense_float_tensor1, self._dense_float_tensor2, self._dense_float_tensor3 ], [], [], [], [], [], [], learner_config=learner_config, apply_dropout=apply_dropout, apply_averaging=apply_averaging, center_bias=center_bias, reduce_dim=reduce_dim) def testEmptyEnsemble(self): with self.cached_session(): # Empty tree ensenble. tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="empty") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) self.assertAllEqual([[0], [0]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) def testBiasEnsembleSingleClass(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() tree = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="bias") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. 
learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) self.assertAllClose([[-0.4], [-0.4]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) def testBiasEnsembleMultiClass(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() tree = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True leaf = tree.nodes.add().leaf _append_to_leaf(leaf, 0, -0.4) _append_to_leaf(leaf, 1, 0.9) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="multiclass") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 3 result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) self.assertAllClose([[-0.4, 0.9], [-0.4, 0.9]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) def testFullEnsembleSingleClass(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree. tree1 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4) # Depth 3 tree. 
tree2 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2) _set_float_split(tree2.nodes.add() .sparse_float_binary_split_default_left.split, 0, -20.0, 3, 4) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5) _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2) _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split, 0, 9, 5, 6) _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) # The first example will get bias -0.4 from first tree and # leaf 4 payload of -0.9 hence -1.3, the second example will # get the same bias -0.4 and leaf 3 payload (sparse feature missing) # of 1.2 hence 0.8. self.assertAllClose([[-1.3], [0.8]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) def testObliviousEnsemble(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree. tree1 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4) # Depth 3 tree. 
tree2 = tree_ensemble_config.trees.add() _set_float_oblivious_split( tree2.nodes.add().oblivious_dense_float_binary_split, 0, 5.0) _set_float_oblivious_split( tree2.nodes.add().oblivious_dense_float_binary_split, 1, 3.0) _set_float_oblivious_split( tree2.nodes.add().oblivious_dense_float_binary_split, 2, 1.0) for i in range(1, 9): _append_to_leaf(tree2.nodes.add().leaf, 0, i / 10.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 result, dropout_info = self._get_predictions_oblivious_case( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) # The first example will get bias -0.4 from first tree and 0.6 from # the 5th leaf of the second tree corresponding to node_id = 8, hence a # prediction of 0.2. # The second example will get bias -0.4 and 0.1 from the 0th leaf of the # second tree corresponding to node_id = 3, hence a prediction of -0.3 self.assertAllClose([[0.2], [-0.3]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) def testFullEnsembleWithMultidimensionalSparseSingleClass(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree. tree1 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4) # Depth 3 tree. tree2 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True # Use feature column 2 (sparse multidimensional), split on first value # node 0. 
_set_float_split( tree2.nodes.add().sparse_float_binary_split_default_right.split, 2, 7.0, 1, 2, feature_dim_id=0) # Leafs split on second dimension of sparse multidimensional feature. # Node 1. _set_float_split( tree2.nodes.add().sparse_float_binary_split_default_left.split, 2, 4.5, 3, 4, feature_dim_id=1) # Node 2. _set_float_split( tree2.nodes.add().sparse_float_binary_split_default_right.split, 2, 9, 5, 6, feature_dim_id=1) # Node 3. _append_to_leaf(tree2.nodes.add().leaf, 0, 0.6) # Node 4. _append_to_leaf(tree2.nodes.add().leaf, 0, 1.3) # Node 5. _append_to_leaf(tree2.nodes.add().leaf, 0, -0.1) # Node 6. _append_to_leaf(tree2.nodes.add().leaf, 0, 0.8) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 result, dropout_info = prediction_ops.gradient_trees_prediction( tree_ensemble_handle, self._seed, [self._dense_float_tensor1], [ self._sparse_float_indices1, self._sparse_float_indices2, self._sparse_float_indices_m ], [ self._sparse_float_values1, self._sparse_float_values2, self._sparse_float_values_m ], [ self._sparse_float_shape1, self._sparse_float_shape2, self._sparse_float_shape_m ], [self._sparse_int_indices1], [self._sparse_int_values1], [self._sparse_int_shape1], learner_config=learner_config.SerializeToString(), apply_dropout=False, apply_averaging=False, center_bias=False, reduce_dim=True) # The first example will get bias -0.4 from first tree and # leaf 5 payload of -0.1 hence -0.5, the second example will # get the same bias -0.4 and leaf 3 payload (0.6) hence 0.2 self.assertAllClose([[-0.5], [0.2]], result.eval()) # Empty dropout. 
self.assertAllEqual([[], []], dropout_info.eval()) def testExcludeNonFinalTree(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree. tree1 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4) # Depth 3 tree. tree2 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = False _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2) _set_float_split(tree2.nodes.add() .sparse_float_binary_split_default_left.split, 0, -20.0, 3, 4) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5) _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2) _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split, 0, 9, 5, 6) _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) # All the examples should get only the bias since the second tree is # non-finalized self.assertAllClose([[-0.4], [-0.4]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) def testIncludeNonFinalTree(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree. 
tree1 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4) # Depth 3 tree. tree2 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = False _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2) _set_float_split(tree2.nodes.add() .sparse_float_binary_split_default_left.split, 0, -20.0, 3, 4) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5) _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2) _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split, 0, 9, 5, 6) _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) # The first example will get bias -0.4 from first tree and # leaf 4 payload of -0.9 hence -1.3, the second example will # get the same bias -0.4 and leaf 3 payload (sparse feature missing) # of 1.2 hence 0.8. Note that the non-finalized tree is included. self.assertAllClose([[-1.3], [0.8]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) def testMetadataMissing(self): # Sometimes we want to do prediction on trees that are not added to ensemble # (for example in with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree. 
tree1 = tree_ensemble_config.trees.add() _append_to_leaf(tree1.nodes.add().leaf, 0, -0.4) # Depth 3 tree. tree2 = tree_ensemble_config.trees.add() # We are not setting the tree_ensemble_config.tree_metadata in this test. _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2) _set_float_split(tree2.nodes.add() .sparse_float_binary_split_default_left.split, 0, -20.0, 3, 4) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5) _append_to_leaf(tree2.nodes.add().leaf, 0, 1.2) _set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split, 0, 9, 5, 6) _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.7) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) # The first example will get bias -0.4 from first tree and # leaf 4 payload of -0.9 hence -1.3, the second example will # get the same bias -0.4 and leaf 3 payload (sparse feature missing) # of 1.2 hence 0.8. self.assertAllClose([[-1.3], [0.8]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) # For TREE_PER_CLASS strategy, predictions size is num_classes-1 def testFullEnsembleMultiClassTreePerClassStrategy(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree only for second class. tree1 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree1.nodes.add().leaf, 1, -0.2) # Depth 2 tree. 
tree2 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _set_float_split(tree2.nodes.add() .sparse_float_binary_split_default_right.split, 1, 4.0, 1, 2) _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5) _append_to_leaf(tree2.nodes.add().leaf, 1, 1.2) _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="ensemble_multi_class") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 3 learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.TREE_PER_CLASS) result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=True) # The first example will get bias class 1 -0.2 from first tree and # leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2], # the second example will get the same bias class 1 -0.2 and leaf 3 # payload of class 1 1.2 hence [0.0, 1.0]. self.assertAllClose([[0.5, -0.2], [0, 1.0]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) # For tree-per-class multiclass handling strategies, predictions vec # will have the size of the number of classes. # This test is when leafs have SPARSE weights stored (class id and # contribution). def testFullEnsembleMultiNotClassTreePerClassStrategySparseVector(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree only for second class. 
tree1 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree1.nodes.add().leaf, 1, -0.2) # Depth 2 tree. tree2 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _set_float_split(tree2.nodes.add() .sparse_float_binary_split_default_right.split, 1, 4.0, 1, 2) _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4) _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5) _append_multi_values_to_leaf(tree2.nodes.add().leaf, [1, 2], [1.2, -0.7]) _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="ensemble_multi_class") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 3 learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.FULL_HESSIAN) result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=False) # The first example will get bias class 1 -0.2 from first tree and # leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2], # the second example will get the same bias class 1 -0.2 and leaf 3 # payload of class 1 1.2 and class 2-0.7 hence [0.0, 1.0, -0.7]. self.assertAllClose([[0.5, -0.2, 0.0], [0, 1.0, -0.7]], result.eval()) # Empty dropout. self.assertAllEqual([[], []], dropout_info.eval()) # For all non-tree-per class multiclass handling strategies, predictions vec # will have the size of the number of classes. 
# This test is when leafs have DENSE weights stored (weight for each class) def testFullEnsembleMultiNotClassTreePerClassStrategyDenseVector(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Bias tree only for second class. tree1 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_multi_values_to_dense_leaf(tree1.nodes.add().leaf, [0, -0.2, -2]) # Depth 2 tree. tree2 = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _set_float_split(tree2.nodes.add() .sparse_float_binary_split_default_right.split, 1, 4.0, 1, 2) _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4) _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0.5, 0, 0]) _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0, 1.2, -0.7]) _append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [-0.9, 0, 0]) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="ensemble_multi_class") resources.initialize_resources(resources.shared_resources()).run() # Prepare learner config. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 3 learner_config.multi_class_strategy = ( learner_pb2.LearnerConfig.FULL_HESSIAN) result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), reduce_dim=False) # The first example will get bias class 1 -0.2 and -2 for class 2 from # first tree and leaf 2 payload (sparse feature missing) of 0.5 hence # 0.5, -0.2], the second example will get the same bias and leaf 3 payload # of class 1 1.2 and class 2-0.7 hence [0.0, 1.0, -2.7]. self.assertAllClose([[0.5, -0.2, -2.0], [0, 1.0, -2.7]], result.eval()) # Empty dropout. 
self.assertAllEqual([[], []], dropout_info.eval()) def testDropout(self): with self.cached_session(): # Empty tree ensenble. tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Add 1000 trees with some weights. for i in range(0, 999): tree = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_weights.append(i + 1) # Prepare learner/dropout config. learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5 learner_config.learning_rate_tuner.dropout.learning_rate = 1.0 learner_config.num_classes = 2 # Apply dropout. tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="existing") resources.initialize_resources(resources.shared_resources()).run() result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=False, reduce_dim=True) # We expect approx 500 trees were dropped. dropout_info = dropout_info.eval() self.assertIn(dropout_info[0].size, range(400, 601)) self.assertEqual(dropout_info[0].size, dropout_info[1].size) for i in range(dropout_info[0].size): dropped_index = dropout_info[0][i] dropped_weight = dropout_info[1][i] # We constructed the trees so tree number + 1 is the tree weight, so # we can check here the weights for dropped trees. self.assertEqual(dropped_index + 1, dropped_weight) # Don't apply dropout. 
result_no_dropout, no_dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=False, apply_averaging=False, center_bias=False, reduce_dim=True) self.assertEqual(result.eval().size, result_no_dropout.eval().size) for i in range(result.eval().size): self.assertNotEqual(result.eval()[i], result_no_dropout.eval()[i]) # We expect none of the trees were dropped. self.assertAllEqual([[], []], no_dropout_info.eval()) def testDropoutCenterBiasNoGrowingMeta(self): # This is for normal non-batch mode where ensemble does not contain the tree # that is being built currently. num_trees = 10 with self.cached_session(): # Empty tree ensemble. tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Add 10 trees with some weights. for i in range(0, num_trees): tree = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_weights.append(i + 1) # Prepare learner/dropout config. learner_config = learner_pb2.LearnerConfig() # Drop all the trees. 
learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0 learner_config.learning_rate_tuner.dropout.learning_rate = 1.0 learner_config.num_classes = 2 tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="existing") resources.initialize_resources(resources.shared_resources()).run() result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=False, reduce_dim=True) result_center, dropout_info_center = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=True, reduce_dim=True) dropout_info = dropout_info.eval() dropout_info_center = dropout_info_center.eval() # With centering, the bias tree is not dropped. num_dropped = dropout_info[0].size self.assertEqual(num_dropped, num_trees) num_dropped_center = dropout_info_center[0].size self.assertEqual(num_dropped_center, num_trees - 1) result = result.eval() result_center = result_center.eval() for i in range(result.size): self.assertNotEqual(result[i], result_center[i]) # First dropped tree is a bias tree 0. self.assertEqual(0, dropout_info[0][0]) # Last dropped tree is the last tree. self.assertEqual(num_trees - 1, dropout_info[0][num_dropped - 1]) # First dropped tree is a tree 1. self.assertEqual(1, dropout_info_center[0][0]) # Last dropped tree is the last tree. self.assertEqual(num_trees - 1, dropout_info_center[0][num_dropped_center - 1]) def testDropoutCenterBiasWithGrowingMeta(self): # This is batch mode where ensemble already contains the tree that we are # building. This tree should never be dropped. num_trees = 10 with self.cached_session(): # Empty tree ensemble. tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Add 10 trees with some weights. 
for i in range(0, num_trees): tree = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_weights.append(i + 1) # Add growing metadata to indicate batch mode. tree_ensemble_config.growing_metadata.num_trees_attempted = num_trees tree_ensemble_config.growing_metadata.num_layers_attempted = num_trees # Prepare learner/dropout config. learner_config = learner_pb2.LearnerConfig() # Drop all the trees. learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0 learner_config.learning_rate_tuner.dropout.learning_rate = 1.0 learner_config.num_classes = 2 tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="existing") resources.initialize_resources(resources.shared_resources()).run() result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=False, reduce_dim=True) result_center, dropout_info_center = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=True, reduce_dim=True) dropout_info = dropout_info.eval() dropout_info_center = dropout_info_center.eval() # Last tree is never dropped, the bias tree can be dropped. num_dropped = dropout_info[0].size self.assertEqual(num_dropped, num_trees - 1) num_dropped_center = dropout_info_center[0].size self.assertEqual(num_dropped_center, num_trees - 2) result = result.eval() result_center = result_center.eval() for i in range(result.size): self.assertNotEqual(result[i], result_center[i]) # First dropped tree is a bias tree 0. self.assertEqual(0, dropout_info[0][0]) # Last dropped tree is not the last tree (not tree num_trees-1). 
self.assertNotEqual(num_trees - 1, dropout_info[0][num_dropped - 1]) # First dropped tree is a tree 1. self.assertEqual(1, dropout_info_center[0][0]) # Last dropped tree is not the last tree in ensemble. self.assertNotEqual(num_trees - 1, dropout_info_center[0][num_dropped_center - 1]) def testDropoutSeed(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Add 10 trees with some weights. for i in range(0, 999): tree = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_weights.append(i + 1) # Prepare learner/dropout config. learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5 learner_config.learning_rate_tuner.dropout.learning_rate = 1.0 learner_config.num_classes = 2 tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="empty") resources.initialize_resources(resources.shared_resources()).run() _, dropout_info_1 = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=False, reduce_dim=True) _, dropout_info_2 = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=False, reduce_dim=True) # Different seed. 
_, dropout_info_3 = prediction_ops.gradient_trees_prediction( tree_ensemble_handle, 112314, [self._dense_float_tensor1], [self._sparse_float_indices1, self._sparse_float_indices2], [self._sparse_float_values1, self._sparse_float_values2], [self._sparse_float_shape1, self._sparse_float_shape2], [self._sparse_int_indices1], [self._sparse_int_values1], [self._sparse_int_shape1], learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=False, reduce_dim=True) # First seed with centering bias. _, dropout_info_4 = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=True, reduce_dim=True) # The same seed returns the same results. self.assertAllEqual(dropout_info_1.eval(), dropout_info_2.eval()) # Different seeds give diff results. self.assertNotEqual(dropout_info_3.eval().shape, dropout_info_2.eval().shape) # With centering bias and the same seed does not give the same result. self.assertNotEqual(dropout_info_4.eval(), dropout_info_1.eval()) # With centering bias has 1 less tree dropped (bias tree is not dropped). self.assertEqual( len(dropout_info_4.eval()[0]) + 1, len(dropout_info_1.eval()[0])) def testDropOutZeroProb(self): with self.cached_session(): # Empty tree ensemble. tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Add 1000 trees with some weights. for i in range(0, 999): tree = tree_ensemble_config.trees.add() tree_ensemble_config.tree_metadata.add().is_finalized = True _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_weights.append(i + 1) # Dropout with 0 probability. learner_config = learner_pb2.LearnerConfig() learner_config.learning_rate_tuner.dropout.dropout_probability = 0.0 learner_config.learning_rate_tuner.dropout.learning_rate = 1.0 learner_config.num_classes = 2 # Apply dropout, but expect nothing dropped. 
tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="existing") resources.initialize_resources(resources.shared_resources()).run() result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=True, apply_averaging=False, center_bias=False, reduce_dim=True) result_no_dropout, _ = self._get_predictions( tree_ensemble_handle, learner_config=learner_config.SerializeToString(), apply_dropout=False, apply_averaging=False, center_bias=False, reduce_dim=True) self.assertAllEqual([[], []], dropout_info.eval()) self.assertAllClose(result.eval(), result_no_dropout.eval()) def testAveragingAllTrees(self): with self.cached_session(): # Empty tree ensemble. tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() adjusted_tree_ensemble_config = ( tree_config_pb2.DecisionTreeEnsembleConfig()) # Add 100 trees with some weights. # When averaging is applied, the tree weights will essentially change to # 1, 98/99, 97/99 etc, so lets create the ensemble with such weights. # too total_num = 100 for i in range(0, total_num): tree = tree_ensemble_config.trees.add() _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_metadata.add().is_finalized = True tree_ensemble_config.tree_weights.append(1.0) # This is how the weight will look after averaging copy_tree = adjusted_tree_ensemble_config.trees.add() _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4) adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True adjusted_tree_ensemble_config.tree_weights.append( 1.0 * (total_num - i) / total_num) # Prepare learner config WITH AVERAGING. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 learner_config.averaging_config.average_last_percent_trees = 1.0 # No averaging config. 
learner_config_no_averaging = learner_pb2.LearnerConfig() learner_config_no_averaging.num_classes = 2 tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="existing") # This is how our ensemble will "look" during averaging adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString( ), name="adjusted") resources.initialize_resources(resources.shared_resources()).run() # Do averaging. result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config.SerializeToString(), apply_averaging=True, reduce_dim=True) pattern_result, pattern_dropout_info = self._get_predictions( adjusted_tree_ensemble_handle, learner_config_no_averaging.SerializeToString(), apply_averaging=False, reduce_dim=True) self.assertAllEqual(result.eval(), pattern_result.eval()) self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval()) def testAveragingSomeTrees(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() adjusted_tree_ensemble_config = ( tree_config_pb2.DecisionTreeEnsembleConfig()) # Add 1000 trees with some weights. 
total_num = 100 num_averaged = 25 j = 0 for i in range(0, total_num): tree = tree_ensemble_config.trees.add() _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_metadata.add().is_finalized = True tree_ensemble_config.tree_weights.append(1.0) # This is how the weight will look after averaging - we are adjusting # the weights of the last 25 trees copy_tree = adjusted_tree_ensemble_config.trees.add() _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4) adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True if i >= 75: adjusted_tree_ensemble_config.tree_weights.append( 1.0 * (num_averaged - j) / num_averaged) j += 1 else: adjusted_tree_ensemble_config.tree_weights.append(1.0) # Prepare learner config WITH AVERAGING. learner_config_1 = learner_pb2.LearnerConfig() learner_config_1.num_classes = 2 learner_config_1.averaging_config.average_last_percent_trees = 0.25 # This is equivalent. learner_config_2 = learner_pb2.LearnerConfig() learner_config_2.num_classes = 2 learner_config_2.averaging_config.average_last_n_trees = 25 # No averaging config. 
learner_config_no_averaging = learner_pb2.LearnerConfig() learner_config_no_averaging.num_classes = 2 tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="existing") # This is how our ensemble will "look" during averaging adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString( ), name="adjusted") resources.initialize_resources(resources.shared_resources()).run() result_1, dropout_info_1 = self._get_predictions( tree_ensemble_handle, learner_config_1.SerializeToString(), apply_averaging=True, reduce_dim=True) result_2, dropout_info_2 = self._get_predictions( tree_ensemble_handle, learner_config_2.SerializeToString(), apply_averaging=True, reduce_dim=True) pattern_result, pattern_dropout_info = self._get_predictions( adjusted_tree_ensemble_handle, learner_config_no_averaging.SerializeToString(), apply_averaging=False, reduce_dim=True) self.assertAllEqual(result_1.eval(), pattern_result.eval()) self.assertAllEqual(result_2.eval(), pattern_result.eval()) self.assertAllEqual(dropout_info_1.eval(), pattern_dropout_info.eval()) self.assertAllEqual(dropout_info_2.eval(), pattern_dropout_info.eval()) def testAverageMoreThanNumTreesExist(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() adjusted_tree_ensemble_config = ( tree_config_pb2.DecisionTreeEnsembleConfig()) # When we say to average over more trees than possible, it is averaging # across all trees. 
total_num = 100 for i in range(0, total_num): tree = tree_ensemble_config.trees.add() _append_to_leaf(tree.nodes.add().leaf, 0, -0.4) tree_ensemble_config.tree_metadata.add().is_finalized = True tree_ensemble_config.tree_weights.append(1.0) # This is how the weight will look after averaging copy_tree = adjusted_tree_ensemble_config.trees.add() _append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4) adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True adjusted_tree_ensemble_config.tree_weights.append( 1.0 * (total_num - i) / total_num) # Prepare learner config WITH AVERAGING. learner_config = learner_pb2.LearnerConfig() learner_config.num_classes = 2 # We have only 100 trees but we ask to average over 250. learner_config.averaging_config.average_last_n_trees = 250 # No averaging config. learner_config_no_averaging = learner_pb2.LearnerConfig() learner_config_no_averaging.num_classes = 2 tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="existing") # This is how our ensemble will "look" during averaging adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString( ), name="adjusted") resources.initialize_resources(resources.shared_resources()).run() result, dropout_info = self._get_predictions( tree_ensemble_handle, learner_config.SerializeToString(), apply_averaging=True, reduce_dim=True) pattern_result, pattern_dropout_info = self._get_predictions( adjusted_tree_ensemble_handle, learner_config_no_averaging.SerializeToString(), apply_averaging=False, reduce_dim=True) self.assertAllEqual(result.eval(), pattern_result.eval()) self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval()) class PartitionExamplesOpsTest(test_util.TensorFlowTestCase): def setUp(self): """Sets up the prediction tests. 
Create a batch of two examples having three dense float, two sparse float and one sparse int features. The data looks like the following: |Instance |Dense0 |Dense1 |Dense2 |SparseF0 |SparseF1 |SparseI0 | | 0 | 7 | 1 | 2 | -3 | | 9,1 | | 1 | -2 | 2 | 0.5 | | 4 | | """ super(PartitionExamplesOpsTest, self).setUp() self._dense_float_tensor1 = np.array([[7.0], [-2.0]]) self._dense_float_tensor2 = np.array([[1.0], [2.0]]) self._dense_float_tensor3 = np.array([[2.0], [0.5]]) self._sparse_float_indices1 = np.array([[0, 0]]) self._sparse_float_values1 = np.array([-3.0]) self._sparse_float_shape1 = np.array([2, 1]) self._sparse_float_indices2 = np.array([[1, 0]]) self._sparse_float_values2 = np.array([4.0]) self._sparse_float_shape2 = np.array([2, 1]) self._sparse_int_indices1 = np.array([[0, 0], [0, 1]]) self._sparse_int_values1 = np.array([9, 1]) self._sparse_int_shape1 = np.array([2, 2]) def testEnsembleEmpty(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() result = prediction_ops.gradient_trees_partition_examples( tree_ensemble_handle, [self._dense_float_tensor1], [self._sparse_float_indices1, self._sparse_float_indices2], [self._sparse_float_values1, self._sparse_float_values2], [self._sparse_float_shape1, self._sparse_float_shape2], [self._sparse_int_indices1], [self._sparse_int_values1], [self._sparse_int_shape1]) self.assertAllEqual([0, 0], result.eval()) def testTreeNonFinalized(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Depth 3 tree. 
tree1 = tree_ensemble_config.trees.add() _set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2) _set_float_split(tree1.nodes.add() .sparse_float_binary_split_default_left.split, 0, -20.0, 3, 4) _append_to_leaf(tree1.nodes.add().leaf, 0, 0.2) _append_to_leaf(tree1.nodes.add().leaf, 0, 0.3) _set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split, 0, 9, 5, 6) _append_to_leaf(tree1.nodes.add().leaf, 0, 0.5) _append_to_leaf(tree1.nodes.add().leaf, 0, 0.6) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_metadata.add().is_finalized = False tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() result = prediction_ops.gradient_trees_partition_examples( tree_ensemble_handle, [self._dense_float_tensor1], [self._sparse_float_indices1, self._sparse_float_indices2], [self._sparse_float_values1, self._sparse_float_values2], [self._sparse_float_shape1, self._sparse_float_shape2], [self._sparse_int_indices1], [self._sparse_int_values1], [self._sparse_int_shape1]) self.assertAllEqual([5, 3], result.eval()) def testTreeFinalized(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Depth 3 tree. 
tree1 = tree_ensemble_config.trees.add() _set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2) _set_float_split(tree1.nodes.add() .sparse_float_binary_split_default_left.split, 0, -20.0, 3, 4) _append_to_leaf(tree1.nodes.add().leaf, 0, 0.2) _append_to_leaf(tree1.nodes.add().leaf, 0, 0.3) _set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split, 0, 9, 5, 6) _append_to_leaf(tree1.nodes.add().leaf, 0, 0.5) _append_to_leaf(tree1.nodes.add().leaf, 0, 0.6) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_metadata.add().is_finalized = True tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() result = prediction_ops.gradient_trees_partition_examples( tree_ensemble_handle, [self._dense_float_tensor1], [self._sparse_float_indices1, self._sparse_float_indices2], [self._sparse_float_values1, self._sparse_float_values2], [self._sparse_float_shape1, self._sparse_float_shape2], [self._sparse_int_indices1], [self._sparse_int_values1], [self._sparse_int_shape1]) self.assertAllEqual([0, 0], result.eval()) def testObliviousTreeNonFinalized(self): with self.cached_session(): tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig() # Depth 3 tree. 
tree1 = tree_ensemble_config.trees.add() _set_float_oblivious_split( tree1.nodes.add().oblivious_dense_float_binary_split, 0, 5.0) _set_float_oblivious_split( tree1.nodes.add().oblivious_dense_float_binary_split, 1, 3.0) _set_float_oblivious_split( tree1.nodes.add().oblivious_dense_float_binary_split, 2, 1.0) for i in range(1, 9): _append_to_leaf(tree1.nodes.add().leaf, 0, i / 10.0) tree_ensemble_config.tree_weights.append(1.0) tree_ensemble_config.tree_metadata.add().is_finalized = False tree_ensemble_handle = model_ops.tree_ensemble_variable( stamp_token=0, tree_ensemble_config=tree_ensemble_config.SerializeToString(), name="full_ensemble") resources.initialize_resources(resources.shared_resources()).run() result = prediction_ops.gradient_trees_partition_examples( tree_ensemble_handle, [ self._dense_float_tensor1, self._dense_float_tensor2, self._dense_float_tensor3 ], [], [], [], [], [], []) # The first example goes right, left, right in the tree and the second # example goes lef, left, left. Since the depth of the tree is 3, the # partition id's are as follows: # First example: 3 + 5 = 8 # Second exampel: 3 + 0 = 3 self.assertAllEqual([8, 3], result.eval()) if __name__ == "__main__": googletest.main()
apache-2.0
EmanueleCannizzaro/scons
test/MSVC/batch-longlines.py
1
2071
#!/usr/bin/env python # # Copyright (c) 2001 - 2016 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "test/MSVC/batch-longlines.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog" """ Verify operation of Visual C/C++ batch builds with long lines. Only runs on Windows. """ import TestSCons test = TestSCons.TestSCons() test.skip_if_not_msvc() _python_ = TestSCons._python_ for i in xrange(1,200): test.write('source-file-with-quite-a-long-name-maybe-unrealistic-but-who-cares-%05d.cxx'%i, '/* source file %d */\nint var%d;\n'%(i,i)) test.write('SConstruct', """ env = Environment(tools=['msvc', 'mslink'], MSVC_BATCH=ARGUMENTS.get('MSVC_BATCH')) env.SharedLibrary('mylib', Glob('source*.cxx')) """ % locals()) test.run(arguments = 'MSVC_BATCH=1 .') test.must_exist('mylib.dll') test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
mit